-rw-r--r--.gitignore6
-rw-r--r--MAINTAINERS31
-rw-r--r--Makefile61
-rw-r--r--Makefile.dis3
-rw-r--r--Makefile.hw7
-rw-r--r--Makefile.objs4
-rw-r--r--Makefile.target9
-rw-r--r--Makefile.user4
-rw-r--r--arch_init.c234
-rw-r--r--arch_init.h1
-rw-r--r--audio/audio.c1
-rw-r--r--bitops.h116
-rw-r--r--block-migration.c161
-rw-r--r--block.c471
-rw-r--r--block.h76
-rw-r--r--block/blkdebug.c107
-rw-r--r--block/curl.c6
-rw-r--r--block/iscsi.c155
-rw-r--r--block/nbd.c18
-rw-r--r--block/qcow.c4
-rw-r--r--block/qcow2-cluster.c5
-rw-r--r--block/qcow2-refcount.c7
-rw-r--r--block/qcow2-snapshot.c6
-rw-r--r--block/qcow2.c146
-rw-r--r--block/qcow2.h21
-rw-r--r--block/qed.c12
-rw-r--r--block/raw.c2
-rw-r--r--block/rbd.c4
-rw-r--r--block/sheepdog.c264
-rw-r--r--block/vvfat.c58
-rw-r--r--block_int.h27
-rw-r--r--blockdev.c42
-rw-r--r--blockdev.h5
-rw-r--r--bsd-user/main.c4
-rwxr-xr-xconfigure199
-rw-r--r--console.c26
-rw-r--r--coroutine-ucontext.c32
-rw-r--r--cpu-all.h1
-rw-r--r--cpu-common.h4
-rw-r--r--cpu-defs.h11
-rw-r--r--cpu-exec.c37
-rw-r--r--cpus.c111
-rw-r--r--cutils.c241
-rw-r--r--default-configs/or32-linux-user.mak1
-rw-r--r--default-configs/or32-softmmu.mak4
-rw-r--r--default-configs/pci.mak3
-rw-r--r--device_tree.c30
-rw-r--r--device_tree.h4
-rw-r--r--disas.c19
-rw-r--r--dma-helpers.c212
-rw-r--r--dma.h218
-rw-r--r--docs/qapi-code-gen.txt2
-rw-r--r--docs/specs/qcow2.txt14
-rw-r--r--docs/usb-storage.txt38
-rw-r--r--elf.h2
-rw-r--r--error.c3
-rw-r--r--error.h2
-rw-r--r--event_notifier.c30
-rw-r--r--event_notifier.h8
-rw-r--r--exec-obsolete.h50
-rw-r--r--exec.c83
-rw-r--r--gdbstub.c64
-rw-r--r--hmp-commands.hx8
-rw-r--r--hmp.c22
-rw-r--r--hmp.h2
-rw-r--r--hw/9pfs/virtio-9p.c23
-rw-r--r--hw/Makefile.objs4
-rw-r--r--hw/ac97.c3
-rw-r--r--hw/apic-msidef.h30
-rw-r--r--hw/apic.c38
-rw-r--r--hw/apic.h6
-rw-r--r--hw/apic_common.c16
-rw-r--r--hw/apic_internal.h1
-rw-r--r--hw/arm-misc.h2
-rw-r--r--hw/arm/Makefile.objs3
-rw-r--r--hw/arm_boot.c46
-rw-r--r--hw/arm_gic.c2
-rw-r--r--hw/block-common.c64
-rw-r--r--hw/block-common.h79
-rw-r--r--hw/bt-l2cap.c11
-rw-r--r--hw/cadence_gem.c14
-rw-r--r--hw/cirrus_vga.c8
-rw-r--r--hw/dp8393x.c9
-rw-r--r--hw/e1000.c15
-rw-r--r--hw/eepro100.c28
-rw-r--r--hw/es1370.c3
-rw-r--r--hw/escc.c1
-rw-r--r--hw/esp-pci.c518
-rw-r--r--hw/esp.c319
-rw-r--r--hw/esp.h119
-rw-r--r--hw/etraxfs_eth.c10
-rw-r--r--hw/exynos4210.c37
-rw-r--r--hw/exynos4210.h3
-rw-r--r--hw/exynos4210_i2c.c334
-rw-r--r--hw/exynos4210_mct.c4
-rw-r--r--hw/exynos4210_pwm.c2
-rw-r--r--hw/exynos4210_rtc.c592
-rw-r--r--hw/exynos4_boards.c2
-rw-r--r--hw/fdc.c244
-rw-r--r--hw/fdc.h10
-rw-r--r--hw/hd-geometry.c157
-rw-r--r--hw/highbank.c2
-rw-r--r--hw/i386/Makefile.objs2
-rw-r--r--hw/ide.h4
-rw-r--r--hw/ide/ahci.c33
-rw-r--r--hw/ide/ahci.h3
-rw-r--r--hw/ide/atapi.c31
-rw-r--r--hw/ide/cmd646.c10
-rw-r--r--hw/ide/core.c50
-rw-r--r--hw/ide/ich.c6
-rw-r--r--hw/ide/internal.h8
-rw-r--r--hw/ide/macio.c4
-rw-r--r--hw/ide/piix.c7
-rw-r--r--hw/ide/qdev.c42
-rw-r--r--hw/ide/via.c7
-rw-r--r--hw/imx.h34
-rw-r--r--hw/imx_avic.c408
-rw-r--r--hw/imx_ccm.c321
-rw-r--r--hw/imx_serial.c467
-rw-r--r--hw/imx_timer.c689
-rw-r--r--hw/integratorcp.c2
-rw-r--r--hw/intel-hda.c3
-rw-r--r--hw/ioh3420.c8
-rw-r--r--hw/ivshmem.c81
-rw-r--r--hw/kzm.c154
-rw-r--r--hw/lan9118.c16
-rw-r--r--hw/lance.c4
-rw-r--r--hw/lsi53c895a.c55
-rw-r--r--hw/mcf5208.c2
-rw-r--r--hw/mcf_fec.c9
-rw-r--r--hw/megasas.c2209
-rw-r--r--hw/mfi.h1249
-rw-r--r--hw/milkymist-minimac2.c8
-rw-r--r--hw/mips_jazz.c2
-rw-r--r--hw/mips_mipssim.c2
-rw-r--r--hw/mips_r4k.c2
-rw-r--r--hw/mipsnet.c8
-rw-r--r--hw/msi.c17
-rw-r--r--hw/msi.h1
-rw-r--r--hw/msix.c290
-rw-r--r--hw/msix.h19
-rw-r--r--hw/musicpal.c8
-rw-r--r--hw/ne2000-isa.c4
-rw-r--r--hw/ne2000.c21
-rw-r--r--hw/ne2000.h4
-rw-r--r--hw/omap.h8
-rw-r--r--hw/opencores_eth.c10
-rw-r--r--hw/openrisc/Makefile.objs3
-rw-r--r--hw/openrisc_pic.c60
-rw-r--r--hw/openrisc_sim.c150
-rw-r--r--hw/openrisc_timer.c101
-rw-r--r--hw/pc.c99
-rw-r--r--hw/pc_piix.c36
-rw-r--r--hw/pci.c74
-rw-r--r--hw/pci.h67
-rw-r--r--hw/pci_bridge.c3
-rw-r--r--hw/pci_bridge.h2
-rw-r--r--hw/pci_bridge_dev.c13
-rw-r--r--hw/pci_ids.h4
-rw-r--r--hw/pci_internals.h3
-rw-r--r--hw/pcnet-pci.c9
-rw-r--r--hw/pcnet.c6
-rw-r--r--hw/pcnet.h6
-rw-r--r--hw/piix_pci.c20
-rw-r--r--hw/pl011.c4
-rw-r--r--hw/ppc/Makefile.objs2
-rw-r--r--hw/ppce500_spin.c2
-rw-r--r--hw/qdev-dma.h10
-rw-r--r--hw/qdev-monitor.c4
-rw-r--r--hw/qdev-properties.c323
-rw-r--r--hw/qdev.c7
-rw-r--r--hw/qdev.h26
-rw-r--r--hw/qxl.c138
-rw-r--r--hw/qxl.h8
-rw-r--r--hw/rtl8139.c39
-rw-r--r--hw/s390-virtio-bus.c1
-rw-r--r--hw/scsi-bus.c292
-rw-r--r--hw/scsi-defs.h19
-rw-r--r--hw/scsi-disk.c870
-rw-r--r--hw/scsi-generic.c13
-rw-r--r--hw/scsi.h15
-rw-r--r--hw/sh_serial.c6
-rw-r--r--hw/smc91c111.c8
-rw-r--r--hw/spapr.c3
-rw-r--r--hw/spapr.h18
-rw-r--r--hw/spapr_iommu.c246
-rw-r--r--hw/spapr_llan.c69
-rw-r--r--hw/spapr_pci.c17
-rw-r--r--hw/spapr_pci.h1
-rw-r--r--hw/spapr_vio.c281
-rw-r--r--hw/spapr_vio.h73
-rw-r--r--hw/spapr_vscsi.c26
-rw-r--r--hw/spapr_vty.c2
-rw-r--r--hw/stellaris_enet.c8
-rw-r--r--hw/sun4m.c18
-rw-r--r--hw/usb.h6
-rw-r--r--hw/usb/Makefile.objs1
-rw-r--r--hw/usb/bus.c13
-rw-r--r--hw/usb/core.c23
-rw-r--r--hw/usb/dev-network.c10
-rw-r--r--hw/usb/dev-storage.c21
-rw-r--r--hw/usb/dev-uas.c779
-rw-r--r--hw/usb/hcd-ehci.c246
-rw-r--r--hw/usb/hcd-ohci.c93
-rw-r--r--hw/usb/hcd-uhci.c21
-rw-r--r--hw/usb/host-linux.c94
-rw-r--r--hw/usb/libhw.c21
-rw-r--r--hw/usb/redirect.c2
-rw-r--r--hw/vexpress.c15
-rw-r--r--hw/vga-isa-mm.c5
-rw-r--r--hw/vga-isa.c8
-rw-r--r--hw/vga-pci.c8
-rw-r--r--hw/vga.c27
-rw-r--r--hw/vga_int.h10
-rw-r--r--hw/vhost.c4
-rw-r--r--hw/vhost_net.c26
-rw-r--r--hw/vhost_net.h2
-rw-r--r--hw/virtio-balloon.c4
-rw-r--r--hw/virtio-blk.c25
-rw-r--r--hw/virtio-blk.h2
-rw-r--r--hw/virtio-net.c26
-rw-r--r--hw/virtio-pci.c131
-rw-r--r--hw/virtio-pci.h1
-rw-r--r--hw/virtio-scsi.c113
-rw-r--r--hw/virtio-serial-bus.c10
-rw-r--r--hw/virtio.c49
-rw-r--r--hw/virtio.h5
-rw-r--r--hw/vmware_vga.c22
-rw-r--r--hw/watchdog.c2
-rw-r--r--hw/wdt_i6300esb.c4
-rw-r--r--hw/xen-host-pci-device.c396
-rw-r--r--hw/xen-host-pci-device.h55
-rw-r--r--hw/xen_backend.c6
-rw-r--r--hw/xen_backend.h1
-rw-r--r--hw/xen_common.h9
-rw-r--r--hw/xen_console.c5
-rw-r--r--hw/xen_devconfig.c10
-rw-r--r--hw/xen_disk.c6
-rw-r--r--hw/xen_nic.c16
-rw-r--r--hw/xen_platform.c8
-rw-r--r--hw/xen_pt.c849
-rw-r--r--hw/xen_pt.h301
-rw-r--r--hw/xen_pt_config_init.c1869
-rw-r--r--hw/xen_pt_msi.c620
-rw-r--r--hw/xenfb.c13
-rw-r--r--hw/xgmac.c8
-rw-r--r--hw/xilinx_axienet.c9
-rw-r--r--hw/xilinx_ethlite.c8
-rw-r--r--hw/xio3130_downstream.c8
-rw-r--r--hw/xio3130_upstream.c8
-rw-r--r--hw/xtensa_lx60.c8
-rw-r--r--hw/xtensa_sim.c5
-rw-r--r--include/qemu/cpu.h7
-rw-r--r--iov.c193
-rw-r--r--iov.h77
-rw-r--r--kvm-all.c44
-rw-r--r--kvm-stub.c10
-rw-r--r--kvm.h4
-rw-r--r--linux-aio.c4
-rw-r--r--linux-user/alpha/syscall_nr.h2
-rw-r--r--linux-user/elfload.c41
-rw-r--r--linux-user/main.c119
-rw-r--r--linux-user/mmap.c30
-rw-r--r--linux-user/openrisc/syscall.h24
-rw-r--r--linux-user/openrisc/syscall_nr.h506
-rw-r--r--linux-user/openrisc/target_signal.h26
-rw-r--r--linux-user/openrisc/termbits.h294
-rw-r--r--linux-user/signal.c245
-rw-r--r--linux-user/strace.c12
-rw-r--r--linux-user/strace.list3
-rw-r--r--linux-user/syscall.c104
-rw-r--r--linux-user/syscall_defs.h275
-rw-r--r--memory.c18
-rw-r--r--memory.h9
-rw-r--r--migration.c24
-rw-r--r--migration.h13
-rw-r--r--monitor.c77
-rw-r--r--net.c1069
-rw-r--r--net.h102
-rw-r--r--net/Makefile.objs2
-rw-r--r--net/dump.c49
-rw-r--r--net/dump.h5
-rw-r--r--net/hub.c339
-rw-r--r--net/hub.h29
-rw-r--r--net/queue.c38
-rw-r--r--net/queue.h25
-rw-r--r--net/slirp.c196
-rw-r--r--net/slirp.h4
-rw-r--r--net/socket.c246
-rw-r--r--net/socket.h5
-rw-r--r--net/tap-aix.c2
-rw-r--r--net/tap-bsd.c2
-rw-r--r--net/tap-haiku.c2
-rw-r--r--net/tap-linux.c9
-rw-r--r--net/tap-solaris.c2
-rw-r--r--net/tap-win32.c36
-rw-r--r--net/tap.c192
-rw-r--r--net/tap.h26
-rw-r--r--net/vde.c32
-rw-r--r--net/vde.h5
-rw-r--r--os-posix.c5
-rw-r--r--osdep.c29
-rw-r--r--osdep.h6
-rw-r--r--oslib-posix.c6
-rw-r--r--pc-bios/keymaps/fi2
-rw-r--r--poison.h1
-rw-r--r--posix-aio-compat.c8
-rw-r--r--qapi-schema-guest.json20
-rw-r--r--qapi-schema.json407
-rw-r--r--qapi/Makefile.objs2
-rw-r--r--qapi/opts-visitor.c427
-rw-r--r--qapi/opts-visitor.h31
-rw-r--r--qapi/qapi-visit-core.c19
-rw-r--r--qapi/qapi-visit-core.h3
-rw-r--r--qemu-bridge-helper.c24
-rw-r--r--qemu-common.h87
-rw-r--r--qemu-config.h2
-rw-r--r--qemu-coroutine-io.c83
-rw-r--r--qemu-doc.texi12
-rw-r--r--qemu-ga.c2
-rw-r--r--qemu-img.c32
-rw-r--r--qemu-io.c16
-rw-r--r--qemu-log.c32
-rw-r--r--qemu-log.h12
-rw-r--r--qemu-nbd.c75
-rw-r--r--qemu-option-internal.h (renamed from ui/vnc-jobs-sync.c)70
-rw-r--r--qemu-option.c36
-rw-r--r--qemu-option.h12
-rw-r--r--qemu-options.hx67
-rw-r--r--qemu-sockets.c4
-rw-r--r--qemu-tech.texi2
-rw-r--r--qemu-thread-posix.c2
-rw-r--r--qemu-thread-win32.c2
-rw-r--r--qemu-thread.h3
-rw-r--r--qemu-timer.c12
-rw-r--r--qga/Makefile.objs2
-rw-r--r--qga/commands-posix.c114
-rw-r--r--qga/commands-win32.c11
-rw-r--r--qmp-commands.hx16
-rw-r--r--roms/Makefile17
-rw-r--r--roms/config.vga.cirrus3
-rw-r--r--roms/config.vga.isavga3
-rw-r--r--roms/config.vga.qxl6
-rw-r--r--roms/config.vga.stdvga3
-rw-r--r--roms/config.vga.vmware6
-rw-r--r--rules.mak3
-rw-r--r--savevm.c99
-rwxr-xr-xscripts/checkpatch.pl5
-rwxr-xr-xscripts/make-release24
-rw-r--r--scripts/qapi-visit.py150
-rw-r--r--scripts/qapi.py53
-rwxr-xr-xscripts/simpletrace.py116
-rw-r--r--scripts/tracetool/backend/simple.py90
-rw-r--r--slirp/if.c5
-rw-r--r--slirp/libslirp.h1
-rw-r--r--slirp/main.h1
-rw-r--r--slirp/slirp.c3
-rw-r--r--slirp/tcp_subr.c7
-rw-r--r--sysemu.h6
-rw-r--r--target-alpha/cpu.h15
-rw-r--r--target-arm/cpu.c6
-rw-r--r--target-arm/cpu.h15
-rw-r--r--target-arm/helper.c441
-rw-r--r--target-arm/machine.c10
-rw-r--r--target-arm/translate.c4
-rw-r--r--target-i386/Makefile.objs13
-rw-r--r--target-i386/cc_helper.c387
-rw-r--r--target-i386/cc_helper_template.h (renamed from target-i386/helper_template.h)91
-rw-r--r--target-i386/cpu.c36
-rw-r--r--target-i386/cpu.h67
-rw-r--r--target-i386/excp_helper.c129
-rw-r--r--target-i386/fpu_helper.c1304
-rw-r--r--target-i386/helper.c5
-rw-r--r--target-i386/helper.h4
-rw-r--r--target-i386/int_helper.c500
-rw-r--r--target-i386/kvm.c13
-rw-r--r--target-i386/mem_helper.c161
-rw-r--r--target-i386/misc_helper.c603
-rw-r--r--target-i386/op_helper.c5923
-rw-r--r--target-i386/ops_sse.h1049
-rw-r--r--target-i386/seg_helper.c2475
-rw-r--r--target-i386/shift_helper_template.h110
-rw-r--r--target-i386/smm_helper.c307
-rw-r--r--target-i386/svm_helper.c716
-rw-r--r--target-i386/translate.c253
-rw-r--r--target-mips/translate.c1
-rw-r--r--target-openrisc/Makefile.objs4
-rw-r--r--target-openrisc/cpu.c220
-rw-r--r--target-openrisc/cpu.h458
-rw-r--r--target-openrisc/exception.c27
-rw-r--r--target-openrisc/exception.h28
-rw-r--r--target-openrisc/exception_helper.c29
-rw-r--r--target-openrisc/fpu_helper.c300
-rw-r--r--target-openrisc/helper.h70
-rw-r--r--target-openrisc/int_helper.c79
-rw-r--r--target-openrisc/interrupt.c74
-rw-r--r--target-openrisc/interrupt_helper.c57
-rw-r--r--target-openrisc/machine.c47
-rw-r--r--target-openrisc/mmu.c243
-rw-r--r--target-openrisc/mmu_helper.c63
-rw-r--r--target-openrisc/sys_helper.c287
-rw-r--r--target-openrisc/translate.c1835
-rw-r--r--target-ppc/kvm.c4
-rw-r--r--target-s390x/kvm.c35
-rw-r--r--target-xtensa/cpu.h6
-rw-r--r--target-xtensa/translate.c16
-rw-r--r--targphys.h16
-rw-r--r--tci.c1
-rw-r--r--tests/Makefile6
-rw-r--r--tests/fdc-test.c102
-rw-r--r--tests/hd-geo-test.c428
-rw-r--r--tests/libqtest.c35
-rw-r--r--tests/qemu-iotests/031.out20
-rw-r--r--tests/qemu-iotests/036.out4
-rwxr-xr-xtests/qemu-iotests/039136
-rw-r--r--tests/qemu-iotests/039.out53
-rw-r--r--tests/qemu-iotests/common11
-rw-r--r--tests/qemu-iotests/common.rc17
-rw-r--r--tests/qemu-iotests/group1
-rwxr-xr-xtests/qemu-iotests/qed.py235
-rw-r--r--tests/tcg/openrisc/Makefile71
-rw-r--r--tests/tcg/openrisc/test_add.c43
-rw-r--r--tests/tcg/openrisc/test_addc.c38
-rw-r--r--tests/tcg/openrisc/test_addi.c33
-rw-r--r--tests/tcg/openrisc/test_addic.c33
-rw-r--r--tests/tcg/openrisc/test_and_or.c65
-rw-r--r--tests/tcg/openrisc/test_bf.c47
-rw-r--r--tests/tcg/openrisc/test_bnf.c51
-rw-r--r--tests/tcg/openrisc/test_div.c54
-rw-r--r--tests/tcg/openrisc/test_divu.c34
-rw-r--r--tests/tcg/openrisc/test_extx.c78
-rw-r--r--tests/tcg/openrisc/test_fx.c57
-rw-r--r--tests/tcg/openrisc/test_j.c26
-rw-r--r--tests/tcg/openrisc/test_jal.c26
-rw-r--r--tests/tcg/openrisc/test_lf_add.c39
-rw-r--r--tests/tcg/openrisc/test_lf_div.c37
-rw-r--r--tests/tcg/openrisc/test_lf_eqs.c88
-rw-r--r--tests/tcg/openrisc/test_lf_ges.c88
-rw-r--r--tests/tcg/openrisc/test_lf_gts.c86
-rw-r--r--tests/tcg/openrisc/test_lf_les.c88
-rw-r--r--tests/tcg/openrisc/test_lf_lts.c92
-rw-r--r--tests/tcg/openrisc/test_lf_mul.c22
-rw-r--r--tests/tcg/openrisc/test_lf_nes.c89
-rw-r--r--tests/tcg/openrisc/test_lf_rem.c32
-rw-r--r--tests/tcg/openrisc/test_lf_sub.c35
-rw-r--r--tests/tcg/openrisc/test_logic.c105
-rw-r--r--tests/tcg/openrisc/test_lx.c84
-rw-r--r--tests/tcg/openrisc/test_movhi.c31
-rw-r--r--tests/tcg/openrisc/test_mul.c61
-rw-r--r--tests/tcg/openrisc/test_muli.c48
-rw-r--r--tests/tcg/openrisc/test_mulu.c48
-rw-r--r--tests/tcg/openrisc/test_sfeq.c43
-rw-r--r--tests/tcg/openrisc/test_sfeqi.c39
-rw-r--r--tests/tcg/openrisc/test_sfges.c44
-rw-r--r--tests/tcg/openrisc/test_sfgesi.c40
-rw-r--r--tests/tcg/openrisc/test_sfgeu.c44
-rw-r--r--tests/tcg/openrisc/test_sfgeui.c41
-rw-r--r--tests/tcg/openrisc/test_sfgts.c45
-rw-r--r--tests/tcg/openrisc/test_sfgtsi.c41
-rw-r--r--tests/tcg/openrisc/test_sfgtu.c43
-rw-r--r--tests/tcg/openrisc/test_sfgtui.c42
-rw-r--r--tests/tcg/openrisc/test_sfles.c26
-rw-r--r--tests/tcg/openrisc/test_sflesi.c39
-rw-r--r--tests/tcg/openrisc/test_sfleu.c43
-rw-r--r--tests/tcg/openrisc/test_sfleui.c39
-rw-r--r--tests/tcg/openrisc/test_sflts.c43
-rw-r--r--tests/tcg/openrisc/test_sfltsi.c39
-rw-r--r--tests/tcg/openrisc/test_sfltu.c43
-rw-r--r--tests/tcg/openrisc/test_sfltui.c39
-rw-r--r--tests/tcg/openrisc/test_sfne.c43
-rw-r--r--tests/tcg/openrisc/test_sfnei.c39
-rw-r--r--tests/tcg/openrisc/test_sub.c35
-rw-r--r--tests/test-iov.c260
-rw-r--r--tests/test-qmp-commands.c42
-rw-r--r--tests/test-qmp-input-visitor.c24
-rw-r--r--trace-events132
-rw-r--r--trace/control.c3
-rw-r--r--trace/simple.c271
-rw-r--r--trace/simple.h40
-rw-r--r--ui/Makefile.objs6
-rw-r--r--ui/spice-display.c2
-rw-r--r--ui/vnc-auth-vencrypt.c3
-rw-r--r--ui/vnc-jobs.c (renamed from ui/vnc-jobs-async.c)0
-rw-r--r--ui/vnc-jobs.h16
-rw-r--r--ui/vnc.c33
-rw-r--r--ui/vnc.h17
-rw-r--r--user-exec.c21
-rw-r--r--vl.c75
-rw-r--r--vmstate.h18
-rw-r--r--xen-all.c18
490 files changed, 39510 insertions, 12402 deletions
diff --git a/.gitignore b/.gitignore
index 9859c7d746..824c0d24df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -41,12 +41,14 @@ qemu-io
qemu-ga
qemu-bridge-helper
qemu-monitor.texi
+vscclient
QMP/qmp-commands.txt
test-coroutine
test-qmp-input-visitor
test-qmp-output-visitor
test-string-input-visitor
test-string-output-visitor
+test-visitor-serialization
fsdev/virtfs-proxy-helper.1
fsdev/virtfs-proxy-helper.pod
.gdbinit
@@ -69,6 +71,10 @@ fsdev/virtfs-proxy-helper.pod
*.vr
*.d
*.o
+*.lo
+*.la
+*.pc
+.libs
*.swp
*.orig
.pc
diff --git a/MAINTAINERS b/MAINTAINERS
index b45f0750b2..2d219d2ea0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -207,6 +207,12 @@ M: qemu-devel@nongnu.org
S: Orphan
F: hw/gumstix.c
+i.MX31
+M: Peter Chubb <peter.chubb@nicta.com.au>
+S: Odd fixes
+F: hw/imx*
+F: hw/kzm.c
+
Integrator CP
M: Paul Brook <paul@codesourcery.com>
M: Peter Maydell <peter.maydell@linaro.org>
@@ -311,6 +317,11 @@ M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
S: Maintained
F: hw/petalogix_s3adsp1800.c
+petalogix_ml605
+M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+S: Maintained
+F: hw/petalogix_ml605_mmu.c
+
MIPS Machines
-------------
Jazz
@@ -477,6 +488,17 @@ S: Supported
F: hw/virtio-serial*
F: hw/virtio-console*
+Xilinx EDK
+M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Edgar E. Iglesias <edgar.iglesias@gmail.com>
+S: Maintained
+F: hw/xilinx_axi*
+F: hw/xilinx_uartlite.c
+F: hw/xilinx_intc.c
+F: hw/xilinx_ethlite.c
+F: hw/xilinx_timer.c
+F: hw/xilinx.h
+
Subsystems
----------
Audio
@@ -495,6 +517,12 @@ M: Anthony Liguori <aliguori@us.ibm.com>
S: Maintained
F: qemu-char.c
+Device Tree
+M: Peter Crosthwaite <peter.crosthwaite@petalogix.com>
+M: Alexander Graf <agraf@suse.de>
+S: Maintained
+F: device-tree.[ch]
+
GDB stub
M: qemu-devel@nongnu.org
S: Odd Fixes
@@ -532,9 +560,10 @@ F: monitor.c
Network device layer
M: Anthony Liguori <aliguori@us.ibm.com>
-M: Mark McLoughlin <markmc@redhat.com>
+M: Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
S: Maintained
F: net/
+T: git git://github.com/stefanha/qemu.git net
Network Block Device (NBD)
M: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/Makefile b/Makefile
index 827e1adac3..000b46c379 100644
--- a/Makefile
+++ b/Makefile
@@ -6,7 +6,7 @@ BUILD_DIR=$(CURDIR)
# All following code might depend on configuration variables
ifneq ($(wildcard config-host.mak),)
# Put the all: rule here so that config-host.mak can contain dependencies.
-all: build-all
+all:
include config-host.mak
include $(SRC_PATH)/rules.mak
config-host.mak: $(SRC_PATH)/configure
@@ -31,7 +31,7 @@ Makefile: ;
configure: ;
.PHONY: all clean cscope distclean dvi html info install install-doc \
- pdf recurse-all speed tar tarbin test build-all
+ pdf recurse-all speed test dist
$(call set-vpath, $(SRC_PATH))
@@ -82,7 +82,7 @@ defconfig:
-include config-all-devices.mak
-build-all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all
+all: $(DOCS) $(TOOLS) $(HELPERS-y) recurse-all
config-host.h: config-host.h-timestamp
config-host.h-timestamp: config-host.mak
@@ -156,7 +156,8 @@ vscclient$(EXESUF): $(libcacard-y) $(oslib-obj-y) $(trace-obj-y) qemu-timer-comm
qemu-img.o: qemu-img-cmds.h
tools-obj-y = $(oslib-obj-y) $(trace-obj-y) qemu-tool.o qemu-timer.o \
- qemu-timer-common.o main-loop.o notify.o iohandler.o cutils.o async.o
+ qemu-timer-common.o main-loop.o notify.o \
+ iohandler.o cutils.o iov.o async.o
tools-obj-$(CONFIG_POSIX) += compatfd.o
qemu-img$(EXESUF): qemu-img.o $(tools-obj-y) $(block-obj-y)
@@ -171,9 +172,8 @@ fsdev/virtfs-proxy-helper$(EXESUF): LIBS += -lcap
qemu-img-cmds.h: $(SRC_PATH)/qemu-img-cmds.hx
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@," GEN $@")
-qapi-dir := $(BUILD_DIR)/qapi-generated
qemu-ga$(EXESUF): LIBS = $(LIBS_QGA)
-qemu-ga$(EXESUF): QEMU_CFLAGS += -I $(qapi-dir)
+qemu-ga$(EXESUF): QEMU_CFLAGS += -I qga/qapi-generated
gen-out-type = $(subst .,-,$(suffix $@))
@@ -181,15 +181,15 @@ ifneq ($(wildcard config-host.mak),)
include $(SRC_PATH)/tests/Makefile
endif
-$(qapi-dir)/qga-qapi-types.c $(qapi-dir)/qga-qapi-types.h :\
+qga/qapi-generated/qga-qapi-types.c qga/qapi-generated/qga-qapi-types.h :\
$(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-types.py
- $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@")
-$(qapi-dir)/qga-qapi-visit.c $(qapi-dir)/qga-qapi-visit.h :\
+ $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-types.py $(gen-out-type) -o qga/qapi-generated -p "qga-" < $<, " GEN $@")
+qga/qapi-generated/qga-qapi-visit.c qga/qapi-generated/qga-qapi-visit.h :\
$(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-visit.py
- $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@")
-$(qapi-dir)/qga-qmp-commands.h $(qapi-dir)/qga-qmp-marshal.c :\
+ $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-visit.py $(gen-out-type) -o qga/qapi-generated -p "qga-" < $<, " GEN $@")
+qga/qapi-generated/qga-qmp-commands.h qga/qapi-generated/qga-qmp-marshal.c :\
$(SRC_PATH)/qapi-schema-guest.json $(SRC_PATH)/scripts/qapi-commands.py
- $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -o "$(qapi-dir)" -p "qga-" < $<, " GEN $@")
+ $(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -o qga/qapi-generated -p "qga-" < $<, " GEN $@")
qapi-types.c qapi-types.h :\
$(SRC_PATH)/qapi-schema.json $(SRC_PATH)/scripts/qapi-types.py
@@ -201,12 +201,10 @@ qmp-commands.h qmp-marshal.c :\
$(SRC_PATH)/qapi-schema.json $(SRC_PATH)/scripts/qapi-commands.py
$(call quiet-command,$(PYTHON) $(SRC_PATH)/scripts/qapi-commands.py $(gen-out-type) -m -o "." < $<, " GEN $@")
-QGALIB_OBJ=$(addprefix $(qapi-dir)/, qga-qapi-types.o qga-qapi-visit.o qga-qmp-marshal.o)
-QGALIB_GEN=$(addprefix $(qapi-dir)/, qga-qapi-types.h qga-qapi-visit.h qga-qmp-commands.h)
-$(QGALIB_OBJ): $(QGALIB_GEN)
+QGALIB_GEN=$(addprefix qga/qapi-generated/, qga-qapi-types.h qga-qapi-visit.h qga-qmp-commands.h)
$(qga-obj-y) qemu-ga.o: $(QGALIB_GEN)
-qemu-ga$(EXESUF): qemu-ga.o $(qga-obj-y) $(tools-obj-y) $(qapi-obj-y) $(qobject-obj-y) $(version-obj-y) $(QGALIB_OBJ)
+qemu-ga$(EXESUF): qemu-ga.o $(qga-obj-y) $(tools-obj-y) $(qapi-obj-y) $(qobject-obj-y) $(version-obj-y)
QEMULIBS=libhw32 libhw64 libuser libdis libdis-user
@@ -217,8 +215,8 @@ clean:
rm -f *.o *.d *.a *.lo $(TOOLS) $(HELPERS-y) qemu-ga TAGS cscope.* *.pod *~ */*~
rm -Rf .libs
rm -f slirp/*.o slirp/*.d audio/*.o audio/*.d block/*.o block/*.d net/*.o net/*.d fsdev/*.o fsdev/*.d ui/*.o ui/*.d qapi/*.o qapi/*.d qga/*.o qga/*.d
- rm -f qom/*.o qom/*.d
- rm -f usb/*.o usb/*.d hw/*.o hw/*.d
+ rm -f qom/*.o qom/*.d libuser/qom/*.o libuser/qom/*.d
+ rm -f hw/usb/*.o hw/usb/*.d hw/*.o hw/*.d
rm -f qemu-img-cmds.h
rm -f trace/*.o trace/*.d
rm -f trace-dtrace.dtrace trace-dtrace.dtrace-timestamp
@@ -226,13 +224,21 @@ clean:
rm -f trace-dtrace.h trace-dtrace.h-timestamp
rm -f $(foreach f,$(GENERATED_HEADERS),$(f) $(f)-timestamp)
rm -f $(foreach f,$(GENERATED_SOURCES),$(f) $(f)-timestamp)
- rm -rf $(qapi-dir)
+ rm -rf qapi-generated
+ rm -rf qga/qapi-generated
$(MAKE) -C tests/tcg clean
for d in $(ALL_SUBDIRS) $(QEMULIBS) libcacard; do \
if test -d $$d; then $(MAKE) -C $$d $@ || exit 1; fi; \
rm -f $$d/qemu-options.def; \
done
+VERSION ?= $(shell cat VERSION)
+
+dist: qemu-$(VERSION).tar.bz2
+
+qemu-%.tar.bz2:
+ $(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
+
distclean: clean
rm -f config-host.mak config-host.h* config-host.ld $(DOCS) qemu-options.texi qemu-img-cmds.texi qemu-monitor.texi
rm -f config-all-devices.mak
@@ -250,7 +256,8 @@ distclean: clean
KEYMAPS=da en-gb et fr fr-ch is lt modifiers no pt-br sv \
ar de en-us fi fr-be hr it lv nl pl ru th \
-common de-ch es fo fr-ca hu ja mk nl-be pt sl tr
+common de-ch es fo fr-ca hu ja mk nl-be pt sl tr \
+bepo
ifdef INSTALL_BLOBS
BLOBS=bios.bin sgabios.bin vgabios.bin vgabios-cirrus.bin \
@@ -390,20 +397,10 @@ qemu-doc.dvi qemu-doc.html qemu-doc.info qemu-doc.pdf: \
qemu-img.texi qemu-nbd.texi qemu-options.texi \
qemu-monitor.texi qemu-img-cmds.texi
-VERSION ?= $(shell cat VERSION)
-FILE = qemu-$(VERSION)
-
-# tar release (use 'make -k tar' on a checkouted tree)
-tar:
- rm -rf /tmp/$(FILE)
- cp -r . /tmp/$(FILE)
- cd /tmp && tar zcvf ~/$(FILE).tar.gz $(FILE) --exclude CVS --exclude .git --exclude .svn
- rm -rf /tmp/$(FILE)
-
# Add a dependency on the generated files, so that they are always
# rebuilt before other object files
Makefile: $(GENERATED_HEADERS)
# Include automatically generated dependency files
-# All subdir dependencies come automatically from our recursive subdir rules
--include $(wildcard *.d)
+# Dependencies in Makefile.objs files come from our recursive subdir rules
+-include $(wildcard *.d tests/*.d)
diff --git a/Makefile.dis b/Makefile.dis
index 09060f0a1a..2cfec6a358 100644
--- a/Makefile.dis
+++ b/Makefile.dis
@@ -18,6 +18,3 @@ all: $(libdis-y)
clean:
rm -f *.o *.d *.a *~
-
-# Include automatically generated dependency files
--include $(wildcard *.d)
diff --git a/Makefile.hw b/Makefile.hw
index 2bcbaffb4f..59f5b48350 100644
--- a/Makefile.hw
+++ b/Makefile.hw
@@ -19,8 +19,5 @@ all: $(hw-obj-y)
@true
clean:
- rm -f $(addsuffix /*.o, $(dir $(sort $(hw-obj-y))))
- rm -f $(addsuffix /*.d, $(dir $(sort $(hw-obj-y))))
-
-# Include automatically generated dependency files
--include $(patsubst %.o, %.d, $(hw-obj-y))
+ rm -f $(addsuffix *.o, $(sort $(dir $(hw-obj-y))))
+ rm -f $(addsuffix *.d, $(sort $(dir $(hw-obj-y))))
diff --git a/Makefile.objs b/Makefile.objs
index 625c4d5da7..5ebbcfa171 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -41,7 +41,7 @@ coroutine-obj-$(CONFIG_WIN32) += coroutine-win32.o
#######################################################################
# block-obj-y is code used by both qemu system emulation and qemu-img
-block-obj-y = cutils.o cache-utils.o qemu-option.o module.o async.o
+block-obj-y = cutils.o iov.o cache-utils.o qemu-option.o module.o async.o
block-obj-y += nbd.o block.o aio.o aes.o qemu-config.o qemu-progress.o qemu-sockets.o
block-obj-y += $(coroutine-obj-y) $(qobject-obj-y) $(version-obj-y)
block-obj-$(CONFIG_POSIX) += posix-aio-compat.o
@@ -101,7 +101,7 @@ common-obj-$(CONFIG_SLIRP) += slirp/
user-obj-y =
user-obj-y += envlist.o path.o
user-obj-y += tcg-runtime.o host-utils.o
-user-obj-y += cutils.o cache-utils.o
+user-obj-y += cutils.o iov.o cache-utils.o
user-obj-y += module.o
user-obj-y += qemu-user.o
user-obj-y += $(trace-obj-y)
diff --git a/Makefile.target b/Makefile.target
index 8f12b0fe88..7892a8df63 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -108,7 +108,7 @@ ifdef CONFIG_BSD_USER
QEMU_CFLAGS+=-I$(SRC_PATH)/bsd-user -I$(SRC_PATH)/bsd-user/$(TARGET_ARCH)
obj-y += bsd-user/
-obj-y += gdbstub.o user-exec.o
+obj-y += gdbstub.o user-exec.o $(oslib-obj-y)
endif #CONFIG_BSD_USER
@@ -193,8 +193,8 @@ qmp-commands-old.h: $(SRC_PATH)/qmp-commands.hx
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@," GEN $(TARGET_DIR)$@")
clean:
- rm -f *.o *.a *~ $(PROGS) nwfpe/*.o fpu/*.o
- rm -f *.d */*.d tcg/*.o ide/*.o 9pfs/*.o kvm/*.o
+ rm -f *.a *~ $(PROGS)
+ rm -f $(shell find . -name '*.[od]')
rm -f hmp-commands.h qmp-commands-old.h gdbstub-xml.c
ifdef CONFIG_TRACE_SYSTEMTAP
rm -f *.stp
@@ -214,6 +214,3 @@ endif
GENERATED_HEADERS += config-target.h
Makefile: $(GENERATED_HEADERS)
-
-# Include automatically generated dependency files
--include $(wildcard *.d fpu/*.d tcg/*.d)
diff --git a/Makefile.user b/Makefile.user
index 0ffefe813f..9302d33245 100644
--- a/Makefile.user
+++ b/Makefile.user
@@ -10,6 +10,7 @@ $(call set-vpath, $(SRC_PATH))
QEMU_CFLAGS+=-I..
QEMU_CFLAGS += -I$(SRC_PATH)/include
+QEMU_CFLAGS += -DCONFIG_USER_ONLY
include $(SRC_PATH)/Makefile.objs
@@ -21,6 +22,3 @@ clean:
for d in . trace; do \
rm -f $$d/*.o $$d/*.d $$d/*.a $$d/*~; \
done
-
-# Include automatically generated dependency files
--include $(wildcard *.d)
diff --git a/arch_init.c b/arch_init.c
index a9e8b7442b..60823baabd 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -44,6 +44,14 @@
#include "exec-memory.h"
#include "hw/pcspk.h"
+#ifdef DEBUG_ARCH_INIT
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stdout, "arch_init: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
#ifdef TARGET_SPARC
int graphic_width = 1024;
int graphic_height = 768;
@@ -71,6 +79,8 @@ int graphic_depth = 15;
#define QEMU_ARCH QEMU_ARCH_MICROBLAZE
#elif defined(TARGET_MIPS)
#define QEMU_ARCH QEMU_ARCH_MIPS
+#elif defined(TARGET_OPENRISC)
+#define QEMU_ARCH QEMU_ARCH_OPENRISC
#elif defined(TARGET_PPC)
#define QEMU_ARCH QEMU_ARCH_PPC
#elif defined(TARGET_S390X)
@@ -161,14 +171,34 @@ static int is_dup_page(uint8_t *page)
return 1;
}
+static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
+ int cont, int flag)
+{
+ qemu_put_be64(f, offset | cont | flag);
+ if (!cont) {
+ qemu_put_byte(f, strlen(block->idstr));
+ qemu_put_buffer(f, (uint8_t *)block->idstr,
+ strlen(block->idstr));
+ }
+
+}
+
static RAMBlock *last_block;
static ram_addr_t last_offset;
+/*
+ * ram_save_block: Writes a page of memory to the stream f
+ *
+ * Returns: 0: if the page hasn't changed
+ * -1: if there are no more dirty pages
+ * n: the number of bytes written otherwise
+ */
+
static int ram_save_block(QEMUFile *f)
{
RAMBlock *block = last_block;
ram_addr_t offset = last_offset;
- int bytes_sent = 0;
+ int bytes_sent = -1;
MemoryRegion *mr;
if (!block)
@@ -187,21 +217,11 @@ static int ram_save_block(QEMUFile *f)
p = memory_region_get_ram_ptr(mr) + offset;
if (is_dup_page(p)) {
- qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_COMPRESS);
- if (!cont) {
- qemu_put_byte(f, strlen(block->idstr));
- qemu_put_buffer(f, (uint8_t *)block->idstr,
- strlen(block->idstr));
- }
+ save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
qemu_put_byte(f, *p);
bytes_sent = 1;
} else {
- qemu_put_be64(f, offset | cont | RAM_SAVE_FLAG_PAGE);
- if (!cont) {
- qemu_put_byte(f, strlen(block->idstr));
- qemu_put_buffer(f, (uint8_t *)block->idstr,
- strlen(block->idstr));
- }
+ save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
bytes_sent = TARGET_PAGE_SIZE;
}
@@ -228,20 +248,7 @@ static uint64_t bytes_transferred;
static ram_addr_t ram_save_remaining(void)
{
- RAMBlock *block;
- ram_addr_t count = 0;
-
- QLIST_FOREACH(block, &ram_list.blocks, next) {
- ram_addr_t addr;
- for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
- if (memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
- DIRTY_MEMORY_MIGRATION)) {
- count++;
- }
- }
- }
-
- return count;
+ return ram_list.dirty_pages;
}
uint64_t ram_bytes_remaining(void)
@@ -294,60 +301,88 @@ static void sort_ram_list(void)
g_free(blocks);
}
-int ram_save_live(QEMUFile *f, int stage, void *opaque)
+static void migration_end(void)
{
- ram_addr_t addr;
- uint64_t bytes_transferred_last;
- double bwidth = 0;
- uint64_t expected_time = 0;
- int ret;
+ memory_global_dirty_log_stop();
+}
- if (stage < 0) {
- memory_global_dirty_log_stop();
- return 0;
- }
+static void ram_migration_cancel(void *opaque)
+{
+ migration_end();
+}
- memory_global_sync_dirty_bitmap(get_system_memory());
+#define MAX_WAIT 50 /* ms, half buffered_file limit */
- if (stage == 1) {
- RAMBlock *block;
- bytes_transferred = 0;
- last_block = NULL;
- last_offset = 0;
- sort_ram_list();
-
- /* Make sure all dirty bits are set */
- QLIST_FOREACH(block, &ram_list.blocks, next) {
- for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
- if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
- DIRTY_MEMORY_MIGRATION)) {
- memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
- }
+static int ram_save_setup(QEMUFile *f, void *opaque)
+{
+ ram_addr_t addr;
+ RAMBlock *block;
+
+ bytes_transferred = 0;
+ last_block = NULL;
+ last_offset = 0;
+ sort_ram_list();
+
+ /* Make sure all dirty bits are set */
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
+ if (!memory_region_get_dirty(block->mr, addr, TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ memory_region_set_dirty(block->mr, addr, TARGET_PAGE_SIZE);
}
}
+ }
- memory_global_dirty_log_start();
+ memory_global_dirty_log_start();
- qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
+ qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- QLIST_FOREACH(block, &ram_list.blocks, next) {
- qemu_put_byte(f, strlen(block->idstr));
- qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
- qemu_put_be64(f, block->length);
- }
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ qemu_put_byte(f, strlen(block->idstr));
+ qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
+ qemu_put_be64(f, block->length);
}
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+ return 0;
+}
+
+static int ram_save_iterate(QEMUFile *f, void *opaque)
+{
+ uint64_t bytes_transferred_last;
+ double bwidth = 0;
+ int ret;
+ int i;
+ uint64_t expected_time;
+
bytes_transferred_last = bytes_transferred;
bwidth = qemu_get_clock_ns(rt_clock);
+ i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
bytes_sent = ram_save_block(f);
- bytes_transferred += bytes_sent;
- if (bytes_sent == 0) { /* no more blocks */
+ /* no more blocks to send */
+ if (bytes_sent < 0) {
break;
}
+ bytes_transferred += bytes_sent;
+ /* we want to check already on the first iteration, in case the initial
+ dirty bitmap sync made it slow.
+ qemu_get_clock_ns() is a bit expensive, so we only check once every
+ few iterations
+ */
+ if ((i & 63) == 0) {
+ uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
+ if (t1 > MAX_WAIT) {
+ DPRINTF("big wait: " PRIu64 " milliseconds, %d iterations\n",
+ t1, i);
+ break;
+ }
+ }
+ i++;
}
if (ret < 0) {
@@ -363,22 +398,44 @@ int ram_save_live(QEMUFile *f, int stage, void *opaque)
bwidth = 0.000001;
}
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+ expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+
+ DPRINTF("ram_save_live: expected(" PRIu64 ") <= max(" PRIu64 ")?\n",
+ expected_time, migrate_max_downtime());
+
+ if (expected_time <= migrate_max_downtime()) {
+ memory_global_sync_dirty_bitmap(get_system_memory());
+ expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+
+ return expected_time <= migrate_max_downtime();
+ }
+ return 0;
+}
+
+static int ram_save_complete(QEMUFile *f, void *opaque)
+{
+ memory_global_sync_dirty_bitmap(get_system_memory());
+
/* try transferring iterative blocks of memory */
- if (stage == 3) {
+
+ /* flush all remaining blocks regardless of rate limiting */
+ while (true) {
int bytes_sent;
- /* flush all remaining blocks regardless of rate limiting */
- while ((bytes_sent = ram_save_block(f)) != 0) {
- bytes_transferred += bytes_sent;
+ bytes_sent = ram_save_block(f);
+ /* no more blocks to send */
+ if (bytes_sent < 0) {
+ break;
}
- memory_global_dirty_log_stop();
+ bytes_transferred += bytes_sent;
}
+ memory_global_dirty_log_stop();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
-
- return (stage == 2) && (expected_time <= migrate_max_downtime());
+ return 0;
}
static inline void *host_from_stream_offset(QEMUFile *f,
@@ -411,11 +468,14 @@ static inline void *host_from_stream_offset(QEMUFile *f,
return NULL;
}
-int ram_load(QEMUFile *f, void *opaque, int version_id)
+static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
ram_addr_t addr;
- int flags;
+ int flags, ret = 0;
int error;
+ static uint64_t seq_iter;
+
+ seq_iter++;
if (version_id < 4 || version_id > 4) {
return -EINVAL;
@@ -445,8 +505,10 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
QLIST_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
- if (block->length != length)
- return -EINVAL;
+ if (block->length != length) {
+ ret = -EINVAL;
+ goto done;
+ }
break;
}
}
@@ -454,7 +516,8 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
if (!block) {
fprintf(stderr, "Unknown ramblock \"%s\", cannot "
"accept migration\n", id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto done;
}
total_ram_bytes -= length;
@@ -483,18 +546,33 @@ int ram_load(QEMUFile *f, void *opaque, int version_id)
void *host;
host = host_from_stream_offset(f, addr, flags);
+ if (!host) {
+ return -EINVAL;
+ }
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
}
error = qemu_file_get_error(f);
if (error) {
- return error;
+ ret = error;
+ goto done;
}
} while (!(flags & RAM_SAVE_FLAG_EOS));
- return 0;
+done:
+ DPRINTF("Completed load of VM with exit code %d seq iteration " PRIu64 "\n",
+ ret, seq_iter);
+ return ret;
}
+SaveVMHandlers savevm_ram_handlers = {
+ .save_live_setup = ram_save_setup,
+ .save_live_iterate = ram_save_iterate,
+ .save_live_complete = ram_save_complete,
+ .load_state = ram_load,
+ .cancel = ram_migration_cancel,
+};
+
#ifdef HAS_AUDIO
struct soundhw {
const char *name;
@@ -602,7 +680,7 @@ void select_soundhw(const char *optarg)
{
struct soundhw *c;
- if (*optarg == '?') {
+ if (is_help_option(optarg)) {
show_valid_cards:
printf("Valid sound card names (comma separated):\n");
@@ -610,7 +688,7 @@ void select_soundhw(const char *optarg)
printf ("%-11s %s\n", c->name, c->descr);
}
printf("\n-soundhw all will enable all of the above\n");
- exit(*optarg != '?');
+ exit(!is_help_option(optarg));
}
else {
size_t l;
diff --git a/arch_init.h b/arch_init.h
index c7cb94a932..3dfea3b4f3 100644
--- a/arch_init.h
+++ b/arch_init.h
@@ -16,6 +16,7 @@ enum {
QEMU_ARCH_SH4 = 1024,
QEMU_ARCH_SPARC = 2048,
QEMU_ARCH_XTENSA = 4096,
+ QEMU_ARCH_OPENRISC = 8192,
};
extern const uint32_t arch_type;
diff --git a/audio/audio.c b/audio/audio.c
index 583ee51eab..1c7738930b 100644
--- a/audio/audio.c
+++ b/audio/audio.c
@@ -818,6 +818,7 @@ static int audio_attach_capture (HWVoiceOut *hw)
sw->active = hw->enabled;
sw->conv = noop_conv;
sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq;
+ sw->vol = nominal_volume;
sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq);
if (!sw->rate) {
dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw));
diff --git a/bitops.h b/bitops.h
index 07d1a0638f..74e14e5724 100644
--- a/bitops.h
+++ b/bitops.h
@@ -114,10 +114,10 @@ static inline unsigned long ffz(unsigned long word)
* @nr: the bit to set
* @addr: the address to start counting from
*/
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
*p |= mask;
}
@@ -127,10 +127,10 @@ static inline void set_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to clear
* @addr: Address to start counting from
*/
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
*p &= ~mask;
}
@@ -140,10 +140,10 @@ static inline void clear_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to change
* @addr: Address to start counting from
*/
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
*p ^= mask;
}
@@ -153,10 +153,10 @@ static inline void change_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to set
* @addr: Address to count from
*/
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
unsigned long old = *p;
*p = old | mask;
@@ -168,10 +168,10 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to clear
* @addr: Address to count from
*/
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
unsigned long old = *p;
*p = old & ~mask;
@@ -183,10 +183,10 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
* @nr: Bit to change
* @addr: Address to count from
*/
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(int nr, unsigned long *addr)
{
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long *p = addr + BIT_WORD(nr);
unsigned long old = *p;
*p = old ^ mask;
@@ -198,7 +198,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* @nr: bit number to test
* @addr: Address to start counting from
*/
-static inline int test_bit(int nr, const volatile unsigned long *addr)
+static inline int test_bit(int nr, const unsigned long *addr)
{
return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
@@ -269,4 +269,94 @@ static inline unsigned long hweight_long(unsigned long w)
return count;
}
+/**
+ * extract32:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 32 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 32 bit word. It is valid to request that
+ * all 32 bits are returned (ie @length 32 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint32_t extract32(uint32_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 32 - start);
+ return (value >> start) & (~0U >> (32 - length));
+}
+
+/**
+ * extract64:
+ * @value: the value to extract the bit field from
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ *
+ * Extract from the 64 bit input @value the bit field specified by the
+ * @start and @length parameters, and return it. The bit field must
+ * lie entirely within the 64 bit word. It is valid to request that
+ * all 64 bits are returned (ie @length 64 and @start 0).
+ *
+ * Returns: the value of the bit field extracted from the input value.
+ */
+static inline uint64_t extract64(uint64_t value, int start, int length)
+{
+ assert(start >= 0 && length > 0 && length <= 64 - start);
+ return (value >> start) & (~0ULL >> (64 - length));
+}
+
+/**
+ * deposit32:
+ * @value: initial value to insert bit field into
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ * @fieldval: the value to insert into the bit field
+ *
+ * Deposit @fieldval into the 32 bit @value at the bit field specified
+ * by the @start and @length parameters, and return the modified
+ * @value. Bits of @value outside the bit field are not modified.
+ * Bits of @fieldval above the least significant @length bits are
+ * ignored. The bit field must lie entirely within the 32 bit word.
+ * It is valid to request that all 32 bits are modified (ie @length
+ * 32 and @start 0).
+ *
+ * Returns: the modified @value.
+ */
+static inline uint32_t deposit32(uint32_t value, int start, int length,
+ uint32_t fieldval)
+{
+ uint32_t mask;
+ assert(start >= 0 && length > 0 && length <= 32 - start);
+ mask = (~0U >> (32 - length)) << start;
+ return (value & ~mask) | ((fieldval << start) & mask);
+}
+
+/**
+ * deposit64:
+ * @value: initial value to insert bit field into
+ * @start: the lowest bit in the bit field (numbered from 0)
+ * @length: the length of the bit field
+ * @fieldval: the value to insert into the bit field
+ *
+ * Deposit @fieldval into the 64 bit @value at the bit field specified
+ * by the @start and @length parameters, and return the modified
+ * @value. Bits of @value outside the bit field are not modified.
+ * Bits of @fieldval above the least significant @length bits are
+ * ignored. The bit field must lie entirely within the 64 bit word.
+ * It is valid to request that all 64 bits are modified (ie @length
+ * 64 and @start 0).
+ *
+ * Returns: the modified @value.
+ */
+static inline uint64_t deposit64(uint64_t value, int start, int length,
+ uint64_t fieldval)
+{
+ uint64_t mask;
+ assert(start >= 0 && length > 0 && length <= 64 - start);
+ mask = (~0ULL >> (64 - length)) << start;
+ return (value & ~mask) | ((fieldval << start) & mask);
+}
+
#endif
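
The extract32()/deposit32() helpers added above replace open-coded shift-and-mask sequences when emulating device registers. A small sketch of their use follows; the register layout is invented for illustration and is not taken from any QEMU device.

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include "bitops.h"              /* the header patched above */

int main(void)
{
    /* Hypothetical 32-bit device register: bits [7:4] hold a 4-bit field. */
    uint32_t reg = 0x12345678;

    uint32_t field = extract32(reg, 4, 4);   /* field == 0x7 */
    reg = deposit32(reg, 4, 4, 0xa);         /* reg   == 0x123456a8 */

    printf("field=%" PRIx32 " reg=%08" PRIx32 "\n", field, reg);
    return 0;
}
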
diff --git a/block-migration.c b/block-migration.c
index fd2ffff0d5..7def8ab197 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -536,30 +536,44 @@ static void blk_mig_cleanup(void)
}
}
-static int block_save_live(QEMUFile *f, int stage, void *opaque)
+static void block_migration_cancel(void *opaque)
+{
+ blk_mig_cleanup();
+}
+
+static int block_save_setup(QEMUFile *f, void *opaque)
{
int ret;
- DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
- stage, block_mig_state.submitted, block_mig_state.transferred);
+ DPRINTF("Enter save live setup submitted %d transferred %d\n",
+ block_mig_state.submitted, block_mig_state.transferred);
+
+ init_blk_migration(f);
+
+ /* start track dirty blocks */
+ set_dirty_tracking(1);
- if (stage < 0) {
+ flush_blks(f);
+
+ ret = qemu_file_get_error(f);
+ if (ret) {
blk_mig_cleanup();
- return 0;
+ return ret;
}
- if (block_mig_state.blk_enable != 1) {
- /* no need to migrate storage */
- qemu_put_be64(f, BLK_MIG_FLAG_EOS);
- return 1;
- }
+ blk_mig_reset_dirty_cursor();
- if (stage == 1) {
- init_blk_migration(f);
+ qemu_put_be64(f, BLK_MIG_FLAG_EOS);
- /* start track dirty blocks */
- set_dirty_tracking(1);
- }
+ return 0;
+}
+
+static int block_save_iterate(QEMUFile *f, void *opaque)
+{
+ int ret;
+
+ DPRINTF("Enter save live iterate submitted %d transferred %d\n",
+ block_mig_state.submitted, block_mig_state.transferred);
flush_blks(f);
@@ -571,56 +585,76 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
blk_mig_reset_dirty_cursor();
- if (stage == 2) {
- /* control the rate of transfer */
- while ((block_mig_state.submitted +
- block_mig_state.read_done) * BLOCK_SIZE <
- qemu_file_get_rate_limit(f)) {
- if (block_mig_state.bulk_completed == 0) {
- /* first finish the bulk phase */
- if (blk_mig_save_bulked_block(f) == 0) {
- /* finished saving bulk on all devices */
- block_mig_state.bulk_completed = 1;
- }
- } else {
- if (blk_mig_save_dirty_block(f, 1) == 0) {
- /* no more dirty blocks */
- break;
- }
+ /* control the rate of transfer */
+ while ((block_mig_state.submitted +
+ block_mig_state.read_done) * BLOCK_SIZE <
+ qemu_file_get_rate_limit(f)) {
+ if (block_mig_state.bulk_completed == 0) {
+ /* first finish the bulk phase */
+ if (blk_mig_save_bulked_block(f) == 0) {
+ /* finished saving bulk on all devices */
+ block_mig_state.bulk_completed = 1;
+ }
+ } else {
+ if (blk_mig_save_dirty_block(f, 1) == 0) {
+ /* no more dirty blocks */
+ break;
}
}
+ }
- flush_blks(f);
+ flush_blks(f);
- ret = qemu_file_get_error(f);
- if (ret) {
- blk_mig_cleanup();
- return ret;
- }
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ blk_mig_cleanup();
+ return ret;
}
- if (stage == 3) {
- /* we know for sure that save bulk is completed and
- all async read completed */
- assert(block_mig_state.submitted == 0);
+ qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+ return is_stage2_completed();
+}
+
+static int block_save_complete(QEMUFile *f, void *opaque)
+{
+ int ret;
+
+ DPRINTF("Enter save live complete submitted %d transferred %d\n",
+ block_mig_state.submitted, block_mig_state.transferred);
+
+ flush_blks(f);
- while (blk_mig_save_dirty_block(f, 0) != 0);
+ ret = qemu_file_get_error(f);
+ if (ret) {
blk_mig_cleanup();
+ return ret;
+ }
- /* report completion */
- qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+ blk_mig_reset_dirty_cursor();
- ret = qemu_file_get_error(f);
- if (ret) {
- return ret;
- }
+ /* we know for sure that save bulk is completed and
+ all async read completed */
+ assert(block_mig_state.submitted == 0);
- DPRINTF("Block migration completed\n");
+ while (blk_mig_save_dirty_block(f, 0) != 0) {
+ /* Do nothing */
+ }
+ blk_mig_cleanup();
+
+ /* report completion */
+ qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+
+ ret = qemu_file_get_error(f);
+ if (ret) {
+ return ret;
}
+ DPRINTF("Block migration completed\n");
+
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
- return ((stage == 2) && is_stage2_completed());
+ return 0;
}
static int block_load(QEMUFile *f, void *opaque, int version_id)
@@ -700,20 +734,35 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void block_set_params(int blk_enable, int shared_base, void *opaque)
+static void block_set_params(const MigrationParams *params, void *opaque)
{
- block_mig_state.blk_enable = blk_enable;
- block_mig_state.shared_base = shared_base;
+ block_mig_state.blk_enable = params->blk;
+ block_mig_state.shared_base = params->shared;
/* shared base means that blk_enable = 1 */
- block_mig_state.blk_enable |= shared_base;
+ block_mig_state.blk_enable |= params->shared;
}
+static bool block_is_active(void *opaque)
+{
+ return block_mig_state.blk_enable == 1;
+}
+
+SaveVMHandlers savevm_block_handlers = {
+ .set_params = block_set_params,
+ .save_live_setup = block_save_setup,
+ .save_live_iterate = block_save_iterate,
+ .save_live_complete = block_save_complete,
+ .load_state = block_load,
+ .cancel = block_migration_cancel,
+ .is_active = block_is_active,
+};
+
void blk_mig_init(void)
{
QSIMPLEQ_INIT(&block_mig_state.bmds_list);
QSIMPLEQ_INIT(&block_mig_state.blk_list);
- register_savevm_live(NULL, "block", 0, 1, block_set_params,
- block_save_live, NULL, block_load, &block_mig_state);
+ register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
+ &block_mig_state);
}
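
With the conversion above, block migration also gains an is_active() hook, so the savevm core can skip the block handlers entirely unless block migration was requested, and block_set_params() now receives a MigrationParams structure instead of separate blk_enable/shared_base ints. A hedged sketch of filling that structure follows; the .blk/.shared field names come from the params->blk/params->shared uses above, while the bool initializers and the helper function are assumptions.

/* Illustrative only: request a full block migration.  With .blk set,
 * block_is_active() above returns true and the block handlers run. */
static void request_full_block_migration(void *opaque)
{
    MigrationParams params = {
        .blk    = true,     /* migrate storage (the old blk_enable)   */
        .shared = false,    /* no shared base image (old shared_base) */
    };

    block_set_params(&params, opaque);
}
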
diff --git a/block.c b/block.c
index 0acdcac158..24323c11d0 100644
--- a/block.c
+++ b/block.c
@@ -971,101 +971,124 @@ static void bdrv_rebind(BlockDriverState *bs)
}
}
+static void bdrv_move_feature_fields(BlockDriverState *bs_dest,
+ BlockDriverState *bs_src)
+{
+ /* move some fields that need to stay attached to the device */
+ bs_dest->open_flags = bs_src->open_flags;
+
+ /* dev info */
+ bs_dest->dev_ops = bs_src->dev_ops;
+ bs_dest->dev_opaque = bs_src->dev_opaque;
+ bs_dest->dev = bs_src->dev;
+ bs_dest->buffer_alignment = bs_src->buffer_alignment;
+ bs_dest->copy_on_read = bs_src->copy_on_read;
+
+ bs_dest->enable_write_cache = bs_src->enable_write_cache;
+
+ /* i/o timing parameters */
+ bs_dest->slice_time = bs_src->slice_time;
+ bs_dest->slice_start = bs_src->slice_start;
+ bs_dest->slice_end = bs_src->slice_end;
+ bs_dest->io_limits = bs_src->io_limits;
+ bs_dest->io_base = bs_src->io_base;
+ bs_dest->throttled_reqs = bs_src->throttled_reqs;
+ bs_dest->block_timer = bs_src->block_timer;
+ bs_dest->io_limits_enabled = bs_src->io_limits_enabled;
+
+ /* r/w error */
+ bs_dest->on_read_error = bs_src->on_read_error;
+ bs_dest->on_write_error = bs_src->on_write_error;
+
+ /* i/o status */
+ bs_dest->iostatus_enabled = bs_src->iostatus_enabled;
+ bs_dest->iostatus = bs_src->iostatus;
+
+ /* dirty bitmap */
+ bs_dest->dirty_count = bs_src->dirty_count;
+ bs_dest->dirty_bitmap = bs_src->dirty_bitmap;
+
+ /* job */
+ bs_dest->in_use = bs_src->in_use;
+ bs_dest->job = bs_src->job;
+
+ /* keep the same entry in bdrv_states */
+ pstrcpy(bs_dest->device_name, sizeof(bs_dest->device_name),
+ bs_src->device_name);
+ bs_dest->list = bs_src->list;
+}
+
/*
- * Add new bs contents at the top of an image chain while the chain is
- * live, while keeping required fields on the top layer.
+ * Swap bs contents for two image chains while they are live,
+ * while keeping required fields on the BlockDriverState that is
+ * actually attached to a device.
*
* This will modify the BlockDriverState fields, and swap contents
- * between bs_new and bs_top. Both bs_new and bs_top are modified.
+ * between bs_new and bs_old. Both bs_new and bs_old are modified.
*
* bs_new is required to be anonymous.
*
* This function does not create any image files.
*/
-void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
+void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old)
{
BlockDriverState tmp;
- /* bs_new must be anonymous */
+ /* bs_new must be anonymous and shouldn't have anything fancy enabled */
assert(bs_new->device_name[0] == '\0');
+ assert(bs_new->dirty_bitmap == NULL);
+ assert(bs_new->job == NULL);
+ assert(bs_new->dev == NULL);
+ assert(bs_new->in_use == 0);
+ assert(bs_new->io_limits_enabled == false);
+ assert(bs_new->block_timer == NULL);
tmp = *bs_new;
+ *bs_new = *bs_old;
+ *bs_old = tmp;
- /* there are some fields that need to stay on the top layer: */
- tmp.open_flags = bs_top->open_flags;
+ /* there are some fields that should not be swapped, move them back */
+ bdrv_move_feature_fields(&tmp, bs_old);
+ bdrv_move_feature_fields(bs_old, bs_new);
+ bdrv_move_feature_fields(bs_new, &tmp);
- /* dev info */
- tmp.dev_ops = bs_top->dev_ops;
- tmp.dev_opaque = bs_top->dev_opaque;
- tmp.dev = bs_top->dev;
- tmp.buffer_alignment = bs_top->buffer_alignment;
- tmp.copy_on_read = bs_top->copy_on_read;
-
- tmp.enable_write_cache = bs_top->enable_write_cache;
-
- /* i/o timing parameters */
- tmp.slice_time = bs_top->slice_time;
- tmp.slice_start = bs_top->slice_start;
- tmp.slice_end = bs_top->slice_end;
- tmp.io_limits = bs_top->io_limits;
- tmp.io_base = bs_top->io_base;
- tmp.throttled_reqs = bs_top->throttled_reqs;
- tmp.block_timer = bs_top->block_timer;
- tmp.io_limits_enabled = bs_top->io_limits_enabled;
-
- /* geometry */
- tmp.cyls = bs_top->cyls;
- tmp.heads = bs_top->heads;
- tmp.secs = bs_top->secs;
- tmp.translation = bs_top->translation;
+ /* bs_new shouldn't be in bdrv_states even after the swap! */
+ assert(bs_new->device_name[0] == '\0');
- /* r/w error */
- tmp.on_read_error = bs_top->on_read_error;
- tmp.on_write_error = bs_top->on_write_error;
+ /* Check a few fields that should remain attached to the device */
+ assert(bs_new->dev == NULL);
+ assert(bs_new->job == NULL);
+ assert(bs_new->in_use == 0);
+ assert(bs_new->io_limits_enabled == false);
+ assert(bs_new->block_timer == NULL);
- /* i/o status */
- tmp.iostatus_enabled = bs_top->iostatus_enabled;
- tmp.iostatus = bs_top->iostatus;
+ bdrv_rebind(bs_new);
+ bdrv_rebind(bs_old);
+}
- /* keep the same entry in bdrv_states */
- pstrcpy(tmp.device_name, sizeof(tmp.device_name), bs_top->device_name);
- tmp.list = bs_top->list;
+/*
+ * Add new bs contents at the top of an image chain while the chain is
+ * live, while keeping required fields on the top layer.
+ *
+ * This will modify the BlockDriverState fields, and swap contents
+ * between bs_new and bs_top. Both bs_new and bs_top are modified.
+ *
+ * bs_new is required to be anonymous.
+ *
+ * This function does not create any image files.
+ */
+void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top)
+{
+ bdrv_swap(bs_new, bs_top);
/* The contents of 'tmp' will become bs_top, as we are
* swapping bs_new and bs_top contents. */
- tmp.backing_hd = bs_new;
- pstrcpy(tmp.backing_file, sizeof(tmp.backing_file), bs_top->filename);
- pstrcpy(tmp.backing_format, sizeof(tmp.backing_format),
- bs_top->drv ? bs_top->drv->format_name : "");
-
- /* swap contents of the fixed new bs and the current top */
- *bs_new = *bs_top;
- *bs_top = tmp;
-
- /* device_name[] was carried over from the old bs_top. bs_new
- * shouldn't be in bdrv_states, so we need to make device_name[]
- * reflect the anonymity of bs_new
- */
- bs_new->device_name[0] = '\0';
-
- /* clear the copied fields in the new backing file */
- bdrv_detach_dev(bs_new, bs_new->dev);
-
- qemu_co_queue_init(&bs_new->throttled_reqs);
- memset(&bs_new->io_base, 0, sizeof(bs_new->io_base));
- memset(&bs_new->io_limits, 0, sizeof(bs_new->io_limits));
- bdrv_iostatus_disable(bs_new);
-
- /* we don't use bdrv_io_limits_disable() for this, because we don't want
- * to affect or delete the block_timer, as it has been moved to bs_top */
- bs_new->io_limits_enabled = false;
- bs_new->block_timer = NULL;
- bs_new->slice_time = 0;
- bs_new->slice_start = 0;
- bs_new->slice_end = 0;
-
- bdrv_rebind(bs_new);
- bdrv_rebind(bs_top);
+ bs_top->backing_hd = bs_new;
+ bs_top->open_flags &= ~BDRV_O_NO_BACKING;
+ pstrcpy(bs_top->backing_file, sizeof(bs_top->backing_file),
+ bs_new->filename);
+ pstrcpy(bs_top->backing_format, sizeof(bs_top->backing_format),
+ bs_new->drv ? bs_new->drv->format_name : "");
}
void bdrv_delete(BlockDriverState *bs)
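
bdrv_append() is now a thin wrapper around the new bdrv_swap(): the contents of the two BlockDriverStates are exchanged, while bdrv_move_feature_fields() keeps the device-facing state (dev, job, I/O limits, dirty bitmap, device_name) on the BDS the guest is attached to. A hedged sketch of the intended call pattern follows, installing a fresh anonymous image on top of a live chain; the bdrv_new()/bdrv_open() calls reflect the block API of this period and are assumptions, not part of this hunk.

/* Sketch: put a freshly opened image on top of a live chain.
 * bs_top is the BlockDriverState the guest device is attached to. */
static int install_new_top_image(BlockDriverState *bs_top,
                                 const char *filename, BlockDriver *drv)
{
    /* must be anonymous: bdrv_append() asserts device_name[0] == '\0' */
    BlockDriverState *bs_new = bdrv_new("");
    int ret = bdrv_open(bs_new, filename, BDRV_O_RDWR | BDRV_O_NO_BACKING, drv);
    if (ret < 0) {
        bdrv_delete(bs_new);
        return ret;
    }

    /* After bdrv_append(), bs_top carries the new image's contents while
     * keeping its device bindings; bs_new now holds the old top and is
     * wired up as bs_top's backing file. */
    bdrv_append(bs_new, bs_top);
    return 0;
}
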
@@ -1610,6 +1633,20 @@ int bdrv_read(BlockDriverState *bs, int64_t sector_num,
return bdrv_rw_co(bs, sector_num, buf, nb_sectors, false);
}
+/* Just like bdrv_read(), but with I/O throttling temporarily disabled */
+int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
+ uint8_t *buf, int nb_sectors)
+{
+ bool enabled;
+ int ret;
+
+ enabled = bs->io_limits_enabled;
+ bs->io_limits_enabled = false;
+ ret = bdrv_read(bs, sector_num, buf, nb_sectors);
+ bs->io_limits_enabled = enabled;
+ return ret;
+}
+
#define BITS_PER_LONG (sizeof(unsigned long) * 8)
static void set_dirty_bitmap(BlockDriverState *bs, int64_t sector_num,
@@ -1828,8 +1865,8 @@ static int coroutine_fn bdrv_co_do_copy_on_readv(BlockDriverState *bs,
}
skip_bytes = (sector_num - cluster_sector_num) * BDRV_SECTOR_SIZE;
- qemu_iovec_from_buffer(qiov, bounce_buffer + skip_bytes,
- nb_sectors * BDRV_SECTOR_SIZE);
+ qemu_iovec_from_buf(qiov, 0, bounce_buffer + skip_bytes,
+ nb_sectors * BDRV_SECTOR_SIZE);
err:
qemu_vfree(bounce_buffer);
@@ -2089,152 +2126,6 @@ void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr)
*nb_sectors_ptr = length;
}
-struct partition {
- uint8_t boot_ind; /* 0x80 - active */
- uint8_t head; /* starting head */
- uint8_t sector; /* starting sector */
- uint8_t cyl; /* starting cylinder */
- uint8_t sys_ind; /* What partition type */
- uint8_t end_head; /* end head */
- uint8_t end_sector; /* end sector */
- uint8_t end_cyl; /* end cylinder */
- uint32_t start_sect; /* starting sector counting from 0 */
- uint32_t nr_sects; /* nr of sectors in partition */
-} QEMU_PACKED;
-
-/* try to guess the disk logical geometry from the MSDOS partition table. Return 0 if OK, -1 if could not guess */
-static int guess_disk_lchs(BlockDriverState *bs,
- int *pcylinders, int *pheads, int *psectors)
-{
- uint8_t buf[BDRV_SECTOR_SIZE];
- int ret, i, heads, sectors, cylinders;
- struct partition *p;
- uint32_t nr_sects;
- uint64_t nb_sectors;
- bool enabled;
-
- bdrv_get_geometry(bs, &nb_sectors);
-
- /**
- * The function will be invoked during startup not only in sync I/O mode,
- * but also in async I/O mode. So the I/O throttling function has to
- * be disabled temporarily here, not permanently.
- */
- enabled = bs->io_limits_enabled;
- bs->io_limits_enabled = false;
- ret = bdrv_read(bs, 0, buf, 1);
- bs->io_limits_enabled = enabled;
- if (ret < 0)
- return -1;
- /* test msdos magic */
- if (buf[510] != 0x55 || buf[511] != 0xaa)
- return -1;
- for(i = 0; i < 4; i++) {
- p = ((struct partition *)(buf + 0x1be)) + i;
- nr_sects = le32_to_cpu(p->nr_sects);
- if (nr_sects && p->end_head) {
- /* We make the assumption that the partition terminates on
- a cylinder boundary */
- heads = p->end_head + 1;
- sectors = p->end_sector & 63;
- if (sectors == 0)
- continue;
- cylinders = nb_sectors / (heads * sectors);
- if (cylinders < 1 || cylinders > 16383)
- continue;
- *pheads = heads;
- *psectors = sectors;
- *pcylinders = cylinders;
-#if 0
- printf("guessed geometry: LCHS=%d %d %d\n",
- cylinders, heads, sectors);
-#endif
- return 0;
- }
- }
- return -1;
-}
-
-void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs)
-{
- int translation, lba_detected = 0;
- int cylinders, heads, secs;
- uint64_t nb_sectors;
-
- /* if a geometry hint is available, use it */
- bdrv_get_geometry(bs, &nb_sectors);
- bdrv_get_geometry_hint(bs, &cylinders, &heads, &secs);
- translation = bdrv_get_translation_hint(bs);
- if (cylinders != 0) {
- *pcyls = cylinders;
- *pheads = heads;
- *psecs = secs;
- } else {
- if (guess_disk_lchs(bs, &cylinders, &heads, &secs) == 0) {
- if (heads > 16) {
- /* if heads > 16, it means that a BIOS LBA
- translation was active, so the default
- hardware geometry is OK */
- lba_detected = 1;
- goto default_geometry;
- } else {
- *pcyls = cylinders;
- *pheads = heads;
- *psecs = secs;
- /* disable any translation to be in sync with
- the logical geometry */
- if (translation == BIOS_ATA_TRANSLATION_AUTO) {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_NONE);
- }
- }
- } else {
- default_geometry:
- /* if no geometry, use a standard physical disk geometry */
- cylinders = nb_sectors / (16 * 63);
-
- if (cylinders > 16383)
- cylinders = 16383;
- else if (cylinders < 2)
- cylinders = 2;
- *pcyls = cylinders;
- *pheads = 16;
- *psecs = 63;
- if ((lba_detected == 1) && (translation == BIOS_ATA_TRANSLATION_AUTO)) {
- if ((*pcyls * *pheads) <= 131072) {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_LARGE);
- } else {
- bdrv_set_translation_hint(bs,
- BIOS_ATA_TRANSLATION_LBA);
- }
- }
- }
- bdrv_set_geometry_hint(bs, *pcyls, *pheads, *psecs);
- }
-}
-
-void bdrv_set_geometry_hint(BlockDriverState *bs,
- int cyls, int heads, int secs)
-{
- bs->cyls = cyls;
- bs->heads = heads;
- bs->secs = secs;
-}
-
-void bdrv_set_translation_hint(BlockDriverState *bs, int translation)
-{
- bs->translation = translation;
-}
-
-void bdrv_get_geometry_hint(BlockDriverState *bs,
- int *pcyls, int *pheads, int *psecs)
-{
- *pcyls = bs->cyls;
- *pheads = bs->heads;
- *psecs = bs->secs;
-}
-
/* throttling disk io limits */
void bdrv_set_io_limits(BlockDriverState *bs,
BlockIOLimit *io_limits)
@@ -2243,118 +2134,6 @@ void bdrv_set_io_limits(BlockDriverState *bs,
bs->io_limits_enabled = bdrv_io_limits_enabled(bs);
}
-/* Recognize floppy formats */
-typedef struct FDFormat {
- FDriveType drive;
- uint8_t last_sect;
- uint8_t max_track;
- uint8_t max_head;
- FDriveRate rate;
-} FDFormat;
-
-static const FDFormat fd_formats[] = {
- /* First entry is default format */
- /* 1.44 MB 3"1/2 floppy disks */
- { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
- /* 2.88 MB 3"1/2 floppy disks */
- { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
- { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
- { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
- { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
- { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
- /* 720 kB 3"1/2 floppy disks */
- { FDRIVE_DRV_144, 9, 80, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
- /* 1.2 MB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
- { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
- /* 720 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 9, 80, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
- /* 360 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 9, 40, 1, FDRIVE_RATE_300K, },
- { FDRIVE_DRV_120, 9, 40, 0, FDRIVE_RATE_300K, },
- { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
- { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
- /* 320 kB 5"1/4 floppy disks */
- { FDRIVE_DRV_120, 8, 40, 1, FDRIVE_RATE_250K, },
- { FDRIVE_DRV_120, 8, 40, 0, FDRIVE_RATE_250K, },
- /* 360 kB must match 5"1/4 better than 3"1/2... */
- { FDRIVE_DRV_144, 9, 80, 0, FDRIVE_RATE_250K, },
- /* end */
- { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
-};
-
-void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
- int *max_track, int *last_sect,
- FDriveType drive_in, FDriveType *drive,
- FDriveRate *rate)
-{
- const FDFormat *parse;
- uint64_t nb_sectors, size;
- int i, first_match, match;
-
- bdrv_get_geometry_hint(bs, nb_heads, max_track, last_sect);
- if (*nb_heads != 0 && *max_track != 0 && *last_sect != 0) {
- /* User defined disk */
- *rate = FDRIVE_RATE_500K;
- } else {
- bdrv_get_geometry(bs, &nb_sectors);
- match = -1;
- first_match = -1;
- for (i = 0; ; i++) {
- parse = &fd_formats[i];
- if (parse->drive == FDRIVE_DRV_NONE) {
- break;
- }
- if (drive_in == parse->drive ||
- drive_in == FDRIVE_DRV_NONE) {
- size = (parse->max_head + 1) * parse->max_track *
- parse->last_sect;
- if (nb_sectors == size) {
- match = i;
- break;
- }
- if (first_match == -1) {
- first_match = i;
- }
- }
- }
- if (match == -1) {
- if (first_match == -1) {
- match = 1;
- } else {
- match = first_match;
- }
- parse = &fd_formats[match];
- }
- *nb_heads = parse->max_head + 1;
- *max_track = parse->max_track;
- *last_sect = parse->last_sect;
- *drive = parse->drive;
- *rate = parse->rate;
- }
-}
-
-int bdrv_get_translation_hint(BlockDriverState *bs)
-{
- return bs->translation;
-}
-
void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
BlockErrorAction on_write_error)
{
@@ -2671,6 +2450,9 @@ BlockInfoList *qmp_query_block(Error **errp)
info->value->inserted->backing_file = g_strdup(bs->backing_file);
}
+ info->value->inserted->backing_file_depth =
+ bdrv_get_backing_file_depth(bs);
+
if (bs->io_limits_enabled) {
info->value->inserted->bps =
bs->io_limits.bps[BLOCK_IO_LIMIT_TOTAL];
@@ -2830,7 +2612,7 @@ void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event)
return;
}
- return drv->bdrv_debug_event(bs, event);
+ drv->bdrv_debug_event(bs, event);
}
@@ -2975,6 +2757,19 @@ BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
return NULL;
}
+int bdrv_get_backing_file_depth(BlockDriverState *bs)
+{
+ if (!bs->drv) {
+ return 0;
+ }
+
+ if (!bs->backing_hd) {
+ return 0;
+ }
+
+ return 1 + bdrv_get_backing_file_depth(bs->backing_hd);
+}
+
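
bdrv_get_backing_file_depth() simply walks the backing_hd chain recursively, and the new backing_file_depth field in query-block reports the result. For illustration, with an assumed three-image chain (not part of this patch):

/*
 * base.qcow2 <- mid.qcow2 <- top.qcow2, opened so that
 * top->backing_hd == mid and mid->backing_hd == base:
 *
 *   bdrv_get_backing_file_depth(top)  == 2
 *   bdrv_get_backing_file_depth(mid)  == 1
 *   bdrv_get_backing_file_depth(base) == 0   (also 0 when bs->drv is NULL)
 */
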
#define NB_SUFFIXES 4
char *get_human_readable_size(char *buf, int buf_size, int64_t size)
@@ -3167,13 +2962,13 @@ static int multiwrite_merge(BlockDriverState *bs, BlockRequest *reqs,
// Add the first request to the merged one. If the requests are
// overlapping, drop the last sectors of the first request.
size = (reqs[i].sector - reqs[outidx].sector) << 9;
- qemu_iovec_concat(qiov, reqs[outidx].qiov, size);
+ qemu_iovec_concat(qiov, reqs[outidx].qiov, 0, size);
// We shouldn't need to add any zeros between the two requests
assert (reqs[i].sector <= oldreq_last);
// Add the second request
- qemu_iovec_concat(qiov, reqs[i].qiov, reqs[i].qiov->size);
+ qemu_iovec_concat(qiov, reqs[i].qiov, 0, reqs[i].qiov->size);
reqs[outidx].nb_sectors = qiov->size >> 9;
reqs[outidx].qiov = qiov;
@@ -3448,7 +3243,7 @@ static void bdrv_aio_bh_cb(void *opaque)
BlockDriverAIOCBSync *acb = opaque;
if (!acb->is_write)
- qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+ qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
qemu_vfree(acb->bounce);
acb->common.cb(acb->common.opaque, acb->ret);
qemu_bh_delete(acb->bh);
@@ -3474,7 +3269,7 @@ static BlockDriverAIOCB *bdrv_aio_rw_vector(BlockDriverState *bs,
acb->bh = qemu_bh_new(bdrv_aio_bh_cb, acb);
if (is_write) {
- qemu_iovec_to_buffer(acb->qiov, acb->bounce);
+ qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
acb->ret = bs->drv->bdrv_write(bs, sector_num, acb->bounce, nb_sectors);
} else {
acb->ret = bs->drv->bdrv_read(bs, sector_num, acb->bounce, nb_sectors);
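
The bounce-buffer hunks above are part of a tree-wide switch to iovec helpers that take an explicit offset and byte count. The correspondence used throughout this patch, with the signatures inferred from the call sites rather than from a header (so treat them as assumptions):

/*
 *   qemu_iovec_to_buffer(qiov, buf)
 *       -> qemu_iovec_to_buf(qiov, 0, buf, qiov->size)
 *   qemu_iovec_from_buffer(qiov, buf, bytes)
 *       -> qemu_iovec_from_buf(qiov, 0, buf, bytes)
 *   qemu_iovec_copy(dst, src, offset, bytes)
 *       -> qemu_iovec_concat(dst, src, offset, bytes)
 *   qemu_iovec_memset(qiov, c, bytes)              [old 3-argument form]
 *       -> qemu_iovec_memset(qiov, 0, c, bytes)
 *   qemu_iovec_memset_skip(qiov, c, bytes, skip)
 *       -> qemu_iovec_memset(qiov, skip, c, bytes)
 */
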
diff --git a/block.h b/block.h
index d135652902..650d872f46 100644
--- a/block.h
+++ b/block.h
@@ -122,6 +122,7 @@ int bdrv_create(BlockDriver *drv, const char* filename,
int bdrv_create_file(const char* filename, QEMUOptionParameter *options);
BlockDriverState *bdrv_new(const char *device_name);
void bdrv_make_anon(BlockDriverState *bs);
+void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
void bdrv_delete(BlockDriverState *bs);
int bdrv_parse_cache_flags(const char *mode, int *flags);
@@ -141,6 +142,8 @@ bool bdrv_dev_is_tray_open(BlockDriverState *bs);
bool bdrv_dev_is_medium_locked(BlockDriverState *bs);
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
uint8_t *buf, int nb_sectors);
+int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
+ uint8_t *buf, int nb_sectors);
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
const uint8_t *buf, int nb_sectors);
int bdrv_pread(BlockDriverState *bs, int64_t offset,
@@ -171,11 +174,11 @@ int coroutine_fn bdrv_co_is_allocated_above(BlockDriverState *top,
int nb_sectors, int *pnum);
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
const char *backing_file);
+int bdrv_get_backing_file_depth(BlockDriverState *bs);
int bdrv_truncate(BlockDriverState *bs, int64_t offset);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
-void bdrv_guess_geometry(BlockDriverState *bs, int *pcyls, int *pheads, int *psecs);
int bdrv_commit(BlockDriverState *bs);
int bdrv_commit_all(void);
int bdrv_change_backing_file(BlockDriverState *bs,
@@ -255,36 +258,6 @@ int bdrv_has_zero_init(BlockDriverState *bs);
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
int *pnum);
-#define BIOS_ATA_TRANSLATION_AUTO 0
-#define BIOS_ATA_TRANSLATION_NONE 1
-#define BIOS_ATA_TRANSLATION_LBA 2
-#define BIOS_ATA_TRANSLATION_LARGE 3
-#define BIOS_ATA_TRANSLATION_RECHS 4
-
-void bdrv_set_geometry_hint(BlockDriverState *bs,
- int cyls, int heads, int secs);
-void bdrv_set_translation_hint(BlockDriverState *bs, int translation);
-void bdrv_get_geometry_hint(BlockDriverState *bs,
- int *pcyls, int *pheads, int *psecs);
-typedef enum FDriveType {
- FDRIVE_DRV_144 = 0x00, /* 1.44 MB 3"5 drive */
- FDRIVE_DRV_288 = 0x01, /* 2.88 MB 3"5 drive */
- FDRIVE_DRV_120 = 0x02, /* 1.2 MB 5"25 drive */
- FDRIVE_DRV_NONE = 0x03, /* No drive connected */
-} FDriveType;
-
-typedef enum FDriveRate {
- FDRIVE_RATE_500K = 0x00, /* 500 Kbps */
- FDRIVE_RATE_300K = 0x01, /* 300 Kbps */
- FDRIVE_RATE_250K = 0x02, /* 250 Kbps */
- FDRIVE_RATE_1M = 0x03, /* 1 Mbps */
-} FDriveRate;
-
-void bdrv_get_floppy_geometry_hint(BlockDriverState *bs, int *nb_heads,
- int *max_track, int *last_sect,
- FDriveType drive_in, FDriveType *drive,
- FDriveRate *rate);
-int bdrv_get_translation_hint(BlockDriverState *bs);
void bdrv_set_on_error(BlockDriverState *bs, BlockErrorAction on_read_error,
BlockErrorAction on_write_error);
BlockErrorAction bdrv_get_on_error(BlockDriverState *bs, int is_read);
@@ -395,9 +368,7 @@ typedef enum {
BLKDBG_L2_ALLOC_COW_READ,
BLKDBG_L2_ALLOC_WRITE,
- BLKDBG_READ,
BLKDBG_READ_AIO,
- BLKDBG_READ_BACKING,
BLKDBG_READ_BACKING_AIO,
BLKDBG_READ_COMPRESSED,
@@ -433,43 +404,4 @@ typedef enum {
#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);
-
-/* Convenience for block device models */
-
-typedef struct BlockConf {
- BlockDriverState *bs;
- uint16_t physical_block_size;
- uint16_t logical_block_size;
- uint16_t min_io_size;
- uint32_t opt_io_size;
- int32_t bootindex;
- uint32_t discard_granularity;
-} BlockConf;
-
-static inline unsigned int get_physical_block_exp(BlockConf *conf)
-{
- unsigned int exp = 0, size;
-
- for (size = conf->physical_block_size;
- size > conf->logical_block_size;
- size >>= 1) {
- exp++;
- }
-
- return exp;
-}
-
-#define DEFINE_BLOCK_PROPERTIES(_state, _conf) \
- DEFINE_PROP_DRIVE("drive", _state, _conf.bs), \
- DEFINE_PROP_BLOCKSIZE("logical_block_size", _state, \
- _conf.logical_block_size, 512), \
- DEFINE_PROP_BLOCKSIZE("physical_block_size", _state, \
- _conf.physical_block_size, 512), \
- DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 0), \
- DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 0), \
- DEFINE_PROP_INT32("bootindex", _state, _conf.bootindex, -1), \
- DEFINE_PROP_UINT32("discard_granularity", _state, \
- _conf.discard_granularity, 0)
-
#endif
-
diff --git a/block/blkdebug.c b/block/blkdebug.c
index e56e37da51..59dcea0650 100644
--- a/block/blkdebug.c
+++ b/block/blkdebug.c
@@ -26,24 +26,10 @@
#include "block_int.h"
#include "module.h"
-typedef struct BlkdebugVars {
- int state;
-
- /* If inject_errno != 0, an error is injected for requests */
- int inject_errno;
-
- /* Decides if all future requests fail (false) or only the next one and
- * after the next request inject_errno is reset to 0 (true) */
- bool inject_once;
-
- /* Decides if aio_readv/writev fails right away (true) or returns an error
- * return value only in the callback (false) */
- bool inject_immediately;
-} BlkdebugVars;
-
typedef struct BDRVBlkdebugState {
- BlkdebugVars vars;
- QLIST_HEAD(list, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
+ int state;
+ QLIST_HEAD(, BlkdebugRule) rules[BLKDBG_EVENT_MAX];
+ QSIMPLEQ_HEAD(, BlkdebugRule) active_rules;
} BDRVBlkdebugState;
typedef struct BlkdebugAIOCB {
@@ -73,12 +59,14 @@ typedef struct BlkdebugRule {
int error;
int immediately;
int once;
+ int64_t sector;
} inject;
struct {
int new_state;
} set_state;
} options;
QLIST_ENTRY(BlkdebugRule) next;
+ QSIMPLEQ_ENTRY(BlkdebugRule) active_next;
} BlkdebugRule;
static QemuOptsList inject_error_opts = {
@@ -98,6 +86,10 @@ static QemuOptsList inject_error_opts = {
.type = QEMU_OPT_NUMBER,
},
{
+ .name = "sector",
+ .type = QEMU_OPT_NUMBER,
+ },
+ {
.name = "once",
.type = QEMU_OPT_BOOL,
},
@@ -147,9 +139,7 @@ static const char *event_names[BLKDBG_EVENT_MAX] = {
[BLKDBG_L2_ALLOC_COW_READ] = "l2_alloc.cow_read",
[BLKDBG_L2_ALLOC_WRITE] = "l2_alloc.write",
- [BLKDBG_READ] = "read",
[BLKDBG_READ_AIO] = "read_aio",
- [BLKDBG_READ_BACKING] = "read_backing",
[BLKDBG_READ_BACKING_AIO] = "read_backing_aio",
[BLKDBG_READ_COMPRESSED] = "read_compressed",
@@ -228,6 +218,7 @@ static int add_rule(QemuOpts *opts, void *opaque)
rule->options.inject.once = qemu_opt_get_bool(opts, "once", 0);
rule->options.inject.immediately =
qemu_opt_get_bool(opts, "immediately", 0);
+ rule->options.inject.sector = qemu_opt_get_number(opts, "sector", -1);
break;
case ACTION_SET_STATE:
@@ -302,7 +293,7 @@ static int blkdebug_open(BlockDriverState *bs, const char *filename, int flags)
filename = c + 1;
/* Set initial state */
- s->vars.state = 1;
+ s->state = 1;
/* Open the backing file */
ret = bdrv_file_open(&bs->file, filename, flags);
@@ -328,18 +319,18 @@ static void blkdebug_aio_cancel(BlockDriverAIOCB *blockacb)
}
static BlockDriverAIOCB *inject_error(BlockDriverState *bs,
- BlockDriverCompletionFunc *cb, void *opaque)
+ BlockDriverCompletionFunc *cb, void *opaque, BlkdebugRule *rule)
{
BDRVBlkdebugState *s = bs->opaque;
- int error = s->vars.inject_errno;
+ int error = rule->options.inject.error;
struct BlkdebugAIOCB *acb;
QEMUBH *bh;
- if (s->vars.inject_once) {
- s->vars.inject_errno = 0;
+ if (rule->options.inject.once) {
+ QSIMPLEQ_INIT(&s->active_rules);
}
- if (s->vars.inject_immediately) {
+ if (rule->options.inject.immediately) {
return NULL;
}
@@ -358,14 +349,21 @@ static BlockDriverAIOCB *blkdebug_aio_readv(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque)
{
BDRVBlkdebugState *s = bs->opaque;
+ BlkdebugRule *rule = NULL;
- if (s->vars.inject_errno) {
- return inject_error(bs, cb, opaque);
+ QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
+ if (rule->options.inject.sector == -1 ||
+ (rule->options.inject.sector >= sector_num &&
+ rule->options.inject.sector < sector_num + nb_sectors)) {
+ break;
+ }
+ }
+
+ if (rule && rule->options.inject.error) {
+ return inject_error(bs, cb, opaque, rule);
}
- BlockDriverAIOCB *acb =
- bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
- return acb;
+ return bdrv_aio_readv(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
}
static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
@@ -373,14 +371,21 @@ static BlockDriverAIOCB *blkdebug_aio_writev(BlockDriverState *bs,
BlockDriverCompletionFunc *cb, void *opaque)
{
BDRVBlkdebugState *s = bs->opaque;
+ BlkdebugRule *rule = NULL;
+
+ QSIMPLEQ_FOREACH(rule, &s->active_rules, active_next) {
+ if (rule->options.inject.sector == -1 ||
+ (rule->options.inject.sector >= sector_num &&
+ rule->options.inject.sector < sector_num + nb_sectors)) {
+ break;
+ }
+ }
- if (s->vars.inject_errno) {
- return inject_error(bs, cb, opaque);
+ if (rule && rule->options.inject.error) {
+ return inject_error(bs, cb, opaque, rule);
}
- BlockDriverAIOCB *acb =
- bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
- return acb;
+ return bdrv_aio_writev(bs->file, sector_num, qiov, nb_sectors, cb, opaque);
}
static void blkdebug_close(BlockDriverState *bs)
@@ -397,44 +402,53 @@ static void blkdebug_close(BlockDriverState *bs)
}
}
-static void process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
- BlkdebugVars *old_vars)
+static bool process_rule(BlockDriverState *bs, struct BlkdebugRule *rule,
+ int old_state, bool injected)
{
BDRVBlkdebugState *s = bs->opaque;
- BlkdebugVars *vars = &s->vars;
/* Only process rules for the current state */
- if (rule->state && rule->state != old_vars->state) {
- return;
+ if (rule->state && rule->state != old_state) {
+ return injected;
}
/* Take the action */
switch (rule->action) {
case ACTION_INJECT_ERROR:
- vars->inject_errno = rule->options.inject.error;
- vars->inject_once = rule->options.inject.once;
- vars->inject_immediately = rule->options.inject.immediately;
+ if (!injected) {
+ QSIMPLEQ_INIT(&s->active_rules);
+ injected = true;
+ }
+ QSIMPLEQ_INSERT_HEAD(&s->active_rules, rule, active_next);
break;
case ACTION_SET_STATE:
- vars->state = rule->options.set_state.new_state;
+ s->state = rule->options.set_state.new_state;
break;
}
+ return injected;
}
static void blkdebug_debug_event(BlockDriverState *bs, BlkDebugEvent event)
{
BDRVBlkdebugState *s = bs->opaque;
struct BlkdebugRule *rule;
- BlkdebugVars old_vars = s->vars;
+ int old_state = s->state;
+ bool injected;
assert((int)event >= 0 && event < BLKDBG_EVENT_MAX);
+ injected = false;
QLIST_FOREACH(rule, &s->rules[event], next) {
- process_rule(bs, rule, &old_vars);
+ injected = process_rule(bs, rule, old_state, injected);
}
}
+static int64_t blkdebug_getlength(BlockDriverState *bs)
+{
+ return bdrv_getlength(bs->file);
+}
+
static BlockDriver bdrv_blkdebug = {
.format_name = "blkdebug",
.protocol_name = "blkdebug",
@@ -443,6 +457,7 @@ static BlockDriver bdrv_blkdebug = {
.bdrv_file_open = blkdebug_open,
.bdrv_close = blkdebug_close,
+ .bdrv_getlength = blkdebug_getlength,
.bdrv_aio_readv = blkdebug_aio_readv,
.bdrv_aio_writev = blkdebug_aio_writev,
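
With per-event rule lists and the new "sector" option, an error is armed when its event fires and then applied only to requests covering the given sector. A hedged configuration example; the section and option names follow the inject_error_opts list above, while the file names are made up:

/*
 * blkdebug.conf:
 *   [inject-error]
 *   event = "read_aio"
 *   errno = "5"
 *   sector = "2048"
 *   once = "on"
 *
 * used as:  -drive file=blkdebug:blkdebug.conf:test.qcow2
 *
 * Only an aio read that covers sector 2048, issued after the "read_aio"
 * event has fired, fails with -EIO; "once" disarms the rule afterwards.
 */
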
diff --git a/block/curl.c b/block/curl.c
index bf3680ba57..e7c3634d35 100644
--- a/block/curl.c
+++ b/block/curl.c
@@ -140,8 +140,8 @@ static size_t curl_read_cb(void *ptr, size_t size, size_t nmemb, void *opaque)
continue;
if ((s->buf_off >= acb->end)) {
- qemu_iovec_from_buffer(acb->qiov, s->orig_buf + acb->start,
- acb->end - acb->start);
+ qemu_iovec_from_buf(acb->qiov, 0, s->orig_buf + acb->start,
+ acb->end - acb->start);
acb->common.cb(acb->common.opaque, 0);
qemu_aio_release(acb);
s->acb[i] = NULL;
@@ -176,7 +176,7 @@ static int curl_find_buf(BDRVCURLState *s, size_t start, size_t len,
{
char *buf = state->orig_buf + (start - state->buf_start);
- qemu_iovec_from_buffer(acb->qiov, buf, len);
+ qemu_iovec_from_buf(acb->qiov, 0, buf, len);
acb->common.cb(acb->common.opaque, 0);
return FIND_RET_OK;
diff --git a/block/iscsi.c b/block/iscsi.c
index 22888a0845..993a86d829 100644
--- a/block/iscsi.c
+++ b/block/iscsi.c
@@ -35,6 +35,10 @@
#include <iscsi/iscsi.h>
#include <iscsi/scsi-lowlevel.h>
+#ifdef __linux__
+#include <scsi/sg.h>
+#include <hw/scsi-defs.h>
+#endif
typedef struct IscsiLun {
struct iscsi_context *iscsi;
@@ -56,6 +60,9 @@ typedef struct IscsiAIOCB {
int canceled;
size_t read_size;
size_t read_offset;
+#ifdef __linux__
+ sg_io_hdr_t *ioh;
+#endif
} IscsiAIOCB;
struct IscsiTask {
@@ -240,8 +247,7 @@ iscsi_aio_writev(BlockDriverState *bs, int64_t sector_num,
/* this will allow us to get rid of 'buf' completely */
size = nb_sectors * BDRV_SECTOR_SIZE;
acb->buf = g_malloc(size);
- qemu_iovec_to_buffer(acb->qiov, acb->buf);
-
+ qemu_iovec_to_buf(acb->qiov, 0, acb->buf, size);
acb->task = malloc(sizeof(struct scsi_task));
if (acb->task == NULL) {
@@ -515,6 +521,136 @@ iscsi_aio_discard(BlockDriverState *bs,
return &acb->common;
}
+#ifdef __linux__
+static void
+iscsi_aio_ioctl_cb(struct iscsi_context *iscsi, int status,
+ void *command_data, void *opaque)
+{
+ IscsiAIOCB *acb = opaque;
+
+ if (acb->canceled != 0) {
+ qemu_aio_release(acb);
+ scsi_free_scsi_task(acb->task);
+ acb->task = NULL;
+ return;
+ }
+
+ acb->status = 0;
+ if (status < 0) {
+ error_report("Failed to ioctl(SG_IO) to iSCSI lun. %s",
+ iscsi_get_error(iscsi));
+ acb->status = -EIO;
+ }
+
+ acb->ioh->driver_status = 0;
+ acb->ioh->host_status = 0;
+ acb->ioh->resid = 0;
+
+#define SG_ERR_DRIVER_SENSE 0x08
+
+ if (status == SCSI_STATUS_CHECK_CONDITION && acb->task->datain.size >= 2) {
+ int ss;
+
+ acb->ioh->driver_status |= SG_ERR_DRIVER_SENSE;
+
+ acb->ioh->sb_len_wr = acb->task->datain.size - 2;
+ ss = (acb->ioh->mx_sb_len >= acb->ioh->sb_len_wr) ?
+ acb->ioh->mx_sb_len : acb->ioh->sb_len_wr;
+ memcpy(acb->ioh->sbp, &acb->task->datain.data[2], ss);
+ }
+
+ iscsi_schedule_bh(iscsi_readv_writev_bh_cb, acb);
+ scsi_free_scsi_task(acb->task);
+ acb->task = NULL;
+}
+
+static BlockDriverAIOCB *iscsi_aio_ioctl(BlockDriverState *bs,
+ unsigned long int req, void *buf,
+ BlockDriverCompletionFunc *cb, void *opaque)
+{
+ IscsiLun *iscsilun = bs->opaque;
+ struct iscsi_context *iscsi = iscsilun->iscsi;
+ struct iscsi_data data;
+ IscsiAIOCB *acb;
+
+ assert(req == SG_IO);
+
+ acb = qemu_aio_get(&iscsi_aio_pool, bs, cb, opaque);
+
+ acb->iscsilun = iscsilun;
+ acb->canceled = 0;
+ acb->buf = NULL;
+ acb->ioh = buf;
+
+ acb->task = malloc(sizeof(struct scsi_task));
+ if (acb->task == NULL) {
+ error_report("iSCSI: Failed to allocate task for scsi command. %s",
+ iscsi_get_error(iscsi));
+ qemu_aio_release(acb);
+ return NULL;
+ }
+ memset(acb->task, 0, sizeof(struct scsi_task));
+
+ switch (acb->ioh->dxfer_direction) {
+ case SG_DXFER_TO_DEV:
+ acb->task->xfer_dir = SCSI_XFER_WRITE;
+ break;
+ case SG_DXFER_FROM_DEV:
+ acb->task->xfer_dir = SCSI_XFER_READ;
+ break;
+ default:
+ acb->task->xfer_dir = SCSI_XFER_NONE;
+ break;
+ }
+
+ acb->task->cdb_size = acb->ioh->cmd_len;
+ memcpy(&acb->task->cdb[0], acb->ioh->cmdp, acb->ioh->cmd_len);
+ acb->task->expxferlen = acb->ioh->dxfer_len;
+
+ if (acb->task->xfer_dir == SCSI_XFER_WRITE) {
+ data.data = acb->ioh->dxferp;
+ data.size = acb->ioh->dxfer_len;
+ }
+ if (iscsi_scsi_command_async(iscsi, iscsilun->lun, acb->task,
+ iscsi_aio_ioctl_cb,
+ (acb->task->xfer_dir == SCSI_XFER_WRITE) ?
+ &data : NULL,
+ acb) != 0) {
+ scsi_free_scsi_task(acb->task);
+ qemu_aio_release(acb);
+ return NULL;
+ }
+
+ /* tell libiscsi to read straight into the buffer we got from ioctl */
+ if (acb->task->xfer_dir == SCSI_XFER_READ) {
+ scsi_task_add_data_in_buffer(acb->task,
+ acb->ioh->dxfer_len,
+ acb->ioh->dxferp);
+ }
+
+ iscsi_set_events(iscsilun);
+
+ return &acb->common;
+}
+
+static int iscsi_ioctl(BlockDriverState *bs, unsigned long int req, void *buf)
+{
+ IscsiLun *iscsilun = bs->opaque;
+
+ switch (req) {
+ case SG_GET_VERSION_NUM:
+ *(int *)buf = 30000;
+ break;
+ case SG_GET_SCSI_ID:
+ ((struct sg_scsi_id *)buf)->scsi_type = iscsilun->type;
+ break;
+ default:
+ return -1;
+ }
+ return 0;
+}
+#endif
+
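
The synchronous bdrv_ioctl hook only answers the probe requests that scsi-generic issues; everything else goes through the asynchronous SG_IO path above. A sketch of a caller, assuming the generic bdrv_ioctl() wrapper declared in block.h (not shown in this diff) and the Linux-only scsi/sg.h constants already included here:

/* Sketch: probe whether a BDS behaves like an sg device. */
static int example_probe_sg(BlockDriverState *bs)
{
    int version = 0;

    if (bdrv_ioctl(bs, SG_GET_VERSION_NUM, &version) < 0) {
        return -1;              /* no sg pass-through available */
    }
    return version;             /* 30000 for the emulation above */
}
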
static int64_t
iscsi_getlength(BlockDriverState *bs)
{
@@ -885,6 +1021,16 @@ static int iscsi_open(BlockDriverState *bs, const char *filename, int flags)
if (iscsi_url != NULL) {
iscsi_destroy_url(iscsi_url);
}
+
+ /* Medium changer or tape. We don't have any emulation for this, so it must
+ * be sg ioctl compatible. We force it to be sg, otherwise qemu will try
+ * to read from the device to guess the image format.
+ */
+ if (iscsilun->type == TYPE_MEDIUM_CHANGER ||
+ iscsilun->type == TYPE_TAPE) {
+ bs->sg = 1;
+ }
+
return 0;
failed:
@@ -926,6 +1072,11 @@ static BlockDriver bdrv_iscsi = {
.bdrv_aio_flush = iscsi_aio_flush,
.bdrv_aio_discard = iscsi_aio_discard,
+
+#ifdef __linux__
+ .bdrv_ioctl = iscsi_ioctl,
+ .bdrv_aio_ioctl = iscsi_aio_ioctl,
+#endif
};
static void iscsi_block_init(void)
diff --git a/block/nbd.c b/block/nbd.c
index 1212614223..2bce47bf7a 100644
--- a/block/nbd.c
+++ b/block/nbd.c
@@ -196,7 +196,7 @@ static void nbd_restart_write(void *opaque)
}
static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
- struct iovec *iov, int offset)
+ QEMUIOVector *qiov, int offset)
{
int rc, ret;
@@ -205,8 +205,9 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
qemu_aio_set_fd_handler(s->sock, nbd_reply_ready, nbd_restart_write,
nbd_have_request, s);
rc = nbd_send_request(s->sock, request);
- if (rc >= 0 && iov) {
- ret = qemu_co_sendv(s->sock, iov, request->len, offset);
+ if (rc >= 0 && qiov) {
+ ret = qemu_co_sendv(s->sock, qiov->iov, qiov->niov,
+ offset, request->len);
if (ret != request->len) {
return -EIO;
}
@@ -220,7 +221,7 @@ static int nbd_co_send_request(BDRVNBDState *s, struct nbd_request *request,
static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
struct nbd_reply *reply,
- struct iovec *iov, int offset)
+ QEMUIOVector *qiov, int offset)
{
int ret;
@@ -231,8 +232,9 @@ static void nbd_co_receive_reply(BDRVNBDState *s, struct nbd_request *request,
if (reply->handle != request->handle) {
reply->error = EIO;
} else {
- if (iov && reply->error == 0) {
- ret = qemu_co_recvv(s->sock, iov, request->len, offset);
+ if (qiov && reply->error == 0) {
+ ret = qemu_co_recvv(s->sock, qiov->iov, qiov->niov,
+ offset, request->len);
if (ret != request->len) {
reply->error = EIO;
}
@@ -349,7 +351,7 @@ static int nbd_co_readv_1(BlockDriverState *bs, int64_t sector_num,
if (ret < 0) {
reply.error = -ret;
} else {
- nbd_co_receive_reply(s, &request, &reply, qiov->iov, offset);
+ nbd_co_receive_reply(s, &request, &reply, qiov, offset);
}
nbd_coroutine_end(s, &request);
return -reply.error;
@@ -374,7 +376,7 @@ static int nbd_co_writev_1(BlockDriverState *bs, int64_t sector_num,
request.len = nb_sectors * 512;
nbd_coroutine_start(s, &request);
- ret = nbd_co_send_request(s, &request, qiov->iov, offset);
+ ret = nbd_co_send_request(s, &request, qiov, offset);
if (ret < 0) {
reply.error = -ret;
} else {
diff --git a/block/qcow.c b/block/qcow.c
index 35dff497ae..7b5ab87d2d 100644
--- a/block/qcow.c
+++ b/block/qcow.c
@@ -540,7 +540,7 @@ done:
qemu_co_mutex_unlock(&s->lock);
if (qiov->niov > 1) {
- qemu_iovec_from_buffer(qiov, orig_buf, qiov->size);
+ qemu_iovec_from_buf(qiov, 0, orig_buf, qiov->size);
qemu_vfree(orig_buf);
}
@@ -569,7 +569,7 @@ static coroutine_fn int qcow_co_writev(BlockDriverState *bs, int64_t sector_num,
if (qiov->niov > 1) {
buf = orig_buf = qemu_blockalign(bs, qiov->size);
- qemu_iovec_to_buffer(qiov, buf);
+ qemu_iovec_to_buf(qiov, 0, buf, qiov->size);
} else {
orig_buf = NULL;
buf = (uint8_t *)qiov->iov->iov_base;
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index d7e0e19d9c..e179211c57 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -662,7 +662,10 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
qcow2_cache_depends_on_flush(s->l2_table_cache);
}
- qcow2_cache_set_dependency(bs, s->l2_table_cache, s->refcount_block_cache);
+ if (qcow2_need_accurate_refcounts(s)) {
+ qcow2_cache_set_dependency(bs, s->l2_table_cache,
+ s->refcount_block_cache);
+ }
ret = get_cluster_table(bs, m->offset, &l2_table, &l2_index);
if (ret < 0) {
goto err;
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 66f391597c..5e3f9153fb 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -627,10 +627,11 @@ int64_t qcow2_alloc_bytes(BlockDriverState *bs, int size)
BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_BYTES);
assert(size > 0 && size <= s->cluster_size);
if (s->free_byte_offset == 0) {
- s->free_byte_offset = qcow2_alloc_clusters(bs, s->cluster_size);
- if (s->free_byte_offset < 0) {
- return s->free_byte_offset;
+ offset = qcow2_alloc_clusters(bs, s->cluster_size);
+ if (offset < 0) {
+ return offset;
}
+ s->free_byte_offset = offset;
}
redo:
free_in_cluster = s->cluster_size -
diff --git a/block/qcow2-snapshot.c b/block/qcow2-snapshot.c
index 4561a2abf9..4e7c93b8b3 100644
--- a/block/qcow2-snapshot.c
+++ b/block/qcow2-snapshot.c
@@ -405,7 +405,7 @@ int qcow2_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
- qcow2_check_refcounts(bs, &result);
+ qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
@@ -522,7 +522,7 @@ int qcow2_snapshot_goto(BlockDriverState *bs, const char *snapshot_id)
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
- qcow2_check_refcounts(bs, &result);
+ qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
@@ -582,7 +582,7 @@ int qcow2_snapshot_delete(BlockDriverState *bs, const char *snapshot_id)
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
- qcow2_check_refcounts(bs, &result);
+ qcow2_check_refcounts(bs, &result, 0);
}
#endif
return 0;
diff --git a/block/qcow2.c b/block/qcow2.c
index 2c1cd0a446..fd5e214431 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -214,6 +214,62 @@ static void report_unsupported_feature(BlockDriverState *bs,
}
}
+/*
+ * Sets the dirty bit and flushes afterwards if necessary.
+ *
+ * The incompatible_features bit is only set if the image file header was
+ * updated successfully. Therefore it is not required to check the return
+ * value of this function.
+ */
+static int qcow2_mark_dirty(BlockDriverState *bs)
+{
+ BDRVQcowState *s = bs->opaque;
+ uint64_t val;
+ int ret;
+
+ assert(s->qcow_version >= 3);
+
+ if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
+ return 0; /* already dirty */
+ }
+
+ val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
+ ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
+ &val, sizeof(val));
+ if (ret < 0) {
+ return ret;
+ }
+ ret = bdrv_flush(bs->file);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* Only treat image as dirty if the header was updated successfully */
+ s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
+ return 0;
+}
+
+/*
+ * Clears the dirty bit and flushes the image beforehand if necessary. Only
+ * call this function when there are no pending requests; it does not guard
+ * against concurrent requests dirtying the image.
+ */
+static int qcow2_mark_clean(BlockDriverState *bs)
+{
+ BDRVQcowState *s = bs->opaque;
+
+ if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
+ int ret = bdrv_flush(bs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;
+ return qcow2_update_header(bs);
+ }
+ return 0;
+}
+
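
Taken together, the two helpers implement the lazy-refcounts lifecycle: the first allocating write after open marks a v3 image dirty, refcount updates are then skipped while the bit is set, and a clean shutdown or the repair-on-open path clears it again. A condensed view of the ordering, restating the code in this patch rather than adding to it:

/*
 * open:    DIRTY bit set in the header -> qcow2_check_refcounts() with
 *          BDRV_FIX_ERRORS, then qcow2_mark_clean()
 * write:   cluster allocation with QCOW2_COMPAT_LAZY_REFCOUNTS set
 *          -> qcow2_mark_dirty() updates and flushes the header first
 * running: qcow2_need_accurate_refcounts() is false, so refcount block
 *          flushes and cache dependencies are skipped
 * close:   qcow2_mark_clean() flushes and clears the bit again
 */
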
static int qcow2_open(BlockDriverState *bs, int flags)
{
BDRVQcowState *s = bs->opaque;
@@ -287,12 +343,13 @@ static int qcow2_open(BlockDriverState *bs, int flags)
s->compatible_features = header.compatible_features;
s->autoclear_features = header.autoclear_features;
- if (s->incompatible_features != 0) {
+ if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
void *feature_table = NULL;
qcow2_read_extensions(bs, header.header_length, ext_end,
&feature_table);
report_unsupported_feature(bs, feature_table,
- s->incompatible_features);
+ s->incompatible_features &
+ ~QCOW2_INCOMPAT_MASK);
ret = -ENOTSUP;
goto fail;
}
@@ -412,10 +469,26 @@ static int qcow2_open(BlockDriverState *bs, int flags)
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
+ /* Repair image if dirty */
+ if ((s->incompatible_features & QCOW2_INCOMPAT_DIRTY) &&
+ !bs->read_only) {
+ BdrvCheckResult result = {0};
+
+ ret = qcow2_check_refcounts(bs, &result, BDRV_FIX_ERRORS);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = qcow2_mark_clean(bs);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
#ifdef DEBUG_ALLOC
{
BdrvCheckResult result = {0};
- qcow2_check_refcounts(bs, &result);
+ qcow2_check_refcounts(bs, &result, 0);
}
#endif
return ret;
@@ -508,7 +581,7 @@ int qcow2_backing_read1(BlockDriverState *bs, QEMUIOVector *qiov,
else
n1 = bs->total_sectors - sector_num;
- qemu_iovec_memset_skip(qiov, 0, 512 * (nb_sectors - n1), 512 * n1);
+ qemu_iovec_memset(qiov, 512 * n1, 0, 512 * (nb_sectors - n1));
return n1;
}
@@ -547,7 +620,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
index_in_cluster = sector_num & (s->cluster_sectors - 1);
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+ qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
cur_nr_sectors * 512);
switch (ret) {
@@ -569,7 +642,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
}
} else {
/* Note: in this case, no need to wait */
- qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
+ qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
}
break;
@@ -578,7 +651,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
ret = -EIO;
goto fail;
}
- qemu_iovec_memset(&hd_qiov, 0, 512 * cur_nr_sectors);
+ qemu_iovec_memset(&hd_qiov, 0, 0, 512 * cur_nr_sectors);
break;
case QCOW2_CLUSTER_COMPRESSED:
@@ -588,7 +661,7 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
goto fail;
}
- qemu_iovec_from_buffer(&hd_qiov,
+ qemu_iovec_from_buf(&hd_qiov, 0,
s->cluster_cache + index_in_cluster * 512,
512 * cur_nr_sectors);
break;
@@ -628,11 +701,8 @@ static coroutine_fn int qcow2_co_readv(BlockDriverState *bs, int64_t sector_num,
if (s->crypt_method) {
qcow2_encrypt_sectors(s, sector_num, cluster_data,
cluster_data, cur_nr_sectors, 0, &s->aes_decrypt_key);
- qemu_iovec_reset(&hd_qiov);
- qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
- cur_nr_sectors * 512);
- qemu_iovec_from_buffer(&hd_qiov, cluster_data,
- 512 * cur_nr_sectors);
+ qemu_iovec_from_buf(qiov, bytes_done,
+ cluster_data, 512 * cur_nr_sectors);
}
break;
@@ -717,11 +787,16 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
goto fail;
}
+ if (l2meta.nb_clusters > 0 &&
+ (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS)) {
+ qcow2_mark_dirty(bs);
+ }
+
cluster_offset = l2meta.cluster_offset;
assert((cluster_offset & 511) == 0);
qemu_iovec_reset(&hd_qiov);
- qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
+ qemu_iovec_concat(&hd_qiov, qiov, bytes_done,
cur_nr_sectors * 512);
if (s->crypt_method) {
@@ -732,7 +807,7 @@ static coroutine_fn int qcow2_co_writev(BlockDriverState *bs,
assert(hd_qiov.size <=
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
- qemu_iovec_to_buffer(&hd_qiov, cluster_data);
+ qemu_iovec_to_buf(&hd_qiov, 0, cluster_data, hd_qiov.size);
qcow2_encrypt_sectors(s, sector_num, cluster_data,
cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);
@@ -788,6 +863,8 @@ static void qcow2_close(BlockDriverState *bs)
qcow2_cache_flush(bs, s->l2_table_cache);
qcow2_cache_flush(bs, s->refcount_block_cache);
+ qcow2_mark_clean(bs);
+
qcow2_cache_destroy(bs, s->l2_table_cache);
qcow2_cache_destroy(bs, s->refcount_block_cache);
@@ -952,7 +1029,16 @@ int qcow2_update_header(BlockDriverState *bs)
/* Feature table */
Qcow2Feature features[] = {
- /* no feature defined yet */
+ {
+ .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
+ .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
+ .name = "dirty bit",
+ },
+ {
+ .type = QCOW2_FEAT_TYPE_COMPATIBLE,
+ .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
+ .name = "lazy refcounts",
+ },
};
ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
@@ -1135,6 +1221,11 @@ static int qcow2_create2(const char *filename, int64_t total_size,
header.crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
}
+ if (flags & BLOCK_FLAG_LAZY_REFCOUNTS) {
+ header.compatible_features |=
+ cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
+ }
+
ret = bdrv_pwrite(bs, 0, &header, sizeof(header));
if (ret < 0) {
goto out;
@@ -1248,6 +1339,8 @@ static int qcow2_create(const char *filename, QEMUOptionParameter *options)
options->value.s);
return -EINVAL;
}
+ } else if (!strcmp(options->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
+ flags |= options->value.n ? BLOCK_FLAG_LAZY_REFCOUNTS : 0;
}
options++;
}
@@ -1258,6 +1351,12 @@ static int qcow2_create(const char *filename, QEMUOptionParameter *options)
return -EINVAL;
}
+ if (version < 3 && (flags & BLOCK_FLAG_LAZY_REFCOUNTS)) {
+ fprintf(stderr, "Lazy refcounts only supported with compatibility "
+ "level 1.1 and above (use compat=1.1 or greater)\n");
+ return -EINVAL;
+ }
+
return qcow2_create2(filename, sectors, backing_file, backing_fmt, flags,
cluster_size, prealloc, options, version);
}
@@ -1444,10 +1543,12 @@ static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
return ret;
}
- ret = qcow2_cache_flush(bs, s->refcount_block_cache);
- if (ret < 0) {
- qemu_co_mutex_unlock(&s->lock);
- return ret;
+ if (qcow2_need_accurate_refcounts(s)) {
+ ret = qcow2_cache_flush(bs, s->refcount_block_cache);
+ if (ret < 0) {
+ qemu_co_mutex_unlock(&s->lock);
+ return ret;
+ }
}
qemu_co_mutex_unlock(&s->lock);
@@ -1562,6 +1663,11 @@ static QEMUOptionParameter qcow2_create_options[] = {
.type = OPT_STRING,
.help = "Preallocation mode (allowed values: off, metadata)"
},
+ {
+ .name = BLOCK_OPT_LAZY_REFCOUNTS,
+ .type = OPT_FLAG,
+ .help = "Postpone refcount updates",
+ },
{ NULL }
};
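
The new create option is only accepted for compat=1.1 images, as the version check in qcow2_create() above enforces. A hedged usage example; the option names come from BLOCK_OPT_LAZY_REFCOUNTS and the existing compat option, and the image name is arbitrary:

/*
 *   qemu-img create -f qcow2 -o compat=1.1,lazy_refcounts=on disk.qcow2 8G
 *
 * The same command with compat=0.10 fails with the "Lazy refcounts only
 * supported with compatibility level 1.1 and above" message printed above.
 */
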
diff --git a/block/qcow2.h b/block/qcow2.h
index 455b6d7cfe..b4eb65470e 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -110,6 +110,22 @@ enum {
QCOW2_FEAT_TYPE_AUTOCLEAR = 2,
};
+/* Incompatible feature bits */
+enum {
+ QCOW2_INCOMPAT_DIRTY_BITNR = 0,
+ QCOW2_INCOMPAT_DIRTY = 1 << QCOW2_INCOMPAT_DIRTY_BITNR,
+
+ QCOW2_INCOMPAT_MASK = QCOW2_INCOMPAT_DIRTY,
+};
+
+/* Compatible feature bits */
+enum {
+ QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR = 0,
+ QCOW2_COMPAT_LAZY_REFCOUNTS = 1 << QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
+
+ QCOW2_COMPAT_FEAT_MASK = QCOW2_COMPAT_LAZY_REFCOUNTS,
+};
+
typedef struct Qcow2Feature {
uint8_t type;
uint8_t bit;
@@ -237,6 +253,11 @@ static inline int qcow2_get_cluster_type(uint64_t l2_entry)
}
}
+/* Check whether refcounts are eager or lazy */
+static inline bool qcow2_need_accurate_refcounts(BDRVQcowState *s)
+{
+ return !(s->incompatible_features & QCOW2_INCOMPAT_DIRTY);
+}
// FIXME Need qcow2_ prefix to global functions
diff --git a/block/qed.c b/block/qed.c
index ab5972466c..5f3eefa3af 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -736,7 +736,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
/* Zero all sectors if reading beyond the end of the backing file */
if (pos >= backing_length ||
pos + qiov->size > backing_length) {
- qemu_iovec_memset(qiov, 0, qiov->size);
+ qemu_iovec_memset(qiov, 0, 0, qiov->size);
}
/* Complete now if there are no backing file sectors to read */
@@ -748,7 +748,7 @@ static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
/* If the read straddles the end of the backing file, shorten it */
size = MIN((uint64_t)backing_length - pos, qiov->size);
- BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING);
+ BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
@@ -1131,7 +1131,7 @@ static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
acb->cur_nclusters = qed_bytes_to_clusters(s,
qed_offset_into_cluster(s, acb->cur_pos) + len);
- qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+ qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
if (acb->flags & QED_AIOCB_ZERO) {
/* Skip ahead if the clusters are already zero */
@@ -1177,7 +1177,7 @@ static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
/* Calculate the I/O vector */
acb->cur_cluster = offset;
- qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+ qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
/* Do the actual write */
qed_aio_write_main(acb, 0);
@@ -1247,11 +1247,11 @@ static void qed_aio_read_data(void *opaque, int ret,
goto err;
}
- qemu_iovec_copy(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
+ qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);
/* Handle zero cluster and backing file reads */
if (ret == QED_CLUSTER_ZERO) {
- qemu_iovec_memset(&acb->cur_qiov, 0, acb->cur_qiov.size);
+ qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
qed_aio_next_io(acb, 0);
return;
} else if (ret != QED_CLUSTER_FOUND) {
diff --git a/block/raw.c b/block/raw.c
index 09d9b4878b..ff34ea41e7 100644
--- a/block/raw.c
+++ b/block/raw.c
@@ -12,12 +12,14 @@ static int raw_open(BlockDriverState *bs, int flags)
static int coroutine_fn raw_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
+ BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
return bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
}
static int coroutine_fn raw_co_writev(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
+ BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
return bdrv_co_writev(bs->file, sector_num, nb_sectors, qiov);
}
diff --git a/block/rbd.c b/block/rbd.c
index eebc334462..5a0f79fc8f 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -639,7 +639,7 @@ static void rbd_aio_bh_cb(void *opaque)
RBDAIOCB *acb = opaque;
if (acb->cmd == RBD_AIO_READ) {
- qemu_iovec_from_buffer(acb->qiov, acb->bounce, acb->qiov->size);
+ qemu_iovec_from_buf(acb->qiov, 0, acb->bounce, acb->qiov->size);
}
qemu_vfree(acb->bounce);
acb->common.cb(acb->common.opaque, (acb->ret > 0 ? 0 : acb->ret));
@@ -693,7 +693,7 @@ static BlockDriverAIOCB *rbd_start_aio(BlockDriverState *bs,
acb->bh = NULL;
if (cmd == RBD_AIO_WRITE) {
- qemu_iovec_to_buffer(acb->qiov, acb->bounce);
+ qemu_iovec_to_buf(acb->qiov, 0, acb->bounce, qiov->size);
}
buf = acb->bounce;
diff --git a/block/sheepdog.c b/block/sheepdog.c
index 8877f4528d..a04ad99ead 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -259,8 +259,7 @@ typedef struct AIOReq {
uint8_t flags;
uint32_t id;
- QLIST_ENTRY(AIOReq) outstanding_aio_siblings;
- QLIST_ENTRY(AIOReq) aioreq_siblings;
+ QLIST_ENTRY(AIOReq) aio_siblings;
} AIOReq;
enum AIOCBState {
@@ -283,8 +282,7 @@ struct SheepdogAIOCB {
void (*aio_done_func)(SheepdogAIOCB *);
int canceled;
-
- QLIST_HEAD(aioreq_head, AIOReq) aioreq_head;
+ int nr_pending;
};
typedef struct BDRVSheepdogState {
@@ -307,7 +305,8 @@ typedef struct BDRVSheepdogState {
Coroutine *co_recv;
uint32_t aioreq_seq_num;
- QLIST_HEAD(outstanding_aio_head, AIOReq) outstanding_aio_head;
+ QLIST_HEAD(inflight_aio_head, AIOReq) inflight_aio_head;
+ QLIST_HEAD(pending_aio_head, AIOReq) pending_aio_head;
} BDRVSheepdogState;
static const char * sd_strerror(int err)
@@ -358,7 +357,7 @@ static const char * sd_strerror(int err)
* Sheepdog I/O handling:
*
* 1. In sd_co_rw_vector, we send the I/O requests to the server and
- * link the requests to the outstanding_list in the
+ * link the requests to the inflight_list in the
* BDRVSheepdogState. The function exits without waiting for
* receiving the response.
*
@@ -386,21 +385,18 @@ static inline AIOReq *alloc_aio_req(BDRVSheepdogState *s, SheepdogAIOCB *acb,
aio_req->flags = flags;
aio_req->id = s->aioreq_seq_num++;
- QLIST_INSERT_HEAD(&s->outstanding_aio_head, aio_req,
- outstanding_aio_siblings);
- QLIST_INSERT_HEAD(&acb->aioreq_head, aio_req, aioreq_siblings);
-
+ acb->nr_pending++;
return aio_req;
}
-static inline int free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
+static inline void free_aio_req(BDRVSheepdogState *s, AIOReq *aio_req)
{
SheepdogAIOCB *acb = aio_req->aiocb;
- QLIST_REMOVE(aio_req, outstanding_aio_siblings);
- QLIST_REMOVE(aio_req, aioreq_siblings);
+
+ QLIST_REMOVE(aio_req, aio_siblings);
g_free(aio_req);
- return !QLIST_EMPTY(&acb->aioreq_head);
+ acb->nr_pending--;
}
static void coroutine_fn sd_finish_aiocb(SheepdogAIOCB *acb)
@@ -446,7 +442,7 @@ static SheepdogAIOCB *sd_aio_setup(BlockDriverState *bs, QEMUIOVector *qiov,
acb->canceled = 0;
acb->coroutine = qemu_coroutine_self();
acb->ret = 0;
- QLIST_INIT(&acb->aioreq_head);
+ acb->nr_pending = 0;
return acb;
}
@@ -502,26 +498,6 @@ success:
return fd;
}
-static int send_req(int sockfd, SheepdogReq *hdr, void *data,
- unsigned int *wlen)
-{
- int ret;
-
- ret = qemu_send_full(sockfd, hdr, sizeof(*hdr), 0);
- if (ret < sizeof(*hdr)) {
- error_report("failed to send a req, %s", strerror(errno));
- return -errno;
- }
-
- ret = qemu_send_full(sockfd, data, *wlen, 0);
- if (ret < *wlen) {
- error_report("failed to send a req, %s", strerror(errno));
- ret = -errno;
- }
-
- return ret;
-}
-
static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
unsigned int *wlen)
{
@@ -541,46 +517,36 @@ static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
return ret;
}
-static int do_req(int sockfd, SheepdogReq *hdr, void *data,
- unsigned int *wlen, unsigned int *rlen)
+static void restart_co_req(void *opaque)
{
- int ret;
+ Coroutine *co = opaque;
- socket_set_block(sockfd);
- ret = send_req(sockfd, hdr, data, wlen);
- if (ret < 0) {
- goto out;
- }
-
- ret = qemu_recv_full(sockfd, hdr, sizeof(*hdr), 0);
- if (ret < sizeof(*hdr)) {
- error_report("failed to get a rsp, %s", strerror(errno));
- ret = -errno;
- goto out;
- }
-
- if (*rlen > hdr->data_length) {
- *rlen = hdr->data_length;
- }
-
- if (*rlen) {
- ret = qemu_recv_full(sockfd, data, *rlen, 0);
- if (ret < *rlen) {
- error_report("failed to get the data, %s", strerror(errno));
- ret = -errno;
- goto out;
- }
- }
- ret = 0;
-out:
- socket_set_nonblock(sockfd);
- return ret;
+ qemu_coroutine_enter(co, NULL);
}
-static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data,
- unsigned int *wlen, unsigned int *rlen)
+typedef struct SheepdogReqCo {
+ int sockfd;
+ SheepdogReq *hdr;
+ void *data;
+ unsigned int *wlen;
+ unsigned int *rlen;
+ int ret;
+ bool finished;
+} SheepdogReqCo;
+
+static coroutine_fn void do_co_req(void *opaque)
{
int ret;
+ Coroutine *co;
+ SheepdogReqCo *srco = opaque;
+ int sockfd = srco->sockfd;
+ SheepdogReq *hdr = srco->hdr;
+ void *data = srco->data;
+ unsigned int *wlen = srco->wlen;
+ unsigned int *rlen = srco->rlen;
+
+ co = qemu_coroutine_self();
+ qemu_aio_set_fd_handler(sockfd, NULL, restart_co_req, NULL, co);
socket_set_block(sockfd);
ret = send_co_req(sockfd, hdr, data, wlen);
@@ -588,6 +554,8 @@ static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data,
goto out;
}
+ qemu_aio_set_fd_handler(sockfd, restart_co_req, NULL, NULL, co);
+
ret = qemu_co_recv(sockfd, hdr, sizeof(*hdr));
if (ret < sizeof(*hdr)) {
error_report("failed to get a rsp, %s", strerror(errno));
@@ -609,40 +577,79 @@ static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data,
}
ret = 0;
out:
+ qemu_aio_set_fd_handler(sockfd, NULL, NULL, NULL, NULL);
socket_set_nonblock(sockfd);
- return ret;
+
+ srco->ret = ret;
+ srco->finished = true;
+}
+
+static int do_req(int sockfd, SheepdogReq *hdr, void *data,
+ unsigned int *wlen, unsigned int *rlen)
+{
+ Coroutine *co;
+ SheepdogReqCo srco = {
+ .sockfd = sockfd,
+ .hdr = hdr,
+ .data = data,
+ .wlen = wlen,
+ .rlen = rlen,
+ .ret = 0,
+ .finished = false,
+ };
+
+ if (qemu_in_coroutine()) {
+ do_co_req(&srco);
+ } else {
+ co = qemu_coroutine_create(do_co_req);
+ qemu_coroutine_enter(co, &srco);
+ while (!srco.finished) {
+ qemu_aio_wait();
+ }
+ }
+
+ return srco.ret;
}
static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
struct iovec *iov, int niov, int create,
enum AIOCBState aiocb_type);
+
+static AIOReq *find_pending_req(BDRVSheepdogState *s, uint64_t oid)
+{
+ AIOReq *aio_req;
+
+ QLIST_FOREACH(aio_req, &s->pending_aio_head, aio_siblings) {
+ if (aio_req->oid == oid) {
+ return aio_req;
+ }
+ }
+
+ return NULL;
+}
+
/*
* This function searches pending requests for the object `oid', and
* sends them.
*/
-static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid, uint32_t id)
+static void coroutine_fn send_pending_req(BDRVSheepdogState *s, uint64_t oid)
{
- AIOReq *aio_req, *next;
+ AIOReq *aio_req;
SheepdogAIOCB *acb;
int ret;
- QLIST_FOREACH_SAFE(aio_req, &s->outstanding_aio_head,
- outstanding_aio_siblings, next) {
- if (id == aio_req->id) {
- continue;
- }
- if (aio_req->oid != oid) {
- continue;
- }
-
+ while ((aio_req = find_pending_req(s, oid)) != NULL) {
acb = aio_req->aiocb;
+ /* move aio_req from pending list to inflight one */
+ QLIST_REMOVE(aio_req, aio_siblings);
+ QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
ret = add_aio_request(s, aio_req, acb->qiov->iov,
acb->qiov->niov, 0, acb->aiocb_type);
if (ret < 0) {
error_report("add_aio_request is failed");
free_aio_req(s, aio_req);
- if (QLIST_EMPTY(&acb->aioreq_head)) {
+ if (!acb->nr_pending) {
sd_finish_aiocb(acb);
}
}
@@ -663,10 +670,9 @@ static void coroutine_fn aio_read_response(void *opaque)
int ret;
AIOReq *aio_req = NULL;
SheepdogAIOCB *acb;
- int rest;
unsigned long idx;
- if (QLIST_EMPTY(&s->outstanding_aio_head)) {
+ if (QLIST_EMPTY(&s->inflight_aio_head)) {
goto out;
}
@@ -677,8 +683,8 @@ static void coroutine_fn aio_read_response(void *opaque)
goto out;
}
- /* find the right aio_req from the outstanding_aio list */
- QLIST_FOREACH(aio_req, &s->outstanding_aio_head, outstanding_aio_siblings) {
+ /* find the right aio_req from the inflight aio list */
+ QLIST_FOREACH(aio_req, &s->inflight_aio_head, aio_siblings) {
if (aio_req->id == rsp.id) {
break;
}
@@ -716,12 +722,12 @@ static void coroutine_fn aio_read_response(void *opaque)
* create requests are not allowed, so we search the
* pending requests here.
*/
- send_pending_req(s, vid_to_data_oid(s->inode.vdi_id, idx), rsp.id);
+ send_pending_req(s, vid_to_data_oid(s->inode.vdi_id, idx));
}
break;
case AIOCB_READ_UDATA:
- ret = qemu_co_recvv(fd, acb->qiov->iov, rsp.data_length,
- aio_req->iov_offset);
+ ret = qemu_co_recvv(fd, acb->qiov->iov, acb->qiov->niov,
+ aio_req->iov_offset, rsp.data_length);
if (ret < 0) {
error_report("failed to get the data, %s", strerror(errno));
goto out;
@@ -734,8 +740,8 @@ static void coroutine_fn aio_read_response(void *opaque)
error_report("%s", sd_strerror(rsp.result));
}
- rest = free_aio_req(s, aio_req);
- if (!rest) {
+ free_aio_req(s, aio_req);
+ if (!acb->nr_pending) {
/*
* We've finished all requests which belong to the AIOCB, so
* we can switch back to sd_co_readv/writev now.
@@ -768,7 +774,8 @@ static int aio_flush_request(void *opaque)
{
BDRVSheepdogState *s = opaque;
- return !QLIST_EMPTY(&s->outstanding_aio_head);
+ return !QLIST_EMPTY(&s->inflight_aio_head) ||
+ !QLIST_EMPTY(&s->pending_aio_head);
}
static int set_nodelay(int fd)
@@ -993,7 +1000,7 @@ static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
}
if (wlen) {
- ret = qemu_co_sendv(s->fd, iov, wlen, aio_req->iov_offset);
+ ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
error_report("failed to send a data, %s", strerror(errno));
@@ -1085,7 +1092,8 @@ static int sd_open(BlockDriverState *bs, const char *filename, int flags)
strstart(filename, "sheepdog:", (const char **)&filename);
- QLIST_INIT(&s->outstanding_aio_head);
+ QLIST_INIT(&s->inflight_aio_head);
+ QLIST_INIT(&s->pending_aio_head);
s->fd = -1;
memset(vdi, 0, sizeof(vdi));
@@ -1447,6 +1455,7 @@ static void coroutine_fn sd_write_done(SheepdogAIOCB *acb)
iov.iov_len = sizeof(s->inode);
aio_req = alloc_aio_req(s, acb, vid_to_vdi_oid(s->inode.vdi_id),
data_len, offset, 0, 0, offset);
+ QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
ret = add_aio_request(s, aio_req, &iov, 1, 0, AIOCB_WRITE_UDATA);
if (ret) {
free_aio_req(s, aio_req);
@@ -1515,7 +1524,7 @@ out:
* Send I/O requests to the server.
*
* This function sends requests to the server, links the requests to
- * the outstanding_list in BDRVSheepdogState, and exits without
+ * the inflight_list in BDRVSheepdogState, and exits without
* waiting the response. The responses are received in the
* `aio_read_response' function which is called from the main loop as
* a fd handler.
@@ -1547,6 +1556,12 @@ static int coroutine_fn sd_co_rw_vector(void *p)
}
}
+ /*
+ * Make sure we don't free the aiocb before we are done with all requests.
+ * This additional reference is dropped at the end of this function.
+ */
+ acb->nr_pending++;
+
while (done != total) {
uint8_t flags = 0;
uint64_t old_oid = 0;
@@ -1556,37 +1571,40 @@ static int coroutine_fn sd_co_rw_vector(void *p)
len = MIN(total - done, SD_DATA_OBJ_SIZE - offset);
- if (!inode->data_vdi_id[idx]) {
- if (acb->aiocb_type == AIOCB_READ_UDATA) {
+ switch (acb->aiocb_type) {
+ case AIOCB_READ_UDATA:
+ if (!inode->data_vdi_id[idx]) {
+ qemu_iovec_memset(acb->qiov, done, 0, len);
goto done;
}
-
- create = 1;
- } else if (acb->aiocb_type == AIOCB_WRITE_UDATA
- && !is_data_obj_writable(inode, idx)) {
- /* Copy-On-Write */
- create = 1;
- old_oid = oid;
- flags = SD_FLAG_CMD_COW;
+ break;
+ case AIOCB_WRITE_UDATA:
+ if (!inode->data_vdi_id[idx]) {
+ create = 1;
+ } else if (!is_data_obj_writable(inode, idx)) {
+ /* Copy-On-Write */
+ create = 1;
+ old_oid = oid;
+ flags = SD_FLAG_CMD_COW;
+ }
+ break;
+ default:
+ break;
}
if (create) {
- dprintf("update ino (%" PRIu32") %" PRIu64 " %" PRIu64
- " %" PRIu64 "\n", inode->vdi_id, oid,
+ dprintf("update ino (%" PRIu32 ") %" PRIu64 " %" PRIu64 " %ld\n",
+ inode->vdi_id, oid,
vid_to_data_oid(inode->data_vdi_id[idx], idx), idx);
oid = vid_to_data_oid(inode->vdi_id, idx);
- dprintf("new oid %lx\n", oid);
+ dprintf("new oid %" PRIx64 "\n", oid);
}
aio_req = alloc_aio_req(s, acb, oid, len, offset, flags, old_oid, done);
if (create) {
AIOReq *areq;
- QLIST_FOREACH(areq, &s->outstanding_aio_head,
- outstanding_aio_siblings) {
- if (areq == aio_req) {
- continue;
- }
+ QLIST_FOREACH(areq, &s->inflight_aio_head, aio_siblings) {
if (areq->oid == oid) {
/*
* Sheepdog cannot handle simultaneous create
@@ -1596,11 +1614,14 @@ static int coroutine_fn sd_co_rw_vector(void *p)
*/
aio_req->flags = 0;
aio_req->base_oid = 0;
+ QLIST_INSERT_HEAD(&s->pending_aio_head, aio_req,
+ aio_siblings);
goto done;
}
}
}
+ QLIST_INSERT_HEAD(&s->inflight_aio_head, aio_req, aio_siblings);
ret = add_aio_request(s, aio_req, acb->qiov->iov, acb->qiov->niov,
create, acb->aiocb_type);
if (ret < 0) {
@@ -1615,7 +1636,7 @@ static int coroutine_fn sd_co_rw_vector(void *p)
done += len;
}
out:
- if (QLIST_EMPTY(&acb->aioreq_head)) {
+ if (!--acb->nr_pending) {
return acb->ret;
}
return 1;
@@ -1628,7 +1649,6 @@ static coroutine_fn int sd_co_writev(BlockDriverState *bs, int64_t sector_num,
int ret;
if (bs->growable && sector_num + nb_sectors > bs->total_sectors) {
- /* TODO: shouldn't block here */
ret = sd_truncate(bs, (sector_num + nb_sectors) * SECTOR_SIZE);
if (ret < 0) {
return ret;
@@ -1655,20 +1675,12 @@ static coroutine_fn int sd_co_readv(BlockDriverState *bs, int64_t sector_num,
int nb_sectors, QEMUIOVector *qiov)
{
SheepdogAIOCB *acb;
- int i, ret;
+ int ret;
acb = sd_aio_setup(bs, qiov, sector_num, nb_sectors, NULL, NULL);
acb->aiocb_type = AIOCB_READ_UDATA;
acb->aio_done_func = sd_finish_aiocb;
- /*
- * TODO: we can do better; we don't need to initialize
- * blindly.
- */
- for (i = 0; i < qiov->niov; i++) {
- memset(qiov->iov[i].iov_base, 0, qiov->iov[i].iov_len);
- }
-
ret = sd_co_rw_vector(acb);
if (ret <= 0) {
qemu_aio_release(acb);
@@ -1696,7 +1708,7 @@ static int coroutine_fn sd_co_flush_to_disk(BlockDriverState *bs)
hdr.opcode = SD_OP_FLUSH_VDI;
hdr.oid = vid_to_vdi_oid(inode->vdi_id);
- ret = do_co_req(s->flush_fd, (SheepdogReq *)&hdr, NULL, &wlen, &rlen);
+ ret = do_req(s->flush_fd, (SheepdogReq *)&hdr, NULL, &wlen, &rlen);
if (ret) {
error_report("failed to send a request to the sheep");
return ret;
@@ -1726,7 +1738,7 @@ static int sd_snapshot_create(BlockDriverState *bs, QEMUSnapshotInfo *sn_info)
SheepdogInode *inode;
unsigned int datalen;
- dprintf("sn_info: name %s id_str %s s: name %s vm_state_size %d "
+ dprintf("sn_info: name %s id_str %s s: name %s vm_state_size %" PRId64 " "
"is_snapshot %d\n", sn_info->name, sn_info->id_str,
s->name, sn_info->vm_state_size, s->is_snapshot);
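
The sd_co_rw_vector() changes above replace the old "is the request list empty?" test with an explicit acb->nr_pending counter, and take one extra reference for the duration of the submission loop so a completion racing with submission cannot free the AIOCB early. A minimal sketch of that pattern, with hypothetical names (Ctl, submit_all, complete_one) standing in for the sheepdog-specific types and calls:

    /* Hold one artificial reference while submitting; drop it at the end. */
    typedef struct Ctl {
        int nr_pending;
        int ret;
    } Ctl;

    /* Called from each request's completion path. */
    void complete_one(Ctl *c, int ret)
    {
        if (ret < 0) {
            c->ret = ret;
        }
        if (--c->nr_pending == 0) {
            /* last reference gone: now it is safe to finish/free c */
        }
    }

    int submit_all(Ctl *c, int nreq)
    {
        int i;

        c->nr_pending++;                /* reference held by the submitter */
        for (i = 0; i < nreq; i++) {
            c->nr_pending++;            /* one reference per in-flight request */
            /* ...issue request i; its completion calls complete_one(c, ret)... */
        }
        if (--c->nr_pending == 0) {
            return c->ret;              /* everything already completed */
        }
        return 1;                       /* completions still outstanding */
    }
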
diff --git a/block/vvfat.c b/block/vvfat.c
index 0fd3367d82..7b1dcee144 100644
--- a/block/vvfat.c
+++ b/block/vvfat.c
@@ -359,11 +359,12 @@ typedef struct BDRVVVFATState {
* if the position is outside the specified geometry, fill maximum value for CHS
* and return 1 to signal overflow.
*/
-static int sector2CHS(BlockDriverState* bs, mbr_chs_t * chs, int spos){
+static int sector2CHS(mbr_chs_t *chs, int spos, int cyls, int heads, int secs)
+{
int head,sector;
- sector = spos % (bs->secs); spos/= bs->secs;
- head = spos % (bs->heads); spos/= bs->heads;
- if(spos >= bs->cyls){
+ sector = spos % secs; spos /= secs;
+ head = spos % heads; spos /= heads;
+ if (spos >= cyls) {
/* Overflow,
it happens if 32bit sector positions are used, while CHS is only 24bit.
Windows/Dos is said to take 1023/255/63 as nonrepresentable CHS */
@@ -378,7 +379,7 @@ static int sector2CHS(BlockDriverState* bs, mbr_chs_t * chs, int spos){
return 0;
}
-static void init_mbr(BDRVVVFATState* s)
+static void init_mbr(BDRVVVFATState *s, int cyls, int heads, int secs)
{
/* TODO: if the files mbr.img and bootsect.img exist, use them */
mbr_t* real_mbr=(mbr_t*)s->first_sectors;
@@ -393,12 +394,15 @@ static void init_mbr(BDRVVVFATState* s)
partition->attributes=0x80; /* bootable */
/* LBA is used when partition is outside the CHS geometry */
- lba = sector2CHS(s->bs, &partition->start_CHS, s->first_sectors_number-1);
- lba|= sector2CHS(s->bs, &partition->end_CHS, s->sector_count);
+ lba = sector2CHS(&partition->start_CHS, s->first_sectors_number - 1,
+ cyls, heads, secs);
+ lba |= sector2CHS(&partition->end_CHS, s->bs->total_sectors - 1,
+ cyls, heads, secs);
/*LBA partitions are identified only by start/length_sector_long not by CHS*/
- partition->start_sector_long =cpu_to_le32(s->first_sectors_number-1);
- partition->length_sector_long=cpu_to_le32(s->sector_count - s->first_sectors_number+1);
+ partition->start_sector_long = cpu_to_le32(s->first_sectors_number - 1);
+ partition->length_sector_long = cpu_to_le32(s->bs->total_sectors
+ - s->first_sectors_number + 1);
/* FAT12/FAT16/FAT32 */
/* DOS uses different types when partition is LBA,
@@ -830,7 +834,7 @@ static inline off_t cluster2sector(BDRVVVFATState* s, uint32_t cluster_num)
}
static int init_directories(BDRVVVFATState* s,
- const char* dirname)
+ const char *dirname, int heads, int secs)
{
bootsector_t* bootsector;
mapping_t* mapping;
@@ -957,8 +961,8 @@ static int init_directories(BDRVVVFATState* s,
bootsector->media_type=(s->first_sectors_number>1?0xf8:0xf0); /* media descriptor (f8=hd, f0=3.5 fd)*/
s->fat.pointer[0] = bootsector->media_type;
bootsector->sectors_per_fat=cpu_to_le16(s->sectors_per_fat);
- bootsector->sectors_per_track=cpu_to_le16(s->bs->secs);
- bootsector->number_of_heads=cpu_to_le16(s->bs->heads);
+ bootsector->sectors_per_track = cpu_to_le16(secs);
+ bootsector->number_of_heads = cpu_to_le16(heads);
bootsector->hidden_sectors=cpu_to_le32(s->first_sectors_number==1?0:0x3f);
bootsector->total_sectors=cpu_to_le32(s->sector_count>0xffff?s->sector_count:0);
@@ -991,7 +995,7 @@ static void vvfat_rebind(BlockDriverState *bs)
static int vvfat_open(BlockDriverState *bs, const char* dirname, int flags)
{
BDRVVVFATState *s = bs->opaque;
- int i;
+ int i, cyls, heads, secs;
#ifdef DEBUG
vvv = s;
@@ -1033,24 +1037,28 @@ DLOG(if (stderr == NULL) {
/* 1.44MB or 2.88MB floppy. 2.88MB can be FAT12 (default) or FAT16. */
if (!s->fat_type) {
s->fat_type = 12;
- bs->secs = 36;
+ secs = 36;
s->sectors_per_cluster=2;
} else {
- bs->secs=(s->fat_type == 12 ? 18 : 36);
+ secs = s->fat_type == 12 ? 18 : 36;
s->sectors_per_cluster=1;
}
s->first_sectors_number = 1;
- bs->cyls=80; bs->heads=2;
+ cyls = 80;
+ heads = 2;
} else {
/* 32MB or 504MB disk*/
if (!s->fat_type) {
s->fat_type = 16;
}
- bs->cyls=(s->fat_type == 12 ? 64 : 1024);
- bs->heads=16; bs->secs=63;
+ cyls = s->fat_type == 12 ? 64 : 1024;
+ heads = 16;
+ secs = 63;
}
+ fprintf(stderr, "vvfat %s chs %d,%d,%d\n",
+ dirname, cyls, heads, secs);
- s->sector_count=bs->cyls*bs->heads*bs->secs-(s->first_sectors_number-1);
+ s->sector_count = cyls * heads * secs - (s->first_sectors_number - 1);
if (strstr(dirname, ":rw:")) {
if (enable_write_target(s))
@@ -1066,18 +1074,16 @@ DLOG(if (stderr == NULL) {
else
dirname += i+1;
- bs->total_sectors=bs->cyls*bs->heads*bs->secs;
+ bs->total_sectors = cyls * heads * secs;
- if(init_directories(s, dirname))
+ if (init_directories(s, dirname, heads, secs)) {
return -1;
+ }
s->sector_count = s->faked_sectors + s->sectors_per_cluster*s->cluster_count;
- if(s->first_sectors_number==0x40)
- init_mbr(s);
- else {
- /* MS-DOS does not like to know about CHS (?). */
- bs->heads = bs->cyls = bs->secs = 0;
+ if (s->first_sectors_number == 0x40) {
+ init_mbr(s, cyls, heads, secs);
}
// assert(is_consistent(s));
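
The reworked sector2CHS() above takes the geometry as explicit cyls/heads/secs arguments instead of reading hints from BlockDriverState. For reference, the conversion is the usual LBA-to-CHS split; a standalone sketch under the same assumptions (overflow reported to the caller, 1-based sector numbers), with a hypothetical struct in place of mbr_chs_t:

    struct chs { int cylinder, head, sector; };

    /* Split a linear sector number into C/H/S for the given geometry;
     * returns 1 if the position is not representable, in which case the
     * caller falls back to an LBA-only partition entry. */
    int lba_to_chs(struct chs *out, int lba, int cyls, int heads, int secs)
    {
        int sector = lba % secs;  lba /= secs;
        int head   = lba % heads; lba /= heads;

        if (lba >= cyls) {
            return 1;
        }
        out->cylinder = lba;
        out->head     = head;
        out->sector   = sector + 1;   /* CHS sector numbers start at 1 */
        return 0;
    }
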
diff --git a/block_int.h b/block_int.h
index 1fb5352d0e..6c1d9cafb1 100644
--- a/block_int.h
+++ b/block_int.h
@@ -31,8 +31,9 @@
#include "qemu-timer.h"
#include "qapi-types.h"
-#define BLOCK_FLAG_ENCRYPT 1
-#define BLOCK_FLAG_COMPAT6 4
+#define BLOCK_FLAG_ENCRYPT 1
+#define BLOCK_FLAG_COMPAT6 4
+#define BLOCK_FLAG_LAZY_REFCOUNTS 8
#define BLOCK_IO_LIMIT_READ 0
#define BLOCK_IO_LIMIT_WRITE 1
@@ -41,16 +42,17 @@
#define BLOCK_IO_SLICE_TIME 100000000
#define NANOSECONDS_PER_SECOND 1000000000.0
-#define BLOCK_OPT_SIZE "size"
-#define BLOCK_OPT_ENCRYPT "encryption"
-#define BLOCK_OPT_COMPAT6 "compat6"
-#define BLOCK_OPT_BACKING_FILE "backing_file"
-#define BLOCK_OPT_BACKING_FMT "backing_fmt"
-#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
-#define BLOCK_OPT_TABLE_SIZE "table_size"
-#define BLOCK_OPT_PREALLOC "preallocation"
-#define BLOCK_OPT_SUBFMT "subformat"
-#define BLOCK_OPT_COMPAT_LEVEL "compat"
+#define BLOCK_OPT_SIZE "size"
+#define BLOCK_OPT_ENCRYPT "encryption"
+#define BLOCK_OPT_COMPAT6 "compat6"
+#define BLOCK_OPT_BACKING_FILE "backing_file"
+#define BLOCK_OPT_BACKING_FMT "backing_fmt"
+#define BLOCK_OPT_CLUSTER_SIZE "cluster_size"
+#define BLOCK_OPT_TABLE_SIZE "table_size"
+#define BLOCK_OPT_PREALLOC "preallocation"
+#define BLOCK_OPT_SUBFMT "subformat"
+#define BLOCK_OPT_COMPAT_LEVEL "compat"
+#define BLOCK_OPT_LAZY_REFCOUNTS "lazy_refcounts"
typedef struct BdrvTrackedRequest BdrvTrackedRequest;
@@ -320,7 +322,6 @@ struct BlockDriverState {
/* NOTE: the following infos are only hints for real hardware
drivers. They are not used by the block driver */
- int cyls, heads, secs, translation;
BlockErrorAction on_read_error, on_write_error;
bool iostatus_enabled;
BlockDeviceIoStatus iostatus;
diff --git a/blockdev.c b/blockdev.c
index 9e0a72a269..8669142704 100644
--- a/blockdev.c
+++ b/blockdev.c
@@ -7,8 +7,8 @@
* later. See the COPYING file in the top-level directory.
*/
-#include "block.h"
#include "blockdev.h"
+#include "hw/block-common.h"
#include "monitor.h"
#include "qerror.h"
#include "qemu-option.h"
@@ -330,15 +330,15 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi)
max_devs = if_max_devs[type];
if (cyls || heads || secs) {
- if (cyls < 1 || (type == IF_IDE && cyls > 16383)) {
+ if (cyls < 1) {
error_report("invalid physical cyls number");
return NULL;
}
- if (heads < 1 || (type == IF_IDE && heads > 16)) {
+ if (heads < 1) {
error_report("invalid physical heads number");
return NULL;
}
- if (secs < 1 || (type == IF_IDE && secs > 63)) {
+ if (secs < 1) {
error_report("invalid physical secs number");
return NULL;
}
@@ -398,11 +398,11 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi)
#endif
if ((buf = qemu_opt_get(opts, "format")) != NULL) {
- if (strcmp(buf, "?") == 0) {
- error_printf("Supported formats:");
- bdrv_iterate_format(bdrv_format_print, NULL);
- error_printf("\n");
- return NULL;
+ if (is_help_option(buf)) {
+ error_printf("Supported formats:");
+ bdrv_iterate_format(bdrv_format_print, NULL);
+ error_printf("\n");
+ return NULL;
}
drv = bdrv_find_whitelisted_format(buf);
if (!drv) {
@@ -530,11 +530,13 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi)
dinfo->type = type;
dinfo->bus = bus_id;
dinfo->unit = unit_id;
+ dinfo->cyls = cyls;
+ dinfo->heads = heads;
+ dinfo->secs = secs;
+ dinfo->trans = translation;
dinfo->opts = opts;
dinfo->refcount = 1;
- if (serial) {
- pstrcpy(dinfo->serial, sizeof(dinfo->serial), serial);
- }
+ dinfo->serial = serial;
QTAILQ_INSERT_TAIL(&drives, dinfo, next);
bdrv_set_on_error(dinfo->bdrv, on_read_error, on_write_error);
@@ -547,17 +549,7 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi)
case IF_SCSI:
case IF_XEN:
case IF_NONE:
- switch(media) {
- case MEDIA_DISK:
- if (cyls != 0) {
- bdrv_set_geometry_hint(dinfo->bdrv, cyls, heads, secs);
- bdrv_set_translation_hint(dinfo->bdrv, translation);
- }
- break;
- case MEDIA_CDROM:
- dinfo->media_cd = 1;
- break;
- }
+ dinfo->media_cd = media == MEDIA_CDROM;
break;
case IF_SD:
case IF_FLOPPY:
@@ -609,6 +601,10 @@ DriveInfo *drive_init(QemuOpts *opts, int default_to_scsi)
bdrv_flags |= ro ? 0 : BDRV_O_RDWR;
+ if (ro && copy_on_read) {
+ error_report("warning: disabling copy_on_read on readonly drive");
+ }
+
ret = bdrv_open(dinfo->bdrv, file, bdrv_flags, drv);
if (ret < 0) {
error_report("could not open disk image %s: %s",
diff --git a/blockdev.h b/blockdev.h
index 260e16b3c6..5f27b643be 100644
--- a/blockdev.h
+++ b/blockdev.h
@@ -17,8 +17,6 @@
void blockdev_mark_auto_del(BlockDriverState *bs);
void blockdev_auto_del(BlockDriverState *bs);
-#define BLOCK_SERIAL_STRLEN 20
-
typedef enum {
IF_DEFAULT = -1, /* for use with drive_add() only */
IF_NONE,
@@ -35,8 +33,9 @@ struct DriveInfo {
int unit;
int auto_del; /* see blockdev_mark_auto_del() */
int media_cd;
+ int cyls, heads, secs, trans;
QemuOpts *opts;
- char serial[BLOCK_SERIAL_STRLEN + 1];
+ const char *serial;
QTAILQ_ENTRY(DriveInfo) next;
int refcount;
};
diff --git a/bsd-user/main.c b/bsd-user/main.c
index cd33d655f5..095ae8eaaa 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -681,7 +681,7 @@ static void usage(void)
"-g port wait gdb connection to port\n"
"-L path set the elf interpreter prefix (default=%s)\n"
"-s size set the stack size in bytes (default=%ld)\n"
- "-cpu model select CPU (-cpu ? for list)\n"
+ "-cpu model select CPU (-cpu help for list)\n"
"-drop-ld-preload drop LD_PRELOAD for target process\n"
"-E var=value sets/modifies targets environment variable(s)\n"
"-U var unsets targets environment variable(s)\n"
@@ -825,7 +825,7 @@ int main(int argc, char **argv)
qemu_uname_release = argv[optind++];
} else if (!strcmp(r, "cpu")) {
cpu_model = argv[optind++];
- if (strcmp(cpu_model, "?") == 0) {
+ if (is_help_option(cpu_model)) {
/* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list)
cpu_list(stdout, &fprintf);
diff --git a/configure b/configure
index 6128ba7522..280726c3f8 100755
--- a/configure
+++ b/configure
@@ -134,9 +134,9 @@ vnc_tls=""
vnc_sasl=""
vnc_jpeg=""
vnc_png=""
-vnc_thread="no"
xen=""
xen_ctrl_version=""
+xen_pci_passthrough=""
linux_aio=""
cap_ng=""
attr=""
@@ -666,10 +666,6 @@ for opt do
;;
--enable-vnc-png) vnc_png="yes"
;;
- --disable-vnc-thread) vnc_thread="no"
- ;;
- --enable-vnc-thread) vnc_thread="yes"
- ;;
--disable-slirp) slirp="no"
;;
--disable-uuid) uuid="no"
@@ -684,6 +680,10 @@ for opt do
;;
--enable-xen) xen="yes"
;;
+ --disable-xen-pci-passthrough) xen_pci_passthrough="no"
+ ;;
+ --enable-xen-pci-passthrough) xen_pci_passthrough="yes"
+ ;;
--disable-brlapi) brlapi="no"
;;
--enable-brlapi) brlapi="yes"
@@ -924,6 +924,7 @@ mips-softmmu \
mipsel-softmmu \
mips64-softmmu \
mips64el-softmmu \
+or32-softmmu \
ppc-softmmu \
ppcemb-softmmu \
ppc64-softmmu \
@@ -950,6 +951,7 @@ microblaze-linux-user \
microblazeel-linux-user \
mips-linux-user \
mipsel-linux-user \
+or32-linux-user \
ppc-linux-user \
ppc64-linux-user \
ppc64abi32-linux-user \
@@ -1031,6 +1033,8 @@ echo " (affects only QEMU, not qemu-img)"
echo " --enable-mixemu enable mixer emulation"
echo " --disable-xen disable xen backend driver support"
echo " --enable-xen enable xen backend driver support"
+echo " --disable-xen-pci-passthrough"
+echo " --enable-xen-pci-passthrough"
echo " --disable-brlapi disable BrlAPI"
echo " --enable-brlapi enable BrlAPI"
echo " --disable-vnc-tls disable TLS encryption for VNC server"
@@ -1041,8 +1045,6 @@ echo " --disable-vnc-jpeg disable JPEG lossy compression for VNC server"
echo " --enable-vnc-jpeg enable JPEG lossy compression for VNC server"
echo " --disable-vnc-png disable PNG compression for VNC server (default)"
echo " --enable-vnc-png enable PNG compression for VNC server"
-echo " --disable-vnc-thread disable threaded VNC server"
-echo " --enable-vnc-thread enable threaded VNC server"
echo " --disable-curses disable curses output"
echo " --enable-curses enable curses output"
echo " --disable-curl disable curl connectivity"
@@ -1139,10 +1141,27 @@ else
exit 1
fi
+# Consult white-list to determine whether to enable werror
+# by default. Only enable by default for git builds
+z_version=`cut -f3 -d. $source_path/VERSION`
+
+if test -z "$werror" ; then
+ if test "$z_version" = "50" -a \
+ "$linux" = "yes" ; then
+ werror="yes"
+ else
+ werror="no"
+ fi
+fi
+
gcc_flags="-Wold-style-declaration -Wold-style-definition -Wtype-limits"
gcc_flags="-Wformat-security -Wformat-y2k -Winit-self -Wignored-qualifiers $gcc_flags"
gcc_flags="-Wmissing-include-dirs -Wempty-body -Wnested-externs $gcc_flags"
gcc_flags="-fstack-protector-all -Wendif-labels $gcc_flags"
+# Note that we do not add -Werror to gcc_flags here, because that would
+# enable it for all configure tests. If a configure test failed due
+# to -Werror this would just silently disable some features,
+# so it's too error prone.
cat > $TMPC << EOF
int main(void) { return 0; }
EOF
@@ -1365,7 +1384,6 @@ if test "$xen" != "no" ; then
# Xen (any)
cat > $TMPC <<EOF
#include <xenctrl.h>
-#include <xs.h>
int main(void) {
return 0;
}
@@ -1378,10 +1396,10 @@ EOF
xen=no
# Xen unstable
- elif (
- cat > $TMPC <<EOF
+ elif
+ cat > $TMPC <<EOF &&
#include <xenctrl.h>
-#include <xs.h>
+#include <xenstore.h>
#include <stdint.h>
#include <xen/hvm/hvm_info_table.h>
#if !defined(HVM_MAX_VCPUS)
@@ -1399,12 +1417,12 @@ int main(void) {
}
EOF
compile_prog "" "$xen_libs"
- ) ; then
+ then
xen_ctrl_version=420
xen=yes
- elif (
- cat > $TMPC <<EOF
+ elif
+ cat > $TMPC <<EOF &&
#include <xenctrl.h>
#include <xs.h>
#include <stdint.h>
@@ -1413,9 +1431,8 @@ EOF
# error HVM_MAX_VCPUS not defined
#endif
int main(void) {
- xc_interface *xc;
xs_daemon_open();
- xc = xc_interface_open(0, 0, 0);
+ xc_interface_open(0, 0, 0);
xc_hvm_set_mem_type(0, 0, HVMMEM_ram_ro, 0, 0);
xc_gnttab_open(NULL, 0);
xc_domain_add_to_physmap(0, 0, XENMAPSPACE_gmfn, 0, 0);
@@ -1423,13 +1440,13 @@ int main(void) {
}
EOF
compile_prog "" "$xen_libs"
- ) ; then
+ then
xen_ctrl_version=410
xen=yes
# Xen 4.0.0
- elif (
- cat > $TMPC <<EOF
+ elif
+ cat > $TMPC <<EOF &&
#include <xenctrl.h>
#include <xs.h>
#include <stdint.h>
@@ -1450,13 +1467,13 @@ int main(void) {
}
EOF
compile_prog "" "$xen_libs"
- ) ; then
+ then
xen_ctrl_version=400
xen=yes
# Xen 3.4.0
- elif (
- cat > $TMPC <<EOF
+ elif
+ cat > $TMPC <<EOF &&
#include <xenctrl.h>
#include <xs.h>
int main(void) {
@@ -1472,13 +1489,13 @@ int main(void) {
}
EOF
compile_prog "" "$xen_libs"
- ) ; then
+ then
xen_ctrl_version=340
xen=yes
# Xen 3.3.0
- elif (
- cat > $TMPC <<EOF
+ elif
+ cat > $TMPC <<EOF &&
#include <xenctrl.h>
#include <xs.h>
int main(void) {
@@ -1490,7 +1507,7 @@ int main(void) {
}
EOF
compile_prog "" "$xen_libs"
- ) ; then
+ then
xen_ctrl_version=330
xen=yes
@@ -1507,6 +1524,25 @@ EOF
fi
fi
+if test "$xen_pci_passthrough" != "no"; then
+ if test "$xen" = "yes" && test "$linux" = "yes" &&
+ test "$xen_ctrl_version" -ge 340; then
+ xen_pci_passthrough=yes
+ else
+ if test "$xen_pci_passthrough" = "yes"; then
+ echo "ERROR"
+ echo "ERROR: User requested feature Xen PCI Passthrough"
+ echo "ERROR: but this feature require /sys from Linux"
+ if test "$xen_ctrl_version" -lt 340; then
+ echo "ERROR: This feature does not work with Xen 3.3"
+ fi
+ echo "ERROR"
+ exit 1;
+ fi
+ xen_pci_passthrough=no
+ fi
+fi
+
##########################################
# pkg-config probe
@@ -1691,7 +1727,7 @@ cat > $TMPC <<EOF
int main(void) {
png_structp png_ptr;
png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
- return 0;
+ return png_ptr != 0;
}
EOF
if $pkg_config libpng --modversion >/dev/null 2>&1; then
@@ -1784,7 +1820,8 @@ if test "$vde" != "no" ; then
int main(void)
{
struct vde_open_args a = {0, 0, 0};
- vde_open("", "", &a);
+ char s[] = "";
+ vde_open(s, s, &a);
return 0;
}
EOF
@@ -1853,7 +1890,7 @@ for drv in $audio_drv_list; do
case $drv in
alsa)
audio_drv_probe $drv alsa/asoundlib.h -lasound \
- "snd_pcm_t **handle; return snd_pcm_close(*handle);"
+ "return snd_pcm_close((snd_pcm_t *)0);"
libs_softmmu="-lasound $libs_softmmu"
;;
@@ -2047,7 +2084,7 @@ if test "$cap" != "no" ; then
cat > $TMPC <<EOF
#include <stdio.h>
#include <sys/capability.h>
-int main(void) { cap_t caps; caps = cap_init(); }
+int main(void) { cap_t caps; caps = cap_init(); return caps != NULL; }
EOF
if compile_prog "" "-lcap" ; then
cap=yes
@@ -2304,6 +2341,7 @@ cat > $TMPC << EOF
#define _ATFILE_SOURCE
#include <stddef.h>
#include <fcntl.h>
+#include <sys/stat.h>
int main(void)
{
@@ -2554,7 +2592,7 @@ if test "$libiscsi" != "no" ; then
#include <iscsi/iscsi.h>
int main(void) { iscsi_unmap_sync(NULL,0,0,0,NULL,0); return 0; }
EOF
- if compile_prog "-Werror" "-liscsi" ; then
+ if compile_prog "" "-liscsi" ; then
libiscsi="yes"
LIBS="$LIBS -liscsi"
else
@@ -2618,13 +2656,22 @@ if test "$smartcard" != "no" ; then
#include <pk11pub.h>
int main(void) { PK11_FreeSlot(0); return 0; }
EOF
- smartcard_cflags="-I\$(SRC_PATH)/libcacard"
+ smartcard_includes="-I\$(SRC_PATH)/libcacard"
libcacard_libs="$($pkg_config --libs nss 2>/dev/null) $glib_libs"
libcacard_cflags="$($pkg_config --cflags nss 2>/dev/null) $glib_cflags"
+ test_cflags="$libcacard_cflags"
+ # The header files in nss < 3.13.3 have a bug which causes them to
+ # emit a warning. If we're going to compile QEMU with -Werror, then
+ # test that the headers don't have this bug. Otherwise we would pass
+ # the configure test but fail to compile QEMU later.
+ if test "$werror" = "yes"; then
+ test_cflags="-Werror $test_cflags"
+ fi
if $pkg_config --atleast-version=3.12.8 nss >/dev/null 2>&1 && \
- compile_prog "$smartcard_cflags $libcacard_cflags" "$libcacard_libs"; then
+ compile_prog "$test_cflags" "$libcacard_libs"; then
smartcard_nss="yes"
- QEMU_CFLAGS="$QEMU_CFLAGS $smartcard_cflags $libcacard_cflags"
+ QEMU_CFLAGS="$QEMU_CFLAGS $libcacard_cflags"
+ QEMU_INCLUDES="$QEMU_INCLUDES $smartcard_includes"
libs_softmmu="$libcacard_libs $libs_softmmu"
else
if test "$smartcard_nss" = "yes"; then
@@ -2753,7 +2800,7 @@ fi
# specification is necessary
if test "$vhost_net" = "yes" && test "$cpu" = "i386"; then
cat > $TMPC << EOF
-int sfaa(unsigned *ptr)
+static int sfaa(int *ptr)
{
return __sync_fetch_and_and(ptr, 0);
}
@@ -2766,7 +2813,7 @@ int main(int argc, char **argv)
}
EOF
if ! compile_prog "" "" ; then
- CFLAGS+="-march=i486"
+ QEMU_CFLAGS="-march=i486 $QEMU_CFLAGS"
fi
fi
@@ -2808,7 +2855,7 @@ fi
##########################################
# check if we have open_by_handle_at
-open_by_hande_at=no
+open_by_handle_at=no
cat > $TMPC << EOF
#include <fcntl.h>
#if !defined(AT_EMPTY_PATH)
@@ -2836,6 +2883,37 @@ if compile_prog "" "" ; then
fi
########################################
+# check whether we can disable the -Wunused-but-set-variable
+# option with a pragma (this is needed to silence a warning in
+# some versions of the valgrind VALGRIND_STACK_DEREGISTER macro.)
+# This test has to be compiled with -Werror as otherwise an
+# unknown pragma is only a warning.
+pragma_disable_unused_but_set=no
+cat > $TMPC << EOF
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+int main(void) {
+ return 0;
+}
+EOF
+if compile_prog "-Werror" "" ; then
+ pragma_disable_unused_but_set=yes
+fi
+
+########################################
+# check if we have valgrind/valgrind.h
+
+valgrind_h=no
+cat > $TMPC << EOF
+#include <valgrind/valgrind.h>
+int main(void) {
+ return 0;
+}
+EOF
+if compile_prog "" "" ; then
+ valgrind_h=yes
+fi
+
+########################################
# check if environ is declared
has_environ=no
@@ -2858,19 +2936,6 @@ if test "$debug" = "no" ; then
CFLAGS="-O2 -D_FORTIFY_SOURCE=2 $CFLAGS"
fi
-# Consult white-list to determine whether to enable werror
-# by default. Only enable by default for git builds
-z_version=`cut -f3 -d. $source_path/VERSION`
-
-if test -z "$werror" ; then
- if test "$z_version" = "50" -a \
- "$linux" = "yes" ; then
- werror="yes"
- else
- werror="no"
- fi
-fi
-
# Disable zero malloc errors for official releases unless explicitly told to
# enable/disable
if test -z "$zero_malloc" ; then
@@ -2881,7 +2946,8 @@ if test -z "$zero_malloc" ; then
fi
fi
-if test "$werror" = "yes" ; then
+# Now we've finished running tests it's OK to add -Werror to the compiler flags
+if test "$werror" = "yes"; then
QEMU_CFLAGS="-Werror $QEMU_CFLAGS"
fi
@@ -2998,7 +3064,6 @@ if test "$vnc" = "yes" ; then
echo "VNC SASL support $vnc_sasl"
echo "VNC JPEG support $vnc_jpeg"
echo "VNC PNG support $vnc_png"
- echo "VNC thread $vnc_thread"
fi
if test -n "$sparc_cpu"; then
echo "Target Sparc Arch $sparc_cpu"
@@ -3174,9 +3239,6 @@ if test "$vnc_png" = "yes" ; then
echo "CONFIG_VNC_PNG=y" >> $config_host_mak
echo "VNC_PNG_CFLAGS=$vnc_png_cflags" >> $config_host_mak
fi
-if test "$vnc_thread" = "yes" ; then
- echo "CONFIG_VNC_THREAD=y" >> $config_host_mak
-fi
if test "$fnmatch" = "yes" ; then
echo "CONFIG_FNMATCH=y" >> $config_host_mak
fi
@@ -3365,6 +3427,14 @@ if test "$linux_magic_h" = "yes" ; then
echo "CONFIG_LINUX_MAGIC_H=y" >> $config_host_mak
fi
+if test "$pragma_disable_unused_but_set" = "yes" ; then
+ echo "CONFIG_PRAGMA_DISABLE_UNUSED_BUT_SET=y" >> $config_host_mak
+fi
+
+if test "$valgrind_h" = "yes" ; then
+ echo "CONFIG_VALGRIND_H=y" >> $config_host_mak
+fi
+
if test "$has_environ" = "yes" ; then
echo "CONFIG_HAS_ENVIRON=y" >> $config_host_mak
fi
@@ -3465,6 +3535,7 @@ done
# use included Linux headers
if test "$linux" = "yes" ; then
+ mkdir -p linux-headers
case "$cpu" in
i386|x86_64)
symlink "$source_path/linux-headers/asm-x86" linux-headers/asm
@@ -3485,7 +3556,7 @@ target_arch2=`echo $target | cut -d '-' -f 1`
target_bigendian="no"
case "$target_arch2" in
- armeb|lm32|m68k|microblaze|mips|mipsn32|mips64|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
+ armeb|lm32|m68k|microblaze|mips|mipsn32|mips64|or32|ppc|ppcemb|ppc64|ppc64abi32|s390x|sh4eb|sparc|sparc64|sparc32plus|xtensaeb)
target_bigendian=yes
;;
esac
@@ -3555,7 +3626,7 @@ case "$target_arch2" in
bflt="yes"
target_nptl="yes"
gdb_xml_files="arm-core.xml arm-vfp.xml arm-vfp3.xml arm-neon.xml"
- target_phys_bits=32
+ target_phys_bits=64
target_llong_alignment=4
target_libs_softmmu="$fdt_libs"
;;
@@ -3601,6 +3672,11 @@ case "$target_arch2" in
target_phys_bits=64
target_long_alignment=8
;;
+ or32)
+ TARGET_ARCH=openrisc
+ TARGET_BASE_ARCH=openrisc
+ target_phys_bits=32
+ ;;
ppc)
gdb_xml_files="power-core.xml power-fpu.xml power-altivec.xml power-spe.xml"
target_phys_bits=64
@@ -3679,7 +3755,7 @@ symlink "$source_path/Makefile.target" "$target_dir/Makefile"
case "$target_arch2" in
- alpha | sparc* | xtensa* | ppc*)
+ alpha | or32 | sparc* | xtensa* | ppc*)
echo "CONFIG_TCG_PASS_AREG0=y" >> $config_target_mak
;;
esac
@@ -3702,6 +3778,9 @@ case "$target_arch2" in
if test "$xen" = "yes" -a "$target_softmmu" = "yes" ; then
target_phys_bits=64
echo "CONFIG_XEN=y" >> $config_target_mak
+ if test "$xen_pci_passthrough" = yes; then
+ echo "CONFIG_XEN_PCI_PASSTHROUGH=y" >> "$config_target_mak"
+ fi
else
echo "CONFIG_NO_XEN=y" >> $config_target_mak
fi
@@ -3850,6 +3929,10 @@ for i in $ARCH $TARGET_BASE_ARCH ; do
echo "CONFIG_MIPS_DIS=y" >> $config_target_mak
echo "CONFIG_MIPS_DIS=y" >> $libdis_config_mak
;;
+ or32)
+ echo "CONFIG_OPENRISC_DIS=y" >> $config_target_mak
+ echo "CONFIG_OPENRISC_DIS=y" >> $libdis_config_mak
+ ;;
ppc*)
echo "CONFIG_PPC_DIS=y" >> $config_target_mak
echo "CONFIG_PPC_DIS=y" >> $libdis_config_mak
diff --git a/console.c b/console.c
index 6a463f5918..4525cc70b8 100644
--- a/console.c
+++ b/console.c
@@ -28,6 +28,7 @@
//#define DEBUG_CONSOLE
#define DEFAULT_BACKSCROLL 512
#define MAX_CONSOLES 12
+#define CONSOLE_CURSOR_PERIOD 500
#define QEMU_RGBA(r, g, b, a) (((a) << 24) | ((r) << 16) | ((g) << 8) | (b))
#define QEMU_RGB(r, g, b) QEMU_RGBA(r, g, b, 0xff)
@@ -139,6 +140,8 @@ struct TextConsole {
TextCell *cells;
int text_x[2], text_y[2], cursor_invalidate;
int echo;
+ bool cursor_visible_phase;
+ QEMUTimer *cursor_timer;
int update_x0;
int update_y0;
@@ -615,7 +618,7 @@ static void console_show_cursor(TextConsole *s, int show)
y += s->total_height;
if (y < s->height) {
c = &s->cells[y1 * s->width + x];
- if (show) {
+ if (show && s->cursor_visible_phase) {
TextAttributes t_attrib = s->t_attrib_default;
t_attrib.invers = !(t_attrib.invers); /* invert fg and bg */
vga_putcharxy(s->ds, x, y, c->ch, &t_attrib);
@@ -1083,6 +1086,10 @@ void console_select(unsigned int index)
s = consoles[index];
if (s) {
DisplayState *ds = s->ds;
+
+ if (active_console->cursor_timer) {
+ qemu_del_timer(active_console->cursor_timer);
+ }
active_console = s;
if (ds_get_bits_per_pixel(s->ds)) {
ds->surface = qemu_resize_displaysurface(ds, s->g_width, s->g_height);
@@ -1090,6 +1097,10 @@ void console_select(unsigned int index)
s->ds->surface->width = s->width;
s->ds->surface->height = s->height;
}
+ if (s->cursor_timer) {
+ qemu_mod_timer(s->cursor_timer,
+ qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
+ }
dpy_resize(s->ds);
vga_hw_invalidate();
}
@@ -1454,6 +1465,16 @@ static void text_console_set_echo(CharDriverState *chr, bool echo)
s->echo = echo;
}
+static void text_console_update_cursor(void *opaque)
+{
+ TextConsole *s = opaque;
+
+ s->cursor_visible_phase = !s->cursor_visible_phase;
+ vga_hw_invalidate();
+ qemu_mod_timer(s->cursor_timer,
+ qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
+}
+
static void text_console_do_init(CharDriverState *chr, DisplayState *ds)
{
TextConsole *s;
@@ -1482,6 +1503,9 @@ static void text_console_do_init(CharDriverState *chr, DisplayState *ds)
s->g_height = ds_get_height(s->ds);
}
+ s->cursor_timer =
+ qemu_new_timer_ms(rt_clock, text_console_update_cursor, s);
+
s->hw_invalidate = text_console_invalidate;
s->hw_text_update = text_console_update;
s->hw = s;
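
The console cursor now blinks on its own timer: text_console_update_cursor() flips cursor_visible_phase and re-arms itself every CONSOLE_CURSOR_PERIOD / 2 milliseconds, and console_select() moves the timer to the newly selected console. A minimal sketch of that self re-arming pattern using the same (legacy) timer calls as the patch; the flag and callback names are made up:

    /* Sketch: a callback that toggles a flag and reschedules itself. */
    static QEMUTimer *blink_timer;
    static bool blink_phase;

    static void blink_cb(void *opaque)
    {
        blink_phase = !blink_phase;
        vga_hw_invalidate();            /* force a redraw, as the patch does */
        qemu_mod_timer(blink_timer,
                       qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
    }

    /* one-time setup, e.g. from an init path:
     *   blink_timer = qemu_new_timer_ms(rt_clock, blink_cb, NULL);
     *   qemu_mod_timer(blink_timer,
     *                  qemu_get_clock_ms(rt_clock) + CONSOLE_CURSOR_PERIOD / 2);
     */
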
diff --git a/coroutine-ucontext.c b/coroutine-ucontext.c
index 5f43083af5..784081ab18 100644
--- a/coroutine-ucontext.c
+++ b/coroutine-ucontext.c
@@ -30,6 +30,10 @@
#include "qemu-common.h"
#include "qemu-coroutine-int.h"
+#ifdef CONFIG_VALGRIND_H
+#include <valgrind/valgrind.h>
+#endif
+
enum {
/* Maximum free pool size prevents holding too many freed coroutines */
POOL_MAX_SIZE = 64,
@@ -43,6 +47,11 @@ typedef struct {
Coroutine base;
void *stack;
jmp_buf env;
+
+#ifdef CONFIG_VALGRIND_H
+ unsigned int valgrind_stack_id;
+#endif
+
} CoroutineUContext;
/**
@@ -159,6 +168,11 @@ static Coroutine *coroutine_new(void)
uc.uc_stack.ss_size = stack_size;
uc.uc_stack.ss_flags = 0;
+#ifdef CONFIG_VALGRIND_H
+ co->valgrind_stack_id =
+ VALGRIND_STACK_REGISTER(co->stack, co->stack + stack_size);
+#endif
+
arg.p = co;
makecontext(&uc, (void (*)(void))coroutine_trampoline,
@@ -185,6 +199,20 @@ Coroutine *qemu_coroutine_new(void)
return co;
}
+#ifdef CONFIG_VALGRIND_H
+#ifdef CONFIG_PRAGMA_DISABLE_UNUSED_BUT_SET
+/* Work around an unused variable in the valgrind.h macro... */
+#pragma GCC diagnostic ignored "-Wunused-but-set-variable"
+#endif
+static inline void valgrind_stack_deregister(CoroutineUContext *co)
+{
+ VALGRIND_STACK_DEREGISTER(co->valgrind_stack_id);
+}
+#ifdef CONFIG_PRAGMA_DISABLE_UNUSED_BUT_SET
+#pragma GCC diagnostic error "-Wunused-but-set-variable"
+#endif
+#endif
+
void qemu_coroutine_delete(Coroutine *co_)
{
CoroutineUContext *co = DO_UPCAST(CoroutineUContext, base, co_);
@@ -196,6 +224,10 @@ void qemu_coroutine_delete(Coroutine *co_)
return;
}
+#ifdef CONFIG_VALGRIND_H
+ valgrind_stack_deregister(co);
+#endif
+
g_free(co->stack);
g_free(co);
}
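
The coroutine code above registers every coroutine stack with Valgrind so its stack tracking keeps working across context switches, and deregisters it before the stack is freed. The same macro pair applies to any manually allocated stack outside QEMU; a minimal standalone sketch (the macros are no-ops when the program is not running under Valgrind):

    #include <stdlib.h>
    #include <valgrind/valgrind.h>

    int main(void)
    {
        size_t size = 64 * 1024;
        char *stack = malloc(size);
        unsigned int id;

        /* Tell Valgrind this memory range will be used as a stack. */
        id = VALGRIND_STACK_REGISTER(stack, stack + size);

        /* ...switch to and run on that stack, e.g. via makecontext()... */

        VALGRIND_STACK_DEREGISTER(id);
        free(stack);
        return 0;
    }
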
diff --git a/cpu-all.h b/cpu-all.h
index 9dc249a165..82ba1d7cd5 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -486,6 +486,7 @@ typedef struct RAMBlock {
typedef struct RAMList {
uint8_t *phys_dirty;
QLIST_HEAD(, RAMBlock) blocks;
+ uint64_t dirty_pages;
} RAMList;
extern RAMList ram_list;
diff --git a/cpu-common.h b/cpu-common.h
index 1fe3280701..85548de5ea 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -3,9 +3,7 @@
/* CPU interfaces that are target independent. */
-#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
-#endif
#ifndef NEED_CPU_H
#include "poison.h"
@@ -71,9 +69,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
-#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr);
-#endif
/* Coalesced MMIO regions are areas where write operations can be reordered.
* This usually implies that write operations are side-effect free. This allows
diff --git a/cpu-defs.h b/cpu-defs.h
index f49e9500a9..4018b88a1a 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -151,14 +151,6 @@ typedef struct CPUWatchpoint {
QTAILQ_ENTRY(CPUWatchpoint) entry;
} CPUWatchpoint;
-#ifdef _WIN32
-#define CPU_COMMON_THREAD \
- void *hThread;
-
-#else
-#define CPU_COMMON_THREAD
-#endif
-
#define CPU_TEMP_BUF_NLONGS 128
#define CPU_COMMON \
struct TranslationBlock *current_tb; /* currently executing TB */ \
@@ -216,10 +208,7 @@ typedef struct CPUWatchpoint {
uint32_t created; \
uint32_t stop; /* Stop request */ \
uint32_t stopped; /* Artificially stopped */ \
- struct QemuThread *thread; \
- CPU_COMMON_THREAD \
struct QemuCond *halt_cond; \
- int thread_kicked; \
struct qemu_work_item *queued_work_first, *queued_work_last; \
const char *cpu_model_str; \
struct KVMState *kvm_state; \
diff --git a/cpu-exec.c b/cpu-exec.c
index 24607fbed5..4fee0618bd 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -222,6 +222,7 @@ int cpu_exec(CPUArchState *env)
#elif defined(TARGET_LM32)
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_CRIS)
#elif defined(TARGET_S390X)
@@ -285,8 +286,15 @@ int cpu_exec(CPUArchState *env)
}
#endif
#if defined(TARGET_I386)
+#if !defined(CONFIG_USER_ONLY)
+ if (interrupt_request & CPU_INTERRUPT_POLL) {
+ env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(env->apic_state);
+ }
+#endif
if (interrupt_request & CPU_INTERRUPT_INIT) {
- svm_check_intercept(env, SVM_EXIT_INIT);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INIT,
+ 0);
do_cpu_init(x86_env_get_cpu(env));
env->exception_index = EXCP_HALTED;
cpu_loop_exit(env);
@@ -295,7 +303,8 @@ int cpu_exec(CPUArchState *env)
} else if (env->hflags2 & HF2_GIF_MASK) {
if ((interrupt_request & CPU_INTERRUPT_SMI) &&
!(env->hflags & HF_SMM_MASK)) {
- svm_check_intercept(env, SVM_EXIT_SMI);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SMI,
+ 0);
env->interrupt_request &= ~CPU_INTERRUPT_SMI;
do_smm_enter(env);
next_tb = 0;
@@ -316,7 +325,8 @@ int cpu_exec(CPUArchState *env)
(env->eflags & IF_MASK &&
!(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
int intno;
- svm_check_intercept(env, SVM_EXIT_INTR);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INTR,
+ 0);
env->interrupt_request &= ~(CPU_INTERRUPT_HARD | CPU_INTERRUPT_VIRQ);
intno = cpu_get_pic_interrupt(env);
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing hardware INT=0x%02x\n", intno);
@@ -330,7 +340,8 @@ int cpu_exec(CPUArchState *env)
!(env->hflags & HF_INHIBIT_IRQ_MASK)) {
int intno;
/* FIXME: this should respect TPR */
- svm_check_intercept(env, SVM_EXIT_VINTR);
+ cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR,
+ 0);
intno = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_vector));
qemu_log_mask(CPU_LOG_TB_IN_ASM, "Servicing virtual hardware INT=0x%02x\n", intno);
do_interrupt_x86_hardirq(env, intno, 1);
@@ -374,6 +385,23 @@ int cpu_exec(CPUArchState *env)
do_interrupt(env);
next_tb = 0;
}
+#elif defined(TARGET_OPENRISC)
+ {
+ int idx = -1;
+ if ((interrupt_request & CPU_INTERRUPT_HARD)
+ && (env->sr & SR_IEE)) {
+ idx = EXCP_INT;
+ }
+ if ((interrupt_request & CPU_INTERRUPT_TIMER)
+ && (env->sr & SR_TEE)) {
+ idx = EXCP_TICK;
+ }
+ if (idx >= 0) {
+ env->exception_index = idx;
+ do_interrupt(env);
+ next_tb = 0;
+ }
+ }
#elif defined(TARGET_SPARC)
if (interrupt_request & CPU_INTERRUPT_HARD) {
if (cpu_interrupts_enabled(env) &&
@@ -627,6 +655,7 @@ int cpu_exec(CPUArchState *env)
| env->cc_dest | (env->cc_x << 4);
#elif defined(TARGET_MICROBLAZE)
#elif defined(TARGET_MIPS)
+#elif defined(TARGET_OPENRISC)
#elif defined(TARGET_SH4)
#elif defined(TARGET_ALPHA)
#elif defined(TARGET_CRIS)
diff --git a/cpus.c b/cpus.c
index b182b3d7d9..3de2e27f41 100644
--- a/cpus.c
+++ b/cpus.c
@@ -36,6 +36,7 @@
#include "cpus.h"
#include "qtest.h"
#include "main-loop.h"
+#include "bitmap.h"
#ifndef _WIN32
#include "compatfd.h"
@@ -61,6 +62,32 @@
static CPUArchState *next_cpu;
+static bool cpu_thread_is_idle(CPUArchState *env)
+{
+ if (env->stop || env->queued_work_first) {
+ return false;
+ }
+ if (env->stopped || !runstate_is_running()) {
+ return true;
+ }
+ if (!env->halted || qemu_cpu_has_work(env) || kvm_irqchip_in_kernel()) {
+ return false;
+ }
+ return true;
+}
+
+static bool all_cpu_threads_idle(void)
+{
+ CPUArchState *env;
+
+ for (env = first_cpu; env != NULL; env = env->next_cpu) {
+ if (!cpu_thread_is_idle(env)) {
+ return false;
+ }
+ }
+ return true;
+}
+
/***********************************************************/
/* guest cycle counter */
@@ -433,32 +460,6 @@ static int cpu_can_run(CPUArchState *env)
return 1;
}
-static bool cpu_thread_is_idle(CPUArchState *env)
-{
- if (env->stop || env->queued_work_first) {
- return false;
- }
- if (env->stopped || !runstate_is_running()) {
- return true;
- }
- if (!env->halted || qemu_cpu_has_work(env) || kvm_irqchip_in_kernel()) {
- return false;
- }
- return true;
-}
-
-bool all_cpu_threads_idle(void)
-{
- CPUArchState *env;
-
- for (env = first_cpu; env != NULL; env = env->next_cpu) {
- if (!cpu_thread_is_idle(env)) {
- return false;
- }
- }
- return true;
-}
-
static void cpu_handle_guest_debug(CPUArchState *env)
{
gdb_set_stop_cpu(env);
@@ -686,13 +687,15 @@ static void flush_queued_work(CPUArchState *env)
static void qemu_wait_io_event_common(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
+
if (env->stop) {
env->stop = 0;
env->stopped = 1;
qemu_cond_signal(&qemu_pause_cond);
}
flush_queued_work(env);
- env->thread_kicked = false;
+ cpu->thread_kicked = false;
}
static void qemu_tcg_wait_io_event(void)
@@ -728,10 +731,11 @@ static void qemu_kvm_wait_io_event(CPUArchState *env)
static void *qemu_kvm_cpu_thread_fn(void *arg)
{
CPUArchState *env = arg;
+ CPUState *cpu = ENV_GET_CPU(env);
int r;
qemu_mutex_lock(&qemu_global_mutex);
- qemu_thread_get_self(env->thread);
+ qemu_thread_get_self(cpu->thread);
env->thread_id = qemu_get_thread_id();
cpu_single_env = env;
@@ -767,11 +771,12 @@ static void *qemu_dummy_cpu_thread_fn(void *arg)
exit(1);
#else
CPUArchState *env = arg;
+ CPUState *cpu = ENV_GET_CPU(env);
sigset_t waitset;
int r;
qemu_mutex_lock_iothread();
- qemu_thread_get_self(env->thread);
+ qemu_thread_get_self(cpu->thread);
env->thread_id = qemu_get_thread_id();
sigemptyset(&waitset);
@@ -807,9 +812,10 @@ static void tcg_exec_all(void);
static void *qemu_tcg_cpu_thread_fn(void *arg)
{
CPUArchState *env = arg;
+ CPUState *cpu = ENV_GET_CPU(env);
qemu_tcg_init_cpu_signals();
- qemu_thread_get_self(env->thread);
+ qemu_thread_get_self(cpu->thread);
/* signal CPU creation */
qemu_mutex_lock(&qemu_global_mutex);
@@ -842,19 +848,20 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
static void qemu_cpu_kick_thread(CPUArchState *env)
{
+ CPUState *cpu = ENV_GET_CPU(env);
#ifndef _WIN32
int err;
- err = pthread_kill(env->thread->thread, SIG_IPI);
+ err = pthread_kill(cpu->thread->thread, SIG_IPI);
if (err) {
fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
exit(1);
}
#else /* _WIN32 */
if (!qemu_cpu_is_self(env)) {
- SuspendThread(env->hThread);
+ SuspendThread(cpu->hThread);
cpu_signal(0);
- ResumeThread(env->hThread);
+ ResumeThread(cpu->hThread);
}
#endif
}
@@ -862,11 +869,12 @@ static void qemu_cpu_kick_thread(CPUArchState *env)
void qemu_cpu_kick(void *_env)
{
CPUArchState *env = _env;
+ CPUState *cpu = ENV_GET_CPU(env);
qemu_cond_broadcast(env->halt_cond);
- if (!tcg_enabled() && !env->thread_kicked) {
+ if (!tcg_enabled() && !cpu->thread_kicked) {
qemu_cpu_kick_thread(env);
- env->thread_kicked = true;
+ cpu->thread_kicked = true;
}
}
@@ -874,10 +882,11 @@ void qemu_cpu_kick_self(void)
{
#ifndef _WIN32
assert(cpu_single_env);
+ CPUState *cpu_single_cpu = ENV_GET_CPU(cpu_single_env);
- if (!cpu_single_env->thread_kicked) {
+ if (!cpu_single_cpu->thread_kicked) {
qemu_cpu_kick_thread(cpu_single_env);
- cpu_single_env->thread_kicked = true;
+ cpu_single_cpu->thread_kicked = true;
}
#else
abort();
@@ -887,8 +896,9 @@ void qemu_cpu_kick_self(void)
int qemu_cpu_is_self(void *_env)
{
CPUArchState *env = _env;
+ CPUState *cpu = ENV_GET_CPU(env);
- return qemu_thread_is_self(env->thread);
+ return qemu_thread_is_self(cpu->thread);
}
void qemu_mutex_lock_iothread(void)
@@ -974,34 +984,37 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(void *_env)
{
CPUArchState *env = _env;
+ CPUState *cpu = ENV_GET_CPU(env);
/* share a single thread for all cpus with TCG */
if (!tcg_cpu_thread) {
- env->thread = g_malloc0(sizeof(QemuThread));
+ cpu->thread = g_malloc0(sizeof(QemuThread));
env->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(env->halt_cond);
tcg_halt_cond = env->halt_cond;
- qemu_thread_create(env->thread, qemu_tcg_cpu_thread_fn, env,
+ qemu_thread_create(cpu->thread, qemu_tcg_cpu_thread_fn, env,
QEMU_THREAD_JOINABLE);
#ifdef _WIN32
- env->hThread = qemu_thread_get_handle(env->thread);
+ cpu->hThread = qemu_thread_get_handle(cpu->thread);
#endif
while (env->created == 0) {
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
}
- tcg_cpu_thread = env->thread;
+ tcg_cpu_thread = cpu->thread;
} else {
- env->thread = tcg_cpu_thread;
+ cpu->thread = tcg_cpu_thread;
env->halt_cond = tcg_halt_cond;
}
}
static void qemu_kvm_start_vcpu(CPUArchState *env)
{
- env->thread = g_malloc0(sizeof(QemuThread));
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
env->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(env->halt_cond);
- qemu_thread_create(env->thread, qemu_kvm_cpu_thread_fn, env,
+ qemu_thread_create(cpu->thread, qemu_kvm_cpu_thread_fn, env,
QEMU_THREAD_JOINABLE);
while (env->created == 0) {
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
@@ -1010,10 +1023,12 @@ static void qemu_kvm_start_vcpu(CPUArchState *env)
static void qemu_dummy_start_vcpu(CPUArchState *env)
{
- env->thread = g_malloc0(sizeof(QemuThread));
+ CPUState *cpu = ENV_GET_CPU(env);
+
+ cpu->thread = g_malloc0(sizeof(QemuThread));
env->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(env->halt_cond);
- qemu_thread_create(env->thread, qemu_dummy_cpu_thread_fn, env,
+ qemu_thread_create(cpu->thread, qemu_dummy_cpu_thread_fn, env,
QEMU_THREAD_JOINABLE);
while (env->created == 0) {
qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
@@ -1145,7 +1160,7 @@ void set_numa_modes(void)
for (env = first_cpu; env != NULL; env = env->next_cpu) {
for (i = 0; i < nb_numa_nodes; i++) {
- if (node_cpumask[i] & (1 << env->cpu_index)) {
+ if (test_bit(env->cpu_index, node_cpumask[i])) {
env->numa_node = i;
}
}
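
set_numa_modes() now tests node_cpumask[] with test_bit() from bitmap.h instead of shifting a plain integer, so configurations with more CPUs than bits in the old integer mask keep working (the array itself is presumably converted to a bitmap elsewhere in this series). A small sketch of the bitmap API assumed here, modeled on QEMU's bitmap.h:

    #include "bitmap.h"

    #define EXAMPLE_MAX_CPUS 256

    static DECLARE_BITMAP(example_mask, EXAMPLE_MAX_CPUS);

    static void example_mark_cpu(int cpu_index)
    {
        set_bit(cpu_index, example_mask);          /* add the CPU to the node */
        if (test_bit(cpu_index, example_mask)) {
            /* the CPU now belongs to this node */
        }
    }
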
diff --git a/cutils.c b/cutils.c
index af308cd7b9..9d4c570939 100644
--- a/cutils.c
+++ b/cutils.c
@@ -26,6 +26,14 @@
#include <math.h>
#include "qemu_socket.h"
+#include "iov.h"
+
+void strpadcpy(char *buf, int buf_size, const char *str, char pad)
+{
+ int len = qemu_strnlen(str, buf_size);
+ memcpy(buf, str, len);
+ memset(buf + len, pad, buf_size - len);
+}
void pstrcpy(char *buf, int buf_size, const char *str)
{
@@ -171,48 +179,34 @@ void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
}
/*
- * Copies iovecs from src to the end of dst. It starts copying after skipping
- * the given number of bytes in src and copies until src is completely copied
- * or the total size of the copied iovec reaches size.The size of the last
- * copied iovec is changed in order to fit the specified total size if it isn't
- * a perfect fit already.
+ * Concatenates (partial) iovecs from src to the end of dst.
+ * It skips `soffset' bytes at the beginning of src and then
+ * appends individual vectors from src to dst until up to `sbytes'
+ * bytes in total have been added, or until the end of src is
+ * reached, whichever comes first. This makes it safe to pass a
+ * very large value for `sbytes' to mean "up to the end
+ * of src".
+ * Only vector pointers are processed, not the actual data buffers.
*/
-void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src, uint64_t skip,
- size_t size)
+void qemu_iovec_concat(QEMUIOVector *dst,
+ QEMUIOVector *src, size_t soffset, size_t sbytes)
{
int i;
size_t done;
- void *iov_base;
- uint64_t iov_len;
-
+ struct iovec *siov = src->iov;
assert(dst->nalloc != -1);
-
- done = 0;
- for (i = 0; (i < src->niov) && (done != size); i++) {
- if (skip >= src->iov[i].iov_len) {
- /* Skip the whole iov */
- skip -= src->iov[i].iov_len;
- continue;
- } else {
- /* Skip only part (or nothing) of the iov */
- iov_base = (uint8_t*) src->iov[i].iov_base + skip;
- iov_len = src->iov[i].iov_len - skip;
- skip = 0;
- }
-
- if (done + iov_len > size) {
- qemu_iovec_add(dst, iov_base, size - done);
- break;
+ assert(src->size >= soffset);
+ for (i = 0, done = 0; done < sbytes && i < src->niov; i++) {
+ if (soffset < siov[i].iov_len) {
+ size_t len = MIN(siov[i].iov_len - soffset, sbytes - done);
+ qemu_iovec_add(dst, siov[i].iov_base + soffset, len);
+ done += len;
+ soffset = 0;
} else {
- qemu_iovec_add(dst, iov_base, iov_len);
+ soffset -= siov[i].iov_len;
}
- done += iov_len;
}
-}
-
-void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
-{
- qemu_iovec_copy(dst, src, 0, size);
+ /* return done; */
}
void qemu_iovec_destroy(QEMUIOVector *qiov)
@@ -233,74 +227,22 @@ void qemu_iovec_reset(QEMUIOVector *qiov)
qiov->size = 0;
}
-void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
-{
- uint8_t *p = (uint8_t *)buf;
- int i;
-
- for (i = 0; i < qiov->niov; ++i) {
- memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len);
- p += qiov->iov[i].iov_len;
- }
-}
-
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
+size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
+ void *buf, size_t bytes)
{
- const uint8_t *p = (const uint8_t *)buf;
- size_t copy;
- int i;
-
- for (i = 0; i < qiov->niov && count; ++i) {
- copy = count;
- if (copy > qiov->iov[i].iov_len)
- copy = qiov->iov[i].iov_len;
- memcpy(qiov->iov[i].iov_base, p, copy);
- p += copy;
- count -= copy;
- }
+ return iov_to_buf(qiov->iov, qiov->niov, offset, buf, bytes);
}
-void qemu_iovec_memset(QEMUIOVector *qiov, int c, size_t count)
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+ const void *buf, size_t bytes)
{
- size_t n;
- int i;
-
- for (i = 0; i < qiov->niov && count; ++i) {
- n = MIN(count, qiov->iov[i].iov_len);
- memset(qiov->iov[i].iov_base, c, n);
- count -= n;
- }
+ return iov_from_buf(qiov->iov, qiov->niov, offset, buf, bytes);
}
-void qemu_iovec_memset_skip(QEMUIOVector *qiov, int c, size_t count,
- size_t skip)
+size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
+ int fillc, size_t bytes)
{
- int i;
- size_t done;
- void *iov_base;
- uint64_t iov_len;
-
- done = 0;
- for (i = 0; (i < qiov->niov) && (done != count); i++) {
- if (skip >= qiov->iov[i].iov_len) {
- /* Skip the whole iov */
- skip -= qiov->iov[i].iov_len;
- continue;
- } else {
- /* Skip only part (or nothing) of the iov */
- iov_base = (uint8_t*) qiov->iov[i].iov_base + skip;
- iov_len = qiov->iov[i].iov_len - skip;
- skip = 0;
- }
-
- if (done + iov_len > count) {
- memset(iov_base, c, count - done);
- break;
- } else {
- memset(iov_base, c, iov_len);
- }
- done += iov_len;
- }
+ return iov_memset(qiov->iov, qiov->niov, offset, fillc, bytes);
}
/*
@@ -440,112 +382,3 @@ int qemu_parse_fd(const char *param)
}
return fd;
}
-
-/*
- * Send/recv data with iovec buffers
- *
- * This function send/recv data from/to the iovec buffer directly.
- * The first `offset' bytes in the iovec buffer are skipped and next
- * `len' bytes are used.
- *
- * For example,
- *
- * do_sendv_recvv(sockfd, iov, len, offset, 1);
- *
- * is equal to
- *
- * char *buf = malloc(size);
- * iov_to_buf(iov, iovcnt, buf, offset, size);
- * send(sockfd, buf, size, 0);
- * free(buf);
- */
-static int do_sendv_recvv(int sockfd, struct iovec *iov, int len, int offset,
- int do_sendv)
-{
- int ret, diff, iovlen;
- struct iovec *last_iov;
-
- /* last_iov is inclusive, so count from one. */
- iovlen = 1;
- last_iov = iov;
- len += offset;
-
- while (last_iov->iov_len < len) {
- len -= last_iov->iov_len;
-
- last_iov++;
- iovlen++;
- }
-
- diff = last_iov->iov_len - len;
- last_iov->iov_len -= diff;
-
- while (iov->iov_len <= offset) {
- offset -= iov->iov_len;
-
- iov++;
- iovlen--;
- }
-
- iov->iov_base = (char *) iov->iov_base + offset;
- iov->iov_len -= offset;
-
- {
-#if defined CONFIG_IOVEC && defined CONFIG_POSIX
- struct msghdr msg;
- memset(&msg, 0, sizeof(msg));
- msg.msg_iov = iov;
- msg.msg_iovlen = iovlen;
-
- do {
- if (do_sendv) {
- ret = sendmsg(sockfd, &msg, 0);
- } else {
- ret = recvmsg(sockfd, &msg, 0);
- }
- } while (ret == -1 && errno == EINTR);
-#else
- struct iovec *p = iov;
- ret = 0;
- while (iovlen > 0) {
- int rc;
- if (do_sendv) {
- rc = send(sockfd, p->iov_base, p->iov_len, 0);
- } else {
- rc = qemu_recv(sockfd, p->iov_base, p->iov_len, 0);
- }
- if (rc == -1) {
- if (errno == EINTR) {
- continue;
- }
- if (ret == 0) {
- ret = -1;
- }
- break;
- }
- if (rc == 0) {
- break;
- }
- ret += rc;
- iovlen--, p++;
- }
-#endif
- }
-
- /* Undo the changes above */
- iov->iov_base = (char *) iov->iov_base - offset;
- iov->iov_len += offset;
- last_iov->iov_len += diff;
- return ret;
-}
-
-int qemu_recvv(int sockfd, struct iovec *iov, int len, int iov_offset)
-{
- return do_sendv_recvv(sockfd, iov, len, iov_offset, 0);
-}
-
-int qemu_sendv(int sockfd, struct iovec *iov, int len, int iov_offset)
-{
- return do_sendv_recvv(sockfd, iov, len, iov_offset, 1);
-}
-
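
With the offset-based helpers above, carving a partial view out of an existing vector and flattening part of it into a flat buffer is a couple of calls. A hedged usage sketch against the signatures introduced in this patch (the offsets and sizes are purely illustrative):

    #include "qemu-common.h"

    static void iovec_example(QEMUIOVector *src)
    {
        QEMUIOVector dst;
        uint8_t buf[16];

        qemu_iovec_init(&dst, 4);              /* room for a few vectors */
        qemu_iovec_concat(&dst, src, 8, 24);   /* skip 8 bytes of src, take up to 24 */

        /* flatten the first 16 bytes of src into a plain buffer */
        qemu_iovec_to_buf(src, 0, buf, sizeof(buf));

        qemu_iovec_destroy(&dst);
    }
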
diff --git a/default-configs/or32-linux-user.mak b/default-configs/or32-linux-user.mak
new file mode 100644
index 0000000000..808c1f9b83
--- /dev/null
+++ b/default-configs/or32-linux-user.mak
@@ -0,0 +1 @@
+# Default configuration for or32-linux-user
diff --git a/default-configs/or32-softmmu.mak b/default-configs/or32-softmmu.mak
new file mode 100644
index 0000000000..cce474672a
--- /dev/null
+++ b/default-configs/or32-softmmu.mak
@@ -0,0 +1,4 @@
+# Default configuration for or32-softmmu
+
+CONFIG_SERIAL=y
+CONFIG_OPENCORES_ETH=y
diff --git a/default-configs/pci.mak b/default-configs/pci.mak
index 9d3e1dbda1..69e18f1428 100644
--- a/default-configs/pci.mak
+++ b/default-configs/pci.mak
@@ -10,9 +10,12 @@ CONFIG_EEPRO100_PCI=y
CONFIG_PCNET_PCI=y
CONFIG_PCNET_COMMON=y
CONFIG_LSI_SCSI_PCI=y
+CONFIG_MEGASAS_SCSI_PCI=y
CONFIG_RTL8139_PCI=y
CONFIG_E1000_PCI=y
CONFIG_IDE_CORE=y
CONFIG_IDE_QDEV=y
CONFIG_IDE_PCI=y
CONFIG_AHCI=y
+CONFIG_ESP=y
+CONFIG_ESP_PCI=y
diff --git a/device_tree.c b/device_tree.c
index b366fddeaf..d7a9b6bb89 100644
--- a/device_tree.c
+++ b/device_tree.c
@@ -178,6 +178,36 @@ int qemu_devtree_setprop_string(void *fdt, const char *node_path,
return r;
}
+const void *qemu_devtree_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp)
+{
+ int len;
+ const void *r;
+ if (!lenp) {
+ lenp = &len;
+ }
+ r = fdt_getprop(fdt, findnode_nofail(fdt, node_path), property, lenp);
+ if (!r) {
+ fprintf(stderr, "%s: Couldn't get %s/%s: %s\n", __func__,
+ node_path, property, fdt_strerror(*lenp));
+ exit(1);
+ }
+ return r;
+}
+
+uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
+ const char *property)
+{
+ int len;
+ const uint32_t *p = qemu_devtree_getprop(fdt, node_path, property, &len);
+ if (len != 4) {
+ fprintf(stderr, "%s: %s/%s not 4 bytes long (not a cell?)\n",
+ __func__, node_path, property);
+ exit(1);
+ }
+ return be32_to_cpu(*p);
+}
+
uint32_t qemu_devtree_get_phandle(void *fdt, const char *path)
{
uint32_t r;
diff --git a/device_tree.h b/device_tree.h
index 2244270b2d..f7a3e6cfc5 100644
--- a/device_tree.h
+++ b/device_tree.h
@@ -28,6 +28,10 @@ int qemu_devtree_setprop_string(void *fdt, const char *node_path,
int qemu_devtree_setprop_phandle(void *fdt, const char *node_path,
const char *property,
const char *target_node_path);
+const void *qemu_devtree_getprop(void *fdt, const char *node_path,
+ const char *property, int *lenp);
+uint32_t qemu_devtree_getprop_cell(void *fdt, const char *node_path,
+ const char *property);
uint32_t qemu_devtree_get_phandle(void *fdt, const char *path);
uint32_t qemu_devtree_alloc_phandle(void *fdt);
int qemu_devtree_nop_node(void *fdt, const char *node_path);
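
qemu_devtree_getprop_cell() reads a single 4-byte big-endian cell and returns it in host byte order, exiting with a diagnostic on error like the other helpers. A hedged usage sketch; the node path and property name are invented for illustration:

    #include "device_tree.h"

    static uint32_t read_cpu_clock(void *fdt)
    {
        /* Hypothetical node/property; a real board would use its own paths. */
        return qemu_devtree_getprop_cell(fdt, "/cpus/cpu@0", "clock-frequency");
    }
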
diff --git a/disas.c b/disas.c
index 93d8d30d1b..7b2acc9943 100644
--- a/disas.c
+++ b/disas.c
@@ -64,6 +64,22 @@ generic_print_address (bfd_vma addr, struct disassemble_info *info)
(*info->fprintf_func) (info->stream, "0x%" PRIx64, addr);
}
+/* Print address in hex, truncated to the width of a target virtual address. */
+static void
+generic_print_target_address(bfd_vma addr, struct disassemble_info *info)
+{
+ uint64_t mask = ~0ULL >> (64 - TARGET_VIRT_ADDR_SPACE_BITS);
+ generic_print_address(addr & mask, info);
+}
+
+/* Print address in hex, truncated to the width of a host virtual address. */
+static void
+generic_print_host_address(bfd_vma addr, struct disassemble_info *info)
+{
+ uint64_t mask = ~0ULL >> (64 - (sizeof(void *) * 8));
+ generic_print_address(addr & mask, info);
+}
+
/* Just return the given address. */
int
@@ -154,6 +170,7 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
disasm_info.read_memory_func = target_read_memory;
disasm_info.buffer_vma = code;
disasm_info.buffer_length = size;
+ disasm_info.print_address_func = generic_print_target_address;
#ifdef TARGET_WORDS_BIGENDIAN
disasm_info.endian = BFD_ENDIAN_BIG;
@@ -274,6 +291,7 @@ void disas(FILE *out, void *code, unsigned long size)
int (*print_insn)(bfd_vma pc, disassemble_info *info);
INIT_DISASSEMBLE_INFO(disasm_info, out, fprintf);
+ disasm_info.print_address_func = generic_print_host_address;
disasm_info.buffer = code;
disasm_info.buffer_vma = (uintptr_t)code;
@@ -386,6 +404,7 @@ void monitor_disas(Monitor *mon, CPUArchState *env,
monitor_disas_env = env;
monitor_disas_is_physical = is_physical;
disasm_info.read_memory_func = monitor_read_memory;
+ disasm_info.print_address_func = generic_print_target_address;
disasm_info.buffer_vma = pc;
diff --git a/dma-helpers.c b/dma-helpers.c
index 7971a89c14..35cb500581 100644
--- a/dma-helpers.c
+++ b/dma-helpers.c
@@ -9,13 +9,45 @@
#include "dma.h"
#include "trace.h"
+#include "range.h"
+#include "qemu-thread.h"
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
+/* #define DEBUG_IOMMU */
+
+static void do_dma_memory_set(dma_addr_t addr, uint8_t c, dma_addr_t len)
+{
+#define FILLBUF_SIZE 512
+ uint8_t fillbuf[FILLBUF_SIZE];
+ int l;
+
+ memset(fillbuf, c, FILLBUF_SIZE);
+ while (len > 0) {
+ l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE;
+ cpu_physical_memory_rw(addr, fillbuf, l, true);
+        len -= l;
+        addr += l;
+ }
+}
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
+{
+ dma_barrier(dma, DMA_DIRECTION_FROM_DEVICE);
+
+ if (dma_has_iommu(dma)) {
+ return iommu_dma_memory_set(dma, addr, c, len);
+ }
+ do_dma_memory_set(addr, c, len);
+
+ return 0;
+}
+
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma)
{
qsg->sg = g_malloc(alloc_hint * sizeof(ScatterGatherEntry));
qsg->nsg = 0;
qsg->nalloc = alloc_hint;
qsg->size = 0;
+ qsg->dma = dma;
}
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len)
@@ -74,10 +106,9 @@ static void dma_bdrv_unmap(DMAAIOCB *dbs)
int i;
for (i = 0; i < dbs->iov.niov; ++i) {
- cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
- dbs->iov.iov[i].iov_len,
- dbs->dir != DMA_DIRECTION_TO_DEVICE,
- dbs->iov.iov[i].iov_len);
+ dma_memory_unmap(dbs->sg->dma, dbs->iov.iov[i].iov_base,
+ dbs->iov.iov[i].iov_len, dbs->dir,
+ dbs->iov.iov[i].iov_len);
}
qemu_iovec_reset(&dbs->iov);
}
@@ -106,7 +137,7 @@ static void dma_complete(DMAAIOCB *dbs, int ret)
static void dma_bdrv_cb(void *opaque, int ret)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
- target_phys_addr_t cur_addr, cur_len;
+ dma_addr_t cur_addr, cur_len;
void *mem;
trace_dma_bdrv_cb(dbs, ret);
@@ -123,8 +154,7 @@ static void dma_bdrv_cb(void *opaque, int ret)
while (dbs->sg_cur_index < dbs->sg->nsg) {
cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
- mem = cpu_physical_memory_map(cur_addr, &cur_len,
- dbs->dir != DMA_DIRECTION_TO_DEVICE);
+ mem = dma_memory_map(dbs->sg->dma, cur_addr, &cur_len, dbs->dir);
if (!mem)
break;
qemu_iovec_add(&dbs->iov, mem, cur_len);
@@ -209,7 +239,8 @@ BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
}
-static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_dev)
+static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg,
+ DMADirection dir)
{
uint64_t resid;
int sg_cur_index;
@@ -220,7 +251,7 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_de
while (len > 0) {
ScatterGatherEntry entry = sg->sg[sg_cur_index++];
int32_t xfer = MIN(len, entry.len);
- cpu_physical_memory_rw(entry.base, ptr, xfer, !to_dev);
+ dma_memory_rw(sg->dma, entry.base, ptr, xfer, dir);
ptr += xfer;
len -= xfer;
resid -= xfer;
@@ -231,12 +262,12 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, bool to_de
uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
- return dma_buf_rw(ptr, len, sg, 0);
+ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE);
}
uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg)
{
- return dma_buf_rw(ptr, len, sg, 1);
+ return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE);
}
void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
@@ -244,3 +275,160 @@ void dma_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
{
bdrv_acct_start(bs, cookie, sg->size, type);
}
+
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+ DMADirection dir)
+{
+ target_phys_addr_t paddr, plen;
+
+#ifdef DEBUG_IOMMU
+ fprintf(stderr, "dma_memory_check context=%p addr=0x" DMA_ADDR_FMT
+ " len=0x" DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+ while (len) {
+ if (dma->translate(dma, addr, &paddr, &plen, dir) != 0) {
+ return false;
+ }
+
+ /* The translation might be valid for larger regions. */
+ if (plen > len) {
+ plen = len;
+ }
+
+ len -= plen;
+ addr += plen;
+ }
+
+ return true;
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len, DMADirection dir)
+{
+ target_phys_addr_t paddr, plen;
+ int err;
+
+#ifdef DEBUG_IOMMU
+ fprintf(stderr, "dma_memory_rw context=%p addr=0x" DMA_ADDR_FMT " len=0x"
+ DMA_ADDR_FMT " dir=%d\n", dma, addr, len, dir);
+#endif
+
+ while (len) {
+ err = dma->translate(dma, addr, &paddr, &plen, dir);
+ if (err) {
+ /*
+ * In case of failure on reads from the guest, we clean the
+ * destination buffer so that a device that doesn't test
+ * for errors will not expose qemu internal memory.
+ */
+ memset(buf, 0, len);
+ return -1;
+ }
+
+ /* The translation might be valid for larger regions. */
+ if (plen > len) {
+ plen = len;
+ }
+
+ cpu_physical_memory_rw(paddr, buf, plen,
+ dir == DMA_DIRECTION_FROM_DEVICE);
+
+ len -= plen;
+ addr += plen;
+ buf += plen;
+ }
+
+ return 0;
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+ dma_addr_t len)
+{
+ target_phys_addr_t paddr, plen;
+ int err;
+
+#ifdef DEBUG_IOMMU
+ fprintf(stderr, "dma_memory_set context=%p addr=0x" DMA_ADDR_FMT
+ " len=0x" DMA_ADDR_FMT "\n", dma, addr, len);
+#endif
+
+ while (len) {
+ err = dma->translate(dma, addr, &paddr, &plen,
+ DMA_DIRECTION_FROM_DEVICE);
+ if (err) {
+ return err;
+ }
+
+ /* The translation might be valid for larger regions. */
+ if (plen > len) {
+ plen = len;
+ }
+
+ do_dma_memory_set(paddr, c, plen);
+
+ len -= plen;
+ addr += plen;
+ }
+
+ return 0;
+}
+
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+ DMAMapFunc map, DMAUnmapFunc unmap)
+{
+#ifdef DEBUG_IOMMU
+ fprintf(stderr, "dma_context_init(%p, %p, %p, %p)\n",
+ dma, translate, map, unmap);
+#endif
+ dma->translate = translate;
+ dma->map = map;
+ dma->unmap = unmap;
+}
+
+void *iommu_dma_memory_map(DMAContext *dma, dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir)
+{
+ int err;
+ target_phys_addr_t paddr, plen;
+ void *buf;
+
+ if (dma->map) {
+ return dma->map(dma, addr, len, dir);
+ }
+
+ plen = *len;
+ err = dma->translate(dma, addr, &paddr, &plen, dir);
+ if (err) {
+ return NULL;
+ }
+
+ /*
+ * If this is true, the virtual region is contiguous,
+ * but the translated physical region isn't. We just
+ * clamp *len, much like cpu_physical_memory_map() does.
+ */
+ if (plen < *len) {
+ *len = plen;
+ }
+
+ buf = cpu_physical_memory_map(paddr, &plen,
+ dir == DMA_DIRECTION_FROM_DEVICE);
+ *len = plen;
+
+ return buf;
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma, void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ if (dma->unmap) {
+ dma->unmap(dma, buffer, len, dir, access_len);
+ return;
+ }
+
+ cpu_physical_memory_unmap(buffer, len,
+ dir == DMA_DIRECTION_FROM_DEVICE,
+ access_len);
+}
diff --git a/dma.h b/dma.h
index 8c1ec8f0d2..f35c4b6632 100644
--- a/dma.h
+++ b/dma.h
@@ -13,7 +13,9 @@
#include <stdio.h>
#include "hw/hw.h"
#include "block.h"
+#include "kvm.h"
+typedef struct DMAContext DMAContext;
typedef struct ScatterGatherEntry ScatterGatherEntry;
typedef enum {
@@ -26,19 +28,229 @@ struct QEMUSGList {
int nsg;
int nalloc;
size_t size;
+ DMAContext *dma;
};
#if defined(TARGET_PHYS_ADDR_BITS)
-typedef target_phys_addr_t dma_addr_t;
-#define DMA_ADDR_FMT TARGET_FMT_plx
+/*
+ * When an IOMMU is present, bus addresses become distinct from
+ * CPU/memory physical addresses and may be a different size. Because
+ * the IOVA size depends more on the bus than on the platform, we more
+ * or less have to treat these as 64-bit always to cover all (or at
+ * least most) cases.
+ */
+typedef uint64_t dma_addr_t;
+
+#define DMA_ADDR_BITS 64
+#define DMA_ADDR_FMT "%" PRIx64
+
+typedef int DMATranslateFunc(DMAContext *dma,
+ dma_addr_t addr,
+ target_phys_addr_t *paddr,
+ target_phys_addr_t *len,
+ DMADirection dir);
+typedef void* DMAMapFunc(DMAContext *dma,
+ dma_addr_t addr,
+ dma_addr_t *len,
+ DMADirection dir);
+typedef void DMAUnmapFunc(DMAContext *dma,
+ void *buffer,
+ dma_addr_t len,
+ DMADirection dir,
+ dma_addr_t access_len);
+
+struct DMAContext {
+ DMATranslateFunc *translate;
+ DMAMapFunc *map;
+ DMAUnmapFunc *unmap;
+};
+
+static inline void dma_barrier(DMAContext *dma, DMADirection dir)
+{
+ /*
+ * This is called before DMA read and write operations
+ * (unless the _relaxed form is used) and is responsible
+ * for providing some sane ordering of accesses with
+ * respect to concurrently running VCPUs.
+ *
+ * Users of map(), unmap() or lower level st/ld_*
+ * operations are responsible for providing their own
+ * ordering via barriers.
+ *
+ * This primitive implementation does a simple smp_mb()
+ * before each operation which provides pretty much full
+ * ordering.
+ *
+ * A smarter implementation can be devised if needed to
+ * use lighter barriers based on the direction of the
+ * transfer, the DMA context, etc...
+ */
+ if (kvm_enabled()) {
+ smp_mb();
+ }
+}
+
+static inline bool dma_has_iommu(DMAContext *dma)
+{
+ return !!dma;
+}
+
+/* Checks that the given range of addresses is valid for DMA. This is
+ * useful for certain cases, but usually you should just use
+ * dma_memory_{read,write}() and check for errors */
+bool iommu_dma_memory_valid(DMAContext *dma, dma_addr_t addr, dma_addr_t len,
+ DMADirection dir);
+static inline bool dma_memory_valid(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ return true;
+ } else {
+ return iommu_dma_memory_valid(dma, addr, len, dir);
+ }
+}
+
+int iommu_dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len, DMADirection dir);
+static inline int dma_memory_rw_relaxed(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ /* Fast-path for no IOMMU */
+ cpu_physical_memory_rw(addr, buf, len,
+ dir == DMA_DIRECTION_FROM_DEVICE);
+ return 0;
+ } else {
+ return iommu_dma_memory_rw(dma, addr, buf, len, dir);
+ }
+}
+
+static inline int dma_memory_read_relaxed(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write_relaxed(DMAContext *dma, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return dma_memory_rw_relaxed(dma, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE);
+}
+
+static inline int dma_memory_rw(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len,
+ DMADirection dir)
+{
+ dma_barrier(dma, dir);
+
+ return dma_memory_rw_relaxed(dma, addr, buf, len, dir);
+}
+
+static inline int dma_memory_read(DMAContext *dma, dma_addr_t addr,
+ void *buf, dma_addr_t len)
+{
+ return dma_memory_rw(dma, addr, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static inline int dma_memory_write(DMAContext *dma, dma_addr_t addr,
+ const void *buf, dma_addr_t len)
+{
+ return dma_memory_rw(dma, addr, (void *)buf, len,
+ DMA_DIRECTION_FROM_DEVICE);
+}
+
+int iommu_dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c,
+ dma_addr_t len);
+
+int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len);
+
+void *iommu_dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir);
+static inline void *dma_memory_map(DMAContext *dma,
+ dma_addr_t addr, dma_addr_t *len,
+ DMADirection dir)
+{
+ if (!dma_has_iommu(dma)) {
+ target_phys_addr_t xlen = *len;
+ void *p;
+
+ p = cpu_physical_memory_map(addr, &xlen,
+ dir == DMA_DIRECTION_FROM_DEVICE);
+ *len = xlen;
+ return p;
+ } else {
+ return iommu_dma_memory_map(dma, addr, len, dir);
+ }
+}
+
+void iommu_dma_memory_unmap(DMAContext *dma,
+ void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len);
+static inline void dma_memory_unmap(DMAContext *dma,
+ void *buffer, dma_addr_t len,
+ DMADirection dir, dma_addr_t access_len)
+{
+ if (!dma_has_iommu(dma)) {
+ cpu_physical_memory_unmap(buffer, (target_phys_addr_t)len,
+ dir == DMA_DIRECTION_FROM_DEVICE,
+ access_len);
+ } else {
+ iommu_dma_memory_unmap(dma, buffer, len, dir, access_len);
+ }
+}
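+
+/*
+ * Typical map/use/unmap pattern (an illustrative sketch, not part of
+ * this patch): map as much of the region as the backend allows, access
+ * it, then unmap while reporting how many bytes were really touched:
+ *
+ *     dma_addr_t plen = len;
+ *     void *p = dma_memory_map(dma, addr, &plen,
+ *                              DMA_DIRECTION_FROM_DEVICE);
+ *     if (p) {
+ *         ... fill at most plen bytes ...
+ *         dma_memory_unmap(dma, p, plen, DMA_DIRECTION_FROM_DEVICE, plen);
+ *     }
+ */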
+
+#define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \
+ static inline uint##_bits##_t ld##_lname##_##_end##_dma(DMAContext *dma, \
+ dma_addr_t addr) \
+ { \
+ uint##_bits##_t val; \
+ dma_memory_read(dma, addr, &val, (_bits) / 8); \
+ return _end##_bits##_to_cpu(val); \
+ } \
+ static inline void st##_sname##_##_end##_dma(DMAContext *dma, \
+ dma_addr_t addr, \
+ uint##_bits##_t val) \
+ { \
+ val = cpu_to_##_end##_bits(val); \
+ dma_memory_write(dma, addr, &val, (_bits) / 8); \
+ }
+
+static inline uint8_t ldub_dma(DMAContext *dma, dma_addr_t addr)
+{
+ uint8_t val;
+
+ dma_memory_read(dma, addr, &val, 1);
+ return val;
+}
+
+static inline void stb_dma(DMAContext *dma, dma_addr_t addr, uint8_t val)
+{
+ dma_memory_write(dma, addr, &val, 1);
+}
+
+DEFINE_LDST_DMA(uw, w, 16, le);
+DEFINE_LDST_DMA(l, l, 32, le);
+DEFINE_LDST_DMA(q, q, 64, le);
+DEFINE_LDST_DMA(uw, w, 16, be);
+DEFINE_LDST_DMA(l, l, 32, be);
+DEFINE_LDST_DMA(q, q, 64, be);
+
+#undef DEFINE_LDST_DMA
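+
+/*
+ * Usage sketch (illustrative only, not part of this patch): with the
+ * helpers generated above, a device model holding a DMAContext *dma can
+ * access little-endian guest structures directly, e.g.:
+ *
+ *     uint32_t next = ldl_le_dma(dma, desc_addr + 4);
+ *     stb_dma(dma, desc_addr + 11, 0x80);
+ *
+ * A NULL DMAContext takes the no-IOMMU fast path via
+ * cpu_physical_memory_rw().
+ */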
+
+void dma_context_init(DMAContext *dma, DMATranslateFunc translate,
+ DMAMapFunc map, DMAUnmapFunc unmap);
struct ScatterGatherEntry {
dma_addr_t base;
dma_addr_t len;
};
-void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint);
+void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint, DMAContext *dma);
void qemu_sglist_add(QEMUSGList *qsg, dma_addr_t base, dma_addr_t len);
void qemu_sglist_destroy(QEMUSGList *qsg);
#endif
diff --git a/docs/qapi-code-gen.txt b/docs/qapi-code-gen.txt
index ad11767a2f..cccb11e562 100644
--- a/docs/qapi-code-gen.txt
+++ b/docs/qapi-code-gen.txt
@@ -220,6 +220,8 @@ Example:
#endif
mdroth@illuin:~/w/qemu2.git$
+(The actual structure of the visit_type_* functions is a bit more complex
+in order to propagate errors correctly and avoid leaking memory).
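+
+Roughly, each generated visitor follows a shape like the following
+(an illustrative sketch for a hypothetical MyType, not verbatim
+generator output):
+
+    void visit_type_MyType(Visitor *m, MyType **obj, const char *name,
+                           Error **errp)
+    {
+        Error *err = NULL;
+
+        visit_start_struct(m, (void **)obj, "MyType", name,
+                           sizeof(MyType), &err);
+        if (!err) {
+            visit_type_MyType_fields(m, obj, &err);
+            visit_end_struct(m, &err);
+        }
+        error_propagate(errp, err);
+    }
+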
=== scripts/qapi-commands.py ===
diff --git a/docs/specs/qcow2.txt b/docs/specs/qcow2.txt
index 87bf785fe0..36a559d886 100644
--- a/docs/specs/qcow2.txt
+++ b/docs/specs/qcow2.txt
@@ -75,13 +75,23 @@ in the description of a field.
Bitmask of incompatible features. An implementation must
fail to open an image if an unknown bit is set.
- Bits 0-63: Reserved (set to 0)
+ Bit 0: Dirty bit. If this bit is set, then refcounts
+ may be inconsistent; scan the L1/L2 tables
+ to repair the refcounts before accessing the
+ image.
+
+ Bits 1-63: Reserved (set to 0)
80 - 87: compatible_features
Bitmask of compatible features. An implementation can
safely ignore any unknown bits that are set.
- Bits 0-63: Reserved (set to 0)
+ Bit 0: Lazy refcounts bit. If this bit is set then
+ lazy refcount updates can be used. This means
+ marking the image file dirty and postponing
+ refcount metadata updates.
+
+ Bits 1-63: Reserved (set to 0)
88 - 95: autoclear_features
Bitmask of auto-clear features. An implementation may only
diff --git a/docs/usb-storage.txt b/docs/usb-storage.txt
new file mode 100644
index 0000000000..e58e849d4d
--- /dev/null
+++ b/docs/usb-storage.txt
@@ -0,0 +1,38 @@
+
+qemu usb storage emulation
+--------------------------
+
+QEMU has two emulations for usb storage devices.
+
+Number one emulates the classic bulk-only transport protocol which is
+used by 99% of the usb sticks on the market today and is called
+"usb-storage". Usage (hooking up to xhci, other host controllers work
+too):
+
+ qemu ${other_vm_args} \
+ -drive if=none,id=stick,file=/path/to/file.img \
+ -device nec-usb-xhci,id=xhci \
+ -device usb-storage,bus=xhci.0,drive=stick
+
+
+Number two is the newer usb attached scsi transport. This one doesn't
+automagically create a scsi disk, so you have to attach one
+explicitly. Multiple logical units are supported. Here is an example
+with three logical units:
+
+ qemu ${other_vm_args} \
+ -drive if=none,id=uas-disk1,file=/path/to/file1.img \
+ -drive if=none,id=uas-disk2,file=/path/to/file2.img \
+ -drive if=none,id=uas-cdrom,media=cdrom,file=/path/to/image.iso \
+ -device nec-usb-xhci,id=xhci \
+ -device usb-uas,id=uas,bus=xhci.0 \
+ -device scsi-hd,bus=uas.0,scsi-id=0,lun=0,drive=uas-disk1 \
+ -device scsi-hd,bus=uas.0,scsi-id=0,lun=1,drive=uas-disk2 \
+ -device scsi-cd,bus=uas.0,scsi-id=0,lun=5,drive=uas-cdrom
+
+
+enjoy,
+ Gerd
+
+--
+Gerd Hoffmann <kraxel@redhat.com>
diff --git a/elf.h b/elf.h
index 9c9acfaf75..a21ea535bd 100644
--- a/elf.h
+++ b/elf.h
@@ -106,6 +106,8 @@ typedef int64_t Elf64_Sxword;
#define EM_H8S 48 /* Hitachi H8S */
#define EM_LATTICEMICO32 138 /* LatticeMico32 */
+#define EM_OPENRISC 92 /* OpenCores OpenRISC */
+
#define EM_UNICORE32 110 /* UniCore32 */
/*
diff --git a/error.c b/error.c
index a52b7710d2..58f55a012e 100644
--- a/error.c
+++ b/error.c
@@ -32,6 +32,7 @@ void error_set(Error **errp, const char *fmt, ...)
if (errp == NULL) {
return;
}
+ assert(*errp == NULL);
err = g_malloc0(sizeof(*err));
@@ -132,7 +133,7 @@ bool error_is_type(Error *err, const char *fmt)
void error_propagate(Error **dst_err, Error *local_err)
{
- if (dst_err) {
+ if (dst_err && !*dst_err) {
*dst_err = local_err;
} else if (local_err) {
error_free(local_err);
diff --git a/error.h b/error.h
index 45ff6c1ffe..3d9d96def0 100644
--- a/error.h
+++ b/error.h
@@ -57,7 +57,7 @@ void error_set_field(Error *err, const char *field, const char *value);
/**
* Propagate an error to an indirect pointer to an error. This function will
* always transfer ownership of the error reference and handles the case where
- * dst_err is NULL correctly.
+ * dst_err is NULL correctly. Errors after the first are discarded.
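+ *
+ * Typical usage (an illustrative sketch; foo() is a hypothetical callee
+ * that takes an Error **):
+ *
+ *   Error *local_err = NULL;
+ *   foo(arg, &local_err);
+ *   error_propagate(errp, local_err);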
*/
void error_propagate(Error **dst_err, Error *local_err);
diff --git a/event_notifier.c b/event_notifier.c
index 0b829813d3..2c207e1399 100644
--- a/event_notifier.c
+++ b/event_notifier.c
@@ -10,11 +10,19 @@
* See the COPYING file in the top-level directory.
*/
+#include "qemu-common.h"
#include "event_notifier.h"
+#include "qemu-char.h"
+
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
+void event_notifier_init_fd(EventNotifier *e, int fd)
+{
+ e->fd = fd;
+}
+
int event_notifier_init(EventNotifier *e, int active)
{
#ifdef CONFIG_EVENTFD
@@ -38,24 +46,22 @@ int event_notifier_get_fd(EventNotifier *e)
return e->fd;
}
-int event_notifier_test_and_clear(EventNotifier *e)
+int event_notifier_set_handler(EventNotifier *e,
+ EventNotifierHandler *handler)
{
- uint64_t value;
- int r = read(e->fd, &value, sizeof(value));
+ return qemu_set_fd_handler(e->fd, (IOHandler *)handler, NULL, e);
+}
+
+int event_notifier_set(EventNotifier *e)
+{
+ uint64_t value = 1;
+ int r = write(e->fd, &value, sizeof(value));
return r == sizeof(value);
}
-int event_notifier_test(EventNotifier *e)
+int event_notifier_test_and_clear(EventNotifier *e)
{
uint64_t value;
int r = read(e->fd, &value, sizeof(value));
- if (r == sizeof(value)) {
- /* restore previous value. */
- int s = write(e->fd, &value, sizeof(value));
- /* never blocks because we use EFD_SEMAPHORE.
- * If we didn't we'd get EAGAIN on overflow
- * and we'd have to write code to ignore it. */
- assert(s == sizeof(value));
- }
return r == sizeof(value);
}
diff --git a/event_notifier.h b/event_notifier.h
index 886222cb36..f0ec2f2171 100644
--- a/event_notifier.h
+++ b/event_notifier.h
@@ -16,13 +16,17 @@
#include "qemu-common.h"
struct EventNotifier {
- int fd;
+ int fd;
};
+typedef void EventNotifierHandler(EventNotifier *);
+
+void event_notifier_init_fd(EventNotifier *, int fd);
int event_notifier_init(EventNotifier *, int active);
void event_notifier_cleanup(EventNotifier *);
int event_notifier_get_fd(EventNotifier *);
+int event_notifier_set(EventNotifier *);
int event_notifier_test_and_clear(EventNotifier *);
-int event_notifier_test(EventNotifier *);
+int event_notifier_set_handler(EventNotifier *, EventNotifierHandler *);
#endif
diff --git a/exec-obsolete.h b/exec-obsolete.h
index 792c831718..c09925610d 100644
--- a/exec-obsolete.h
+++ b/exec-obsolete.h
@@ -45,15 +45,15 @@ int cpu_physical_memory_set_dirty_tracking(int enable);
#define CODE_DIRTY_FLAG 0x02
#define MIGRATION_DIRTY_FLAG 0x08
-/* read dirty bit (return 0 or 1) */
-static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}
-static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+/* read dirty bit (return 0 or 1) */
+static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
+ return cpu_physical_memory_get_dirty_flags(addr) == 0xff;
}
static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
@@ -61,41 +61,55 @@ static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
int dirty_flags)
{
int ret = 0;
- uint8_t *p;
ram_addr_t addr, end;
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
- p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- ret |= *p++ & dirty_flags;
+ ret |= cpu_physical_memory_get_dirty_flags(addr) & dirty_flags;
}
return ret;
}
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+ !cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+ MIGRATION_DIRTY_FLAG)) {
+ ram_list.dirty_pages++;
+ }
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
- ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+ cpu_physical_memory_set_dirty_flags(addr, 0xff);
}
-static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
- int dirty_flags)
+static inline int cpu_physical_memory_clear_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
{
- return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+ int mask = ~dirty_flags;
+
+ if ((dirty_flags & MIGRATION_DIRTY_FLAG) &&
+ cpu_physical_memory_get_dirty(addr, TARGET_PAGE_SIZE,
+ MIGRATION_DIRTY_FLAG)) {
+ ram_list.dirty_pages--;
+ }
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] &= mask;
}
static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
ram_addr_t length,
int dirty_flags)
{
- uint8_t *p;
ram_addr_t addr, end;
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
- p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- *p++ |= dirty_flags;
+ cpu_physical_memory_set_dirty_flags(addr, dirty_flags);
}
}
@@ -103,16 +117,12 @@ static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
ram_addr_t length,
int dirty_flags)
{
- int mask;
- uint8_t *p;
ram_addr_t addr, end;
end = TARGET_PAGE_ALIGN(start + length);
start &= TARGET_PAGE_MASK;
- mask = ~dirty_flags;
- p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
for (addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
- *p++ &= mask;
+ cpu_physical_memory_clear_dirty_flags(addr, dirty_flags);
}
}
diff --git a/exec.c b/exec.c
index 8244d54a85..a42a0b5b78 100644
--- a/exec.c
+++ b/exec.c
@@ -1824,11 +1824,29 @@ void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
+static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
+ uintptr_t length)
+{
+ uintptr_t start1;
+
+ /* we modify the TLB cache so that the dirty bit will be set again
+ when accessing the range */
+ start1 = (uintptr_t)qemu_safe_ram_ptr(start);
+ /* Check that we don't span multiple blocks - this breaks the
+ address comparisons below. */
+ if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
+ != (end - 1) - start) {
+ abort();
+ }
+ cpu_tlb_reset_dirty_all(start1, length);
+
+}
+
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
int dirty_flags)
{
- uintptr_t length, start1;
+ uintptr_t length;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
@@ -1838,16 +1856,9 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
return;
cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
- /* we modify the TLB cache so that the dirty bit will be set again
- when accessing the range */
- start1 = (uintptr_t)qemu_safe_ram_ptr(start);
- /* Check that we don't span multiple blocks - this breaks the
- address comparisons below. */
- if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
- != (end - 1) - start) {
- abort();
+ if (tcg_enabled()) {
+ tlb_reset_dirty_range_all(start, end, length);
}
- cpu_tlb_reset_dirty_all(start1, length);
}
int cpu_physical_memory_set_dirty_tracking(int enable)
@@ -2229,14 +2240,6 @@ static void phys_sections_clear(void)
phys_sections_nb = 0;
}
-/* register physical memory.
- For RAM, 'size' must be a multiple of the target page size.
- If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
- io memory page. The address used when calling the IO function is
- the offset from the start of the region, plus region_offset. Both
- start_addr and region_offset are rounded down to a page boundary
- before calculating this offset. This should not be a problem unless
- the low bits of start_addr and region_offset differ. */
static void register_subpage(MemoryRegionSection *section)
{
subpage_t *subpage;
@@ -2260,7 +2263,7 @@ static void register_subpage(MemoryRegionSection *section)
subpage = container_of(existing->mr, subpage_t, iomem);
}
start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
- end = start + section->size;
+ end = start + section->size - 1;
subpage_register(subpage, start, end, phys_section_add(section));
}
@@ -2294,10 +2297,15 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section,
remain.offset_within_address_space += now.size;
remain.offset_within_region += now.size;
}
- now = remain;
- now.size &= TARGET_PAGE_MASK;
- if (now.size) {
- register_multipage(&now);
+ while (remain.size >= TARGET_PAGE_SIZE) {
+ now = remain;
+ if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
+ now.size = TARGET_PAGE_SIZE;
+ register_subpage(&now);
+ } else {
+ now.size &= TARGET_PAGE_MASK;
+ register_multipage(&now);
+ }
remain.size -= now.size;
remain.offset_within_address_space += now.size;
remain.offset_within_region += now.size;
@@ -2525,26 +2533,14 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
exit(1);
#endif
} else {
-#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- /* S390 KVM requires the topmost vma of the RAM to be smaller than
- an system defined value, which is at least 256GB. Larger systems
- have larger values. We put the guest between the end of data
- segment (system break) and this value. We use 32GB as a base to
- have enough room for the system break to grow. */
- new_block->host = mmap((void*)0x800000000, size,
- PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
- if (new_block->host == MAP_FAILED) {
- fprintf(stderr, "Allocating RAM failed\n");
- abort();
- }
-#else
if (xen_enabled()) {
xen_ram_alloc(new_block->offset, size, mr);
+ } else if (kvm_enabled()) {
+ /* some s390/kvm configurations have special constraints */
+ new_block->host = kvm_vmalloc(size);
} else {
new_block->host = qemu_vmalloc(size);
}
-#endif
qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
}
}
@@ -2554,8 +2550,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
last_ram_offset() >> TARGET_PAGE_BITS);
- memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
- 0xff, size >> TARGET_PAGE_BITS);
+ cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);
if (kvm_enabled())
kvm_setup_guest_memory(new_block->host, size);
@@ -3212,13 +3207,13 @@ static void core_log_global_stop(MemoryListener *listener)
static void core_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void core_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
@@ -3278,13 +3273,13 @@ static void io_log_global_stop(MemoryListener *listener)
static void io_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void io_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
diff --git a/gdbstub.c b/gdbstub.c
index 08cf8645d7..5d37dd98f4 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -1155,6 +1155,68 @@ static int cpu_gdb_write_register(CPUMIPSState *env, uint8_t *mem_buf, int n)
return sizeof(target_ulong);
}
+#elif defined(TARGET_OPENRISC)
+
+#define NUM_CORE_REGS (32 + 3)
+
+static int cpu_gdb_read_register(CPUOpenRISCState *env, uint8_t *mem_buf, int n)
+{
+ if (n < 32) {
+ GET_REG32(env->gpr[n]);
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ GET_REG32(env->ppc);
+ break;
+
+ case 33: /* NPC */
+ GET_REG32(env->npc);
+ break;
+
+ case 34: /* SR */
+ GET_REG32(env->sr);
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+static int cpu_gdb_write_register(CPUOpenRISCState *env,
+ uint8_t *mem_buf, int n)
+{
+ uint32_t tmp;
+
+ if (n > NUM_CORE_REGS) {
+ return 0;
+ }
+
+ tmp = ldl_p(mem_buf);
+
+ if (n < 32) {
+ env->gpr[n] = tmp;
+ } else {
+ switch (n) {
+ case 32: /* PPC */
+ env->ppc = tmp;
+ break;
+
+ case 33: /* NPC */
+ env->npc = tmp;
+ break;
+
+ case 34: /* SR */
+ env->sr = tmp;
+ break;
+
+ default:
+ break;
+ }
+ }
+ return 4;
+}
#elif defined (TARGET_SH4)
/* Hint: Use "set architecture sh4" in GDB to see fpu registers */
@@ -1924,6 +1986,8 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
}
#elif defined (TARGET_MICROBLAZE)
s->c_cpu->sregs[SR_PC] = pc;
+#elif defined(TARGET_OPENRISC)
+ s->c_cpu->pc = pc;
#elif defined (TARGET_CRIS)
s->c_cpu->pc = pc;
#elif defined (TARGET_ALPHA)
diff --git a/hmp-commands.hx b/hmp-commands.hx
index f5d9d91de8..9bbc7f7555 100644
--- a/hmp-commands.hx
+++ b/hmp-commands.hx
@@ -101,7 +101,7 @@ ETEXI
.name = "block_job_cancel",
.args_type = "device:B",
.params = "device",
- .help = "stop an active block streaming operation",
+ .help = "stop an active background block operation",
.mhandler.cmd = hmp_block_job_cancel,
},
@@ -1236,8 +1236,7 @@ ETEXI
.args_type = "fdname:s",
.params = "getfd name",
.help = "receive a file descriptor via SCM rights and assign it a name",
- .user_print = monitor_user_noop,
- .mhandler.cmd_new = do_getfd,
+ .mhandler.cmd = hmp_getfd,
},
STEXI
@@ -1253,8 +1252,7 @@ ETEXI
.args_type = "fdname:s",
.params = "closefd name",
.help = "close a file descriptor previously passed via SCM rights",
- .user_print = monitor_user_noop,
- .mhandler.cmd_new = do_closefd,
+ .mhandler.cmd = hmp_closefd,
},
STEXI
diff --git a/hmp.c b/hmp.c
index b9cec1dafb..25688ab0e2 100644
--- a/hmp.c
+++ b/hmp.c
@@ -145,6 +145,8 @@ void hmp_info_migrate(Monitor *mon)
info->ram->remaining >> 10);
monitor_printf(mon, "total ram: %" PRIu64 " kbytes\n",
info->ram->total >> 10);
+ monitor_printf(mon, "total time: %" PRIu64 " milliseconds\n",
+ info->ram->total_time);
}
if (info->has_disk) {
@@ -225,6 +227,8 @@ void hmp_info_block(Monitor *mon)
if (info->value->inserted->has_backing_file) {
monitor_printf(mon, " backing_file=");
monitor_print_filename(mon, info->value->inserted->backing_file);
+ monitor_printf(mon, " backing_file_depth=%" PRId64,
+ info->value->inserted->backing_file_depth);
}
monitor_printf(mon, " ro=%d drv=%s encrypted=%d",
info->value->inserted->ro,
@@ -1000,3 +1004,21 @@ void hmp_netdev_del(Monitor *mon, const QDict *qdict)
qmp_netdev_del(id, &err);
hmp_handle_error(mon, &err);
}
+
+void hmp_getfd(Monitor *mon, const QDict *qdict)
+{
+ const char *fdname = qdict_get_str(qdict, "fdname");
+ Error *errp = NULL;
+
+ qmp_getfd(fdname, &errp);
+ hmp_handle_error(mon, &errp);
+}
+
+void hmp_closefd(Monitor *mon, const QDict *qdict)
+{
+ const char *fdname = qdict_get_str(qdict, "fdname");
+ Error *errp = NULL;
+
+ qmp_closefd(fdname, &errp);
+ hmp_handle_error(mon, &errp);
+}
diff --git a/hmp.h b/hmp.h
index 79d138d3ee..8d2b0d76da 100644
--- a/hmp.h
+++ b/hmp.h
@@ -64,5 +64,7 @@ void hmp_device_del(Monitor *mon, const QDict *qdict);
void hmp_dump_guest_memory(Monitor *mon, const QDict *qdict);
void hmp_netdev_add(Monitor *mon, const QDict *qdict);
void hmp_netdev_del(Monitor *mon, const QDict *qdict);
+void hmp_getfd(Monitor *mon, const QDict *qdict);
+void hmp_closefd(Monitor *mon, const QDict *qdict);
#endif
diff --git a/hw/9pfs/virtio-9p.c b/hw/9pfs/virtio-9p.c
index c633fb9b7e..4b52540116 100644
--- a/hw/9pfs/virtio-9p.c
+++ b/hw/9pfs/virtio-9p.c
@@ -983,11 +983,16 @@ static void v9fs_attach(void *opaque)
err += offset;
trace_v9fs_attach_return(pdu->tag, pdu->id,
qid.type, qid.version, qid.path);
- s->root_fid = fid;
- /* disable migration */
- error_set(&s->migration_blocker, QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION,
- s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
- migrate_add_blocker(s->migration_blocker);
+ /*
+ * disable migration if we haven't done so already.
+ * attach could get called multiple times for the same export.
+ */
+ if (!s->migration_blocker) {
+ s->root_fid = fid;
+ error_set(&s->migration_blocker, QERR_VIRTFS_FEATURE_BLOCKS_MIGRATION,
+ s->ctx.fs_root ? s->ctx.fs_root : "NULL", s->tag);
+ migrate_add_blocker(s->migration_blocker);
+ }
out:
put_fid(pdu, fidp);
out_nofid:
@@ -1648,7 +1653,7 @@ out:
* with qemu_iovec_destroy().
*/
static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
- uint64_t skip, size_t size,
+ size_t skip, size_t size,
bool is_write)
{
QEMUIOVector elem;
@@ -1665,7 +1670,7 @@ static void v9fs_init_qiov_from_pdu(QEMUIOVector *qiov, V9fsPDU *pdu,
qemu_iovec_init_external(&elem, iov, niov);
qemu_iovec_init(qiov, niov);
- qemu_iovec_copy(qiov, &elem, skip, size);
+ qemu_iovec_concat(qiov, &elem, skip, size);
}
static void v9fs_read(void *opaque)
@@ -1715,7 +1720,7 @@ static void v9fs_read(void *opaque)
qemu_iovec_init(&qiov, qiov_full.niov);
do {
qemu_iovec_reset(&qiov);
- qemu_iovec_copy(&qiov, &qiov_full, count, qiov_full.size - count);
+ qemu_iovec_concat(&qiov, &qiov_full, count, qiov_full.size - count);
if (0) {
print_sg(qiov.iov, qiov.niov);
}
@@ -1970,7 +1975,7 @@ static void v9fs_write(void *opaque)
qemu_iovec_init(&qiov, qiov_full.niov);
do {
qemu_iovec_reset(&qiov);
- qemu_iovec_copy(&qiov, &qiov_full, total, qiov_full.size - total);
+ qemu_iovec_concat(&qiov, &qiov_full, total, qiov_full.size - total);
if (0) {
print_sg(qiov.iov, qiov.niov);
}
diff --git a/hw/Makefile.objs b/hw/Makefile.objs
index 3d7725934f..12cc141796 100644
--- a/hw/Makefile.objs
+++ b/hw/Makefile.objs
@@ -86,7 +86,9 @@ hw-obj-$(CONFIG_OPENCORES_ETH) += opencores_eth.o
# SCSI layer
hw-obj-$(CONFIG_LSI_SCSI_PCI) += lsi53c895a.o
+hw-obj-$(CONFIG_MEGASAS_SCSI_PCI) += megasas.o
hw-obj-$(CONFIG_ESP) += esp.o
+hw-obj-$(CONFIG_ESP_PCI) += esp-pci.o
hw-obj-y += sysbus.o isa-bus.o
hw-obj-y += qdev-addr.o
@@ -137,7 +139,7 @@ common-obj-$(CONFIG_MAX111X) += max111x.o
common-obj-$(CONFIG_DS1338) += ds1338.o
common-obj-y += i2c.o smbus.o smbus_eeprom.o
common-obj-y += eeprom93xx.o
-common-obj-y += scsi-disk.o cdrom.o
+common-obj-y += scsi-disk.o cdrom.o hd-geometry.o block-common.o
common-obj-y += scsi-generic.o scsi-bus.o
common-obj-y += hid.o
common-obj-$(CONFIG_SSI) += ssi.o
diff --git a/hw/ac97.c b/hw/ac97.c
index e791b9d3e6..0f561fa5c1 100644
--- a/hw/ac97.c
+++ b/hw/ac97.c
@@ -1319,13 +1319,12 @@ static int ac97_initfn (PCIDevice *dev)
return 0;
}
-static int ac97_exitfn (PCIDevice *dev)
+static void ac97_exitfn (PCIDevice *dev)
{
AC97LinkState *s = DO_UPCAST (AC97LinkState, dev, dev);
memory_region_destroy (&s->io_nam);
memory_region_destroy (&s->io_nabm);
- return 0;
}
int ac97_init (PCIBus *bus)
diff --git a/hw/apic-msidef.h b/hw/apic-msidef.h
new file mode 100644
index 0000000000..6e2eb71f2f
--- /dev/null
+++ b/hw/apic-msidef.h
@@ -0,0 +1,30 @@
+#ifndef HW_APIC_MSIDEF_H
+#define HW_APIC_MSIDEF_H
+
+/*
+ * Intel APIC constants: from include/asm/msidef.h
+ */
+
+/*
+ * Shifts for MSI data
+ */
+
+#define MSI_DATA_VECTOR_SHIFT 0
+#define MSI_DATA_VECTOR_MASK 0x000000ff
+
+#define MSI_DATA_DELIVERY_MODE_SHIFT 8
+#define MSI_DATA_LEVEL_SHIFT 14
+#define MSI_DATA_TRIGGER_SHIFT 15
+
+/*
+ * Shift/mask fields for msi address
+ */
+
+#define MSI_ADDR_DEST_MODE_SHIFT 2
+
+#define MSI_ADDR_REDIRECTION_SHIFT 3
+
+#define MSI_ADDR_DEST_ID_SHIFT 12
+#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
+
+#endif /* HW_APIC_MSIDEF_H */
diff --git a/hw/apic.c b/hw/apic.c
index 5fbf01c278..385555eb43 100644
--- a/hw/apic.c
+++ b/hw/apic.c
@@ -16,6 +16,7 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
+#include "qemu-thread.h"
#include "apic_internal.h"
#include "apic.h"
#include "ioapic.h"
@@ -23,19 +24,10 @@
#include "host-utils.h"
#include "trace.h"
#include "pc.h"
+#include "apic-msidef.h"
#define MAX_APIC_WORDS 8
-/* Intel APIC constants: from include/asm/msidef.h */
-#define MSI_DATA_VECTOR_SHIFT 0
-#define MSI_DATA_VECTOR_MASK 0x000000ff
-#define MSI_DATA_DELIVERY_MODE_SHIFT 8
-#define MSI_DATA_TRIGGER_SHIFT 15
-#define MSI_DATA_LEVEL_SHIFT 14
-#define MSI_ADDR_DEST_MODE_SHIFT 2
-#define MSI_ADDR_DEST_ID_SHIFT 12
-#define MSI_ADDR_DEST_ID_MASK 0x00ffff0
-
#define SYNC_FROM_VAPIC 0x1
#define SYNC_TO_VAPIC 0x2
#define SYNC_ISR_IRR_TO_VAPIC 0x4
@@ -370,11 +362,10 @@ static void apic_update_irq(APICCommonState *s)
if (!(s->spurious_vec & APIC_SV_ENABLE)) {
return;
}
- if (apic_irq_pending(s) > 0) {
+ if (!qemu_cpu_is_self(s->cpu_env)) {
+ cpu_interrupt(s->cpu_env, CPU_INTERRUPT_POLL);
+ } else if (apic_irq_pending(s) > 0) {
cpu_interrupt(s->cpu_env, CPU_INTERRUPT_HARD);
- } else if (apic_accept_pic_intr(&s->busdev.qdev) &&
- pic_get_output(isa_pic)) {
- apic_deliver_pic_intr(&s->busdev.qdev, 1);
}
}
@@ -544,6 +535,15 @@ static void apic_deliver(DeviceState *d, uint8_t dest, uint8_t dest_mode,
apic_bus_deliver(deliver_bitmask, delivery_mode, vector_num, trigger_mode);
}
+static bool apic_check_pic(APICCommonState *s)
+{
+ if (!apic_accept_pic_intr(&s->busdev.qdev) || !pic_get_output(isa_pic)) {
+ return false;
+ }
+ apic_deliver_pic_intr(&s->busdev.qdev, 1);
+ return true;
+}
+
int apic_get_interrupt(DeviceState *d)
{
APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
@@ -569,7 +569,12 @@ int apic_get_interrupt(DeviceState *d)
reset_bit(s->irr, intno);
set_bit(s->isr, intno);
apic_sync_vapic(s, SYNC_TO_VAPIC);
+
+ /* re-inject if there is still a pending PIC interrupt */
+ apic_check_pic(s);
+
apic_update_irq(s);
+
return intno;
}
@@ -809,8 +814,11 @@ static void apic_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
int n = index - 0x32;
s->lvt[n] = val;
- if (n == APIC_LVT_TIMER)
+ if (n == APIC_LVT_TIMER) {
apic_timer_update(s, qemu_get_clock_ns(vm_clock));
+ } else if (n == APIC_LVT_LINT0 && apic_check_pic(s)) {
+ apic_update_irq(s);
+ }
}
break;
case 0x38:
diff --git a/hw/apic.h b/hw/apic.h
index 62179cebee..1d48e027c3 100644
--- a/hw/apic.h
+++ b/hw/apic.h
@@ -20,9 +20,13 @@ void apic_init_reset(DeviceState *s);
void apic_sipi(DeviceState *s);
void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip,
TPRAccess access);
+void apic_poll_irq(DeviceState *d);
+void apic_designate_bsp(DeviceState *d);
/* pc.c */
-int cpu_is_bsp(CPUX86State *env);
DeviceState *cpu_get_current_apic(void);
+/* cpu.c */
+bool cpu_is_bsp(X86CPU *cpu);
+
#endif
diff --git a/hw/apic_common.c b/hw/apic_common.c
index 60b82596e7..58e63b00da 100644
--- a/hw/apic_common.c
+++ b/hw/apic_common.c
@@ -43,8 +43,8 @@ uint64_t cpu_get_apic_base(DeviceState *d)
trace_cpu_get_apic_base((uint64_t)s->apicbase);
return s->apicbase;
} else {
- trace_cpu_get_apic_base(0);
- return 0;
+ trace_cpu_get_apic_base(MSR_IA32_APICBASE_BSP);
+ return MSR_IA32_APICBASE_BSP;
}
}
@@ -201,13 +201,23 @@ void apic_init_reset(DeviceState *d)
s->timer_expiry = -1;
}
+void apic_designate_bsp(DeviceState *d)
+{
+ if (d == NULL) {
+ return;
+ }
+
+ APICCommonState *s = APIC_COMMON(d);
+ s->apicbase |= MSR_IA32_APICBASE_BSP;
+}
+
static void apic_reset_common(DeviceState *d)
{
APICCommonState *s = DO_UPCAST(APICCommonState, busdev.qdev, d);
APICCommonClass *info = APIC_COMMON_GET_CLASS(s);
bool bsp;
- bsp = cpu_is_bsp(s->cpu_env);
+ bsp = cpu_is_bsp(x86_env_get_cpu(s->cpu_env));
s->apicbase = 0xfee00000 |
(bsp ? MSR_IA32_APICBASE_BSP : 0) | MSR_IA32_APICBASE_ENABLE;
diff --git a/hw/apic_internal.h b/hw/apic_internal.h
index 60a6a8bdae..4d8ff490ce 100644
--- a/hw/apic_internal.h
+++ b/hw/apic_internal.h
@@ -141,7 +141,6 @@ void apic_report_irq_delivered(int delivered);
bool apic_next_timer(APICCommonState *s, int64_t current_time);
void apic_enable_tpr_access_reporting(DeviceState *d, bool enable);
void apic_enable_vapic(DeviceState *d, target_phys_addr_t paddr);
-void apic_poll_irq(DeviceState *d);
void vapic_report_tpr_access(DeviceState *dev, void *cpu, target_ulong ip,
TPRAccess access);
diff --git a/hw/arm-misc.h b/hw/arm-misc.h
index 1f96229d3c..bdd8fecc99 100644
--- a/hw/arm-misc.h
+++ b/hw/arm-misc.h
@@ -25,7 +25,7 @@ qemu_irq *armv7m_init(MemoryRegion *address_space_mem,
/* arm_boot.c */
struct arm_boot_info {
- int ram_size;
+ uint64_t ram_size;
const char *kernel_filename;
const char *kernel_cmdline;
const char *initrd_filename;
diff --git a/hw/arm/Makefile.objs b/hw/arm/Makefile.objs
index 88ff47d95e..c413780784 100644
--- a/hw/arm/Makefile.objs
+++ b/hw/arm/Makefile.objs
@@ -11,6 +11,7 @@ obj-y += realview_gic.o realview.o arm_sysctl.o arm11mpcore.o a9mpcore.o
obj-y += exynos4210_gic.o exynos4210_combiner.o exynos4210.o
obj-y += exynos4_boards.o exynos4210_uart.o exynos4210_pwm.o
obj-y += exynos4210_pmu.o exynos4210_mct.o exynos4210_fimd.o
+obj-y += exynos4210_rtc.o exynos4210_i2c.o
obj-y += arm_l2x0.o
obj-y += arm_mptimer.o a15mpcore.o
obj-y += armv7m.o armv7m_nvic.o stellaris.o pl022.o stellaris_enet.o
@@ -34,6 +35,8 @@ obj-y += framebuffer.o
obj-y += vexpress.o
obj-y += strongarm.o
obj-y += collie.o
+obj-y += imx_serial.o imx_ccm.o imx_timer.o imx_avic.o
+obj-y += kzm.o
obj-y += pl041.o lm4549.o
obj-$(CONFIG_FDT) += ../device_tree.o
diff --git a/hw/arm_boot.c b/hw/arm_boot.c
index a1e6ddbc1c..a6e9143662 100644
--- a/hw/arm_boot.c
+++ b/hw/arm_boot.c
@@ -216,11 +216,12 @@ static void set_kernel_args_old(const struct arm_boot_info *info)
static int load_dtb(target_phys_addr_t addr, const struct arm_boot_info *binfo)
{
#ifdef CONFIG_FDT
- uint32_t mem_reg_property[] = { cpu_to_be32(binfo->loader_start),
- cpu_to_be32(binfo->ram_size) };
+ uint32_t *mem_reg_property;
+ uint32_t mem_reg_propsize;
void *fdt = NULL;
char *filename;
int size, rc;
+ uint32_t acells, scells, hival;
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, binfo->dtb_filename);
if (!filename) {
@@ -236,8 +237,36 @@ static int load_dtb(target_phys_addr_t addr, const struct arm_boot_info *binfo)
}
g_free(filename);
+ acells = qemu_devtree_getprop_cell(fdt, "/", "#address-cells");
+ scells = qemu_devtree_getprop_cell(fdt, "/", "#size-cells");
+ if (acells == 0 || scells == 0) {
+ fprintf(stderr, "dtb file invalid (#address-cells or #size-cells 0)\n");
+ return -1;
+ }
+
+ mem_reg_propsize = acells + scells;
+ mem_reg_property = g_new0(uint32_t, mem_reg_propsize);
+ mem_reg_property[acells - 1] = cpu_to_be32(binfo->loader_start);
+ hival = cpu_to_be32(binfo->loader_start >> 32);
+ if (acells > 1) {
+ mem_reg_property[acells - 2] = hival;
+ } else if (hival != 0) {
+ fprintf(stderr, "qemu: dtb file not compatible with "
+ "RAM start address > 4GB\n");
+ exit(1);
+ }
+ mem_reg_property[acells + scells - 1] = cpu_to_be32(binfo->ram_size);
+ hival = cpu_to_be32(binfo->ram_size >> 32);
+ if (scells > 1) {
+ mem_reg_property[acells + scells - 2] = hival;
+ } else if (hival != 0) {
+ fprintf(stderr, "qemu: dtb file not compatible with "
+ "RAM size > 4GB\n");
+ exit(1);
+ }
+
rc = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property,
- sizeof(mem_reg_property));
+ mem_reg_propsize * sizeof(uint32_t));
if (rc < 0) {
fprintf(stderr, "couldn't set /memory/reg\n");
}
@@ -357,7 +386,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
if (kernel_size < 0) {
entry = info->loader_start + KERNEL_LOAD_ADDR;
kernel_size = load_image_targphys(info->kernel_filename, entry,
- ram_size - KERNEL_LOAD_ADDR);
+ info->ram_size - KERNEL_LOAD_ADDR);
is_linux = 1;
}
if (kernel_size < 0) {
@@ -371,7 +400,8 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
initrd_size = load_image_targphys(info->initrd_filename,
info->loader_start
+ INITRD_LOAD_ADDR,
- ram_size - INITRD_LOAD_ADDR);
+ info->ram_size
+ - INITRD_LOAD_ADDR);
if (initrd_size < 0) {
fprintf(stderr, "qemu: could not load initrd '%s'\n",
info->initrd_filename);
@@ -398,6 +428,12 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
bootloader[5] = dtb_start;
} else {
bootloader[5] = info->loader_start + KERNEL_ARGS_ADDR;
+ if (info->ram_size >= (1ULL << 32)) {
+ fprintf(stderr, "qemu: RAM size must be less than 4GB to boot"
+ " Linux kernel using ATAGS (try passing a device tree"
+ " using -dtb)\n");
+ exit(1);
+ }
}
bootloader[6] = entry;
for (n = 0; n < sizeof(bootloader) / 4; n++) {
diff --git a/hw/arm_gic.c b/hw/arm_gic.c
index ec22322930..186ac66f00 100644
--- a/hw/arm_gic.c
+++ b/hw/arm_gic.c
@@ -25,7 +25,7 @@
#ifdef DEBUG_GIC
#define DPRINTF(fmt, ...) \
-do { printf("arm_gic: " fmt , ## __VA_ARGS__); } while (0)
+do { fprintf(stderr, "arm_gic: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while(0)
#endif
diff --git a/hw/block-common.c b/hw/block-common.c
new file mode 100644
index 0000000000..f0196d78dc
--- /dev/null
+++ b/hw/block-common.c
@@ -0,0 +1,64 @@
+/*
+ * Common code for block device models
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#include "blockdev.h"
+#include "hw/block-common.h"
+#include "qemu-error.h"
+
+void blkconf_serial(BlockConf *conf, char **serial)
+{
+ DriveInfo *dinfo;
+
+ if (!*serial) {
+ /* try to fall back to value set with legacy -drive serial=... */
+ dinfo = drive_get_by_blockdev(conf->bs);
+ if (dinfo->serial) {
+ *serial = g_strdup(dinfo->serial);
+ }
+ }
+}
+
+int blkconf_geometry(BlockConf *conf, int *ptrans,
+ unsigned cyls_max, unsigned heads_max, unsigned secs_max)
+{
+ DriveInfo *dinfo;
+
+ if (!conf->cyls && !conf->heads && !conf->secs) {
+ /* try to fall back to value set with legacy -drive cyls=... */
+ dinfo = drive_get_by_blockdev(conf->bs);
+ conf->cyls = dinfo->cyls;
+ conf->heads = dinfo->heads;
+ conf->secs = dinfo->secs;
+ if (ptrans) {
+ *ptrans = dinfo->trans;
+ }
+ }
+ if (!conf->cyls && !conf->heads && !conf->secs) {
+ hd_geometry_guess(conf->bs,
+ &conf->cyls, &conf->heads, &conf->secs,
+ ptrans);
+ } else if (ptrans && *ptrans == BIOS_ATA_TRANSLATION_AUTO) {
+ *ptrans = hd_bios_chs_auto_trans(conf->cyls, conf->heads, conf->secs);
+ }
+ if (conf->cyls || conf->heads || conf->secs) {
+ if (conf->cyls < 1 || conf->cyls > cyls_max) {
+ error_report("cyls must be between 1 and %u", cyls_max);
+ return -1;
+ }
+ if (conf->heads < 1 || conf->heads > heads_max) {
+ error_report("heads must be between 1 and %u", heads_max);
+ return -1;
+ }
+ if (conf->secs < 1 || conf->secs > secs_max) {
+ error_report("secs must be between 1 and %u", secs_max);
+ return -1;
+ }
+ }
+ return 0;
+}
diff --git a/hw/block-common.h b/hw/block-common.h
new file mode 100644
index 0000000000..bb808f7f56
--- /dev/null
+++ b/hw/block-common.h
@@ -0,0 +1,79 @@
+/*
+ * Common code for block device models
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later. See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_BLOCK_COMMON_H
+#define HW_BLOCK_COMMON_H
+
+#include "qemu-common.h"
+
+/* Configuration */
+
+typedef struct BlockConf {
+ BlockDriverState *bs;
+ uint16_t physical_block_size;
+ uint16_t logical_block_size;
+ uint16_t min_io_size;
+ uint32_t opt_io_size;
+ int32_t bootindex;
+ uint32_t discard_granularity;
+ /* geometry, not all devices use this */
+ uint32_t cyls, heads, secs;
+} BlockConf;
+
+static inline unsigned int get_physical_block_exp(BlockConf *conf)
+{
+ unsigned int exp = 0, size;
+
+ for (size = conf->physical_block_size;
+ size > conf->logical_block_size;
+ size >>= 1) {
+ exp++;
+ }
+
+ return exp;
+}
+
+#define DEFINE_BLOCK_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_DRIVE("drive", _state, _conf.bs), \
+ DEFINE_PROP_BLOCKSIZE("logical_block_size", _state, \
+ _conf.logical_block_size, 512), \
+ DEFINE_PROP_BLOCKSIZE("physical_block_size", _state, \
+ _conf.physical_block_size, 512), \
+ DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 0), \
+ DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 0), \
+ DEFINE_PROP_INT32("bootindex", _state, _conf.bootindex, -1), \
+ DEFINE_PROP_UINT32("discard_granularity", _state, \
+ _conf.discard_granularity, 0)
+
+#define DEFINE_BLOCK_CHS_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_UINT32("cyls", _state, _conf.cyls, 0), \
+ DEFINE_PROP_UINT32("heads", _state, _conf.heads, 0), \
+ DEFINE_PROP_UINT32("secs", _state, _conf.secs, 0)
+
+/* Configuration helpers */
+
+void blkconf_serial(BlockConf *conf, char **serial);
+int blkconf_geometry(BlockConf *conf, int *trans,
+ unsigned cyls_max, unsigned heads_max, unsigned secs_max);
+
+/* Hard disk geometry */
+
+#define BIOS_ATA_TRANSLATION_AUTO 0
+#define BIOS_ATA_TRANSLATION_NONE 1
+#define BIOS_ATA_TRANSLATION_LBA 2
+#define BIOS_ATA_TRANSLATION_LARGE 3
+#define BIOS_ATA_TRANSLATION_RECHS 4
+
+void hd_geometry_guess(BlockDriverState *bs,
+ uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs,
+ int *ptrans);
+int hd_bios_chs_auto_trans(uint32_t cyls, uint32_t heads, uint32_t secs);
+
+#endif
diff --git a/hw/bt-l2cap.c b/hw/bt-l2cap.c
index 2ccba6071c..cb43ee7733 100644
--- a/hw/bt-l2cap.c
+++ b/hw/bt-l2cap.c
@@ -1000,7 +1000,8 @@ static void l2cap_iframe_in(struct l2cap_chan_s *ch, uint16_t cid,
/* TODO: Signal an error? */
return;
}
- return l2cap_sframe_in(ch, le16_to_cpup((void *) hdr->data));
+ l2cap_sframe_in(ch, le16_to_cpup((void *) hdr->data));
+ return;
}
switch (hdr->data[1] >> 6) { /* SAR */
@@ -1010,7 +1011,8 @@ static void l2cap_iframe_in(struct l2cap_chan_s *ch, uint16_t cid,
if (len - 4 > ch->mps)
goto len_error;
- return ch->params.sdu_in(ch->params.opaque, hdr->data + 2, len - 4);
+ ch->params.sdu_in(ch->params.opaque, hdr->data + 2, len - 4);
+ break;
case L2CAP_SAR_START:
if (ch->len_total || len < 6)
@@ -1033,7 +1035,8 @@ static void l2cap_iframe_in(struct l2cap_chan_s *ch, uint16_t cid,
goto len_error;
memcpy(ch->sdu + ch->len_cur, hdr->data + 2, len - 4);
- return ch->params.sdu_in(ch->params.opaque, ch->sdu, ch->len_total);
+ ch->params.sdu_in(ch->params.opaque, ch->sdu, ch->len_total);
+ break;
case L2CAP_SAR_CONT:
if (!ch->len_total || ch->len_cur + len - 4 >= ch->len_total)
@@ -1136,7 +1139,7 @@ static void l2cap_bframe_submit(struct bt_l2cap_conn_params_s *parms)
{
struct l2cap_chan_s *chan = (struct l2cap_chan_s *) parms;
- return l2cap_pdu_submit(chan->l2cap);
+ l2cap_pdu_submit(chan->l2cap);
}
#if 0
diff --git a/hw/cadence_gem.c b/hw/cadence_gem.c
index dbde3920d0..967f62513e 100644
--- a/hw/cadence_gem.c
+++ b/hw/cadence_gem.c
@@ -339,8 +339,8 @@ typedef struct {
uint8_t phy_loop; /* Are we in phy loopback? */
/* The current DMA descriptor pointers */
- target_phys_addr_t rx_desc_addr;
- target_phys_addr_t tx_desc_addr;
+ uint32_t rx_desc_addr;
+ uint32_t tx_desc_addr;
} GemState;
@@ -405,7 +405,7 @@ static void phy_update_link(GemState *s)
}
}
-static int gem_can_receive(VLANClientState *nc)
+static int gem_can_receive(NetClientState *nc)
{
GemState *s;
@@ -602,7 +602,7 @@ static int gem_mac_address_filter(GemState *s, const uint8_t *packet)
* gem_receive:
* Fit a packet handed to us by QEMU into the receive descriptor ring.
*/
-static ssize_t gem_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t gem_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
unsigned desc[2];
target_phys_addr_t packet_desc_addr, last_desc_addr;
@@ -1146,7 +1146,7 @@ static const MemoryRegionOps gem_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static void gem_cleanup(VLANClientState *nc)
+static void gem_cleanup(NetClientState *nc)
{
GemState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -1154,14 +1154,14 @@ static void gem_cleanup(VLANClientState *nc)
s->nic = NULL;
}
-static void gem_set_link(VLANClientState *nc)
+static void gem_set_link(NetClientState *nc)
{
DB_PRINT("\n");
phy_update_link(DO_UPCAST(NICState, nc, nc)->opaque);
}
static NetClientInfo net_gem_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = gem_can_receive,
.receive = gem_receive,
diff --git a/hw/cirrus_vga.c b/hw/cirrus_vga.c
index afedaa43d3..623dd688d9 100644
--- a/hw/cirrus_vga.c
+++ b/hw/cirrus_vga.c
@@ -43,6 +43,8 @@
//#define DEBUG_CIRRUS
//#define DEBUG_BITBLT
+#define VGA_RAM_SIZE (8192 * 1024)
+
/***************************************
*
* definitions
@@ -2891,7 +2893,8 @@ static int vga_initfn(ISADevice *dev)
ISACirrusVGAState *d = DO_UPCAST(ISACirrusVGAState, dev, dev);
VGACommonState *s = &d->cirrus_vga.vga;
- vga_common_init(s, VGA_RAM_SIZE);
+ s->vram_size_mb = VGA_RAM_SIZE >> 20;
+ vga_common_init(s);
cirrus_init_common(&d->cirrus_vga, CIRRUS_ID_CLGD5430, 0,
isa_address_space(dev));
s->ds = graphic_console_init(s->update, s->invalidate,
@@ -2933,7 +2936,8 @@ static int pci_cirrus_vga_initfn(PCIDevice *dev)
int16_t device_id = pc->device_id;
/* setup VGA */
- vga_common_init(&s->vga, VGA_RAM_SIZE);
+ s->vga.vram_size_mb = VGA_RAM_SIZE >> 20;
+ vga_common_init(&s->vga);
cirrus_init_common(s, device_id, 1, pci_address_space(dev));
s->vga.ds = graphic_console_init(s->vga.update, s->vga.invalidate,
s->vga.screen_dump, s->vga.text_update,
diff --git a/hw/dp8393x.c b/hw/dp8393x.c
index 017d0742ae..4fa6eccba4 100644
--- a/hw/dp8393x.c
+++ b/hw/dp8393x.c
@@ -673,7 +673,7 @@ static const MemoryRegionOps dp8393x_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int nic_can_receive(VLANClientState *nc)
+static int nic_can_receive(NetClientState *nc)
{
dp8393xState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -722,7 +722,7 @@ static int receive_filter(dp8393xState *s, const uint8_t * buf, int size)
return -1;
}
-static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size)
+static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
{
dp8393xState *s = DO_UPCAST(NICState, nc, nc)->opaque;
uint16_t data[10];
@@ -858,7 +858,7 @@ static void nic_reset(void *opaque)
dp8393x_update_irq(s);
}
-static void nic_cleanup(VLANClientState *nc)
+static void nic_cleanup(NetClientState *nc)
{
dp8393xState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -872,7 +872,7 @@ static void nic_cleanup(VLANClientState *nc)
}
static NetClientInfo net_dp83932_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = nic_can_receive,
.receive = nic_receive,
@@ -899,7 +899,6 @@ void dp83932_init(NICInfo *nd, target_phys_addr_t base, int it_shift,
s->regs[SONIC_SR] = 0x0004; /* only revision recognized by Linux */
s->conf.macaddr = nd->macaddr;
- s->conf.vlan = nd->vlan;
s->conf.peer = nd->netdev;
s->nic = qemu_new_nic(&net_dp83932_info, &s->conf, nd->model, nd->name, s);
diff --git a/hw/e1000.c b/hw/e1000.c
index 4573f1301e..ae8a6c5523 100644
--- a/hw/e1000.c
+++ b/hw/e1000.c
@@ -720,7 +720,7 @@ receive_filter(E1000State *s, const uint8_t *buf, int size)
}
static void
-e1000_set_link_status(VLANClientState *nc)
+e1000_set_link_status(NetClientState *nc)
{
E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
uint32_t old_status = s->mac_reg[STATUS];
@@ -754,7 +754,7 @@ static bool e1000_has_rxbufs(E1000State *s, size_t total_size)
}
static int
-e1000_can_receive(VLANClientState *nc)
+e1000_can_receive(NetClientState *nc)
{
E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -770,7 +770,7 @@ static uint64_t rx_desc_base(E1000State *s)
}
static ssize_t
-e1000_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+e1000_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
struct e1000_rx_desc desc;
@@ -1185,14 +1185,14 @@ e1000_mmio_setup(E1000State *d)
}
static void
-e1000_cleanup(VLANClientState *nc)
+e1000_cleanup(NetClientState *nc)
{
E1000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
s->nic = NULL;
}
-static int
+static void
pci_e1000_uninit(PCIDevice *dev)
{
E1000State *d = DO_UPCAST(E1000State, dev, dev);
@@ -1201,12 +1201,11 @@ pci_e1000_uninit(PCIDevice *dev)
qemu_free_timer(d->autoneg_timer);
memory_region_destroy(&d->mmio);
memory_region_destroy(&d->io);
- qemu_del_vlan_client(&d->nic->nc);
- return 0;
+ qemu_del_net_client(&d->nic->nc);
}
static NetClientInfo net_e1000_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = e1000_can_receive,
.receive = e1000_receive,
diff --git a/hw/eepro100.c b/hw/eepro100.c
index 6279ae36ec..50d117e35e 100644
--- a/hw/eepro100.c
+++ b/hw/eepro100.c
@@ -1596,10 +1596,17 @@ static void eepro100_write(void *opaque, target_phys_addr_t addr,
EEPRO100State *s = opaque;
switch (size) {
- case 1: return eepro100_write1(s, addr, data);
- case 2: return eepro100_write2(s, addr, data);
- case 4: return eepro100_write4(s, addr, data);
- default: abort();
+ case 1:
+ eepro100_write1(s, addr, data);
+ break;
+ case 2:
+ eepro100_write2(s, addr, data);
+ break;
+ case 4:
+ eepro100_write4(s, addr, data);
+ break;
+ default:
+ abort();
}
}
@@ -1609,7 +1616,7 @@ static const MemoryRegionOps eepro100_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static int nic_can_receive(VLANClientState *nc)
+static int nic_can_receive(NetClientState *nc)
{
EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque;
TRACE(RXTX, logout("%p\n", s));
@@ -1619,7 +1626,7 @@ static int nic_can_receive(VLANClientState *nc)
#endif
}
-static ssize_t nic_receive(VLANClientState *nc, const uint8_t * buf, size_t size)
+static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size)
{
/* TODO:
* - Magic packets should set bit 30 in power management driver register.
@@ -1824,14 +1831,14 @@ static const VMStateDescription vmstate_eepro100 = {
}
};
-static void nic_cleanup(VLANClientState *nc)
+static void nic_cleanup(NetClientState *nc)
{
EEPRO100State *s = DO_UPCAST(NICState, nc, nc)->opaque;
s->nic = NULL;
}
-static int pci_nic_uninit(PCIDevice *pci_dev)
+static void pci_nic_uninit(PCIDevice *pci_dev)
{
EEPRO100State *s = DO_UPCAST(EEPRO100State, dev, pci_dev);
@@ -1840,12 +1847,11 @@ static int pci_nic_uninit(PCIDevice *pci_dev)
memory_region_destroy(&s->flash_bar);
vmstate_unregister(&pci_dev->qdev, s->vmstate, s);
eeprom93xx_free(&pci_dev->qdev, s->eeprom);
- qemu_del_vlan_client(&s->nic->nc);
- return 0;
+ qemu_del_net_client(&s->nic->nc);
}
static NetClientInfo net_eepro100_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = nic_can_receive,
.receive = nic_receive,
diff --git a/hw/es1370.c b/hw/es1370.c
index 573f747362..e34234c350 100644
--- a/hw/es1370.c
+++ b/hw/es1370.c
@@ -1018,12 +1018,11 @@ static int es1370_initfn (PCIDevice *dev)
return 0;
}
-static int es1370_exitfn (PCIDevice *dev)
+static void es1370_exitfn (PCIDevice *dev)
{
ES1370State *s = DO_UPCAST (ES1370State, dev, dev);
memory_region_destroy (&s->io);
- return 0;
}
int es1370_init (PCIBus *bus)
diff --git a/hw/escc.c b/hw/escc.c
index 4d8a8e8886..e1f5e73ba2 100644
--- a/hw/escc.c
+++ b/hw/escc.c
@@ -905,7 +905,6 @@ static Property escc_properties[] = {
DEFINE_PROP_UINT32("frequency", SerialState, frequency, 0),
DEFINE_PROP_UINT32("it_shift", SerialState, it_shift, 0),
DEFINE_PROP_UINT32("disabled", SerialState, disabled, 0),
- DEFINE_PROP_UINT32("disabled", SerialState, disabled, 0),
DEFINE_PROP_UINT32("chnBtype", SerialState, chn[0].type, 0),
DEFINE_PROP_UINT32("chnAtype", SerialState, chn[1].type, 0),
DEFINE_PROP_CHR("chrB", SerialState, chn[0].chr),
diff --git a/hw/esp-pci.c b/hw/esp-pci.c
new file mode 100644
index 0000000000..170e007be9
--- /dev/null
+++ b/hw/esp-pci.c
@@ -0,0 +1,518 @@
+/*
+ * QEMU ESP/NCR53C9x emulation
+ *
+ * Copyright (c) 2005-2006 Fabrice Bellard
+ * Copyright (c) 2012 Herve Poussineau
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "pci.h"
+#include "eeprom93xx.h"
+#include "esp.h"
+#include "trace.h"
+#include "qemu-log.h"
+
+#define TYPE_AM53C974_DEVICE "am53c974"
+
+#define DMA_CMD 0x0
+#define DMA_STC 0x1
+#define DMA_SPA 0x2
+#define DMA_WBC 0x3
+#define DMA_WAC 0x4
+#define DMA_STAT 0x5
+#define DMA_SMDLA 0x6
+#define DMA_WMAC 0x7
+
+#define DMA_CMD_MASK 0x03
+#define DMA_CMD_DIAG 0x04
+#define DMA_CMD_MDL 0x10
+#define DMA_CMD_INTE_P 0x20
+#define DMA_CMD_INTE_D 0x40
+#define DMA_CMD_DIR 0x80
+
+#define DMA_STAT_PWDN 0x01
+#define DMA_STAT_ERROR 0x02
+#define DMA_STAT_ABORT 0x04
+#define DMA_STAT_DONE 0x08
+#define DMA_STAT_SCSIINT 0x10
+#define DMA_STAT_BCMBLT 0x20
+
+#define SBAC_STATUS 0x1000
+
+typedef struct PCIESPState {
+ PCIDevice dev;
+ MemoryRegion io;
+ uint32_t dma_regs[8];
+ uint32_t sbac;
+ ESPState esp;
+} PCIESPState;
+
+static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val)
+{
+ trace_esp_pci_dma_idle(val);
+ esp_dma_enable(&pci->esp, 0, 0);
+}
+
+static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val)
+{
+ trace_esp_pci_dma_blast(val);
+ qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n");
+}
+
+static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val)
+{
+ trace_esp_pci_dma_abort(val);
+ if (pci->esp.current_req) {
+ scsi_req_cancel(pci->esp.current_req);
+ }
+}
+
+static void esp_pci_handle_start(PCIESPState *pci, uint32_t val)
+{
+ trace_esp_pci_dma_start(val);
+
+ pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC];
+ pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA];
+ pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA];
+
+ pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
+ | DMA_STAT_DONE | DMA_STAT_ABORT
+ | DMA_STAT_ERROR | DMA_STAT_PWDN);
+
+ esp_dma_enable(&pci->esp, 0, 1);
+}
+
+static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val)
+{
+ trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val);
+ switch (saddr) {
+ case DMA_CMD:
+ pci->dma_regs[saddr] = val;
+ switch (val & DMA_CMD_MASK) {
+ case 0x0: /* IDLE */
+ esp_pci_handle_idle(pci, val);
+ break;
+ case 0x1: /* BLAST */
+ esp_pci_handle_blast(pci, val);
+ break;
+ case 0x2: /* ABORT */
+ esp_pci_handle_abort(pci, val);
+ break;
+ case 0x3: /* START */
+ esp_pci_handle_start(pci, val);
+ break;
+ default: /* can't happen */
+ abort();
+ }
+ break;
+ case DMA_STC:
+ case DMA_SPA:
+ case DMA_SMDLA:
+ pci->dma_regs[saddr] = val;
+ break;
+ case DMA_STAT:
+ if (!(pci->sbac & SBAC_STATUS)) {
+ /* clear some bits on write */
+ uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE;
+ pci->dma_regs[DMA_STAT] &= ~(val & mask);
+ }
+ break;
+ default:
+ trace_esp_pci_error_invalid_write_dma(val, saddr);
+ return;
+ }
+}
+
+static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr)
+{
+ uint32_t val;
+
+ val = pci->dma_regs[saddr];
+ if (saddr == DMA_STAT) {
+ if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) {
+ val |= DMA_STAT_SCSIINT;
+ }
+ if (pci->sbac & SBAC_STATUS) {
+ pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT |
+ DMA_STAT_DONE);
+ }
+ }
+
+ trace_esp_pci_dma_read(saddr, val);
+ return val;
+}
+
+static void esp_pci_io_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned int size)
+{
+ PCIESPState *pci = opaque;
+
+ if (size < 4 || addr & 3) {
+ /* need to upgrade request: we only support 4-byte accesses */
+ uint32_t current = 0, mask;
+ int shift;
+
+ if (addr < 0x40) {
+ current = pci->esp.wregs[addr >> 2];
+ } else if (addr < 0x60) {
+ current = pci->dma_regs[(addr - 0x40) >> 2];
+ } else if (addr < 0x74) {
+ current = pci->sbac;
+ }
+
+ shift = (4 - size) * 8;
+ mask = (~(uint32_t)0 << shift) >> shift;
+
+ shift = ((4 - (addr & 3)) & 3) * 8;
+ val <<= shift;
+ val |= current & ~(mask << shift);
+ addr &= ~3;
+ size = 4;
+ }
+
+ if (addr < 0x40) {
+ /* SCSI core reg */
+ esp_reg_write(&pci->esp, addr >> 2, val);
+ } else if (addr < 0x60) {
+ /* PCI DMA CCB */
+ esp_pci_dma_write(pci, (addr - 0x40) >> 2, val);
+ } else if (addr == 0x70) {
+ /* DMA SCSI Bus and control */
+ trace_esp_pci_sbac_write(pci->sbac, val);
+ pci->sbac = val;
+ } else {
+ trace_esp_pci_error_invalid_write((int)addr);
+ }
+}
+
+static uint64_t esp_pci_io_read(void *opaque, target_phys_addr_t addr,
+ unsigned int size)
+{
+ PCIESPState *pci = opaque;
+ uint32_t ret;
+
+ if (addr < 0x40) {
+ /* SCSI core reg */
+ ret = esp_reg_read(&pci->esp, addr >> 2);
+ } else if (addr < 0x60) {
+ /* PCI DMA CCB */
+ ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2);
+ } else if (addr == 0x70) {
+ /* DMA SCSI Bus and control */
+ trace_esp_pci_sbac_read(pci->sbac);
+ ret = pci->sbac;
+ } else {
+ /* Invalid region */
+ trace_esp_pci_error_invalid_read((int)addr);
+ ret = 0;
+ }
+
+ /* give only requested data */
+ ret >>= (addr & 3) * 8;
+ ret &= ~(~(uint64_t)0 << (8 * size));
+
+ return ret;
+}
+
+static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len,
+ DMADirection dir)
+{
+ dma_addr_t addr;
+ DMADirection expected_dir;
+
+ if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) {
+ expected_dir = DMA_DIRECTION_FROM_DEVICE;
+ } else {
+ expected_dir = DMA_DIRECTION_TO_DEVICE;
+ }
+
+ if (dir != expected_dir) {
+ trace_esp_pci_error_invalid_dma_direction();
+ return;
+ }
+
+ if (pci->dma_regs[DMA_STAT] & DMA_CMD_MDL) {
+ qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n");
+ }
+
+ addr = pci->dma_regs[DMA_SPA];
+ if (pci->dma_regs[DMA_WBC] < len) {
+ len = pci->dma_regs[DMA_WBC];
+ }
+
+ pci_dma_rw(&pci->dev, addr, buf, len, dir);
+
+ /* update status registers */
+ pci->dma_regs[DMA_WBC] -= len;
+ pci->dma_regs[DMA_WAC] += len;
+}
+
+static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len)
+{
+ PCIESPState *pci = opaque;
+ esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE);
+}
+
+static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len)
+{
+ PCIESPState *pci = opaque;
+ esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE);
+}
+
+static const MemoryRegionOps esp_pci_io_ops = {
+ .read = esp_pci_io_read,
+ .write = esp_pci_io_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+};
+
+static void esp_pci_hard_reset(DeviceState *dev)
+{
+ PCIESPState *pci = DO_UPCAST(PCIESPState, dev.qdev, dev);
+ esp_hard_reset(&pci->esp);
+ pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | DMA_CMD_INTE_D | DMA_CMD_INTE_P
+ | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK);
+ pci->dma_regs[DMA_WBC] &= ~0xffff;
+ pci->dma_regs[DMA_WAC] = 0xffffffff;
+ pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT
+ | DMA_STAT_DONE | DMA_STAT_ABORT
+ | DMA_STAT_ERROR);
+ pci->dma_regs[DMA_WMAC] = 0xfffffffd;
+}
+
+static const VMStateDescription vmstate_esp_pci_scsi = {
+ .name = "pciespscsi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, PCIESPState),
+ VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)),
+ VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void esp_pci_command_complete(SCSIRequest *req, uint32_t status,
+ size_t resid)
+{
+ ESPState *s = req->hba_private;
+ PCIESPState *pci = container_of(s, PCIESPState, esp);
+
+ esp_command_complete(req, status, resid);
+ pci->dma_regs[DMA_WBC] = 0;
+ pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE;
+}
+
+static const struct SCSIBusInfo esp_pci_scsi_info = {
+ .tcq = false,
+ .max_target = ESP_MAX_DEVS,
+ .max_lun = 7,
+
+ .transfer_data = esp_transfer_data,
+ .complete = esp_pci_command_complete,
+ .cancel = esp_request_cancelled,
+};
+
+static int esp_pci_scsi_init(PCIDevice *dev)
+{
+ PCIESPState *pci = DO_UPCAST(PCIESPState, dev, dev);
+ ESPState *s = &pci->esp;
+ uint8_t *pci_conf;
+
+ pci_conf = pci->dev.config;
+
+ /* Interrupt pin A */
+ pci_conf[PCI_INTERRUPT_PIN] = 0x01;
+
+ s->dma_memory_read = esp_pci_dma_memory_read;
+ s->dma_memory_write = esp_pci_dma_memory_write;
+ s->dma_opaque = pci;
+ s->chip_id = TCHI_AM53C974;
+ memory_region_init_io(&pci->io, &esp_pci_io_ops, pci, "esp-io", 0x80);
+
+ pci_register_bar(&pci->dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io);
+ s->irq = pci->dev.irq[0];
+
+ scsi_bus_new(&s->bus, &dev->qdev, &esp_pci_scsi_info);
+ if (!dev->qdev.hotplugged) {
+ return scsi_bus_legacy_handle_cmdline(&s->bus);
+ }
+ return 0;
+}
+
+static void esp_pci_scsi_uninit(PCIDevice *d)
+{
+ PCIESPState *pci = DO_UPCAST(PCIESPState, dev, d);
+
+ memory_region_destroy(&pci->io);
+}
+
+static void esp_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = esp_pci_scsi_init;
+ k->exit = esp_pci_scsi_uninit;
+ k->vendor_id = PCI_VENDOR_ID_AMD;
+ k->device_id = PCI_DEVICE_ID_AMD_SCSI;
+ k->revision = 0x10;
+ k->class_id = PCI_CLASS_STORAGE_SCSI;
+ dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter";
+ dc->reset = esp_pci_hard_reset;
+ dc->vmsd = &vmstate_esp_pci_scsi;
+}
+
+static const TypeInfo esp_pci_info = {
+ .name = TYPE_AM53C974_DEVICE,
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(PCIESPState),
+ .class_init = esp_pci_class_init,
+};
+
+typedef struct {
+ PCIESPState pci;
+ eeprom_t *eeprom;
+} DC390State;
+
+#define TYPE_DC390_DEVICE "dc390"
+#define DC390(obj) \
+ OBJECT_CHECK(DC390State, obj, TYPE_DC390_DEVICE)
+
+#define EE_ADAPT_SCSI_ID 64
+#define EE_MODE2 65
+#define EE_DELAY 66
+#define EE_TAG_CMD_NUM 67
+#define EE_ADAPT_OPTIONS 68
+#define EE_BOOT_SCSI_ID 69
+#define EE_BOOT_SCSI_LUN 70
+#define EE_CHKSUM1 126
+#define EE_CHKSUM2 127
+
+#define EE_ADAPT_OPTION_F6_F8_AT_BOOT 0x01
+#define EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02
+#define EE_ADAPT_OPTION_INT13 0x04
+#define EE_ADAPT_OPTION_SCAM_SUPPORT 0x08
+
+
+static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l)
+{
+ DC390State *pci = DC390(dev);
+ uint32_t val;
+
+ val = pci_default_read_config(dev, addr, l);
+
+ if (addr == 0x00 && l == 1) {
+ /* First byte of address space is AND-ed with EEPROM DO line */
+ if (!eeprom93xx_read(pci->eeprom)) {
+ val &= ~0xff;
+ }
+ }
+
+ return val;
+}
+
+static void dc390_write_config(PCIDevice *dev,
+ uint32_t addr, uint32_t val, int l)
+{
+ DC390State *pci = DC390(dev);
+ if (addr == 0x80) {
+ /* EEPROM write */
+ int eesk = val & 0x80 ? 1 : 0;
+ int eedi = val & 0x40 ? 1 : 0;
+ eeprom93xx_write(pci->eeprom, 1, eesk, eedi);
+ } else if (addr == 0xc0) {
+ /* EEPROM CS low */
+ eeprom93xx_write(pci->eeprom, 0, 0, 0);
+ } else {
+ pci_default_write_config(dev, addr, val, l);
+ }
+}
+
+static int dc390_scsi_init(PCIDevice *dev)
+{
+ DC390State *pci = DC390(dev);
+ uint8_t *contents;
+ uint16_t chksum = 0;
+ int i, ret;
+
+ /* init base class */
+ ret = esp_pci_scsi_init(dev);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* EEPROM */
+ pci->eeprom = eeprom93xx_new(DEVICE(dev), 64);
+
+ /* set default eeprom values */
+ contents = (uint8_t *)eeprom93xx_data(pci->eeprom);
+
+ for (i = 0; i < 16; i++) {
+ contents[i * 2] = 0x57;
+ contents[i * 2 + 1] = 0x00;
+ }
+ contents[EE_ADAPT_SCSI_ID] = 7;
+ contents[EE_MODE2] = 0x0f;
+ contents[EE_TAG_CMD_NUM] = 0x04;
+ contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT
+ | EE_ADAPT_OPTION_BOOT_FROM_CDROM
+ | EE_ADAPT_OPTION_INT13;
+
+ /* update eeprom checksum */
+ for (i = 0; i < EE_CHKSUM1; i += 2) {
+ chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8);
+ }
+ chksum = 0x1234 - chksum;
+ contents[EE_CHKSUM1] = chksum & 0xff;
+ contents[EE_CHKSUM2] = chksum >> 8;
+
+ return 0;
+}
+
+static void dc390_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = dc390_scsi_init;
+ k->config_read = dc390_read_config;
+ k->config_write = dc390_write_config;
+ dc->desc = "Tekram DC-390 SCSI adapter";
+}
+
+static const TypeInfo dc390_info = {
+ .name = "dc390",
+ .parent = TYPE_AM53C974_DEVICE,
+ .instance_size = sizeof(DC390State),
+ .class_init = dc390_class_init,
+};
+
+static void esp_pci_register_types(void)
+{
+ type_register_static(&esp_pci_info);
+ type_register_static(&dc390_info);
+}
+
+type_init(esp_pci_register_types)
diff --git a/hw/esp.c b/hw/esp.c
index 8d73e56886..52c46e615f 100644
--- a/hw/esp.c
+++ b/hw/esp.c
@@ -2,6 +2,7 @@
* QEMU ESP/NCR53C9x emulation
*
* Copyright (c) 2005-2006 Fabrice Bellard
+ * Copyright (c) 2012 Herve Poussineau
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -23,9 +24,9 @@
*/
#include "sysbus.h"
-#include "scsi.h"
#include "esp.h"
#include "trace.h"
+#include "qemu-log.h"
/*
* On Sparc32, this is the ESP (NCR53C90) part of chip STP2000 (Master I/O),
@@ -35,116 +36,6 @@
* http://www.ibiblio.org/pub/historic-linux/early-ports/Sparc/NCR/NCR53C9X.txt
*/
-#define ESP_ERROR(fmt, ...) \
- do { printf("ESP ERROR: %s: " fmt, __func__ , ## __VA_ARGS__); } while (0)
-
-#define ESP_REGS 16
-#define TI_BUFSZ 16
-
-typedef struct ESPState ESPState;
-
-struct ESPState {
- SysBusDevice busdev;
- MemoryRegion iomem;
- uint8_t rregs[ESP_REGS];
- uint8_t wregs[ESP_REGS];
- qemu_irq irq;
- uint32_t it_shift;
- int32_t ti_size;
- uint32_t ti_rptr, ti_wptr;
- uint32_t status;
- uint32_t dma;
- uint8_t ti_buf[TI_BUFSZ];
- SCSIBus bus;
- SCSIDevice *current_dev;
- SCSIRequest *current_req;
- uint8_t cmdbuf[TI_BUFSZ];
- uint32_t cmdlen;
- uint32_t do_cmd;
-
- /* The amount of data left in the current DMA transfer. */
- uint32_t dma_left;
- /* The size of the current DMA transfer. Zero if no transfer is in
- progress. */
- uint32_t dma_counter;
- int dma_enabled;
-
- uint32_t async_len;
- uint8_t *async_buf;
-
- ESPDMAMemoryReadWriteFunc dma_memory_read;
- ESPDMAMemoryReadWriteFunc dma_memory_write;
- void *dma_opaque;
- void (*dma_cb)(ESPState *s);
-};
-
-#define ESP_TCLO 0x0
-#define ESP_TCMID 0x1
-#define ESP_FIFO 0x2
-#define ESP_CMD 0x3
-#define ESP_RSTAT 0x4
-#define ESP_WBUSID 0x4
-#define ESP_RINTR 0x5
-#define ESP_WSEL 0x5
-#define ESP_RSEQ 0x6
-#define ESP_WSYNTP 0x6
-#define ESP_RFLAGS 0x7
-#define ESP_WSYNO 0x7
-#define ESP_CFG1 0x8
-#define ESP_RRES1 0x9
-#define ESP_WCCF 0x9
-#define ESP_RRES2 0xa
-#define ESP_WTEST 0xa
-#define ESP_CFG2 0xb
-#define ESP_CFG3 0xc
-#define ESP_RES3 0xd
-#define ESP_TCHI 0xe
-#define ESP_RES4 0xf
-
-#define CMD_DMA 0x80
-#define CMD_CMD 0x7f
-
-#define CMD_NOP 0x00
-#define CMD_FLUSH 0x01
-#define CMD_RESET 0x02
-#define CMD_BUSRESET 0x03
-#define CMD_TI 0x10
-#define CMD_ICCS 0x11
-#define CMD_MSGACC 0x12
-#define CMD_PAD 0x18
-#define CMD_SATN 0x1a
-#define CMD_SEL 0x41
-#define CMD_SELATN 0x42
-#define CMD_SELATNS 0x43
-#define CMD_ENSEL 0x44
-
-#define STAT_DO 0x00
-#define STAT_DI 0x01
-#define STAT_CD 0x02
-#define STAT_ST 0x03
-#define STAT_MO 0x06
-#define STAT_MI 0x07
-#define STAT_PIO_MASK 0x06
-
-#define STAT_TC 0x10
-#define STAT_PE 0x20
-#define STAT_GE 0x40
-#define STAT_INT 0x80
-
-#define BUSID_DID 0x07
-
-#define INTR_FC 0x08
-#define INTR_BS 0x10
-#define INTR_DC 0x20
-#define INTR_RST 0x80
-
-#define SEQ_0 0x0
-#define SEQ_CD 0x4
-
-#define CFG1_RESREPT 0x40
-
-#define TCHI_FAS100A 0x4
-
static void esp_raise_irq(ESPState *s)
{
if (!(s->rregs[ESP_RSTAT] & STAT_INT)) {
@@ -163,11 +54,8 @@ static void esp_lower_irq(ESPState *s)
}
}
-static void esp_dma_enable(void *opaque, int irq, int level)
+void esp_dma_enable(ESPState *s, int irq, int level)
{
- DeviceState *d = opaque;
- ESPState *s = container_of(d, ESPState, busdev.qdev);
-
if (level) {
s->dma_enabled = 1;
trace_esp_dma_enable();
@@ -181,9 +69,9 @@ static void esp_dma_enable(void *opaque, int irq, int level)
}
}
-static void esp_request_cancelled(SCSIRequest *req)
+void esp_request_cancelled(SCSIRequest *req)
{
- ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
+ ESPState *s = req->hba_private;
if (req == s->current_req) {
scsi_req_unref(s->current_req);
@@ -239,7 +127,7 @@ static void do_busid_cmd(ESPState *s, uint8_t *buf, uint8_t busid)
trace_esp_do_busid_cmd(busid);
lun = busid & 7;
current_lun = scsi_device_find(&s->bus, 0, s->current_dev->id, lun);
- s->current_req = scsi_req_new(current_lun, 0, lun, buf, NULL);
+ s->current_req = scsi_req_new(current_lun, 0, lun, buf, s);
datalen = scsi_req_enqueue(s->current_req);
s->ti_size = datalen;
if (datalen != 0) {
@@ -270,7 +158,7 @@ static void handle_satn(ESPState *s)
uint8_t buf[32];
int len;
- if (!s->dma_enabled) {
+ if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn;
return;
}
@@ -284,7 +172,7 @@ static void handle_s_without_atn(ESPState *s)
uint8_t buf[32];
int len;
- if (!s->dma_enabled) {
+ if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_s_without_atn;
return;
}
@@ -296,7 +184,7 @@ static void handle_s_without_atn(ESPState *s)
static void handle_satn_stop(ESPState *s)
{
- if (!s->dma_enabled) {
+ if (s->dma && !s->dma_enabled) {
s->dma_cb = handle_satn_stop;
return;
}
@@ -390,10 +278,10 @@ static void esp_do_dma(ESPState *s)
esp_dma_done(s);
}
-static void esp_command_complete(SCSIRequest *req, uint32_t status,
+void esp_command_complete(SCSIRequest *req, uint32_t status,
size_t resid)
{
- ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
+ ESPState *s = req->hba_private;
trace_esp_command_complete();
if (s->ti_size != 0) {
@@ -415,9 +303,9 @@ static void esp_command_complete(SCSIRequest *req, uint32_t status,
}
}
-static void esp_transfer_data(SCSIRequest *req, uint32_t len)
+void esp_transfer_data(SCSIRequest *req, uint32_t len)
{
- ESPState *s = DO_UPCAST(ESPState, busdev.qdev, req->bus->qbus.parent);
+ ESPState *s = req->hba_private;
trace_esp_transfer_data(s->dma_left, s->ti_size);
s->async_len = len;
@@ -435,6 +323,11 @@ static void handle_ti(ESPState *s)
{
uint32_t dmalen, minlen;
+ if (s->dma && !s->dma_enabled) {
+ s->dma_cb = handle_ti;
+ return;
+ }
+
dmalen = s->rregs[ESP_TCLO] | (s->rregs[ESP_TCMID] << 8);
if (dmalen==0) {
dmalen=0x10000;
@@ -462,13 +355,11 @@ static void handle_ti(ESPState *s)
}
}
-static void esp_hard_reset(DeviceState *d)
+void esp_hard_reset(ESPState *s)
{
- ESPState *s = container_of(d, ESPState, busdev.qdev);
-
memset(s->rregs, 0, ESP_REGS);
memset(s->wregs, 0, ESP_REGS);
- s->rregs[ESP_TCHI] = TCHI_FAS100A; // Indicate fas100a
+ s->rregs[ESP_TCHI] = s->chip_id;
s->ti_size = 0;
s->ti_rptr = 0;
s->ti_wptr = 0;
@@ -479,40 +370,23 @@ static void esp_hard_reset(DeviceState *d)
s->rregs[ESP_CFG1] = 7;
}
-static void esp_soft_reset(DeviceState *d)
+static void esp_soft_reset(ESPState *s)
{
- ESPState *s = container_of(d, ESPState, busdev.qdev);
-
qemu_irq_lower(s->irq);
- esp_hard_reset(d);
+ esp_hard_reset(s);
}
-static void parent_esp_reset(void *opaque, int irq, int level)
+static void parent_esp_reset(ESPState *s, int irq, int level)
{
if (level) {
- esp_soft_reset(opaque);
- }
-}
-
-static void esp_gpio_demux(void *opaque, int irq, int level)
-{
- switch (irq) {
- case 0:
- parent_esp_reset(opaque, irq, level);
- break;
- case 1:
- esp_dma_enable(opaque, irq, level);
- break;
+ esp_soft_reset(s);
}
}
-static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
- unsigned size)
+uint64_t esp_reg_read(ESPState *s, uint32_t saddr)
{
- ESPState *s = opaque;
- uint32_t saddr, old_val;
+ uint32_t old_val;
- saddr = addr >> s->it_shift;
trace_esp_mem_readb(saddr, s->rregs[saddr]);
switch (saddr) {
case ESP_FIFO:
@@ -520,7 +394,8 @@ static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
s->ti_size--;
if ((s->rregs[ESP_RSTAT] & STAT_PIO_MASK) == 0) {
/* Data out. */
- ESP_ERROR("PIO data read not implemented\n");
+ qemu_log_mask(LOG_UNIMP,
+ "esp: PIO data read not implemented\n");
s->rregs[ESP_FIFO] = 0;
} else {
s->rregs[ESP_FIFO] = s->ti_buf[s->ti_rptr++];
@@ -548,13 +423,8 @@ static uint64_t esp_mem_read(void *opaque, target_phys_addr_t addr,
return s->rregs[saddr];
}
-static void esp_mem_write(void *opaque, target_phys_addr_t addr,
- uint64_t val, unsigned size)
+void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val)
{
- ESPState *s = opaque;
- uint32_t saddr;
-
- saddr = addr >> s->it_shift;
trace_esp_mem_writeb(saddr, s->wregs[saddr], val);
switch (saddr) {
case ESP_TCLO:
@@ -565,7 +435,7 @@ static void esp_mem_write(void *opaque, target_phys_addr_t addr,
if (s->do_cmd) {
s->cmdbuf[s->cmdlen++] = val & 0xff;
} else if (s->ti_size == TI_BUFSZ - 1) {
- ESP_ERROR("fifo overrun\n");
+ trace_esp_error_fifo_overrun();
} else {
s->ti_size++;
s->ti_buf[s->ti_wptr++] = val & 0xff;
@@ -594,7 +464,7 @@ static void esp_mem_write(void *opaque, target_phys_addr_t addr,
break;
case CMD_RESET:
trace_esp_mem_writeb_cmd_reset(val);
- esp_soft_reset(&s->busdev.qdev);
+ esp_soft_reset(s);
break;
case CMD_BUSRESET:
trace_esp_mem_writeb_cmd_bus_reset(val);
@@ -628,6 +498,9 @@ static void esp_mem_write(void *opaque, target_phys_addr_t addr,
case CMD_SATN:
trace_esp_mem_writeb_cmd_satn(val);
break;
+ case CMD_RSTATN:
+ trace_esp_mem_writeb_cmd_rstatn(val);
+ break;
case CMD_SEL:
trace_esp_mem_writeb_cmd_sel(val);
handle_s_without_atn(s);
@@ -644,8 +517,13 @@ static void esp_mem_write(void *opaque, target_phys_addr_t addr,
trace_esp_mem_writeb_cmd_ensel(val);
s->rregs[ESP_RINTR] = 0;
break;
+ case CMD_DISSEL:
+ trace_esp_mem_writeb_cmd_dissel(val);
+ s->rregs[ESP_RINTR] = 0;
+ esp_raise_irq(s);
+ break;
default:
- ESP_ERROR("Unhandled ESP command (%2.2x)\n", (unsigned)val);
+ trace_esp_error_unhandled_command(val);
break;
}
break;
@@ -660,7 +538,7 @@ static void esp_mem_write(void *opaque, target_phys_addr_t addr,
s->rregs[saddr] = val;
break;
default:
- ESP_ERROR("invalid write of 0x%02x at [0x%x]\n", (unsigned)val, saddr);
+ trace_esp_error_invalid_write(val, saddr);
return;
}
s->wregs[saddr] = val;
@@ -672,14 +550,7 @@ static bool esp_mem_accepts(void *opaque, target_phys_addr_t addr,
return (size == 1) || (is_write && size == 4);
}
-static const MemoryRegionOps esp_mem_ops = {
- .read = esp_mem_read,
- .write = esp_mem_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
- .valid.accepts = esp_mem_accepts,
-};
-
-static const VMStateDescription vmstate_esp = {
+const VMStateDescription vmstate_esp = {
.name ="esp",
.version_id = 3,
.minimum_version_id = 3,
@@ -701,6 +572,40 @@ static const VMStateDescription vmstate_esp = {
}
};
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint32_t it_shift;
+ ESPState esp;
+} SysBusESPState;
+
+static void sysbus_esp_mem_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned int size)
+{
+ SysBusESPState *sysbus = opaque;
+ uint32_t saddr;
+
+ saddr = addr >> sysbus->it_shift;
+ esp_reg_write(&sysbus->esp, saddr, val);
+}
+
+static uint64_t sysbus_esp_mem_read(void *opaque, target_phys_addr_t addr,
+ unsigned int size)
+{
+ SysBusESPState *sysbus = opaque;
+ uint32_t saddr;
+
+ saddr = addr >> sysbus->it_shift;
+ return esp_reg_read(&sysbus->esp, saddr);
+}
+
+static const MemoryRegionOps sysbus_esp_mem_ops = {
+ .read = sysbus_esp_mem_read,
+ .write = sysbus_esp_mem_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid.accepts = esp_mem_accepts,
+};
+
void esp_init(target_phys_addr_t espaddr, int it_shift,
ESPDMAMemoryReadWriteFunc dma_memory_read,
ESPDMAMemoryReadWriteFunc dma_memory_write,
@@ -709,14 +614,16 @@ void esp_init(target_phys_addr_t espaddr, int it_shift,
{
DeviceState *dev;
SysBusDevice *s;
+ SysBusESPState *sysbus;
ESPState *esp;
dev = qdev_create(NULL, "esp");
- esp = DO_UPCAST(ESPState, busdev.qdev, dev);
+ sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
+ esp = &sysbus->esp;
esp->dma_memory_read = dma_memory_read;
esp->dma_memory_write = dma_memory_write;
esp->dma_opaque = dma_opaque;
- esp->it_shift = it_shift;
+ sysbus->it_shift = it_shift;
/* XXX for now until rc4030 has been changed to use DMA enable signal */
esp->dma_enabled = 1;
qdev_init_nofail(dev);
@@ -737,48 +644,78 @@ static const struct SCSIBusInfo esp_scsi_info = {
.cancel = esp_request_cancelled
};
-static int esp_init1(SysBusDevice *dev)
+static void sysbus_esp_gpio_demux(void *opaque, int irq, int level)
{
- ESPState *s = FROM_SYSBUS(ESPState, dev);
+ DeviceState *d = opaque;
+ SysBusESPState *sysbus = container_of(d, SysBusESPState, busdev.qdev);
+ ESPState *s = &sysbus->esp;
+
+ switch (irq) {
+ case 0:
+ parent_esp_reset(s, irq, level);
+ break;
+ case 1:
+ esp_dma_enable(opaque, irq, level);
+ break;
+ }
+}
+
+static int sysbus_esp_init(SysBusDevice *dev)
+{
+ SysBusESPState *sysbus = FROM_SYSBUS(SysBusESPState, dev);
+ ESPState *s = &sysbus->esp;
sysbus_init_irq(dev, &s->irq);
- assert(s->it_shift != -1);
+ assert(sysbus->it_shift != -1);
- memory_region_init_io(&s->iomem, &esp_mem_ops, s,
- "esp", ESP_REGS << s->it_shift);
- sysbus_init_mmio(dev, &s->iomem);
+ s->chip_id = TCHI_FAS100A;
+ memory_region_init_io(&sysbus->iomem, &sysbus_esp_mem_ops, sysbus,
+ "esp", ESP_REGS << sysbus->it_shift);
+ sysbus_init_mmio(dev, &sysbus->iomem);
- qdev_init_gpio_in(&dev->qdev, esp_gpio_demux, 2);
+ qdev_init_gpio_in(&dev->qdev, sysbus_esp_gpio_demux, 2);
scsi_bus_new(&s->bus, &dev->qdev, &esp_scsi_info);
return scsi_bus_legacy_handle_cmdline(&s->bus);
}
-static Property esp_properties[] = {
- {.name = NULL},
+static void sysbus_esp_hard_reset(DeviceState *dev)
+{
+ SysBusESPState *sysbus = DO_UPCAST(SysBusESPState, busdev.qdev, dev);
+ esp_hard_reset(&sysbus->esp);
+}
+
+static const VMStateDescription vmstate_sysbus_esp_scsi = {
+ .name = "sysbusespscsi",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT(esp, SysBusESPState, 0, vmstate_esp, ESPState),
+ VMSTATE_END_OF_LIST()
+ }
};
-static void esp_class_init(ObjectClass *klass, void *data)
+static void sysbus_esp_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
- k->init = esp_init1;
- dc->reset = esp_hard_reset;
- dc->vmsd = &vmstate_esp;
- dc->props = esp_properties;
+ k->init = sysbus_esp_init;
+ dc->reset = sysbus_esp_hard_reset;
+ dc->vmsd = &vmstate_sysbus_esp_scsi;
}
-static TypeInfo esp_info = {
+static const TypeInfo sysbus_esp_info = {
.name = "esp",
.parent = TYPE_SYS_BUS_DEVICE,
- .instance_size = sizeof(ESPState),
- .class_init = esp_class_init,
+ .instance_size = sizeof(SysBusESPState),
+ .class_init = sysbus_esp_class_init,
};
static void esp_register_types(void)
{
- type_register_static(&esp_info);
+ type_register_static(&sysbus_esp_info);
}
type_init(esp_register_types)
diff --git a/hw/esp.h b/hw/esp.h
index 62bfd4d129..fa855e2fdf 100644
--- a/hw/esp.h
+++ b/hw/esp.h
@@ -1,6 +1,8 @@
#ifndef QEMU_HW_ESP_H
#define QEMU_HW_ESP_H
+#include "scsi.h"
+
/* esp.c */
#define ESP_MAX_DEVS 7
typedef void (*ESPDMAMemoryReadWriteFunc)(void *opaque, uint8_t *buf, int len);
@@ -10,4 +12,121 @@ void esp_init(target_phys_addr_t espaddr, int it_shift,
void *dma_opaque, qemu_irq irq, qemu_irq *reset,
qemu_irq *dma_enable);
+#define ESP_REGS 16
+#define TI_BUFSZ 16
+
+typedef struct ESPState ESPState;
+
+struct ESPState {
+ uint8_t rregs[ESP_REGS];
+ uint8_t wregs[ESP_REGS];
+ qemu_irq irq;
+ uint8_t chip_id;
+ int32_t ti_size;
+ uint32_t ti_rptr, ti_wptr;
+ uint32_t status;
+ uint32_t dma;
+ uint8_t ti_buf[TI_BUFSZ];
+ SCSIBus bus;
+ SCSIDevice *current_dev;
+ SCSIRequest *current_req;
+ uint8_t cmdbuf[TI_BUFSZ];
+ uint32_t cmdlen;
+ uint32_t do_cmd;
+
+ /* The amount of data left in the current DMA transfer. */
+ uint32_t dma_left;
+ /* The size of the current DMA transfer. Zero if no transfer is in
+ progress. */
+ uint32_t dma_counter;
+ int dma_enabled;
+
+ uint32_t async_len;
+ uint8_t *async_buf;
+
+ ESPDMAMemoryReadWriteFunc dma_memory_read;
+ ESPDMAMemoryReadWriteFunc dma_memory_write;
+ void *dma_opaque;
+ void (*dma_cb)(ESPState *s);
+};
+
+#define ESP_TCLO 0x0
+#define ESP_TCMID 0x1
+#define ESP_FIFO 0x2
+#define ESP_CMD 0x3
+#define ESP_RSTAT 0x4
+#define ESP_WBUSID 0x4
+#define ESP_RINTR 0x5
+#define ESP_WSEL 0x5
+#define ESP_RSEQ 0x6
+#define ESP_WSYNTP 0x6
+#define ESP_RFLAGS 0x7
+#define ESP_WSYNO 0x7
+#define ESP_CFG1 0x8
+#define ESP_RRES1 0x9
+#define ESP_WCCF 0x9
+#define ESP_RRES2 0xa
+#define ESP_WTEST 0xa
+#define ESP_CFG2 0xb
+#define ESP_CFG3 0xc
+#define ESP_RES3 0xd
+#define ESP_TCHI 0xe
+#define ESP_RES4 0xf
+
+#define CMD_DMA 0x80
+#define CMD_CMD 0x7f
+
+#define CMD_NOP 0x00
+#define CMD_FLUSH 0x01
+#define CMD_RESET 0x02
+#define CMD_BUSRESET 0x03
+#define CMD_TI 0x10
+#define CMD_ICCS 0x11
+#define CMD_MSGACC 0x12
+#define CMD_PAD 0x18
+#define CMD_SATN 0x1a
+#define CMD_RSTATN 0x1b
+#define CMD_SEL 0x41
+#define CMD_SELATN 0x42
+#define CMD_SELATNS 0x43
+#define CMD_ENSEL 0x44
+#define CMD_DISSEL 0x45
+
+#define STAT_DO 0x00
+#define STAT_DI 0x01
+#define STAT_CD 0x02
+#define STAT_ST 0x03
+#define STAT_MO 0x06
+#define STAT_MI 0x07
+#define STAT_PIO_MASK 0x06
+
+#define STAT_TC 0x10
+#define STAT_PE 0x20
+#define STAT_GE 0x40
+#define STAT_INT 0x80
+
+#define BUSID_DID 0x07
+
+#define INTR_FC 0x08
+#define INTR_BS 0x10
+#define INTR_DC 0x20
+#define INTR_RST 0x80
+
+#define SEQ_0 0x0
+#define SEQ_CD 0x4
+
+#define CFG1_RESREPT 0x40
+
+#define TCHI_FAS100A 0x4
+#define TCHI_AM53C974 0x12
+
+void esp_dma_enable(ESPState *s, int irq, int level);
+void esp_request_cancelled(SCSIRequest *req);
+void esp_command_complete(SCSIRequest *req, uint32_t status, size_t resid);
+void esp_transfer_data(SCSIRequest *req, uint32_t len);
+void esp_hard_reset(ESPState *s);
+uint64_t esp_reg_read(ESPState *s, uint32_t saddr);
+void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val);
+extern const VMStateDescription vmstate_esp;
+
#endif
diff --git a/hw/etraxfs_eth.c b/hw/etraxfs_eth.c
index 16a0637a4a..b124f5bb3a 100644
--- a/hw/etraxfs_eth.c
+++ b/hw/etraxfs_eth.c
@@ -507,12 +507,12 @@ static int eth_match_groupaddr(struct fs_eth *eth, const unsigned char *sa)
return match;
}
-static int eth_can_receive(VLANClientState *nc)
+static int eth_can_receive(NetClientState *nc)
{
return 1;
}
-static ssize_t eth_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
unsigned char sa_bcast[6] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
struct fs_eth *eth = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -549,7 +549,7 @@ static int eth_tx_push(void *opaque, unsigned char *buf, int len, bool eop)
return len;
}
-static void eth_set_link(VLANClientState *nc)
+static void eth_set_link(NetClientState *nc)
{
struct fs_eth *eth = DO_UPCAST(NICState, nc, nc)->opaque;
D(printf("%s %d\n", __func__, nc->link_down));
@@ -566,7 +566,7 @@ static const MemoryRegionOps eth_ops = {
}
};
-static void eth_cleanup(VLANClientState *nc)
+static void eth_cleanup(NetClientState *nc)
{
struct fs_eth *eth = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -579,7 +579,7 @@ static void eth_cleanup(VLANClientState *nc)
}
static NetClientInfo net_etraxfs_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_receive,
.receive = eth_receive,
diff --git a/hw/exynos4210.c b/hw/exynos4210.c
index 9c20b3f22d..00d4db8871 100644
--- a/hw/exynos4210.c
+++ b/hw/exynos4210.c
@@ -33,9 +33,19 @@
/* PWM */
#define EXYNOS4210_PWM_BASE_ADDR 0x139D0000
+/* RTC */
+#define EXYNOS4210_RTC_BASE_ADDR 0x10070000
+
/* MCT */
#define EXYNOS4210_MCT_BASE_ADDR 0x10050000
+/* I2C */
+#define EXYNOS4210_I2C_SHIFT 0x00010000
+#define EXYNOS4210_I2C_BASE_ADDR 0x13860000
+/* Interrupt Group of External Interrupt Combiner for I2C */
+#define EXYNOS4210_I2C_INTG 27
+#define EXYNOS4210_HDMI_INTG 16
+
/* UART's definitions */
#define EXYNOS4210_UART0_BASE_ADDR 0x13800000
#define EXYNOS4210_UART1_BASE_ADDR 0x13810000
@@ -216,7 +226,7 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem,
/* mirror of iROM */
memory_region_init_alias(&s->irom_alias_mem, "exynos4210.irom_alias",
&s->irom_mem,
- EXYNOS4210_IROM_BASE_ADDR,
+ 0,
EXYNOS4210_IROM_SIZE);
memory_region_set_readonly(&s->irom_alias_mem, true);
memory_region_add_subregion(system_mem, EXYNOS4210_IROM_MIRROR_BASE_ADDR,
@@ -258,6 +268,11 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem,
s->irq_table[exynos4210_get_irq(22, 3)],
s->irq_table[exynos4210_get_irq(22, 4)],
NULL);
+ /* RTC */
+ sysbus_create_varargs("exynos4210.rtc", EXYNOS4210_RTC_BASE_ADDR,
+ s->irq_table[exynos4210_get_irq(23, 0)],
+ s->irq_table[exynos4210_get_irq(23, 1)],
+ NULL);
/* Multi Core Timer */
dev = qdev_create(NULL, "exynos4210.mct");
@@ -275,6 +290,26 @@ Exynos4210State *exynos4210_init(MemoryRegion *system_mem,
s->irq_table[exynos4210_get_irq(35, 3)]);
sysbus_mmio_map(busdev, 0, EXYNOS4210_MCT_BASE_ADDR);
+ /*** I2C ***/
+ for (n = 0; n < EXYNOS4210_I2C_NUMBER; n++) {
+ uint32_t addr = EXYNOS4210_I2C_BASE_ADDR + EXYNOS4210_I2C_SHIFT * n;
+ qemu_irq i2c_irq;
+
+ if (n < 8) {
+ i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_I2C_INTG, n)];
+ } else {
+ i2c_irq = s->irq_table[exynos4210_get_irq(EXYNOS4210_HDMI_INTG, 1)];
+ }
+
+ dev = qdev_create(NULL, "exynos4210.i2c");
+ qdev_init_nofail(dev);
+ busdev = sysbus_from_qdev(dev);
+ sysbus_connect_irq(busdev, 0, i2c_irq);
+ sysbus_mmio_map(busdev, 0, addr);
+ s->i2c_if[n] = (i2c_bus *)qdev_get_child_bus(dev, "i2c");
+ }
+
+
/*** UARTs ***/
exynos4210_uart_create(EXYNOS4210_UART0_BASE_ADDR,
EXYNOS4210_UART0_FIFO_SIZE, 0, NULL,
diff --git a/hw/exynos4210.h b/hw/exynos4210.h
index 9b1ae4c8b1..a43ba3aedc 100644
--- a/hw/exynos4210.h
+++ b/hw/exynos4210.h
@@ -74,6 +74,8 @@
#define EXYNOS4210_EXT_GIC_NIRQ (160-32)
#define EXYNOS4210_INT_GIC_NIRQ 64
+#define EXYNOS4210_I2C_NUMBER 9
+
typedef struct Exynos4210Irq {
qemu_irq int_combiner_irq[EXYNOS4210_MAX_INT_COMBINER_IN_IRQ];
qemu_irq ext_combiner_irq[EXYNOS4210_MAX_EXT_COMBINER_IN_IRQ];
@@ -95,6 +97,7 @@ typedef struct Exynos4210State {
MemoryRegion dram1_mem;
MemoryRegion boot_secondary;
MemoryRegion bootreg_mem;
+ i2c_bus *i2c_if[EXYNOS4210_I2C_NUMBER];
} Exynos4210State;
void exynos4210_write_secondary(ARMCPU *cpu,
diff --git a/hw/exynos4210_i2c.c b/hw/exynos4210_i2c.c
new file mode 100644
index 0000000000..3f72a5c464
--- /dev/null
+++ b/hw/exynos4210_i2c.c
@@ -0,0 +1,334 @@
+/*
+ * Exynos4210 I2C Bus Serial Interface Emulation
+ *
+ * Copyright (C) 2012 Samsung Electronics Co Ltd.
+ * Maksim Kozlov, <m.kozlov@samsung.com>
+ * Igor Mitsyanko, <i.mitsyanko@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu-timer.h"
+#include "sysbus.h"
+#include "i2c.h"
+
+#ifndef EXYNOS4_I2C_DEBUG
+#define EXYNOS4_I2C_DEBUG 0
+#endif
+
+#define TYPE_EXYNOS4_I2C "exynos4210.i2c"
+#define EXYNOS4_I2C(obj) \
+ OBJECT_CHECK(Exynos4210I2CState, (obj), TYPE_EXYNOS4_I2C)
+
+/* Exynos4210 I2C memory map */
+#define EXYNOS4_I2C_MEM_SIZE 0x14
+#define I2CCON_ADDR 0x00 /* control register */
+#define I2CSTAT_ADDR 0x04 /* control/status register */
+#define I2CADD_ADDR 0x08 /* address register */
+#define I2CDS_ADDR 0x0c /* data shift register */
+#define I2CLC_ADDR 0x10 /* line control register */
+
+#define I2CCON_ACK_GEN (1 << 7)
+#define I2CCON_INTRS_EN (1 << 5)
+#define I2CCON_INT_PEND (1 << 4)
+
+#define EXYNOS4_I2C_MODE(reg) (((reg) >> 6) & 3)
+#define I2C_IN_MASTER_MODE(reg) (((reg) >> 6) & 2)
+#define I2CMODE_MASTER_Rx 0x2
+#define I2CMODE_MASTER_Tx 0x3
+#define I2CSTAT_LAST_BIT (1 << 0)
+#define I2CSTAT_OUTPUT_EN (1 << 4)
+#define I2CSTAT_START_BUSY (1 << 5)
+
+
+#if EXYNOS4_I2C_DEBUG
+#define DPRINT(fmt, args...) \
+ do { fprintf(stderr, "QEMU I2C: "fmt, ## args); } while (0)
+
+static const char *exynos4_i2c_get_regname(unsigned offset)
+{
+ switch (offset) {
+ case I2CCON_ADDR:
+ return "I2CCON";
+ case I2CSTAT_ADDR:
+ return "I2CSTAT";
+ case I2CADD_ADDR:
+ return "I2CADD";
+ case I2CDS_ADDR:
+ return "I2CDS";
+ case I2CLC_ADDR:
+ return "I2CLC";
+ default:
+ return "[?]";
+ }
+}
+
+#else
+#define DPRINT(fmt, args...) do { } while (0)
+#endif
+
+typedef struct Exynos4210I2CState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ i2c_bus *bus;
+ qemu_irq irq;
+
+ uint8_t i2ccon;
+ uint8_t i2cstat;
+ uint8_t i2cadd;
+ uint8_t i2cds;
+ uint8_t i2clc;
+ bool scl_free;
+} Exynos4210I2CState;
+
+static inline void exynos4210_i2c_raise_interrupt(Exynos4210I2CState *s)
+{
+ if (s->i2ccon & I2CCON_INTRS_EN) {
+ s->i2ccon |= I2CCON_INT_PEND;
+ qemu_irq_raise(s->irq);
+ }
+}
+
+static void exynos4210_i2c_data_receive(void *opaque)
+{
+ Exynos4210I2CState *s = (Exynos4210I2CState *)opaque;
+ int ret;
+
+ s->i2cstat &= ~I2CSTAT_LAST_BIT;
+ s->scl_free = false;
+ ret = i2c_recv(s->bus);
+ if (ret < 0 && (s->i2ccon & I2CCON_ACK_GEN)) {
+ s->i2cstat |= I2CSTAT_LAST_BIT; /* Data is not acknowledged */
+ } else {
+ s->i2cds = ret;
+ }
+ exynos4210_i2c_raise_interrupt(s);
+}
+
+static void exynos4210_i2c_data_send(void *opaque)
+{
+ Exynos4210I2CState *s = (Exynos4210I2CState *)opaque;
+
+ s->i2cstat &= ~I2CSTAT_LAST_BIT;
+ s->scl_free = false;
+ if (i2c_send(s->bus, s->i2cds) < 0 && (s->i2ccon & I2CCON_ACK_GEN)) {
+ s->i2cstat |= I2CSTAT_LAST_BIT;
+ }
+ exynos4210_i2c_raise_interrupt(s);
+}
+
+static uint64_t exynos4210_i2c_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ Exynos4210I2CState *s = (Exynos4210I2CState *)opaque;
+ uint8_t value;
+
+ switch (offset) {
+ case I2CCON_ADDR:
+ value = s->i2ccon;
+ break;
+ case I2CSTAT_ADDR:
+ value = s->i2cstat;
+ break;
+ case I2CADD_ADDR:
+ value = s->i2cadd;
+ break;
+ case I2CDS_ADDR:
+ value = s->i2cds;
+ s->scl_free = true;
+ if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Rx &&
+ (s->i2cstat & I2CSTAT_START_BUSY) &&
+ !(s->i2ccon & I2CCON_INT_PEND)) {
+ exynos4210_i2c_data_receive(s);
+ }
+ break;
+ case I2CLC_ADDR:
+ value = s->i2clc;
+ break;
+ default:
+ value = 0;
+ DPRINT("ERROR: Bad read offset 0x%x\n", (unsigned int)offset);
+ break;
+ }
+
+ DPRINT("read %s [0x%02x] -> 0x%02x\n", exynos4_i2c_get_regname(offset),
+ (unsigned int)offset, value);
+ return value;
+}
+
+static void exynos4210_i2c_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ Exynos4210I2CState *s = (Exynos4210I2CState *)opaque;
+ uint8_t v = value & 0xff;
+
+ DPRINT("write %s [0x%02x] <- 0x%02x\n", exynos4_i2c_get_regname(offset),
+ (unsigned int)offset, v);
+
+ switch (offset) {
+ case I2CCON_ADDR:
+ s->i2ccon = (v & ~I2CCON_INT_PEND) | (s->i2ccon & I2CCON_INT_PEND);
+ if ((s->i2ccon & I2CCON_INT_PEND) && !(v & I2CCON_INT_PEND)) {
+ s->i2ccon &= ~I2CCON_INT_PEND;
+ qemu_irq_lower(s->irq);
+ if (!(s->i2ccon & I2CCON_INTRS_EN)) {
+ s->i2cstat &= ~I2CSTAT_START_BUSY;
+ }
+
+ if (s->i2cstat & I2CSTAT_START_BUSY) {
+ if (s->scl_free) {
+ if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx) {
+ exynos4210_i2c_data_send(s);
+ } else if (EXYNOS4_I2C_MODE(s->i2cstat) ==
+ I2CMODE_MASTER_Rx) {
+ exynos4210_i2c_data_receive(s);
+ }
+ } else {
+ s->i2ccon |= I2CCON_INT_PEND;
+ qemu_irq_raise(s->irq);
+ }
+ }
+ }
+ break;
+ case I2CSTAT_ADDR:
+ s->i2cstat =
+ (s->i2cstat & I2CSTAT_START_BUSY) | (v & ~I2CSTAT_START_BUSY);
+
+ if (!(s->i2cstat & I2CSTAT_OUTPUT_EN)) {
+ s->i2cstat &= ~I2CSTAT_START_BUSY;
+ s->scl_free = true;
+ qemu_irq_lower(s->irq);
+ break;
+ }
+
+ /* Nothing to do if in i2c slave mode */
+ if (!I2C_IN_MASTER_MODE(s->i2cstat)) {
+ break;
+ }
+
+ if (v & I2CSTAT_START_BUSY) {
+ s->i2cstat &= ~I2CSTAT_LAST_BIT;
+ s->i2cstat |= I2CSTAT_START_BUSY; /* Line is busy */
+ s->scl_free = false;
+
+ /* Generate start bit and send slave address */
+ if (i2c_start_transfer(s->bus, s->i2cds >> 1, s->i2cds & 0x1) &&
+ (s->i2ccon & I2CCON_ACK_GEN)) {
+ s->i2cstat |= I2CSTAT_LAST_BIT;
+ } else if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Rx) {
+ exynos4210_i2c_data_receive(s);
+ }
+ exynos4210_i2c_raise_interrupt(s);
+ } else {
+ i2c_end_transfer(s->bus);
+ if (!(s->i2ccon & I2CCON_INT_PEND)) {
+ s->i2cstat &= ~I2CSTAT_START_BUSY;
+ }
+ s->scl_free = true;
+ }
+ break;
+ case I2CADD_ADDR:
+ if ((s->i2cstat & I2CSTAT_OUTPUT_EN) == 0) {
+ s->i2cadd = v;
+ }
+ break;
+ case I2CDS_ADDR:
+ if (s->i2cstat & I2CSTAT_OUTPUT_EN) {
+ s->i2cds = v;
+ s->scl_free = true;
+ if (EXYNOS4_I2C_MODE(s->i2cstat) == I2CMODE_MASTER_Tx &&
+ (s->i2cstat & I2CSTAT_START_BUSY) &&
+ !(s->i2ccon & I2CCON_INT_PEND)) {
+ exynos4210_i2c_data_send(s);
+ }
+ }
+ break;
+ case I2CLC_ADDR:
+ s->i2clc = v;
+ break;
+ default:
+ DPRINT("ERROR: Bad write offset 0x%x\n", (unsigned int)offset);
+ break;
+ }
+}
+
+static const MemoryRegionOps exynos4210_i2c_ops = {
+ .read = exynos4210_i2c_read,
+ .write = exynos4210_i2c_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription exynos4210_i2c_vmstate = {
+ .name = TYPE_EXYNOS4_I2C,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(i2ccon, Exynos4210I2CState),
+ VMSTATE_UINT8(i2cstat, Exynos4210I2CState),
+ VMSTATE_UINT8(i2cds, Exynos4210I2CState),
+ VMSTATE_UINT8(i2cadd, Exynos4210I2CState),
+ VMSTATE_UINT8(i2clc, Exynos4210I2CState),
+ VMSTATE_BOOL(scl_free, Exynos4210I2CState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void exynos4210_i2c_reset(DeviceState *d)
+{
+ Exynos4210I2CState *s = EXYNOS4_I2C(d);
+
+ s->i2ccon = 0x00;
+ s->i2cstat = 0x00;
+ s->i2cds = 0xFF;
+ s->i2clc = 0x00;
+ s->i2cadd = 0xFF;
+ s->scl_free = true;
+}
+
+static int exynos4210_i2c_realize(SysBusDevice *dev)
+{
+ Exynos4210I2CState *s = EXYNOS4_I2C(dev);
+
+ memory_region_init_io(&s->iomem, &exynos4210_i2c_ops, s, TYPE_EXYNOS4_I2C,
+ EXYNOS4_I2C_MEM_SIZE);
+ sysbus_init_mmio(dev, &s->iomem);
+ sysbus_init_irq(dev, &s->irq);
+ s->bus = i2c_init_bus(&dev->qdev, "i2c");
+ return 0;
+}
+
+static void exynos4210_i2c_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sbdc = SYS_BUS_DEVICE_CLASS(klass);
+
+ dc->vmsd = &exynos4210_i2c_vmstate;
+ dc->reset = exynos4210_i2c_reset;
+ sbdc->init = exynos4210_i2c_realize;
+}
+
+static const TypeInfo exynos4210_i2c_type_info = {
+ .name = TYPE_EXYNOS4_I2C,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210I2CState),
+ .class_init = exynos4210_i2c_class_init,
+};
+
+static void exynos4210_i2c_register_types(void)
+{
+ type_register_static(&exynos4210_i2c_type_info);
+}
+
+type_init(exynos4210_i2c_register_types)
diff --git a/hw/exynos4210_mct.c b/hw/exynos4210_mct.c
index 7474fcf802..7a22b1f900 100644
--- a/hw/exynos4210_mct.c
+++ b/hw/exynos4210_mct.c
@@ -376,10 +376,6 @@ static uint64_t exynos4210_gfrc_get_count(Exynos4210MCTGT *s)
{
uint64_t count = 0;
count = ptimer_get_count(s->ptimer_frc);
- if (!count) {
- /* Timer event was generated and s->reg.cnt holds adequate value */
- return s->reg.cnt;
- }
count = s->count - count;
return s->reg.cnt + count;
}
diff --git a/hw/exynos4210_pwm.c b/hw/exynos4210_pwm.c
index 6243e59c48..0c228280a9 100644
--- a/hw/exynos4210_pwm.c
+++ b/hw/exynos4210_pwm.c
@@ -200,7 +200,7 @@ static void exynos4210_pwm_tick(void *opaque)
ptimer_run(p->timer[id].ptimer, 1);
} else {
/* stop timer, set status to STOP, see Basic Timer Operation */
- p->reg_tcon = ~TCON_TIMER_START(id);
+ p->reg_tcon &= ~TCON_TIMER_START(id);
ptimer_stop(p->timer[id].ptimer);
}
}
diff --git a/hw/exynos4210_rtc.c b/hw/exynos4210_rtc.c
new file mode 100644
index 0000000000..42a4ddc327
--- /dev/null
+++ b/hw/exynos4210_rtc.c
@@ -0,0 +1,592 @@
+/*
+ * Samsung exynos4210 Real Time Clock
+ *
+ * Copyright (c) 2012 Samsung Electronics Co., Ltd.
+ * Ogurtsov Oleg <o.ogurtsov@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+/* Description:
+ * Register RTCCON:
+ * CLKSEL Bit[1] not used
+ * CLKOUTEN Bit[9] not used
+ */
+
+#include "sysbus.h"
+#include "qemu-timer.h"
+#include "qemu-common.h"
+#include "ptimer.h"
+
+#include "hw.h"
+#include "qemu-timer.h"
+#include "sysemu.h"
+
+#include "exynos4210.h"
+
+#define DEBUG_RTC 0
+
+#if DEBUG_RTC
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stdout, "RTC: [%24s:%5d] " fmt, __func__, __LINE__, \
+ ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) do {} while (0)
+#endif
+
+#define EXYNOS4210_RTC_REG_MEM_SIZE 0x0100
+
+#define INTP 0x0030
+#define RTCCON 0x0040
+#define TICCNT 0x0044
+#define RTCALM 0x0050
+#define ALMSEC 0x0054
+#define ALMMIN 0x0058
+#define ALMHOUR 0x005C
+#define ALMDAY 0x0060
+#define ALMMON 0x0064
+#define ALMYEAR 0x0068
+#define BCDSEC 0x0070
+#define BCDMIN 0x0074
+#define BCDHOUR 0x0078
+#define BCDDAY 0x007C
+#define BCDDAYWEEK 0x0080
+#define BCDMON 0x0084
+#define BCDYEAR 0x0088
+#define CURTICNT 0x0090
+
+#define TICK_TIMER_ENABLE 0x0100
+#define TICNT_THRESHHOLD 2
+
+
+#define RTC_ENABLE 0x0001
+
+#define INTP_TICK_ENABLE 0x0001
+#define INTP_ALM_ENABLE 0x0002
+
+#define ALARM_INT_ENABLE 0x0040
+
+#define RTC_BASE_FREQ 32768
+
+typedef struct Exynos4210RTCState {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ /* registers */
+ uint32_t reg_intp;
+ uint32_t reg_rtccon;
+ uint32_t reg_ticcnt;
+ uint32_t reg_rtcalm;
+ uint32_t reg_almsec;
+ uint32_t reg_almmin;
+ uint32_t reg_almhour;
+ uint32_t reg_almday;
+ uint32_t reg_almmon;
+ uint32_t reg_almyear;
+ uint32_t reg_curticcnt;
+
+ ptimer_state *ptimer; /* tick timer */
+ ptimer_state *ptimer_1Hz; /* clock timer */
+ uint32_t freq;
+
+ qemu_irq tick_irq; /* Time Tick Generator irq */
+ qemu_irq alm_irq; /* alarm irq */
+
+ struct tm current_tm; /* current time */
+} Exynos4210RTCState;
+
+#define TICCKSEL(value) ((value & (0x0F << 4)) >> 4)
+
+/*** VMState ***/
+static const VMStateDescription vmstate_exynos4210_rtc_state = {
+ .name = "exynos4210.rtc",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(reg_intp, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_rtccon, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_ticcnt, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_rtcalm, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almsec, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almmin, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almhour, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almday, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almmon, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_almyear, Exynos4210RTCState),
+ VMSTATE_UINT32(reg_curticcnt, Exynos4210RTCState),
+ VMSTATE_PTIMER(ptimer, Exynos4210RTCState),
+ VMSTATE_PTIMER(ptimer_1Hz, Exynos4210RTCState),
+ VMSTATE_UINT32(freq, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_sec, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_min, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_hour, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_wday, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_mday, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_mon, Exynos4210RTCState),
+ VMSTATE_INT32(current_tm.tm_year, Exynos4210RTCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#define BCD3DIGITS(x) \
+ ((uint32_t)to_bcd((uint8_t)(x % 100)) + \
+ ((uint32_t)to_bcd((uint8_t)((x % 1000) / 100)) << 8))
+
+static void check_alarm_raise(Exynos4210RTCState *s)
+{
+ unsigned int alarm_raise = 0;
+ struct tm stm = s->current_tm;
+
+ if ((s->reg_rtcalm & 0x01) &&
+ (to_bcd((uint8_t)stm.tm_sec) == (uint8_t)s->reg_almsec)) {
+ alarm_raise = 1;
+ }
+ if ((s->reg_rtcalm & 0x02) &&
+ (to_bcd((uint8_t)stm.tm_min) == (uint8_t)s->reg_almmin)) {
+ alarm_raise = 1;
+ }
+ if ((s->reg_rtcalm & 0x04) &&
+ (to_bcd((uint8_t)stm.tm_hour) == (uint8_t)s->reg_almhour)) {
+ alarm_raise = 1;
+ }
+ if ((s->reg_rtcalm & 0x08) &&
+ (to_bcd((uint8_t)stm.tm_mday) == (uint8_t)s->reg_almday)) {
+ alarm_raise = 1;
+ }
+ if ((s->reg_rtcalm & 0x10) &&
+ (to_bcd((uint8_t)stm.tm_mon) == (uint8_t)s->reg_almmon)) {
+ alarm_raise = 1;
+ }
+ if ((s->reg_rtcalm & 0x20) &&
+ (BCD3DIGITS(stm.tm_year) == s->reg_almyear)) {
+ alarm_raise = 1;
+ }
+
+ if (alarm_raise) {
+ DPRINTF("ALARM IRQ\n");
+ /* set irq status */
+ s->reg_intp |= INTP_ALM_ENABLE;
+ qemu_irq_raise(s->alm_irq);
+ }
+}
+
+/*
+ * RTC update frequency
+ * Parameters:
+ * reg_value - current RTCCON register or its new value
+ */
+static void exynos4210_rtc_update_freq(Exynos4210RTCState *s,
+ uint32_t reg_value)
+{
+ uint32_t freq;
+
+ freq = s->freq;
+ /* set frequency for time generator */
+ s->freq = RTC_BASE_FREQ / (1 << TICCKSEL(reg_value));
+
+ if (freq != s->freq) {
+ ptimer_set_freq(s->ptimer, s->freq);
+ DPRINTF("freq=%dHz\n", s->freq);
+ }
+}
+
+/* month is between 0 and 11. */
+static int get_days_in_month(int month, int year)
+{
+ static const int days_tab[12] = {
+ 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
+ };
+ int d;
+ if ((unsigned)month >= 12) {
+ return 31;
+ }
+ d = days_tab[month];
+ if (month == 1) {
+ if ((year % 4) == 0 && ((year % 100) != 0 || (year % 400) == 0)) {
+ d++;
+ }
+ }
+ return d;
+}
+
+/* update 'tm' to the next second */
+static void rtc_next_second(struct tm *tm)
+{
+ int days_in_month;
+
+ tm->tm_sec++;
+ if ((unsigned)tm->tm_sec >= 60) {
+ tm->tm_sec = 0;
+ tm->tm_min++;
+ if ((unsigned)tm->tm_min >= 60) {
+ tm->tm_min = 0;
+ tm->tm_hour++;
+ if ((unsigned)tm->tm_hour >= 24) {
+ tm->tm_hour = 0;
+ /* next day */
+ tm->tm_wday++;
+ if ((unsigned)tm->tm_wday >= 7) {
+ tm->tm_wday = 0;
+ }
+ days_in_month = get_days_in_month(tm->tm_mon,
+ tm->tm_year + 1900);
+ tm->tm_mday++;
+ if (tm->tm_mday < 1) {
+ tm->tm_mday = 1;
+ } else if (tm->tm_mday > days_in_month) {
+ tm->tm_mday = 1;
+ tm->tm_mon++;
+ if (tm->tm_mon >= 12) {
+ tm->tm_mon = 0;
+ tm->tm_year++;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * tick handler
+ */
+static void exynos4210_rtc_tick(void *opaque)
+{
+ Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+ DPRINTF("TICK IRQ\n");
+ /* set irq status */
+ s->reg_intp |= INTP_TICK_ENABLE;
+ /* raise IRQ */
+ qemu_irq_raise(s->tick_irq);
+
+ /* restart timer */
+ ptimer_set_count(s->ptimer, s->reg_ticcnt);
+ ptimer_run(s->ptimer, 1);
+}
+
+/*
+ * 1Hz clock handler
+ */
+static void exynos4210_rtc_1Hz_tick(void *opaque)
+{
+ Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+ rtc_next_second(&s->current_tm);
+ /* DPRINTF("1Hz tick\n"); */
+
+ /* raise IRQ */
+ if (s->reg_rtcalm & ALARM_INT_ENABLE) {
+ check_alarm_raise(s);
+ }
+
+ ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ);
+ ptimer_run(s->ptimer_1Hz, 1);
+}
+
+/*
+ * RTC Read
+ */
+static uint64_t exynos4210_rtc_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ uint32_t value = 0;
+ Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+ switch (offset) {
+ case INTP:
+ value = s->reg_intp;
+ break;
+ case RTCCON:
+ value = s->reg_rtccon;
+ break;
+ case TICCNT:
+ value = s->reg_ticcnt;
+ break;
+ case RTCALM:
+ value = s->reg_rtcalm;
+ break;
+ case ALMSEC:
+ value = s->reg_almsec;
+ break;
+ case ALMMIN:
+ value = s->reg_almmin;
+ break;
+ case ALMHOUR:
+ value = s->reg_almhour;
+ break;
+ case ALMDAY:
+ value = s->reg_almday;
+ break;
+ case ALMMON:
+ value = s->reg_almmon;
+ break;
+ case ALMYEAR:
+ value = s->reg_almyear;
+ break;
+
+ case BCDSEC:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_sec);
+ break;
+ case BCDMIN:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_min);
+ break;
+ case BCDHOUR:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_hour);
+ break;
+ case BCDDAYWEEK:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_wday);
+ break;
+ case BCDDAY:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mday);
+ break;
+ case BCDMON:
+ value = (uint32_t)to_bcd((uint8_t)s->current_tm.tm_mon + 1);
+ break;
+ case BCDYEAR:
+ value = BCD3DIGITS(s->current_tm.tm_year);
+ break;
+
+ case CURTICNT:
+ s->reg_curticcnt = ptimer_get_count(s->ptimer);
+ value = s->reg_curticcnt;
+ break;
+
+ default:
+ fprintf(stderr,
+ "[exynos4210.rtc: bad read offset " TARGET_FMT_plx "]\n",
+ offset);
+ break;
+ }
+ return value;
+}
+
+/*
+ * RTC Write
+ */
+static void exynos4210_rtc_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ Exynos4210RTCState *s = (Exynos4210RTCState *)opaque;
+
+ switch (offset) {
+ case INTP:
+ if (value & INTP_ALM_ENABLE) {
+ qemu_irq_lower(s->alm_irq);
+ s->reg_intp &= (~INTP_ALM_ENABLE);
+ }
+ if (value & INTP_TICK_ENABLE) {
+ qemu_irq_lower(s->tick_irq);
+ s->reg_intp &= (~INTP_TICK_ENABLE);
+ }
+ break;
+ case RTCCON:
+ if (value & RTC_ENABLE) {
+ exynos4210_rtc_update_freq(s, value);
+ }
+ if ((value & RTC_ENABLE) > (s->reg_rtccon & RTC_ENABLE)) {
+ /* clock timer */
+ ptimer_set_count(s->ptimer_1Hz, RTC_BASE_FREQ);
+ ptimer_run(s->ptimer_1Hz, 1);
+ DPRINTF("run clock timer\n");
+ }
+ if ((value & RTC_ENABLE) < (s->reg_rtccon & RTC_ENABLE)) {
+ /* tick timer */
+ ptimer_stop(s->ptimer);
+ /* clock timer */
+ ptimer_stop(s->ptimer_1Hz);
+ DPRINTF("stop all timers\n");
+ }
+ if (value & RTC_ENABLE) {
+ if ((value & TICK_TIMER_ENABLE) >
+ (s->reg_rtccon & TICK_TIMER_ENABLE) &&
+ (s->reg_ticcnt)) {
+ ptimer_set_count(s->ptimer, s->reg_ticcnt);
+ ptimer_run(s->ptimer, 1);
+ DPRINTF("run tick timer\n");
+ }
+ if ((value & TICK_TIMER_ENABLE) <
+ (s->reg_rtccon & TICK_TIMER_ENABLE)) {
+ ptimer_stop(s->ptimer);
+ }
+ }
+ s->reg_rtccon = value;
+ break;
+ case TICCNT:
+ if (value > TICNT_THRESHHOLD) {
+ s->reg_ticcnt = value;
+ } else {
+ fprintf(stderr,
+ "[exynos4210.rtc: bad TICNT value %u ]\n",
+ (uint32_t)value);
+ }
+ break;
+
+ case RTCALM:
+ s->reg_rtcalm = value;
+ break;
+ case ALMSEC:
+ s->reg_almsec = (value & 0x7f);
+ break;
+ case ALMMIN:
+ s->reg_almmin = (value & 0x7f);
+ break;
+ case ALMHOUR:
+ s->reg_almhour = (value & 0x3f);
+ break;
+ case ALMDAY:
+ s->reg_almday = (value & 0x3f);
+ break;
+ case ALMMON:
+ s->reg_almmon = (value & 0x1f);
+ break;
+ case ALMYEAR:
+ s->reg_almyear = (value & 0x0fff);
+ break;
+
+ case BCDSEC:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_sec = (int)from_bcd((uint8_t)value);
+ }
+ break;
+ case BCDMIN:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_min = (int)from_bcd((uint8_t)value);
+ }
+ break;
+ case BCDHOUR:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_hour = (int)from_bcd((uint8_t)value);
+ }
+ break;
+ case BCDDAYWEEK:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_wday = (int)from_bcd((uint8_t)value);
+ }
+ break;
+ case BCDDAY:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_mday = (int)from_bcd((uint8_t)value);
+ }
+ break;
+ case BCDMON:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ s->current_tm.tm_mon = (int)from_bcd((uint8_t)value) - 1;
+ }
+ break;
+ case BCDYEAR:
+ if (s->reg_rtccon & RTC_ENABLE) {
+ /* 3 digits */
+ s->current_tm.tm_year = (int)from_bcd((uint8_t)value) +
+ (int)from_bcd((uint8_t)((value >> 8) & 0x0f)) * 100;
+ }
+ break;
+
+ default:
+ fprintf(stderr,
+ "[exynos4210.rtc: bad write offset " TARGET_FMT_plx "]\n",
+ offset);
+ break;
+
+ }
+}
+
+/*
+ * Set default values to timer fields and registers
+ */
+static void exynos4210_rtc_reset(DeviceState *d)
+{
+ Exynos4210RTCState *s = (Exynos4210RTCState *)d;
+
+ qemu_get_timedate(&s->current_tm, 0);
+
+ DPRINTF("Get time from host: %d-%d-%d %2d:%02d:%02d\n",
+ s->current_tm.tm_year, s->current_tm.tm_mon, s->current_tm.tm_mday,
+ s->current_tm.tm_hour, s->current_tm.tm_min, s->current_tm.tm_sec);
+
+ s->reg_intp = 0;
+ s->reg_rtccon = 0;
+ s->reg_ticcnt = 0;
+ s->reg_rtcalm = 0;
+ s->reg_almsec = 0;
+ s->reg_almmin = 0;
+ s->reg_almhour = 0;
+ s->reg_almday = 0;
+ s->reg_almmon = 0;
+ s->reg_almyear = 0;
+
+ s->reg_curticcnt = 0;
+
+ exynos4210_rtc_update_freq(s, s->reg_rtccon);
+ ptimer_stop(s->ptimer);
+ ptimer_stop(s->ptimer_1Hz);
+}
+
+static const MemoryRegionOps exynos4210_rtc_ops = {
+ .read = exynos4210_rtc_read,
+ .write = exynos4210_rtc_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+/*
+ * RTC timer initialization
+ */
+static int exynos4210_rtc_init(SysBusDevice *dev)
+{
+ Exynos4210RTCState *s = FROM_SYSBUS(Exynos4210RTCState, dev);
+ QEMUBH *bh;
+
+ bh = qemu_bh_new(exynos4210_rtc_tick, s);
+ s->ptimer = ptimer_init(bh);
+ ptimer_set_freq(s->ptimer, RTC_BASE_FREQ);
+ exynos4210_rtc_update_freq(s, 0);
+
+ bh = qemu_bh_new(exynos4210_rtc_1Hz_tick, s);
+ s->ptimer_1Hz = ptimer_init(bh);
+ ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ);
+
+ sysbus_init_irq(dev, &s->alm_irq);
+ sysbus_init_irq(dev, &s->tick_irq);
+
+ memory_region_init_io(&s->iomem, &exynos4210_rtc_ops, s, "exynos4210-rtc",
+ EXYNOS4210_RTC_REG_MEM_SIZE);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ return 0;
+}
+
+static void exynos4210_rtc_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = exynos4210_rtc_init;
+ dc->reset = exynos4210_rtc_reset;
+ dc->vmsd = &vmstate_exynos4210_rtc_state;
+}
+
+static const TypeInfo exynos4210_rtc_info = {
+ .name = "exynos4210.rtc",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(Exynos4210RTCState),
+ .class_init = exynos4210_rtc_class_init,
+};
+
+static void exynos4210_rtc_register_types(void)
+{
+ type_register_static(&exynos4210_rtc_info);
+}
+
+type_init(exynos4210_rtc_register_types)
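
The BCDSEC..BCDYEAR handlers above exchange time fields with the guest in BCD; the year is special-cased as three BCD digits, with the hundreds digit in bits 8-11 (see the BCDYEAR write decode). Below is a standalone sketch of that round trip, not part of the patch: the local to_bcd()/from_bcd() helpers merely stand in for QEMU's, and the encode step simply mirrors the decode, since the real BCD3DIGITS macro is defined earlier in the file, outside the lines shown here.

    /* Standalone illustration of the 3-digit BCD year encoding. */
    #include <stdio.h>
    #include <stdint.h>

    static uint8_t to_bcd(uint8_t x)   { return ((x / 10) << 4) | (x % 10); }
    static uint8_t from_bcd(uint8_t x) { return ((x >> 4) * 10) + (x & 0x0f); }

    int main(void)
    {
        int tm_year = 112;              /* struct tm counts from 1900 -> 2012 */
        /* encode: two BCD digits in the low byte, hundreds digit in bits 8-11 */
        uint32_t reg = to_bcd(tm_year % 100) | ((uint32_t)(tm_year / 100) << 8);
        /* decode, as in the BCDYEAR write handler above */
        int decoded = from_bcd(reg & 0xff) + from_bcd((reg >> 8) & 0x0f) * 100;
        printf("reg=0x%03x decoded=%d\n", (unsigned)reg, decoded);
        return 0;
    }

With tm_year = 112 the register value is 0x112, which decodes back to 112.
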
diff --git a/hw/exynos4_boards.c b/hw/exynos4_boards.c
index e5c2a5f388..4bb0a60cb1 100644
--- a/hw/exynos4_boards.c
+++ b/hw/exynos4_boards.c
@@ -81,7 +81,7 @@ static void lan9215_init(uint32_t base, qemu_irq irq)
SysBusDevice *s;
/* This should be a 9215 but the 9118 is close enough */
- if (nd_table[0].vlan) {
+ if (nd_table[0].used) {
qemu_check_nic_model(&nd_table[0], "lan9118");
dev = qdev_create(NULL, "lan9118");
qdev_set_nic_properties(dev, &nd_table[0]);
diff --git a/hw/fdc.c b/hw/fdc.c
index 5b3224b39b..08830c1ba2 100644
--- a/hw/fdc.c
+++ b/hw/fdc.c
@@ -52,6 +52,113 @@
/********************************************************/
/* Floppy drive emulation */
+typedef enum FDriveRate {
+ FDRIVE_RATE_500K = 0x00, /* 500 Kbps */
+ FDRIVE_RATE_300K = 0x01, /* 300 Kbps */
+ FDRIVE_RATE_250K = 0x02, /* 250 Kbps */
+ FDRIVE_RATE_1M = 0x03, /* 1 Mbps */
+} FDriveRate;
+
+typedef struct FDFormat {
+ FDriveType drive;
+ uint8_t last_sect;
+ uint8_t max_track;
+ uint8_t max_head;
+ FDriveRate rate;
+} FDFormat;
+
+static const FDFormat fd_formats[] = {
+ /* First entry is default format */
+ /* 1.44 MB 3"1/2 floppy disks */
+ { FDRIVE_DRV_144, 18, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 20, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 82, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 21, 83, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 22, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 23, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_144, 24, 80, 1, FDRIVE_RATE_500K, },
+ /* 2.88 MB 3"1/2 floppy disks */
+ { FDRIVE_DRV_288, 36, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 39, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 40, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 44, 80, 1, FDRIVE_RATE_1M, },
+ { FDRIVE_DRV_288, 48, 80, 1, FDRIVE_RATE_1M, },
+ /* 720 kB 3"1/2 floppy disks */
+ { FDRIVE_DRV_144, 9, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 82, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 10, 83, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 13, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_144, 14, 80, 1, FDRIVE_RATE_250K, },
+ /* 1.2 MB 5"1/4 floppy disks */
+ { FDRIVE_DRV_120, 15, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 80, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 82, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 18, 83, 1, FDRIVE_RATE_500K, },
+ { FDRIVE_DRV_120, 20, 80, 1, FDRIVE_RATE_500K, },
+ /* 720 kB 5"1/4 floppy disks */
+ { FDRIVE_DRV_120, 9, 80, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_120, 11, 80, 1, FDRIVE_RATE_250K, },
+ /* 360 kB 5"1/4 floppy disks */
+ { FDRIVE_DRV_120, 9, 40, 1, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 9, 40, 0, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 10, 41, 1, FDRIVE_RATE_300K, },
+ { FDRIVE_DRV_120, 10, 42, 1, FDRIVE_RATE_300K, },
+ /* 320 kB 5"1/4 floppy disks */
+ { FDRIVE_DRV_120, 8, 40, 1, FDRIVE_RATE_250K, },
+ { FDRIVE_DRV_120, 8, 40, 0, FDRIVE_RATE_250K, },
+ /* 360 kB must match 5"1/4 better than 3"1/2... */
+ { FDRIVE_DRV_144, 9, 80, 0, FDRIVE_RATE_250K, },
+ /* end */
+ { FDRIVE_DRV_NONE, -1, -1, 0, 0, },
+};
+
+static void pick_geometry(BlockDriverState *bs, int *nb_heads,
+ int *max_track, int *last_sect,
+ FDriveType drive_in, FDriveType *drive,
+ FDriveRate *rate)
+{
+ const FDFormat *parse;
+ uint64_t nb_sectors, size;
+ int i, first_match, match;
+
+ bdrv_get_geometry(bs, &nb_sectors);
+ match = -1;
+ first_match = -1;
+ for (i = 0; ; i++) {
+ parse = &fd_formats[i];
+ if (parse->drive == FDRIVE_DRV_NONE) {
+ break;
+ }
+ if (drive_in == parse->drive ||
+ drive_in == FDRIVE_DRV_NONE) {
+ size = (parse->max_head + 1) * parse->max_track *
+ parse->last_sect;
+ if (nb_sectors == size) {
+ match = i;
+ break;
+ }
+ if (first_match == -1) {
+ first_match = i;
+ }
+ }
+ }
+ if (match == -1) {
+ if (first_match == -1) {
+ match = 1;
+ } else {
+ match = first_match;
+ }
+ parse = &fd_formats[match];
+ }
+ *nb_heads = parse->max_head + 1;
+ *max_track = parse->max_track;
+ *last_sect = parse->last_sect;
+ *drive = parse->drive;
+ *rate = parse->rate;
+}
+
#define GET_CUR_DRV(fdctrl) ((fdctrl)->cur_drv)
#define SET_CUR_DRV(fdctrl, drive) ((fdctrl)->cur_drv = (drive))
@@ -153,8 +260,12 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect,
}
#endif
drv->head = head;
- if (drv->track != track)
+ if (drv->track != track) {
+ if (drv->bs != NULL && bdrv_is_inserted(drv->bs)) {
+ drv->media_changed = 0;
+ }
ret = 1;
+ }
drv->track = track;
drv->sect = sect;
}
@@ -170,9 +281,7 @@ static int fd_seek(FDrive *drv, uint8_t head, uint8_t track, uint8_t sect,
static void fd_recalibrate(FDrive *drv)
{
FLOPPY_DPRINTF("recalibrate\n");
- drv->head = 0;
- drv->track = 0;
- drv->sect = 1;
+ fd_seek(drv, 0, 0, 1, 1);
}
/* Revalidate a disk drive after a disk change */
@@ -185,13 +294,10 @@ static void fd_revalidate(FDrive *drv)
FLOPPY_DPRINTF("revalidate\n");
if (drv->bs != NULL) {
ro = bdrv_is_read_only(drv->bs);
- bdrv_get_floppy_geometry_hint(drv->bs, &nb_heads, &max_track,
- &last_sect, drv->drive, &drive, &rate);
+ pick_geometry(drv->bs, &nb_heads, &max_track,
+ &last_sect, drv->drive, &drive, &rate);
if (!bdrv_is_inserted(drv->bs)) {
FLOPPY_DPRINTF("No disk in drive\n");
- } else if (nb_heads != 0 && max_track != 0 && last_sect != 0) {
- FLOPPY_DPRINTF("User defined disk (%d %d %d)\n",
- nb_heads - 1, max_track, last_sect);
} else {
FLOPPY_DPRINTF("Floppy disk (%d h %d t %d s) %s\n", nb_heads,
max_track, last_sect, ro ? "ro" : "rw");
@@ -305,6 +411,9 @@ enum {
};
enum {
+ FD_SR0_DS0 = 0x01,
+ FD_SR0_DS1 = 0x02,
+ FD_SR0_HEAD = 0x04,
FD_SR0_EQPMT = 0x10,
FD_SR0_SEEK = 0x20,
FD_SR0_ABNTERM = 0x40,
@@ -711,14 +820,6 @@ static void fdctrl_raise_irq(FDCtrl *fdctrl, uint8_t status0)
qemu_set_irq(fdctrl->irq, 1);
fdctrl->sra |= FD_SRA_INTPEND;
}
- if (status0 & FD_SR0_SEEK) {
- FDrive *cur_drv;
- /* A seek clears the disk change line (if a disk is inserted) */
- cur_drv = get_cur_drv(fdctrl);
- if (cur_drv->bs != NULL && bdrv_is_inserted(cur_drv->bs)) {
- cur_drv->media_changed = 0;
- }
- }
fdctrl->reset_sensei = 0;
fdctrl->status0 = status0;
@@ -978,14 +1079,15 @@ static void fdctrl_reset_fifo(FDCtrl *fdctrl)
}
/* Set FIFO status for the host to read */
-static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len, int do_irq)
+static void fdctrl_set_fifo(FDCtrl *fdctrl, int fifo_len, uint8_t status0)
{
fdctrl->data_dir = FD_DIR_READ;
fdctrl->data_len = fifo_len;
fdctrl->data_pos = 0;
fdctrl->msr |= FD_MSR_CMDBUSY | FD_MSR_RQM | FD_MSR_DIO;
- if (do_irq)
- fdctrl_raise_irq(fdctrl, 0x00);
+ if (status0) {
+ fdctrl_raise_irq(fdctrl, status0);
+ }
}
/* Set an error: unimplemented/unknown command */
@@ -997,7 +1099,10 @@ static void fdctrl_unimplemented(FDCtrl *fdctrl, int direction)
fdctrl_set_fifo(fdctrl, 1, 0);
}
-/* Seek to next sector */
+/* Seek to next sector
+ * returns 0 when the end of the track is reached (for DBL_SIDES on head 1),
+ * otherwise returns 1
+ */
static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv)
{
FLOPPY_DPRINTF("seek to next sector (%d %02x %02x => %d)\n",
@@ -1005,30 +1110,39 @@ static int fdctrl_seek_to_next_sect(FDCtrl *fdctrl, FDrive *cur_drv)
fd_sector(cur_drv));
/* XXX: cur_drv->sect >= cur_drv->last_sect should in fact
be an error */
- if (cur_drv->sect >= cur_drv->last_sect ||
- cur_drv->sect == fdctrl->eot) {
- cur_drv->sect = 1;
+ uint8_t new_head = cur_drv->head;
+ uint8_t new_track = cur_drv->track;
+ uint8_t new_sect = cur_drv->sect;
+
+ int ret = 1;
+
+ if (new_sect >= cur_drv->last_sect ||
+ new_sect == fdctrl->eot) {
+ new_sect = 1;
if (FD_MULTI_TRACK(fdctrl->data_state)) {
- if (cur_drv->head == 0 &&
+ if (new_head == 0 &&
(cur_drv->flags & FDISK_DBL_SIDES) != 0) {
- cur_drv->head = 1;
+ new_head = 1;
} else {
- cur_drv->head = 0;
- cur_drv->track++;
- if ((cur_drv->flags & FDISK_DBL_SIDES) == 0)
- return 0;
+ new_head = 0;
+ new_track++;
+ if ((cur_drv->flags & FDISK_DBL_SIDES) == 0) {
+ ret = 0;
+ }
}
} else {
- cur_drv->track++;
- return 0;
+ new_track++;
+ ret = 0;
+ }
+ if (ret == 1) {
+ FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n",
+ new_head, new_track, new_sect, fd_sector(cur_drv));
}
- FLOPPY_DPRINTF("seek to next track (%d %02x %02x => %d)\n",
- cur_drv->head, cur_drv->track,
- cur_drv->sect, fd_sector(cur_drv));
} else {
- cur_drv->sect++;
+ new_sect++;
}
- return 1;
+ fd_seek(cur_drv, new_head, new_track, new_sect, 1);
+ return ret;
}
/* Callback for transfer end (stop or abort) */
@@ -1038,10 +1152,12 @@ static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0,
FDrive *cur_drv;
cur_drv = get_cur_drv(fdctrl);
+ fdctrl->status0 = status0 | FD_SR0_SEEK | (cur_drv->head << 2) |
+ GET_CUR_DRV(fdctrl);
+
FLOPPY_DPRINTF("transfer status: %02x %02x %02x (%02x)\n",
- status0, status1, status2,
- status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl));
- fdctrl->fifo[0] = status0 | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
+ status0, status1, status2, fdctrl->status0);
+ fdctrl->fifo[0] = fdctrl->status0;
fdctrl->fifo[1] = status1;
fdctrl->fifo[2] = status2;
fdctrl->fifo[3] = cur_drv->track;
@@ -1054,7 +1170,7 @@ static void fdctrl_stop_transfer(FDCtrl *fdctrl, uint8_t status0,
}
fdctrl->msr |= FD_MSR_RQM | FD_MSR_DIO;
fdctrl->msr &= ~FD_MSR_NONDMA;
- fdctrl_set_fifo(fdctrl, 7, 1);
+ fdctrl_set_fifo(fdctrl, 7, fdctrl->status0);
}
/* Prepare a data transfer (either DMA or FIFO) */
@@ -1169,7 +1285,7 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction)
if (direction != FD_DIR_WRITE)
fdctrl->msr |= FD_MSR_DIO;
/* IO based transfer: calculate len */
- fdctrl_raise_irq(fdctrl, 0x00);
+ fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
return;
}
@@ -1598,16 +1714,18 @@ static void fdctrl_handle_sense_interrupt_status(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv = get_cur_drv(fdctrl);
- if(fdctrl->reset_sensei > 0) {
+ if (fdctrl->reset_sensei > 0) {
fdctrl->fifo[0] =
FD_SR0_RDYCHG + FD_RESET_SENSEI_COUNT - fdctrl->reset_sensei;
fdctrl->reset_sensei--;
+ } else if (!(fdctrl->sra & FD_SRA_INTPEND)) {
+ fdctrl->fifo[0] = FD_SR0_INVCMD;
+ fdctrl_set_fifo(fdctrl, 1, 0);
+ return;
} else {
- /* XXX: status0 handling is broken for read/write
- commands, so we do this hack. It should be suppressed
- ASAP */
fdctrl->fifo[0] =
- FD_SR0_SEEK | (cur_drv->head << 2) | GET_CUR_DRV(fdctrl);
+ (fdctrl->status0 & ~(FD_SR0_HEAD | FD_SR0_DS1 | FD_SR0_DS0))
+ | GET_CUR_DRV(fdctrl);
}
fdctrl->fifo[1] = cur_drv->track;
@@ -1626,11 +1744,7 @@ static void fdctrl_handle_seek(FDCtrl *fdctrl, int direction)
/* The seek command just sends step pulses to the drive and doesn't care if
* there is a medium inserted or if it's banging the head against the drive.
*/
- if (fdctrl->fifo[2] > cur_drv->max_track) {
- cur_drv->track = cur_drv->max_track;
- } else {
- cur_drv->track = fdctrl->fifo[2];
- }
+ fd_seek(cur_drv, cur_drv->head, fdctrl->fifo[2], cur_drv->sect, 1);
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
@@ -1688,32 +1802,35 @@ static void fdctrl_handle_drive_specification_command(FDCtrl *fdctrl, int direct
}
}
-static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction)
+static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[2] + cur_drv->track >= cur_drv->max_track) {
- cur_drv->track = cur_drv->max_track - 1;
+ fd_seek(cur_drv, cur_drv->head, cur_drv->max_track - 1,
+ cur_drv->sect, 1);
} else {
- cur_drv->track += fdctrl->fifo[2];
+ fd_seek(cur_drv, cur_drv->head,
+ cur_drv->track + fdctrl->fifo[2], cur_drv->sect, 1);
}
fdctrl_reset_fifo(fdctrl);
/* Raise Interrupt */
fdctrl_raise_irq(fdctrl, FD_SR0_SEEK);
}
-static void fdctrl_handle_relative_seek_in(FDCtrl *fdctrl, int direction)
+static void fdctrl_handle_relative_seek_out(FDCtrl *fdctrl, int direction)
{
FDrive *cur_drv;
SET_CUR_DRV(fdctrl, fdctrl->fifo[1] & FD_DOR_SELMASK);
cur_drv = get_cur_drv(fdctrl);
if (fdctrl->fifo[2] > cur_drv->track) {
- cur_drv->track = 0;
+ fd_seek(cur_drv, cur_drv->head, 0, cur_drv->sect, 1);
} else {
- cur_drv->track -= fdctrl->fifo[2];
+ fd_seek(cur_drv, cur_drv->head,
+ cur_drv->track - fdctrl->fifo[2], cur_drv->sect, 1);
}
fdctrl_reset_fifo(fdctrl);
/* Raise Interrupt */
@@ -2046,18 +2163,13 @@ static int sun4m_fdc_init1(SysBusDevice *dev)
return fdctrl_init_common(fdctrl);
}
-void fdc_get_bs(BlockDriverState *bs[], ISADevice *dev)
+FDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i)
{
- FDCtrlISABus *isa = DO_UPCAST(FDCtrlISABus, busdev, dev);
- FDCtrl *fdctrl = &isa->state;
- int i;
+ FDCtrlISABus *isa = DO_UPCAST(FDCtrlISABus, busdev, fdc);
- for (i = 0; i < MAX_FD; i++) {
- bs[i] = fdctrl->drives[i].bs;
- }
+ return isa->state.drives[i].drive;
}
-
static const VMStateDescription vmstate_isa_fdc ={
.name = "fdc",
.version_id = 2,
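
For reference, pick_geometry() above selects a floppy format purely by comparing the image's total sector count against heads * tracks * sectors for each fd_formats[] entry, keeping the first drive-type match as a fallback. Below is a standalone sketch of that size match, not part of the patch, using a few entries copied from the table and assuming 512-byte sectors.

    /* Standalone illustration of the fd_formats[] size match. */
    #include <stdio.h>
    #include <stdint.h>

    struct fmt { int max_head, max_track, last_sect; const char *name; };

    int main(void)
    {
        const struct fmt f[] = {
            { 1, 80, 18, "1.44 MB 3.5\"" },
            { 1, 80, 36, "2.88 MB 3.5\"" },
            { 1, 80,  9, "720 kB 3.5\""  },
        };
        uint64_t nb_sectors = 2880;     /* e.g. a 1474560-byte raw image / 512 */

        for (unsigned i = 0; i < sizeof(f) / sizeof(f[0]); i++) {
            uint64_t size = (uint64_t)(f[i].max_head + 1) * f[i].max_track *
                            f[i].last_sect;
            if (size == nb_sectors) {
                printf("matched %s (%d heads, %d tracks, %d sectors/track)\n",
                       f[i].name, f[i].max_head + 1, f[i].max_track,
                       f[i].last_sect);
            }
        }
        return 0;
    }

A 2880-sector image matches the first 1.44 MB entry (2 * 80 * 18).
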
diff --git a/hw/fdc.h b/hw/fdc.h
index 1b32b17bef..b5c9f31074 100644
--- a/hw/fdc.h
+++ b/hw/fdc.h
@@ -6,11 +6,19 @@
/* fdc.c */
#define MAX_FD 2
+typedef enum FDriveType {
+ FDRIVE_DRV_144 = 0x00, /* 1.44 MB 3"5 drive */
+ FDRIVE_DRV_288 = 0x01, /* 2.88 MB 3"5 drive */
+ FDRIVE_DRV_120 = 0x02, /* 1.2 MB 5"25 drive */
+ FDRIVE_DRV_NONE = 0x03, /* No drive connected */
+} FDriveType;
+
ISADevice *fdctrl_init_isa(ISABus *bus, DriveInfo **fds);
void fdctrl_init_sysbus(qemu_irq irq, int dma_chann,
target_phys_addr_t mmio_base, DriveInfo **fds);
void sun4m_fdctrl_init(qemu_irq irq, target_phys_addr_t io_base,
DriveInfo **fds, qemu_irq *fdc_tc);
-void fdc_get_bs(BlockDriverState *bs[], ISADevice *dev);
+
+FDriveType isa_fdc_get_drive_type(ISADevice *fdc, int i);
#endif
diff --git a/hw/hd-geometry.c b/hw/hd-geometry.c
new file mode 100644
index 0000000000..1cdb9fb753
--- /dev/null
+++ b/hw/hd-geometry.c
@@ -0,0 +1,157 @@
+/*
+ * Hard disk geometry utilities
+ *
+ * Copyright (C) 2012 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+#include "block.h"
+#include "hw/block-common.h"
+#include "trace.h"
+
+struct partition {
+ uint8_t boot_ind; /* 0x80 - active */
+ uint8_t head; /* starting head */
+ uint8_t sector; /* starting sector */
+ uint8_t cyl; /* starting cylinder */
+ uint8_t sys_ind; /* What partition type */
+ uint8_t end_head; /* end head */
+ uint8_t end_sector; /* end sector */
+ uint8_t end_cyl; /* end cylinder */
+ uint32_t start_sect; /* starting sector counting from 0 */
+ uint32_t nr_sects; /* nr of sectors in partition */
+} QEMU_PACKED;
+
+/* Try to guess the disk logical geometry from the MSDOS partition table.
+ Return 0 if OK, -1 if the geometry could not be guessed */
+static int guess_disk_lchs(BlockDriverState *bs,
+ int *pcylinders, int *pheads, int *psectors)
+{
+ uint8_t buf[BDRV_SECTOR_SIZE];
+ int i, heads, sectors, cylinders;
+ struct partition *p;
+ uint32_t nr_sects;
+ uint64_t nb_sectors;
+
+ bdrv_get_geometry(bs, &nb_sectors);
+
+ /*
+ * This function can be called during startup in both sync and async
+ * I/O modes, so I/O throttling is bypassed for this one read
+ * (bdrv_read_unthrottled) rather than disabled permanently.
+ */
+ if (bdrv_read_unthrottled(bs, 0, buf, 1) < 0) {
+ return -1;
+ }
+ /* test msdos magic */
+ if (buf[510] != 0x55 || buf[511] != 0xaa) {
+ return -1;
+ }
+ for (i = 0; i < 4; i++) {
+ p = ((struct partition *)(buf + 0x1be)) + i;
+ nr_sects = le32_to_cpu(p->nr_sects);
+ if (nr_sects && p->end_head) {
+ /* We make the assumption that the partition terminates on
+ a cylinder boundary */
+ heads = p->end_head + 1;
+ sectors = p->end_sector & 63;
+ if (sectors == 0) {
+ continue;
+ }
+ cylinders = nb_sectors / (heads * sectors);
+ if (cylinders < 1 || cylinders > 16383) {
+ continue;
+ }
+ *pheads = heads;
+ *psectors = sectors;
+ *pcylinders = cylinders;
+ trace_hd_geometry_lchs_guess(bs, cylinders, heads, sectors);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+static void guess_chs_for_size(BlockDriverState *bs,
+ uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs)
+{
+ uint64_t nb_sectors;
+ int cylinders;
+
+ bdrv_get_geometry(bs, &nb_sectors);
+
+ cylinders = nb_sectors / (16 * 63);
+ if (cylinders > 16383) {
+ cylinders = 16383;
+ } else if (cylinders < 2) {
+ cylinders = 2;
+ }
+ *pcyls = cylinders;
+ *pheads = 16;
+ *psecs = 63;
+}
+
+void hd_geometry_guess(BlockDriverState *bs,
+ uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs,
+ int *ptrans)
+{
+ int cylinders, heads, secs, translation;
+
+ if (guess_disk_lchs(bs, &cylinders, &heads, &secs) < 0) {
+ /* no LCHS guess: use a standard physical disk geometry */
+ guess_chs_for_size(bs, pcyls, pheads, psecs);
+ translation = hd_bios_chs_auto_trans(*pcyls, *pheads, *psecs);
+ } else if (heads > 16) {
+ /* LCHS guess with heads > 16 means that a BIOS LBA
+ translation was active, so a standard physical disk
+ geometry is OK */
+ guess_chs_for_size(bs, pcyls, pheads, psecs);
+ translation = *pcyls * *pheads <= 131072
+ ? BIOS_ATA_TRANSLATION_LARGE
+ : BIOS_ATA_TRANSLATION_LBA;
+ } else {
+ /* LCHS guess with heads <= 16: use as physical geometry */
+ *pcyls = cylinders;
+ *pheads = heads;
+ *psecs = secs;
+ /* disable any translation to be in sync with
+ the logical geometry */
+ translation = BIOS_ATA_TRANSLATION_NONE;
+ }
+ if (ptrans) {
+ *ptrans = translation;
+ }
+ trace_hd_geometry_guess(bs, *pcyls, *pheads, *psecs, translation);
+}
+
+int hd_bios_chs_auto_trans(uint32_t cyls, uint32_t heads, uint32_t secs)
+{
+ return cyls <= 1024 && heads <= 16 && secs <= 63
+ ? BIOS_ATA_TRANSLATION_NONE
+ : BIOS_ATA_TRANSLATION_LBA;
+}
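
When no MSDOS partition table yields a geometry, hd_geometry_guess() above falls back to 16 heads and 63 sectors per track and asks hd_bios_chs_auto_trans() whether the BIOS needs a translation. Below is a standalone sketch, not part of the patch, that evaluates those two rules for two example disk sizes, assuming 512-byte sectors.

    /* Standalone illustration of the fallback geometry and auto-translation. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sizes_mb[] = { 100, 8192 };    /* 100 MB and 8 GB examples */

        for (int i = 0; i < 2; i++) {
            uint64_t nb_sectors = sizes_mb[i] * 2048;   /* 512-byte sectors */
            uint32_t cyls = nb_sectors / (16 * 63);
            if (cyls > 16383) {
                cyls = 16383;
            } else if (cyls < 2) {
                cyls = 2;
            }
            /* hd_bios_chs_auto_trans(): with 16/63 fixed, only cyls matters */
            const char *trans = (cyls <= 1024) ? "NONE" : "LBA";
            printf("%5llu MB -> %u/16/63, translation %s\n",
                   (unsigned long long)sizes_mb[i], cyls, trans);
        }
        return 0;
    }

A 100 MB disk gets 203/16/63 with no translation; an 8 GB disk clamps to 16383 cylinders and needs LBA translation.
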
diff --git a/hw/highbank.c b/hw/highbank.c
index 4bdea5df7d..11aa1312c0 100644
--- a/hw/highbank.c
+++ b/hw/highbank.c
@@ -284,7 +284,7 @@ static void highbank_init(ram_addr_t ram_size,
sysbus_create_simple("sysbus-ahci", 0xffe08000, pic[83]);
- if (nd_table[0].vlan) {
+ if (nd_table[0].used) {
qemu_check_nic_model(&nd_table[0], "xgmac");
dev = qdev_create(NULL, "xgmac");
qdev_set_nic_properties(dev, &nd_table[0]);
diff --git a/hw/i386/Makefile.objs b/hw/i386/Makefile.objs
index eb171b7c47..8c764bbfef 100644
--- a/hw/i386/Makefile.objs
+++ b/hw/i386/Makefile.objs
@@ -7,6 +7,8 @@ obj-y += debugcon.o multiboot.o
obj-y += pc_piix.o
obj-y += pc_sysfw.o
obj-$(CONFIG_XEN) += xen_platform.o xen_apic.o
+obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen-host-pci-device.o
+obj-$(CONFIG_XEN_PCI_PASSTHROUGH) += xen_pt.o xen_pt_config_init.o xen_pt_msi.o
obj-y += kvm/
obj-$(CONFIG_SPICE) += qxl.o qxl-logger.o qxl-render.o
diff --git a/hw/ide.h b/hw/ide.h
index 0b18c9016b..2db4079f68 100644
--- a/hw/ide.h
+++ b/hw/ide.h
@@ -29,7 +29,9 @@ void mmio_ide_init (target_phys_addr_t membase, target_phys_addr_t membase2,
qemu_irq irq, int shift,
DriveInfo *hd0, DriveInfo *hd1);
-void ide_get_bs(BlockDriverState *bs[], BusState *qbus);
+int ide_get_geometry(BusState *bus, int unit,
+ int16_t *cyls, int8_t *heads, int8_t *secs);
+int ide_get_bios_chs_trans(BusState *bus, int unit);
/* ide/core.c */
void ide_drive_get(DriveInfo **hd, int max_bus);
diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c
index e275e68934..efea93f0b4 100644
--- a/hw/ide/ahci.c
+++ b/hw/ide/ahci.c
@@ -588,7 +588,7 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
AHCIPortRegs *pr = &ad->port_regs;
uint8_t *d2h_fis;
int i;
- target_phys_addr_t cmd_len = 0x80;
+ dma_addr_t cmd_len = 0x80;
int cmd_mapped = 0;
if (!ad->res_fis || !(pr->cmd & PORT_CMD_FIS_RX)) {
@@ -598,7 +598,8 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
if (!cmd_fis) {
/* map cmd_fis */
uint64_t tbl_addr = le64_to_cpu(ad->cur_cmd->tbl_addr);
- cmd_fis = cpu_physical_memory_map(tbl_addr, &cmd_len, 0);
+ cmd_fis = dma_memory_map(ad->hba->dma, tbl_addr, &cmd_len,
+ DMA_DIRECTION_TO_DEVICE);
cmd_mapped = 1;
}
@@ -630,7 +631,8 @@ static void ahci_write_fis_d2h(AHCIDevice *ad, uint8_t *cmd_fis)
ahci_trigger_irq(ad->hba, ad, PORT_IRQ_D2H_REG_FIS);
if (cmd_mapped) {
- cpu_physical_memory_unmap(cmd_fis, cmd_len, 0, cmd_len);
+ dma_memory_unmap(ad->hba->dma, cmd_fis, cmd_len,
+ DMA_DIRECTION_TO_DEVICE, cmd_len);
}
}
@@ -640,8 +642,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
uint32_t opts = le32_to_cpu(cmd->opts);
uint64_t prdt_addr = le64_to_cpu(cmd->tbl_addr) + 0x80;
int sglist_alloc_hint = opts >> AHCI_CMD_HDR_PRDT_LEN;
- target_phys_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG));
- target_phys_addr_t real_prdt_len = prdt_len;
+ dma_addr_t prdt_len = (sglist_alloc_hint * sizeof(AHCI_SG));
+ dma_addr_t real_prdt_len = prdt_len;
uint8_t *prdt;
int i;
int r = 0;
@@ -652,7 +654,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
}
/* map PRDT */
- if (!(prdt = cpu_physical_memory_map(prdt_addr, &prdt_len, 0))){
+ if (!(prdt = dma_memory_map(ad->hba->dma, prdt_addr, &prdt_len,
+ DMA_DIRECTION_TO_DEVICE))){
DPRINTF(ad->port_no, "map failed\n");
return -1;
}
@@ -667,7 +670,7 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
if (sglist_alloc_hint > 0) {
AHCI_SG *tbl = (AHCI_SG *)prdt;
- qemu_sglist_init(sglist, sglist_alloc_hint);
+ qemu_sglist_init(sglist, sglist_alloc_hint, ad->hba->dma);
for (i = 0; i < sglist_alloc_hint; i++) {
/* flags_size is zero-based */
qemu_sglist_add(sglist, le64_to_cpu(tbl[i].addr),
@@ -676,7 +679,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist)
}
out:
- cpu_physical_memory_unmap(prdt, prdt_len, 0, prdt_len);
+ dma_memory_unmap(ad->hba->dma, prdt, prdt_len,
+ DMA_DIRECTION_TO_DEVICE, prdt_len);
return r;
}
@@ -786,7 +790,7 @@ static int handle_cmd(AHCIState *s, int port, int slot)
uint64_t tbl_addr;
AHCICmdHdr *cmd;
uint8_t *cmd_fis;
- target_phys_addr_t cmd_len;
+ dma_addr_t cmd_len;
if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
/* Engine currently busy, try again later */
@@ -808,7 +812,8 @@ static int handle_cmd(AHCIState *s, int port, int slot)
tbl_addr = le64_to_cpu(cmd->tbl_addr);
cmd_len = 0x80;
- cmd_fis = cpu_physical_memory_map(tbl_addr, &cmd_len, 1);
+ cmd_fis = dma_memory_map(s->dma, tbl_addr, &cmd_len,
+ DMA_DIRECTION_FROM_DEVICE);
if (!cmd_fis) {
DPRINTF(port, "error: guest passed us an invalid cmd fis\n");
@@ -934,7 +939,8 @@ static int handle_cmd(AHCIState *s, int port, int slot)
}
out:
- cpu_physical_memory_unmap(cmd_fis, cmd_len, 1, cmd_len);
+ dma_memory_unmap(s->dma, cmd_fis, cmd_len, DMA_DIRECTION_FROM_DEVICE,
+ cmd_len);
if (s->dev[port].port.ifs[0].status & (BUSY_STAT|DRQ_STAT)) {
/* async command, complete later */
@@ -1114,11 +1120,12 @@ static const IDEDMAOps ahci_dma_ops = {
.reset = ahci_dma_reset,
};
-void ahci_init(AHCIState *s, DeviceState *qdev, int ports)
+void ahci_init(AHCIState *s, DeviceState *qdev, DMAContext *dma, int ports)
{
qemu_irq *irqs;
int i;
+ s->dma = dma;
s->ports = ports;
s->dev = g_malloc0(sizeof(AHCIDevice) * ports);
ahci_reg_init(s);
@@ -1187,7 +1194,7 @@ static void sysbus_ahci_reset(DeviceState *dev)
static int sysbus_ahci_init(SysBusDevice *dev)
{
SysbusAHCIState *s = FROM_SYSBUS(SysbusAHCIState, dev);
- ahci_init(&s->ahci, &dev->qdev, s->num_ports);
+ ahci_init(&s->ahci, &dev->qdev, NULL, s->num_ports);
sysbus_init_mmio(dev, &s->ahci.mem);
sysbus_init_irq(dev, &s->ahci.irq);
diff --git a/hw/ide/ahci.h b/hw/ide/ahci.h
index ec1b6a5f66..1200a56ada 100644
--- a/hw/ide/ahci.h
+++ b/hw/ide/ahci.h
@@ -299,6 +299,7 @@ typedef struct AHCIState {
uint32_t idp_index; /* Current IDP index */
int ports;
qemu_irq irq;
+ DMAContext *dma;
} AHCIState;
typedef struct AHCIPCIState {
@@ -329,7 +330,7 @@ typedef struct NCQFrame {
uint8_t reserved10;
} QEMU_PACKED NCQFrame;
-void ahci_init(AHCIState *s, DeviceState *qdev, int ports);
+void ahci_init(AHCIState *s, DeviceState *qdev, DMAContext *dma, int ports);
void ahci_uninit(AHCIState *s);
void ahci_reset(AHCIState *s);
diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c
index 5919cf52d8..f7f714c726 100644
--- a/hw/ide/atapi.c
+++ b/hw/ide/atapi.c
@@ -956,6 +956,36 @@ static void cmd_read_cdvd_capacity(IDEState *s, uint8_t* buf)
ide_atapi_cmd_reply(s, 8, 8);
}
+static void cmd_read_disc_information(IDEState *s, uint8_t* buf)
+{
+ uint8_t type = buf[1] & 7;
+ uint32_t max_len = ube16_to_cpu(buf + 7);
+
+ /* Types 1/2 are only defined for Blu-Ray. */
+ if (type != 0) {
+ ide_atapi_cmd_error(s, ILLEGAL_REQUEST,
+ ASC_INV_FIELD_IN_CMD_PACKET);
+ return;
+ }
+
+ memset(buf, 0, 34);
+ buf[1] = 32;
+ buf[2] = 0xe; /* last session complete, disc finalized */
+ buf[3] = 1; /* first track on disc */
+ buf[4] = 1; /* # of sessions */
+ buf[5] = 1; /* first track of last session */
+ buf[6] = 1; /* last track of last session */
+ buf[7] = 0x20; /* unrestricted use */
+ buf[8] = 0x00; /* CD-ROM or DVD-ROM */
+ /* 9-10-11: most significant byte corresponding bytes 4-5-6 */
+ /* 12-23: not meaningful for CD-ROM or DVD-ROM */
+ /* 24-31: disc bar code */
+ /* 32: disc application code */
+ /* 33: number of OPC tables */
+
+ ide_atapi_cmd_reply(s, 34, max_len);
+}
+
static void cmd_read_dvd_structure(IDEState *s, uint8_t* buf)
{
int max_len;
@@ -1045,6 +1075,7 @@ static const struct {
[ 0x43 ] = { cmd_read_toc_pma_atip, CHECK_READY },
[ 0x46 ] = { cmd_get_configuration, ALLOW_UA },
[ 0x4a ] = { cmd_get_event_status_notification, ALLOW_UA },
+ [ 0x51 ] = { cmd_read_disc_information, CHECK_READY },
[ 0x5a ] = { cmd_mode_sense, /* (10) */ 0 },
[ 0xa8 ] = { cmd_read, /* (12) */ CHECK_READY },
[ 0xad ] = { cmd_read_dvd_structure, CHECK_READY },
diff --git a/hw/ide/cmd646.c b/hw/ide/cmd646.c
index bf8ece4708..e0b9443496 100644
--- a/hw/ide/cmd646.c
+++ b/hw/ide/cmd646.c
@@ -94,12 +94,12 @@ static void cmd646_data_write(void *opaque, target_phys_addr_t addr,
CMD646BAR *cmd646bar = opaque;
if (size == 1) {
- return ide_ioport_write(cmd646bar->bus, addr, data);
+ ide_ioport_write(cmd646bar->bus, addr, data);
} else if (addr == 0) {
if (size == 2) {
- return ide_data_writew(cmd646bar->bus, addr, data);
+ ide_data_writew(cmd646bar->bus, addr, data);
} else {
- return ide_data_writel(cmd646bar->bus, addr, data);
+ ide_data_writel(cmd646bar->bus, addr, data);
}
}
}
@@ -295,7 +295,7 @@ static int pci_cmd646_ide_initfn(PCIDevice *dev)
return 0;
}
-static int pci_cmd646_ide_exitfn(PCIDevice *dev)
+static void pci_cmd646_ide_exitfn(PCIDevice *dev)
{
PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
unsigned i;
@@ -309,8 +309,6 @@ static int pci_cmd646_ide_exitfn(PCIDevice *dev)
memory_region_destroy(&d->cmd646_bar[i].data);
}
memory_region_destroy(&d->bmdma_bar);
-
- return 0;
}
void pci_cmd646_ide_init(PCIBus *bus, DriveInfo **hd_table,
diff --git a/hw/ide/core.c b/hw/ide/core.c
index 71d4d7732a..d65ef3d58d 100644
--- a/hw/ide/core.c
+++ b/hw/ide/core.c
@@ -30,6 +30,7 @@
#include "qemu-timer.h"
#include "sysemu.h"
#include "dma.h"
+#include "hw/block-common.h"
#include "blockdev.h"
#include <hw/ide/internal.h>
@@ -1924,31 +1925,20 @@ static const BlockDevOps ide_cd_block_ops = {
int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
const char *version, const char *serial, const char *model,
- uint64_t wwn)
+ uint64_t wwn,
+ uint32_t cylinders, uint32_t heads, uint32_t secs,
+ int chs_trans)
{
- int cylinders, heads, secs;
uint64_t nb_sectors;
s->bs = bs;
s->drive_kind = kind;
bdrv_get_geometry(bs, &nb_sectors);
- bdrv_guess_geometry(bs, &cylinders, &heads, &secs);
- if (cylinders < 1 || cylinders > 16383) {
- error_report("cyls must be between 1 and 16383");
- return -1;
- }
- if (heads < 1 || heads > 16) {
- error_report("heads must be between 1 and 16");
- return -1;
- }
- if (secs < 1 || secs > 63) {
- error_report("secs must be between 1 and 63");
- return -1;
- }
s->cylinders = cylinders;
s->heads = heads;
s->sectors = secs;
+ s->chs_trans = chs_trans;
s->nb_sectors = nb_sectors;
s->wwn = wwn;
/* The SMART values should be preserved across power cycles
@@ -2075,17 +2065,39 @@ void ide_init2(IDEBus *bus, qemu_irq irq)
void ide_init2_with_non_qdev_drives(IDEBus *bus, DriveInfo *hd0,
DriveInfo *hd1, qemu_irq irq)
{
- int i;
+ int i, trans;
DriveInfo *dinfo;
+ uint32_t cyls, heads, secs;
for(i = 0; i < 2; i++) {
dinfo = i == 0 ? hd0 : hd1;
ide_init1(bus, i);
if (dinfo) {
+ cyls = dinfo->cyls;
+ heads = dinfo->heads;
+ secs = dinfo->secs;
+ trans = dinfo->trans;
+ if (!cyls && !heads && !secs) {
+ hd_geometry_guess(dinfo->bdrv, &cyls, &heads, &secs, &trans);
+ } else if (trans == BIOS_ATA_TRANSLATION_AUTO) {
+ trans = hd_bios_chs_auto_trans(cyls, heads, secs);
+ }
+ if (cyls < 1 || cyls > 65535) {
+ error_report("cyls must be between 1 and 65535");
+ exit(1);
+ }
+ if (heads < 1 || heads > 16) {
+ error_report("heads must be between 1 and 16");
+ exit(1);
+ }
+ if (secs < 1 || secs > 255) {
+ error_report("secs must be between 1 and 255");
+ exit(1);
+ }
if (ide_init_drive(&bus->ifs[i], dinfo->bdrv,
- dinfo->media_cd ? IDE_CD : IDE_HD, NULL,
- *dinfo->serial ? dinfo->serial : NULL,
- NULL, 0) < 0) {
+ dinfo->media_cd ? IDE_CD : IDE_HD,
+ NULL, dinfo->serial, NULL, 0,
+ cyls, heads, secs, trans) < 0) {
error_report("Can't set up IDE drive %s", dinfo->id);
exit(1);
}
diff --git a/hw/ide/ich.c b/hw/ide/ich.c
index e3eaaea882..272b7734b5 100644
--- a/hw/ide/ich.c
+++ b/hw/ide/ich.c
@@ -98,7 +98,7 @@ static int pci_ich9_ahci_init(PCIDevice *dev)
uint8_t *sata_cap;
d = DO_UPCAST(struct AHCIPCIState, card, dev);
- ahci_init(&d->ahci, &dev->qdev, 6);
+ ahci_init(&d->ahci, &dev->qdev, pci_dma_context(dev), 6);
pci_config_set_prog_interface(d->card.config, AHCI_PROGMODE_MAJOR_REV_1);
@@ -132,15 +132,13 @@ static int pci_ich9_ahci_init(PCIDevice *dev)
return 0;
}
-static int pci_ich9_uninit(PCIDevice *dev)
+static void pci_ich9_uninit(PCIDevice *dev)
{
struct AHCIPCIState *d;
d = DO_UPCAST(struct AHCIPCIState, card, dev);
msi_uninit(dev);
ahci_uninit(&d->ahci);
-
- return 0;
}
static void ich_ahci_class_init(ObjectClass *klass, void *data)
diff --git a/hw/ide/internal.h b/hw/ide/internal.h
index 1a02f57bf5..7170bd9cd0 100644
--- a/hw/ide/internal.h
+++ b/hw/ide/internal.h
@@ -11,6 +11,7 @@
#include "iorange.h"
#include "dma.h"
#include "sysemu.h"
+#include "hw/block-common.h"
#include "hw/scsi-defs.h"
/* debug IDE devices */
@@ -344,7 +345,7 @@ struct IDEState {
uint8_t unit;
/* ide config */
IDEDriveKind drive_kind;
- int cylinders, heads, sectors;
+ int cylinders, heads, sectors, chs_trans;
int64_t nb_sectors;
int mult_sectors;
int identify_set;
@@ -474,6 +475,7 @@ struct IDEDevice {
DeviceState qdev;
uint32_t unit;
BlockConf conf;
+ int chs_trans;
char *version;
char *serial;
char *model;
@@ -545,7 +547,9 @@ uint32_t ide_data_readl(void *opaque, uint32_t addr);
int ide_init_drive(IDEState *s, BlockDriverState *bs, IDEDriveKind kind,
const char *version, const char *serial, const char *model,
- uint64_t wwn);
+ uint64_t wwn,
+ uint32_t cylinders, uint32_t heads, uint32_t secs,
+ int chs_trans);
void ide_init2(IDEBus *bus, qemu_irq irq);
void ide_init2_with_non_qdev_drives(IDEBus *bus, DriveInfo *hd0,
DriveInfo *hd1, qemu_irq irq);
diff --git a/hw/ide/macio.c b/hw/ide/macio.c
index 7b38d9e683..848cb31429 100644
--- a/hw/ide/macio.c
+++ b/hw/ide/macio.c
@@ -76,7 +76,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret)
s->io_buffer_size = io->len;
- qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+ qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
qemu_sglist_add(&s->sg, io->addr, io->len);
io->addr += io->len;
io->len = 0;
@@ -133,7 +133,7 @@ static void pmac_ide_transfer_cb(void *opaque, int ret)
s->io_buffer_index = 0;
s->io_buffer_size = io->len;
- qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1);
+ qemu_sglist_init(&s->sg, io->len / MACIO_PAGE_SIZE + 1, NULL);
qemu_sglist_add(&s->sg, io->addr, io->len);
io->addr += io->len;
io->len = 0;
diff --git a/hw/ide/piix.c b/hw/ide/piix.c
index f5a74c293a..4ded9ee13d 100644
--- a/hw/ide/piix.c
+++ b/hw/ide/piix.c
@@ -73,7 +73,8 @@ static void bmdma_write(void *opaque, target_phys_addr_t addr,
#endif
switch(addr & 3) {
case 0:
- return bmdma_cmd_writeb(bm, val);
+ bmdma_cmd_writeb(bm, val);
+ break;
case 2:
bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
break;
@@ -200,7 +201,7 @@ PCIDevice *pci_piix3_xen_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn)
return dev;
}
-static int pci_piix_ide_exitfn(PCIDevice *dev)
+static void pci_piix_ide_exitfn(PCIDevice *dev)
{
PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
unsigned i;
@@ -212,8 +213,6 @@ static int pci_piix_ide_exitfn(PCIDevice *dev)
memory_region_destroy(&d->bmdma[i].addr_ioport);
}
memory_region_destroy(&d->bmdma_bar);
-
- return 0;
}
/* hd_table must contain 4 block drivers */
diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c
index c122395401..5ea9b8f4b2 100644
--- a/hw/ide/qdev.c
+++ b/hw/ide/qdev.c
@@ -21,6 +21,7 @@
#include "qemu-error.h"
#include <hw/ide/internal.h>
#include "blockdev.h"
+#include "hw/block-common.h"
#include "sysemu.h"
/* --------------------------------- */
@@ -111,11 +112,24 @@ IDEDevice *ide_create_drive(IDEBus *bus, int unit, DriveInfo *drive)
return DO_UPCAST(IDEDevice, qdev, dev);
}
-void ide_get_bs(BlockDriverState *bs[], BusState *qbus)
+int ide_get_geometry(BusState *bus, int unit,
+ int16_t *cyls, int8_t *heads, int8_t *secs)
{
- IDEBus *bus = DO_UPCAST(IDEBus, qbus, qbus);
- bs[0] = bus->master ? bus->master->conf.bs : NULL;
- bs[1] = bus->slave ? bus->slave->conf.bs : NULL;
+ IDEState *s = &DO_UPCAST(IDEBus, qbus, bus)->ifs[unit];
+
+ if (s->drive_kind != IDE_HD || !s->bs) {
+ return -1;
+ }
+
+ *cyls = s->cylinders;
+ *heads = s->heads;
+ *secs = s->sectors;
+ return 0;
+}
+
+int ide_get_bios_chs_trans(BusState *bus, int unit)
+{
+ return DO_UPCAST(IDEBus, qbus, bus)->ifs[unit].chs_trans;
}
/* --------------------------------- */
@@ -128,25 +142,22 @@ static int ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind)
{
IDEBus *bus = DO_UPCAST(IDEBus, qbus, dev->qdev.parent_bus);
IDEState *s = bus->ifs + dev->unit;
- const char *serial;
- DriveInfo *dinfo;
if (dev->conf.discard_granularity && dev->conf.discard_granularity != 512) {
error_report("discard_granularity must be 512 for ide");
return -1;
}
- serial = dev->serial;
- if (!serial) {
- /* try to fall back to value set with legacy -drive serial=... */
- dinfo = drive_get_by_blockdev(dev->conf.bs);
- if (*dinfo->serial) {
- serial = dinfo->serial;
- }
+ blkconf_serial(&dev->conf, &dev->serial);
+ if (kind != IDE_CD
+ && blkconf_geometry(&dev->conf, &dev->chs_trans, 65536, 16, 255) < 0) {
+ return -1;
}
if (ide_init_drive(s, dev->conf.bs, kind,
- dev->version, serial, dev->model, dev->wwn) < 0) {
+ dev->version, dev->serial, dev->model, dev->wwn,
+ dev->conf.cyls, dev->conf.heads, dev->conf.secs,
+ dev->chs_trans) < 0) {
return -1;
}
@@ -189,6 +200,9 @@ static int ide_drive_initfn(IDEDevice *dev)
static Property ide_hd_properties[] = {
DEFINE_IDE_DEV_PROPERTIES(),
+ DEFINE_BLOCK_CHS_PROPERTIES(IDEDrive, dev.conf),
+ DEFINE_PROP_BIOS_CHS_TRANS("bios-chs-trans",
+ IDEDrive, dev.chs_trans, BIOS_ATA_TRANSLATION_AUTO),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/ide/via.c b/hw/ide/via.c
index eec5136019..b20e4f094e 100644
--- a/hw/ide/via.c
+++ b/hw/ide/via.c
@@ -74,7 +74,8 @@ static void bmdma_write(void *opaque, target_phys_addr_t addr,
#endif
switch (addr & 3) {
case 0:
- return bmdma_cmd_writeb(bm, val);
+ bmdma_cmd_writeb(bm, val);
+ break;
case 2:
bm->status = (val & 0x60) | (bm->status & 1) | (bm->status & ~val & 0x06);
break;
@@ -189,7 +190,7 @@ static int vt82c686b_ide_initfn(PCIDevice *dev)
return 0;
}
-static int vt82c686b_ide_exitfn(PCIDevice *dev)
+static void vt82c686b_ide_exitfn(PCIDevice *dev)
{
PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
unsigned i;
@@ -201,8 +202,6 @@ static int vt82c686b_ide_exitfn(PCIDevice *dev)
memory_region_destroy(&d->bmdma[i].addr_ioport);
}
memory_region_destroy(&d->bmdma_bar);
-
- return 0;
}
void vt82c686b_ide_init(PCIBus *bus, DriveInfo **hd_table, int devfn)
diff --git a/hw/imx.h b/hw/imx.h
new file mode 100644
index 0000000000..ccf586fefe
--- /dev/null
+++ b/hw/imx.h
@@ -0,0 +1,34 @@
+/*
+ * i.MX31 emulation
+ *
+ * Copyright (C) 2012 Peter Chubb
+ * NICTA
+ *
+ * This code is released under the GPL, version 2.0 or later
+ * See the file `../COPYING' for details.
+ */
+
+#ifndef IMX_H
+#define IMX_H
+
+void imx_serial_create(int uart, const target_phys_addr_t addr, qemu_irq irq);
+
+typedef enum {
+ NOCLK,
+ MCU,
+ HSP,
+ IPG,
+ CLK_32k
+} IMXClk;
+
+uint32_t imx_clock_frequency(DeviceState *s, IMXClk clock);
+
+void imx_timerp_create(const target_phys_addr_t addr,
+ qemu_irq irq,
+ DeviceState *ccm);
+void imx_timerg_create(const target_phys_addr_t addr,
+ qemu_irq irq,
+ DeviceState *ccm);
+
+
+#endif /* IMX_H */
diff --git a/hw/imx_avic.c b/hw/imx_avic.c
new file mode 100644
index 0000000000..4f010e8ee2
--- /dev/null
+++ b/hw/imx_avic.c
@@ -0,0 +1,408 @@
+/*
+ * i.MX31 Vectored Interrupt Controller
+ *
+ * Note this is NOT the PL192 provided by ARM, but
+ * a custom implementation by Freescale.
+ *
+ * Copyright (c) 2008 OKL
+ * Copyright (c) 2011 NICTA Pty Ltd
+ * Originally Written by Hans Jiang
+ *
+ * This code is licenced under the GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ * TODO: implement vectors.
+ */
+
+#include "hw.h"
+#include "sysbus.h"
+#include "host-utils.h"
+
+#define DEBUG_INT 1
+#undef DEBUG_INT /* comment out this #undef to enable debugging */
+
+#ifdef DEBUG_INT
+#define DPRINTF(fmt, args...) \
+do { printf("imx_avic: " fmt , ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+/*
+ * Define to 1 for messages about attempts to
+ * access unimplemented registers or similar.
+ */
+#define DEBUG_IMPLEMENTATION 1
+#if DEBUG_IMPLEMENTATION
+# define IPRINTF(fmt, args...) \
+ do { fprintf(stderr, "imx_avic: " fmt, ##args); } while (0)
+#else
+# define IPRINTF(fmt, args...) do {} while (0)
+#endif
+
+#define IMX_AVIC_NUM_IRQS 64
+
+/* Interrupt Control Bits */
+#define ABFLAG (1<<25)
+#define ABFEN (1<<24)
+#define NIDIS (1<<22) /* Normal Interrupt disable */
+#define FIDIS (1<<21) /* Fast interrupt disable */
+#define NIAD (1<<20) /* Normal Interrupt Arbiter Rise ARM level */
+#define FIAD (1<<19) /* Fast Interrupt Arbiter Rise ARM level */
+#define NM (1<<18) /* Normal interrupt mode */
+
+
+#define PRIO_PER_WORD (sizeof(uint32_t) * 8 / 4)
+#define PRIO_WORDS (IMX_AVIC_NUM_IRQS/PRIO_PER_WORD)
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ uint64_t pending;
+ uint64_t enabled;
+ uint64_t is_fiq;
+ uint32_t intcntl;
+ uint32_t intmask;
+ qemu_irq irq;
+ qemu_irq fiq;
+ uint32_t prio[PRIO_WORDS]; /* Priorities are 4-bits each */
+} IMXAVICState;
+
+static const VMStateDescription vmstate_imx_avic = {
+ .name = "imx-avic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(pending, IMXAVICState),
+ VMSTATE_UINT64(enabled, IMXAVICState),
+ VMSTATE_UINT64(is_fiq, IMXAVICState),
+ VMSTATE_UINT32(intcntl, IMXAVICState),
+ VMSTATE_UINT32(intmask, IMXAVICState),
+ VMSTATE_UINT32_ARRAY(prio, IMXAVICState, PRIO_WORDS),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+
+
+static inline int imx_avic_prio(IMXAVICState *s, int irq)
+{
+ uint32_t word = irq / PRIO_PER_WORD;
+ uint32_t part = 4 * (irq % PRIO_PER_WORD);
+ return 0xf & (s->prio[word] >> part);
+}
+
+static inline void imx_avic_set_prio(IMXAVICState *s, int irq, int prio)
+{
+ uint32_t word = irq / PRIO_PER_WORD;
+ uint32_t part = 4 * (irq % PRIO_PER_WORD);
+ uint32_t mask = ~(0xf << part);
+ s->prio[word] &= mask;
+ s->prio[word] |= prio << part;
+}
+
+/* Update interrupts. */
+static void imx_avic_update(IMXAVICState *s)
+{
+ int i;
+ uint64_t new = s->pending & s->enabled;
+ uint64_t flags;
+
+ flags = new & s->is_fiq;
+ qemu_set_irq(s->fiq, !!flags);
+
+ flags = new & ~s->is_fiq;
+ if (!flags || (s->intmask == 0x1f)) {
+ qemu_set_irq(s->irq, !!flags);
+ return;
+ }
+
+ /*
+ * Take interrupt if there's a pending interrupt with
+ * priority higher than the value of intmask
+ */
+ for (i = 0; i < IMX_AVIC_NUM_IRQS; i++) {
+ if (flags & (1UL << i)) {
+ if (imx_avic_prio(s, i) > s->intmask) {
+ qemu_set_irq(s->irq, 1);
+ return;
+ }
+ }
+ }
+ qemu_set_irq(s->irq, 0);
+}
+
+static void imx_avic_set_irq(void *opaque, int irq, int level)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+ if (level) {
+ DPRINTF("Raising IRQ %d, prio %d\n",
+ irq, imx_avic_prio(s, irq));
+ s->pending |= (1ULL << irq);
+ } else {
+ DPRINTF("Clearing IRQ %d, prio %d\n",
+ irq, imx_avic_prio(s, irq));
+ s->pending &= ~(1ULL << irq);
+ }
+
+ imx_avic_update(s);
+}
+
+
+static uint64_t imx_avic_read(void *opaque,
+ target_phys_addr_t offset, unsigned size)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+
+ DPRINTF("read(offset = 0x%x)\n", offset >> 2);
+ switch (offset >> 2) {
+ case 0: /* INTCNTL */
+ return s->intcntl;
+
+ case 1: /* Normal Interrupt Mask Register, NIMASK */
+ return s->intmask;
+
+ case 2: /* Interrupt Enable Number Register, INTENNUM */
+ case 3: /* Interrupt Disable Number Register, INTDISNUM */
+ return 0;
+
+ case 4: /* Interrupt Enabled Number Register High */
+ return s->enabled >> 32;
+
+ case 5: /* Interrupt Enabled Number Register Low */
+ return s->enabled & 0xffffffffULL;
+
+ case 6: /* Interrupt Type Register High */
+ return s->is_fiq >> 32;
+
+ case 7: /* Interrupt Type Register Low */
+ return s->is_fiq & 0xffffffffULL;
+
+ case 8: /* Normal Interrupt Priority Register 7 */
+ case 9: /* Normal Interrupt Priority Register 6 */
+ case 10:/* Normal Interrupt Priority Register 5 */
+ case 11:/* Normal Interrupt Priority Register 4 */
+ case 12:/* Normal Interrupt Priority Register 3 */
+ case 13:/* Normal Interrupt Priority Register 2 */
+ case 14:/* Normal Interrupt Priority Register 1 */
+ case 15:/* Normal Interrupt Priority Register 0 */
+ return s->prio[15-(offset>>2)];
+
+ case 16: /* Normal interrupt vector and status register */
+ {
+ /*
+ * This returns the highest priority
+ * outstanding interrupt. Where there is more than
+ * one pending IRQ with the same priority,
+ * take the highest numbered one.
+ */
+ uint64_t flags = s->pending & s->enabled & ~s->is_fiq;
+ int i;
+ int prio = -1;
+ int irq = -1;
+ for (i = 63; i >= 0; --i) {
+ if (flags & (1ULL<<i)) {
+ int irq_prio = imx_avic_prio(s, i);
+ if (irq_prio > prio) {
+ irq = i;
+ prio = irq_prio;
+ }
+ }
+ }
+ if (irq >= 0) {
+ imx_avic_set_irq(s, irq, 0);
+ return irq << 16 | prio;
+ }
+ return 0xffffffffULL;
+ }
+ case 17:/* Fast Interrupt vector and status register */
+ {
+ uint64_t flags = s->pending & s->enabled & s->is_fiq;
+ int i = ctz64(flags);
+ if (i < 64) {
+ imx_avic_set_irq(opaque, i, 0);
+ return i;
+ }
+ return 0xffffffffULL;
+ }
+ case 18:/* Interrupt source register high */
+ return s->pending >> 32;
+
+ case 19:/* Interrupt source register low */
+ return s->pending & 0xffffffffULL;
+
+ case 20:/* Interrupt Force Register high */
+ case 21:/* Interrupt Force Register low */
+ return 0;
+
+ case 22:/* Normal Interrupt Pending Register High */
+ return (s->pending & s->enabled & ~s->is_fiq) >> 32;
+
+ case 23:/* Normal Interrupt Pending Register Low */
+ return (s->pending & s->enabled & ~s->is_fiq) & 0xffffffffULL;
+
+ case 24: /* Fast Interrupt Pending Register High */
+ return (s->pending & s->enabled & s->is_fiq) >> 32;
+
+ case 25: /* Fast Interrupt Pending Register Low */
+ return (s->pending & s->enabled & s->is_fiq) & 0xffffffffULL;
+
+ case 0x40: /* AVIC vector 0, use for WFI WAR */
+ return 0x4;
+
+ default:
+ IPRINTF("imx_avic_read: Bad offset 0x%x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void imx_avic_write(void *opaque, target_phys_addr_t offset,
+ uint64_t val, unsigned size)
+{
+ IMXAVICState *s = (IMXAVICState *)opaque;
+
+ /* Vector Registers not yet supported */
+ if (offset >= 0x100 && offset <= 0x2fc) {
+ IPRINTF("imx_avic_write to vector register %d ignored\n",
+ (unsigned int)((offset - 0x100) >> 2));
+ return;
+ }
+
+ DPRINTF("imx_avic_write(0x%x) = %x\n",
+ (unsigned int)offset>>2, (unsigned int)val);
+ switch (offset >> 2) {
+ case 0: /* Interrupt Control Register, INTCNTL */
+ s->intcntl = val & (ABFEN | NIDIS | FIDIS | NIAD | FIAD | NM);
+ if (s->intcntl & ABFEN) {
+ s->intcntl &= ~(val & ABFLAG);
+ }
+ break;
+
+ case 1: /* Normal Interrupt Mask Register, NIMASK */
+ s->intmask = val & 0x1f;
+ break;
+
+ case 2: /* Interrupt Enable Number Register, INTENNUM */
+ DPRINTF("enable(%d)\n", (int)val);
+ val &= 0x3f;
+ s->enabled |= (1ULL << val);
+ break;
+
+ case 3: /* Interrupt Disable Number Register, INTDISNUM */
+ DPRINTF("disable(%d)\n", (int)val);
+ val &= 0x3f;
+ s->enabled &= ~(1ULL << val);
+ break;
+
+ case 4: /* Interrupt Enable Number Register High */
+ s->enabled = (s->enabled & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 5: /* Interrupt Enable Number Register Low */
+ s->enabled = (s->enabled & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 6: /* Interrupt Type Register High */
+ s->is_fiq = (s->is_fiq & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 7: /* Interrupt Type Register Low */
+ s->is_fiq = (s->is_fiq & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 8: /* Normal Interrupt Priority Register 7 */
+ case 9: /* Normal Interrupt Priority Register 6 */
+ case 10:/* Normal Interrupt Priority Register 5 */
+ case 11:/* Normal Interrupt Priority Register 4 */
+ case 12:/* Normal Interrupt Priority Register 3 */
+ case 13:/* Normal Interrupt Priority Register 2 */
+ case 14:/* Normal Interrupt Priority Register 1 */
+ case 15:/* Normal Interrupt Priority Register 0 */
+ s->prio[15-(offset>>2)] = val;
+ break;
+
+ /* Read-only registers, writes ignored */
+ case 16:/* Normal Interrupt Vector and Status register */
+ case 17:/* Fast Interrupt vector and status register */
+ case 18:/* Interrupt source register high */
+ case 19:/* Interrupt source register low */
+ return;
+
+ case 20:/* Interrupt Force Register high */
+ s->pending = (s->pending & 0xffffffffULL) | (val << 32);
+ break;
+
+ case 21:/* Interrupt Force Register low */
+ s->pending = (s->pending & 0xffffffff00000000ULL) | val;
+ break;
+
+ case 22:/* Normal Interrupt Pending Register High */
+ case 23:/* Normal Interrupt Pending Register Low */
+ case 24: /* Fast Interrupt Pending Register High */
+ case 25: /* Fast Interrupt Pending Register Low */
+ return;
+
+ default:
+ IPRINTF("imx_avic_write: Bad offset %x\n", (int)offset);
+ }
+ imx_avic_update(s);
+}
+
+static const MemoryRegionOps imx_avic_ops = {
+ .read = imx_avic_read,
+ .write = imx_avic_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static void imx_avic_reset(DeviceState *dev)
+{
+ IMXAVICState *s = container_of(dev, IMXAVICState, busdev.qdev);
+ s->pending = 0;
+ s->enabled = 0;
+ s->is_fiq = 0;
+ s->intmask = 0x1f;
+ s->intcntl = 0;
+ memset(s->prio, 0, sizeof s->prio);
+}
+
+static int imx_avic_init(SysBusDevice *dev)
+{
+ IMXAVICState *s = FROM_SYSBUS(IMXAVICState, dev);
+
+ memory_region_init_io(&s->iomem, &imx_avic_ops, s, "imx_avic", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ qdev_init_gpio_in(&dev->qdev, imx_avic_set_irq, IMX_AVIC_NUM_IRQS);
+ sysbus_init_irq(dev, &s->irq);
+ sysbus_init_irq(dev, &s->fiq);
+
+ return 0;
+}
+
+
+static void imx_avic_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ k->init = imx_avic_init;
+ dc->vmsd = &vmstate_imx_avic;
+ dc->reset = imx_avic_reset;
+ dc->desc = "i.MX Advanced Vector Interrupt Controller";
+}
+
+static const TypeInfo imx_avic_info = {
+ .name = "imx_avic",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXAVICState),
+ .class_init = imx_avic_class_init,
+};
+
+static void imx_avic_register_types(void)
+{
+ type_register_static(&imx_avic_info);
+}
+
+type_init(imx_avic_register_types)
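
imx_avic_prio() and imx_avic_set_prio() above pack one 4-bit priority per interrupt, eight to a 32-bit NIPRIORITY word. Below is a standalone sketch of that packing, not part of the patch; the helper names are local to the example.

    /* Standalone illustration of 4-bit-per-IRQ priority packing. */
    #include <stdio.h>
    #include <stdint.h>

    #define PRIO_PER_WORD  (sizeof(uint32_t) * 8 / 4)   /* 8 priorities/word */

    static uint32_t prio_words[64 / PRIO_PER_WORD];

    static void set_prio(int irq, int prio)
    {
        uint32_t word = irq / PRIO_PER_WORD;
        uint32_t part = 4 * (irq % PRIO_PER_WORD);
        prio_words[word] = (prio_words[word] & ~(0xfu << part)) |
                           ((uint32_t)(prio & 0xf) << part);
    }

    static int get_prio(int irq)
    {
        uint32_t word = irq / PRIO_PER_WORD;
        uint32_t part = 4 * (irq % PRIO_PER_WORD);
        return 0xf & (prio_words[word] >> part);
    }

    int main(void)
    {
        set_prio(27, 0xb);              /* IRQ 27 lives in word 3, nibble 3 */
        printf("prio_words[3] = 0x%08x, prio(27) = %d\n",
               (unsigned)prio_words[3], get_prio(27));
        return 0;
    }

This prints prio_words[3] = 0x0000b000, prio(27) = 11.
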
diff --git a/hw/imx_ccm.c b/hw/imx_ccm.c
new file mode 100644
index 0000000000..10952c6ea1
--- /dev/null
+++ b/hw/imx_ccm.c
@@ -0,0 +1,321 @@
+/*
+ * IMX31 Clock Control Module
+ *
+ * Copyright (C) 2012 NICTA
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * To get the timer frequencies right, we need to emulate at least part of
+ * the CCM.
+ */
+
+#include "hw.h"
+#include "sysbus.h"
+#include "sysemu.h"
+#include "imx.h"
+
+#define CKIH_FREQ 26000000 /* 26MHz crystal input */
+#define CKIL_FREQ 32768 /* nominal 32 kHz clock */
+
+
+//#define DEBUG_CCM 1
+#ifdef DEBUG_CCM
+#define DPRINTF(fmt, args...) \
+do { printf("imx_ccm: " fmt , ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+static int imx_ccm_post_load(void *opaque, int version_id);
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+
+ uint32_t ccmr;
+ uint32_t pdr0;
+ uint32_t pdr1;
+ uint32_t mpctl;
+ uint32_t spctl;
+ uint32_t cgr[3];
+ uint32_t pmcr0;
+ uint32_t pmcr1;
+
+ /* Frequencies precalculated on register changes */
+ uint32_t pll_refclk_freq;
+ uint32_t mcu_clk_freq;
+ uint32_t hsp_clk_freq;
+ uint32_t ipg_clk_freq;
+} IMXCCMState;
+
+static const VMStateDescription vmstate_imx_ccm = {
+ .name = "imx-ccm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(ccmr, IMXCCMState),
+ VMSTATE_UINT32(pdr0, IMXCCMState),
+ VMSTATE_UINT32(pdr1, IMXCCMState),
+ VMSTATE_UINT32(mpctl, IMXCCMState),
+ VMSTATE_UINT32(spctl, IMXCCMState),
+ VMSTATE_UINT32_ARRAY(cgr, IMXCCMState, 3),
+ VMSTATE_UINT32(pmcr0, IMXCCMState),
+ VMSTATE_UINT32(pmcr1, IMXCCMState),
+ VMSTATE_UINT32(pll_refclk_freq, IMXCCMState),
+ VMSTATE_END_OF_LIST()
+ },
+ .post_load = imx_ccm_post_load,
+};
+
+/* CCMR */
+#define CCMR_FPME (1<<0)
+#define CCMR_MPE (1<<3)
+#define CCMR_MDS (1<<7)
+#define CCMR_FPMF (1<<26)
+#define CCMR_PRCS (3<<1)
+
+/* PDR0 */
+#define PDR0_MCU_PODF_SHIFT (0)
+#define PDR0_MCU_PODF_MASK (0x7)
+#define PDR0_MAX_PODF_SHIFT (3)
+#define PDR0_MAX_PODF_MASK (0x7)
+#define PDR0_IPG_PODF_SHIFT (6)
+#define PDR0_IPG_PODF_MASK (0x3)
+#define PDR0_NFC_PODF_SHIFT (8)
+#define PDR0_NFC_PODF_MASK (0x7)
+#define PDR0_HSP_PODF_SHIFT (11)
+#define PDR0_HSP_PODF_MASK (0x7)
+#define PDR0_PER_PODF_SHIFT (16)
+#define PDR0_PER_PODF_MASK (0x1f)
+#define PDR0_CSI_PODF_SHIFT (23)
+#define PDR0_CSI_PODF_MASK (0x1ff)
+
+#define EXTRACT(value, name) (((value) >> PDR0_##name##_PODF_SHIFT) \
+ & PDR0_##name##_PODF_MASK)
+#define INSERT(value, name) (((value) & PDR0_##name##_PODF_MASK) << \
+ PDR0_##name##_PODF_SHIFT)
+/* PLL control registers */
+#define PD(v) (((v) >> 26) & 0xf)
+#define MFD(v) (((v) >> 16) & 0x3ff)
+#define MFI(v) (((v) >> 10) & 0xf)
+#define MFN(v) ((v) & 0x3ff)
+
+#define PLL_PD(x) (((x) & 0xf) << 26)
+#define PLL_MFD(x) (((x) & 0x3ff) << 16)
+#define PLL_MFI(x) (((x) & 0xf) << 10)
+#define PLL_MFN(x) (((x) & 0x3ff) << 0)
+
+uint32_t imx_clock_frequency(DeviceState *dev, IMXClk clock)
+{
+ IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev);
+
+ switch (clock) {
+ case NOCLK:
+ return 0;
+ case MCU:
+ return s->mcu_clk_freq;
+ case HSP:
+ return s->hsp_clk_freq;
+ case IPG:
+ return s->ipg_clk_freq;
+ case CLK_32k:
+ return CKIL_FREQ;
+ }
+ return 0;
+}
+
+/*
+ * Calculate PLL output frequency
+ */
+static uint32_t calc_pll(uint32_t pllreg, uint32_t base_freq)
+{
+ int32_t mfn = MFN(pllreg); /* Numerator */
+ uint32_t mfi = MFI(pllreg); /* Integer part */
+ uint32_t mfd = 1 + MFD(pllreg); /* Denominator */
+ uint32_t pd = 1 + PD(pllreg); /* Pre-divider */
+
+ if (mfi < 5) {
+ mfi = 5;
+ }
+ /* mfn is 10-bit signed twos-complement */
+ mfn <<= 32 - 10;
+ mfn >>= 32 - 10;
+
+ return ((2 * (base_freq >> 10) * (mfi * mfd + mfn)) /
+ (mfd * pd)) << 10;
+}
+
+static void update_clocks(IMXCCMState *s)
+{
+ /*
+ * If we ever emulate more clocks, this should switch to a data-driven
+ * approach
+ */
+
+ if ((s->ccmr & CCMR_PRCS) == 1) {
+ s->pll_refclk_freq = CKIL_FREQ * 1024;
+ } else {
+ s->pll_refclk_freq = CKIH_FREQ;
+ }
+
+ /* ipg_clk_arm aka MCU clock */
+ if ((s->ccmr & CCMR_MDS) || !(s->ccmr & CCMR_MPE)) {
+ s->mcu_clk_freq = s->pll_refclk_freq;
+ } else {
+ s->mcu_clk_freq = calc_pll(s->mpctl, s->pll_refclk_freq);
+ }
+
+ /* High-speed clock */
+ s->hsp_clk_freq = s->mcu_clk_freq / (1 + EXTRACT(s->pdr0, HSP));
+ s->ipg_clk_freq = s->hsp_clk_freq / (1 + EXTRACT(s->pdr0, IPG));
+
+ DPRINTF("Clocks: mcu %uMHz, HSP %uMHz, IPG %uHz\n",
+ s->mcu_clk_freq / 1000000,
+ s->hsp_clk_freq / 1000000,
+ s->ipg_clk_freq);
+}
+
+static void imx_ccm_reset(DeviceState *dev)
+{
+ IMXCCMState *s = container_of(dev, IMXCCMState, busdev.qdev);
+
+ s->ccmr = 0x074b0b7b;
+ s->pdr0 = 0xff870b48;
+ s->pdr1 = 0x49fcfe7f;
+ s->mpctl = PLL_PD(1) | PLL_MFD(0) | PLL_MFI(6) | PLL_MFN(0);
+ s->cgr[0] = s->cgr[1] = s->cgr[2] = 0xffffffff;
+ s->spctl = PLL_PD(1) | PLL_MFD(4) | PLL_MFI(0xc) | PLL_MFN(1);
+ s->pmcr0 = 0x80209828;
+
+ update_clocks(s);
+}
+
+static uint64_t imx_ccm_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ DPRINTF("read(offset=%x)", offset >> 2);
+ switch (offset >> 2) {
+ case 0: /* CCMR */
+ DPRINTF(" ccmr = 0x%x\n", s->ccmr);
+ return s->ccmr;
+ case 1:
+ DPRINTF(" pdr0 = 0x%x\n", s->pdr0);
+ return s->pdr0;
+ case 2:
+ DPRINTF(" pdr1 = 0x%x\n", s->pdr1);
+ return s->pdr1;
+ case 4:
+ DPRINTF(" mpctl = 0x%x\n", s->mpctl);
+ return s->mpctl;
+ case 6:
+ DPRINTF(" spctl = 0x%x\n", s->spctl);
+ return s->spctl;
+ case 8:
+ DPRINTF(" cgr0 = 0x%x\n", s->cgr[0]);
+ return s->cgr[0];
+ case 9:
+ DPRINTF(" cgr1 = 0x%x\n", s->cgr[1]);
+ return s->cgr[1];
+ case 10:
+ DPRINTF(" cgr2 = 0x%x\n", s->cgr[2]);
+ return s->cgr[2];
+ case 18: /* LTR1 */
+ return 0x00004040;
+ case 23:
+ DPRINTF(" pcmr0 = 0x%x\n", s->pmcr0);
+ return s->pmcr0;
+ }
+ DPRINTF(" return 0\n");
+ return 0;
+}
+
+static void imx_ccm_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ DPRINTF("write(offset=%x, value = %x)\n",
+ offset >> 2, (unsigned int)value);
+ switch (offset >> 2) {
+ case 0:
+ s->ccmr = CCMR_FPMF | (value & 0x3b6fdfff);
+ break;
+ case 1:
+ s->pdr0 = value & 0xff9f3fff;
+ break;
+ case 2:
+ s->pdr1 = value;
+ break;
+ case 4:
+ s->mpctl = value & 0xbfff3fff;
+ break;
+ case 6:
+ s->spctl = value & 0xbfff3fff;
+ break;
+ case 8:
+ s->cgr[0] = value;
+ return;
+ case 9:
+ s->cgr[1] = value;
+ return;
+ case 10:
+ s->cgr[2] = value;
+ return;
+
+ default:
+ return;
+ }
+ update_clocks(s);
+}
+
+static const struct MemoryRegionOps imx_ccm_ops = {
+ .read = imx_ccm_read,
+ .write = imx_ccm_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int imx_ccm_init(SysBusDevice *dev)
+{
+ IMXCCMState *s = FROM_SYSBUS(typeof(*s), dev);
+
+ memory_region_init_io(&s->iomem, &imx_ccm_ops, s, "imx_ccm", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ return 0;
+}
+
+static int imx_ccm_post_load(void *opaque, int version_id)
+{
+ IMXCCMState *s = (IMXCCMState *)opaque;
+
+ update_clocks(s);
+ return 0;
+}
+
+static void imx_ccm_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *sbc = SYS_BUS_DEVICE_CLASS(klass);
+
+ sbc->init = imx_ccm_init;
+ dc->reset = imx_ccm_reset;
+ dc->vmsd = &vmstate_imx_ccm;
+ dc->desc = "i.MX Clock Control Module";
+}
+
+static TypeInfo imx_ccm_info = {
+ .name = "imx_ccm",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXCCMState),
+ .class_init = imx_ccm_class_init,
+};
+
+static void imx_ccm_register_types(void)
+{
+ type_register_static(&imx_ccm_info);
+}
+
+type_init(imx_ccm_register_types)
diff --git a/hw/imx_serial.c b/hw/imx_serial.c
new file mode 100644
index 0000000000..d4eae430f5
--- /dev/null
+++ b/hw/imx_serial.c
@@ -0,0 +1,467 @@
+/*
+ * IMX31 UARTS
+ *
+ * Copyright (c) 2008 OKL
+ * Originally Written by Hans Jiang
+ * Copyright (c) 2011 NICTA Pty Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ * This is a `bare-bones' implementation of the IMX series serial ports.
+ * TODO:
+ * -- implement FIFOs. The real hardware has 32 word transmit
+ * and receive FIFOs; we currently use a 1-char buffer
+ * -- implement DMA
+ * -- implement BAUD-rate and modem lines, for when the backend
+ * is a real serial device.
+ */
+
+#include "hw.h"
+#include "sysbus.h"
+#include "sysemu.h"
+#include "qemu-char.h"
+#include "imx.h"
+
+//#define DEBUG_SERIAL 1
+#ifdef DEBUG_SERIAL
+#define DPRINTF(fmt, args...) \
+do { printf("imx_serial: " fmt , ##args); } while (0)
+#else
+#define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+/*
+ * Define to 1 for messages about attempts to
+ * access unimplemented registers or similar.
+ */
+//#define DEBUG_IMPLEMENTATION 1
+#ifdef DEBUG_IMPLEMENTATION
+# define IPRINTF(fmt, args...) \
+ do { fprintf(stderr, "imx_serial: " fmt, ##args); } while (0)
+#else
+# define IPRINTF(fmt, args...) do {} while (0)
+#endif
+
+typedef struct {
+ SysBusDevice busdev;
+ MemoryRegion iomem;
+ int32_t readbuff;
+
+ uint32_t usr1;
+ uint32_t usr2;
+ uint32_t ucr1;
+ uint32_t ucr2;
+ uint32_t uts1;
+
+ /*
+ * The registers below are implemented just so that the
+ * guest OS sees what it has written
+ */
+ uint32_t onems;
+ uint32_t ufcr;
+ uint32_t ubmr;
+ uint32_t ubrc;
+ uint32_t ucr3;
+
+ qemu_irq irq;
+ CharDriverState *chr;
+} IMXSerialState;
+
+static const VMStateDescription vmstate_imx_serial = {
+ .name = "imx-serial",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(readbuff, IMXSerialState),
+ VMSTATE_UINT32(usr1, IMXSerialState),
+ VMSTATE_UINT32(usr2, IMXSerialState),
+ VMSTATE_UINT32(ucr1, IMXSerialState),
+ VMSTATE_UINT32(uts1, IMXSerialState),
+ VMSTATE_UINT32(onems, IMXSerialState),
+ VMSTATE_UINT32(ufcr, IMXSerialState),
+ VMSTATE_UINT32(ubmr, IMXSerialState),
+ VMSTATE_UINT32(ubrc, IMXSerialState),
+ VMSTATE_UINT32(ucr3, IMXSerialState),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+
+#define URXD_CHARRDY (1<<15) /* character read is valid */
+#define URXD_ERR (1<<14) /* Character has error */
+#define URXD_BRK (1<<11) /* Break received */
+
+#define USR1_PARTYER (1<<15) /* Parity Error */
+#define USR1_RTSS (1<<14) /* RTS pin status */
+#define USR1_TRDY (1<<13) /* Tx ready */
+#define USR1_RTSD (1<<12) /* RTS delta: pin changed state */
+#define USR1_ESCF (1<<11) /* Escape sequence interrupt */
+#define USR1_FRAMERR (1<<10) /* Framing error */
+#define USR1_RRDY (1<<9) /* receiver ready */
+#define USR1_AGTIM (1<<8) /* Aging timer interrupt */
+#define USR1_DTRD (1<<7) /* DTR changed */
+#define USR1_RXDS (1<<6) /* Receiver is idle */
+#define USR1_AIRINT (1<<5) /* Async IR interrupt */
+#define USR1_AWAKE (1<<4) /* Falling edge detected on RXd pin */
+
+#define USR2_ADET (1<<15) /* Autobaud complete */
+#define USR2_TXFE (1<<14) /* Transmit FIFO empty */
+#define USR2_DTRF (1<<13) /* DTR/DSR transition */
+#define USR2_IDLE (1<<12) /* UART has been idle for too long */
+#define USR2_ACST (1<<11) /* Autobaud counter stopped */
+#define USR2_RIDELT (1<<10) /* Ring Indicator delta */
+#define USR2_RIIN (1<<9) /* Ring Indicator Input */
+#define USR2_IRINT (1<<8) /* Serial Infrared Interrupt */
+#define USR2_WAKE (1<<7) /* Start bit detected */
+#define USR2_DCDDELT (1<<6) /* Data Carrier Detect delta */
+#define USR2_DCDIN (1<<5) /* Data Carrier Detect Input */
+#define USR2_RTSF (1<<4) /* RTS transition */
+#define USR2_TXDC (1<<3) /* Transmission complete */
+#define USR2_BRCD (1<<2) /* Break condition detected */
+#define USR2_ORE (1<<1) /* Overrun error */
+#define USR2_RDR (1<<0) /* Receive data ready */
+
+#define UCR1_TRDYEN (1<<13) /* Tx Ready Interrupt Enable */
+#define UCR1_RRDYEN (1<<9) /* Rx Ready Interrupt Enable */
+#define UCR1_TXMPTYEN (1<<6) /* Tx Empty Interrupt Enable */
+#define UCR1_UARTEN (1<<0) /* UART Enable */
+
+#define UCR2_TXEN (1<<2) /* Transmitter enable */
+#define UCR2_RXEN (1<<1) /* Receiver enable */
+#define UCR2_SRST (1<<0) /* Reset complete */
+
+#define UTS1_TXEMPTY (1<<6)
+#define UTS1_RXEMPTY (1<<5)
+#define UTS1_TXFULL (1<<4)
+#define UTS1_RXFULL (1<<3)
+
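+/*
+ * USR1_TRDY and USR1_RRDY share their bit positions with the UCR1_TRDYEN
+ * and UCR1_RRDYEN enable bits, so masking usr1 with ucr1 directly yields
+ * the enabled, pending conditions.
+ */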
+static void imx_update(IMXSerialState *s)
+{
+ uint32_t flags;
+
+ flags = (s->usr1 & s->ucr1) & (USR1_TRDY|USR1_RRDY);
+ if (!(s->ucr1 & UCR1_TXMPTYEN)) {
+ flags &= ~USR1_TRDY;
+ }
+
+ qemu_set_irq(s->irq, !!flags);
+}
+
+static void imx_serial_reset(IMXSerialState *s)
+{
+
+ s->usr1 = USR1_TRDY | USR1_RXDS;
+ /*
+ * Fake attachment of a terminal: assert RTS.
+ */
+ s->usr1 |= USR1_RTSS;
+ s->usr2 = USR2_TXFE | USR2_TXDC | USR2_DCDIN;
+ s->uts1 = UTS1_RXEMPTY | UTS1_TXEMPTY;
+ s->ucr1 = 0;
+ s->ucr2 = UCR2_SRST;
+ s->ucr3 = 0x700;
+ s->ubmr = 0;
+ s->ubrc = 4;
+ s->readbuff = URXD_ERR;
+}
+
+static void imx_serial_reset_at_boot(DeviceState *dev)
+{
+ IMXSerialState *s = container_of(dev, IMXSerialState, busdev.qdev);
+
+ imx_serial_reset(s);
+
+ /*
+ * Enable the UART on boot, so messages from the Linux decompressor
+ * are visible. On real hardware this is done by the boot ROM
+ * before anything else is loaded.
+ */
+ s->ucr1 = UCR1_UARTEN;
+ s->ucr2 = UCR2_TXEN;
+
+}
+
+static uint64_t imx_serial_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ IMXSerialState *s = (IMXSerialState *)opaque;
+ uint32_t c;
+
+ DPRINTF("read(offset=%x)\n", offset >> 2);
+ switch (offset >> 2) {
+ case 0x0: /* URXD */
+ c = s->readbuff;
+ if (!(s->uts1 & UTS1_RXEMPTY)) {
+ /* Character is valid */
+ c |= URXD_CHARRDY;
+ s->usr1 &= ~USR1_RRDY;
+ s->usr2 &= ~USR2_RDR;
+ s->uts1 |= UTS1_RXEMPTY;
+ imx_update(s);
+ qemu_chr_accept_input(s->chr);
+ }
+ return c;
+
+ case 0x20: /* UCR1 */
+ return s->ucr1;
+
+ case 0x21: /* UCR2 */
+ return s->ucr2;
+
+ case 0x25: /* USR1 */
+ return s->usr1;
+
+ case 0x26: /* USR2 */
+ return s->usr2;
+
+ case 0x2A: /* BRM Modulator */
+ return s->ubmr;
+
+ case 0x2B: /* Baud Rate Count */
+ return s->ubrc;
+
+ case 0x2d: /* Test register */
+ return s->uts1;
+
+ case 0x24: /* UFCR */
+ return s->ufcr;
+
+ case 0x2c:
+ return s->onems;
+
+ case 0x22: /* UCR3 */
+ return s->ucr3;
+
+ case 0x23: /* UCR4 */
+ case 0x29: /* BRM Incremental */
+ return 0x0; /* TODO */
+
+ default:
+ IPRINTF("imx_serial_read: bad offset: 0x%x\n", (int)offset);
+ return 0;
+ }
+}
+
+static void imx_serial_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ IMXSerialState *s = (IMXSerialState *)opaque;
+ unsigned char ch;
+
+ DPRINTF("write(offset=%x, value = %x) to %s\n",
+ offset >> 2,
+ (unsigned int)value, s->chr ? s->chr->label : "NODEV");
+
+ switch (offset >> 2) {
+ case 0x10: /* UTXD */
+ ch = value;
+ if (s->ucr2 & UCR2_TXEN) {
+ if (s->chr) {
+ qemu_chr_fe_write(s->chr, &ch, 1);
+ }
+ s->usr1 &= ~USR1_TRDY;
+ imx_update(s);
+ s->usr1 |= USR1_TRDY;
+ imx_update(s);
+ }
+ break;
+
+ case 0x20: /* UCR1 */
+ s->ucr1 = value & 0xffff;
+ DPRINTF("write(ucr1=%x)\n", (unsigned int)value);
+ imx_update(s);
+ break;
+
+ case 0x21: /* UCR2 */
+ /*
+ * Only a few bits in control register 2 are implemented as yet.
+ * If it's intended to use a real serial device as a back-end, this
+ * register will have to be implemented more fully.
+ */
+ if (!(value & UCR2_SRST)) {
+ imx_serial_reset(s);
+ imx_update(s);
+ value |= UCR2_SRST;
+ }
+ if (value & UCR2_RXEN) {
+ if (!(s->ucr2 & UCR2_RXEN)) {
+ qemu_chr_accept_input(s->chr);
+ }
+ }
+ s->ucr2 = value & 0xffff;
+ break;
+
+ case 0x25: /* USR1 */
+ value &= USR1_AWAKE | USR1_AIRINT | USR1_DTRD | USR1_AGTIM |
+ USR1_FRAMERR | USR1_ESCF | USR1_RTSD | USR1_PARTYER;
+ s->usr1 &= ~value;
+ break;
+
+ case 0x26: /* USR2 */
+ /*
+ * Writing 1 to some bits clears them; all other
+ * values are ignored
+ */
+ value &= USR2_ADET | USR2_DTRF | USR2_IDLE | USR2_ACST |
+ USR2_RIDELT | USR2_IRINT | USR2_WAKE |
+ USR2_DCDDELT | USR2_RTSF | USR2_BRCD | USR2_ORE;
+ s->usr2 &= ~value;
+ break;
+
+ /*
+ * Linux expects to see what it writes to these registers.
+ * We don't currently alter the baud rate.
+ */
+ case 0x29: /* UBIR */
+ s->ubrc = value & 0xffff;
+ break;
+
+ case 0x2a: /* UBMR */
+ s->ubmr = value & 0xffff;
+ break;
+
+ case 0x2c: /* One ms reg */
+ s->onems = value & 0xffff;
+ break;
+
+ case 0x24: /* FIFO control register */
+ s->ufcr = value & 0xffff;
+ break;
+
+ case 0x22: /* UCR3 */
+ s->ucr3 = value & 0xffff;
+ break;
+
+ case 0x2d: /* UTS1 */
+ case 0x23: /* UCR4 */
+ IPRINTF("Unimplemented Register %x written to\n", offset >> 2);
+ /* TODO */
+ break;
+
+ default:
+ IPRINTF("imx_serial_write: Bad offset 0x%x\n", (int)offset);
+ }
+}
+
+static int imx_can_receive(void *opaque)
+{
+ IMXSerialState *s = (IMXSerialState *)opaque;
+ return !(s->usr1 & USR1_RRDY);
+}
+
+static void imx_put_data(void *opaque, uint32_t value)
+{
+ IMXSerialState *s = (IMXSerialState *)opaque;
+ DPRINTF("received char\n");
+ s->usr1 |= USR1_RRDY;
+ s->usr2 |= USR2_RDR;
+ s->uts1 &= ~UTS1_RXEMPTY;
+ s->readbuff = value;
+ imx_update(s);
+}
+
+static void imx_receive(void *opaque, const uint8_t *buf, int size)
+{
+ imx_put_data(opaque, *buf);
+}
+
+static void imx_event(void *opaque, int event)
+{
+ if (event == CHR_EVENT_BREAK) {
+ imx_put_data(opaque, URXD_BRK);
+ }
+}
+
+
+static const struct MemoryRegionOps imx_serial_ops = {
+ .read = imx_serial_read,
+ .write = imx_serial_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static int imx_serial_init(SysBusDevice *dev)
+{
+ IMXSerialState *s = FROM_SYSBUS(IMXSerialState, dev);
+
+
+ memory_region_init_io(&s->iomem, &imx_serial_ops, s, "imx-serial", 0x1000);
+ sysbus_init_mmio(dev, &s->iomem);
+ sysbus_init_irq(dev, &s->irq);
+
+ if (s->chr) {
+ qemu_chr_add_handlers(s->chr, imx_can_receive, imx_receive,
+ imx_event, s);
+ } else {
+ DPRINTF("No char dev for uart at 0x%lx\n",
+ (unsigned long)s->iomem.ram_addr);
+ }
+
+ return 0;
+}
+
+void imx_serial_create(int uart, const target_phys_addr_t addr, qemu_irq irq)
+{
+ DeviceState *dev;
+ SysBusDevice *bus;
+ CharDriverState *chr;
+ const char chr_name[] = "serial";
+ char label[ARRAY_SIZE(chr_name) + 1];
+
+ dev = qdev_create(NULL, "imx-serial");
+
+ if (uart >= MAX_SERIAL_PORTS) {
+ hw_error("Cannot assign uart %d: QEMU supports only %d ports\n",
+ uart, MAX_SERIAL_PORTS);
+ }
+ chr = serial_hds[uart];
+ if (!chr) {
+ snprintf(label, ARRAY_SIZE(label), "%s%d", chr_name, uart);
+ chr = qemu_chr_new(label, "null", NULL);
+ if (!chr) {
+ hw_error("Can't assign serial port to imx-uart%d.\n", uart);
+ }
+ }
+
+ qdev_prop_set_chr(dev, "chardev", chr);
+ bus = sysbus_from_qdev(dev);
+ qdev_init_nofail(dev);
+ if (addr != (target_phys_addr_t)-1) {
+ sysbus_mmio_map(bus, 0, addr);
+ }
+ sysbus_connect_irq(bus, 0, irq);
+
+}
+
+
+static Property imx32_serial_properties[] = {
+ DEFINE_PROP_CHR("chardev", IMXSerialState, chr),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void imx_serial_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+
+ k->init = imx_serial_init;
+ dc->vmsd = &vmstate_imx_serial;
+ dc->reset = imx_serial_reset_at_boot;
+ dc->desc = "i.MX series UART";
+ dc->props = imx32_serial_properties;
+}
+
+static TypeInfo imx_serial_info = {
+ .name = "imx-serial",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXSerialState),
+ .class_init = imx_serial_class_init,
+};
+
+static void imx_serial_register_types(void)
+{
+ type_register_static(&imx_serial_info);
+}
+
+type_init(imx_serial_register_types)
diff --git a/hw/imx_timer.c b/hw/imx_timer.c
new file mode 100644
index 0000000000..16215ccf04
--- /dev/null
+++ b/hw/imx_timer.c
@@ -0,0 +1,689 @@
+/*
+ * IMX31 Timer
+ *
+ * Copyright (c) 2008 OK Labs
+ * Copyright (c) 2011 NICTA Pty Ltd
+ * Originally Written by Hans Jiang
+ * Updated by Peter Chubb
+ *
+ * This code is licenced under GPL version 2 or later. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hw.h"
+#include "qemu-timer.h"
+#include "ptimer.h"
+#include "sysbus.h"
+#include "imx.h"
+
+//#define DEBUG_TIMER 1
+#ifdef DEBUG_TIMER
+# define DPRINTF(fmt, args...) \
+ do { printf("imx_timer: " fmt , ##args); } while (0)
+#else
+# define DPRINTF(fmt, args...) do {} while (0)
+#endif
+
+/*
+ * Define to 1 for messages about attempts to
+ * access unimplemented registers or similar.
+ */
+#define DEBUG_IMPLEMENTATION 1
+#if DEBUG_IMPLEMENTATION
+# define IPRINTF(fmt, args...) \
+ do { fprintf(stderr, "imx_timer: " fmt, ##args); } while (0)
+#else
+# define IPRINTF(fmt, args...) do {} while (0)
+#endif
+
+/*
+ * GPT : General purpose timer
+ *
+ * This timer counts up continuously while it is enabled, resetting itself
+ * to 0 when it reaches TIMER_MAX (in freerun mode) or when it
+ * reaches the value of ocr1 (in periodic mode). We simulate this using a
+ * QEMU ptimer counting down from ocr1 and reloading from ocr1 in
+ * periodic mode, or counting down from ocr1 to zero and then from
+ * TIMER_MAX - ocr1 to zero in freerun mode; waiting_rov is set while
+ * counting towards TIMER_MAX.
+ *
+ * In the real hardware, there are three comparison registers that can
+ * trigger interrupts, and compare channel 1 can be used to
+ * force-reset the timer. However, this is a `bare-bones'
+ * implementation: only what Linux 3.x uses has been implemented
+ * (free-running timer from 0 to OCR1 or TIMER_MAX).
+ */
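+/*
+ * In freerun mode with ocr1 < TIMER_MAX, the first ptimer expiry
+ * (simulated cnt == ocr1) raises OF1; the ptimer is then reloaded with
+ * TIMER_MAX - ocr1 and its next expiry (simulated cnt == TIMER_MAX)
+ * raises ROV, after which the cycle restarts from zero.
+ */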
+
+
+#define TIMER_MAX 0XFFFFFFFFUL
+
+/* Control register. Not all of these bits have any effect (yet) */
+#define GPT_CR_EN (1 << 0) /* GPT Enable */
+#define GPT_CR_ENMOD (1 << 1) /* GPT Enable Mode */
+#define GPT_CR_DBGEN (1 << 2) /* GPT Debug mode enable */
+#define GPT_CR_WAITEN (1 << 3) /* GPT Wait Mode Enable */
+#define GPT_CR_DOZEN (1 << 4) /* GPT Doze mode enable */
+#define GPT_CR_STOPEN (1 << 5) /* GPT Stop Mode Enable */
+#define GPT_CR_CLKSRC_SHIFT (6)
+#define GPT_CR_CLKSRC_MASK (0x7)
+
+#define GPT_CR_FRR (1 << 9) /* Freerun or Restart */
+#define GPT_CR_SWR (1 << 15) /* Software Reset */
+#define GPT_CR_IM1 (3 << 16) /* Input capture channel 1 mode (2 bits) */
+#define GPT_CR_IM2 (3 << 18) /* Input capture channel 2 mode (2 bits) */
+#define GPT_CR_OM1 (7 << 20) /* Output Compare Channel 1 Mode (3 bits) */
+#define GPT_CR_OM2 (7 << 23) /* Output Compare Channel 2 Mode (3 bits) */
+#define GPT_CR_OM3 (7 << 26) /* Output Compare Channel 3 Mode (3 bits) */
+#define GPT_CR_FO1 (1 << 29) /* Force Output Compare Channel 1 */
+#define GPT_CR_FO2 (1 << 30) /* Force Output Compare Channel 2 */
+#define GPT_CR_FO3 (1 << 31) /* Force Output Compare Channel 3 */
+
+#define GPT_SR_OF1 (1 << 0)
+#define GPT_SR_ROV (1 << 5)
+
+#define GPT_IR_OF1IE (1 << 0)
+#define GPT_IR_ROVIE (1 << 5)
+
+typedef struct {
+ SysBusDevice busdev;
+ ptimer_state *timer;
+ MemoryRegion iomem;
+ DeviceState *ccm;
+
+ uint32_t cr;
+ uint32_t pr;
+ uint32_t sr;
+ uint32_t ir;
+ uint32_t ocr1;
+ uint32_t cnt;
+
+ uint32_t waiting_rov;
+ qemu_irq irq;
+} IMXTimerGState;
+
+static const VMStateDescription vmstate_imx_timerg = {
+ .name = "imx-timerg",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cr, IMXTimerGState),
+ VMSTATE_UINT32(pr, IMXTimerGState),
+ VMSTATE_UINT32(sr, IMXTimerGState),
+ VMSTATE_UINT32(ir, IMXTimerGState),
+ VMSTATE_UINT32(ocr1, IMXTimerGState),
+ VMSTATE_UINT32(cnt, IMXTimerGState),
+ VMSTATE_UINT32(waiting_rov, IMXTimerGState),
+ VMSTATE_PTIMER(timer, IMXTimerGState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const IMXClk imx_timerg_clocks[] = {
+ NOCLK, /* 000 No clock source */
+ IPG, /* 001 ipg_clk, 532 MHz */
+ IPG, /* 010 ipg_clk_highfreq */
+ NOCLK, /* 011 not defined */
+ CLK_32k, /* 100 ipg_clk_32k */
+ NOCLK, /* 101 not defined */
+ NOCLK, /* 110 not defined */
+ NOCLK, /* 111 not defined */
+};
+
+
+static void imx_timerg_set_freq(IMXTimerGState *s)
+{
+ int clksrc;
+ uint32_t freq;
+
+ clksrc = (s->cr >> GPT_CR_CLKSRC_SHIFT) & GPT_CR_CLKSRC_MASK;
+ freq = imx_clock_frequency(s->ccm, imx_timerg_clocks[clksrc]) / (1 + s->pr);
+
+ DPRINTF("Setting gtimer clksrc %d to frequency %d\n", clksrc, freq);
+ if (freq) {
+ ptimer_set_freq(s->timer, freq);
+ }
+}
+
+static void imx_timerg_update(IMXTimerGState *s)
+{
+ uint32_t flags = s->sr & s->ir & (GPT_SR_OF1 | GPT_SR_ROV);
+
+ DPRINTF("g-timer SR: %s %s IR=%s %s, %s\n",
+ s->sr & GPT_SR_OF1 ? "OF1" : "",
+ s->sr & GPT_SR_ROV ? "ROV" : "",
+ s->ir & GPT_SR_OF1 ? "OF1" : "",
+ s->ir & GPT_SR_ROV ? "ROV" : "",
+ s->cr & GPT_CR_EN ? "CR_EN" : "Not Enabled");
+
+
+ qemu_set_irq(s->irq, (s->cr & GPT_CR_EN) && flags);
+}
+
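+/*
+ * Derive the guest-visible up-counter from the down-counting ptimer:
+ * the ptimer runs from the current target (ocr1, or TIMER_MAX when
+ * waiting_rov is set) down to zero, so cnt = target - ptimer count.
+ */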
+static uint32_t imx_timerg_update_counts(IMXTimerGState *s)
+{
+ uint64_t target = s->waiting_rov ? TIMER_MAX : s->ocr1;
+ uint64_t cnt = ptimer_get_count(s->timer);
+ s->cnt = target - cnt;
+ return s->cnt;
+}
+
+static void imx_timerg_reload(IMXTimerGState *s, uint32_t timeout)
+{
+ uint64_t diff_cnt;
+
+ if (!(s->cr & GPT_CR_FRR)) {
+ IPRINTF("IMX_timerg_reload --- called in reset-mode\n");
+ return;
+ }
+
+ /*
+ * For small timeouts, qemu sometimes runs too slow.
+ * Better deliver a late interrupt than none.
+ *
+ * In Reset mode (FRR bit clear)
+ * the ptimer reloads itself from OCR1;
+ * in free-running mode we need to fake
+ * running from 0 to ocr1 to TIMER_MAX
+ */
+ if (timeout > s->cnt) {
+ diff_cnt = timeout - s->cnt;
+ } else {
+ diff_cnt = 0;
+ }
+ ptimer_set_count(s->timer, diff_cnt);
+}
+
+static uint64_t imx_timerg_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ IMXTimerGState *s = (IMXTimerGState *)opaque;
+
+ DPRINTF("g-read(offset=%x)", offset >> 2);
+ switch (offset >> 2) {
+ case 0: /* Control Register */
+ DPRINTF(" cr = %x\n", s->cr);
+ return s->cr;
+
+ case 1: /* prescaler */
+ DPRINTF(" pr = %x\n", s->pr);
+ return s->pr;
+
+ case 2: /* Status Register */
+ DPRINTF(" sr = %x\n", s->sr);
+ return s->sr;
+
+ case 3: /* Interrupt Register */
+ DPRINTF(" ir = %x\n", s->ir);
+ return s->ir;
+
+ case 4: /* Output Compare Register 1 */
+ DPRINTF(" ocr1 = %x\n", s->ocr1);
+ return s->ocr1;
+
+
+ case 9: /* cnt */
+ imx_timerg_update_counts(s);
+ DPRINTF(" cnt = %x\n", s->cnt);
+ return s->cnt;
+ }
+
+ IPRINTF("imx_timerg_read: Bad offset %x\n",
+ (int)offset >> 2);
+ return 0;
+}
+
+static void imx_timerg_reset(DeviceState *dev)
+{
+ IMXTimerGState *s = container_of(dev, IMXTimerGState, busdev.qdev);
+
+ /*
+ * Soft reset doesn't touch some bits; hard reset clears them
+ */
+ s->cr &= ~(GPT_CR_EN|GPT_CR_DOZEN|GPT_CR_WAITEN|GPT_CR_DBGEN);
+ s->sr = 0;
+ s->pr = 0;
+ s->ir = 0;
+ s->cnt = 0;
+ s->ocr1 = TIMER_MAX;
+ ptimer_stop(s->timer);
+ ptimer_set_limit(s->timer, TIMER_MAX, 1);
+ imx_timerg_set_freq(s);
+}
+
+static void imx_timerg_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ IMXTimerGState *s = (IMXTimerGState *)opaque;
+ DPRINTF("g-write(offset=%x, value = 0x%x)\n", (unsigned int)offset >> 2,
+ (unsigned int)value);
+
+ switch (offset >> 2) {
+ case 0: {
+ uint32_t oldcr = s->cr;
+ /* CR */
+ if (value & GPT_CR_SWR) { /* force reset */
+ value &= ~GPT_CR_SWR;
+ imx_timerg_reset(&s->busdev.qdev);
+ imx_timerg_update(s);
+ }
+
+ s->cr = value & ~0x7c00;
+ imx_timerg_set_freq(s);
+ if ((oldcr ^ value) & GPT_CR_EN) {
+ if (value & GPT_CR_EN) {
+ if (value & GPT_CR_ENMOD) {
+ ptimer_set_count(s->timer, s->ocr1);
+ s->cnt = 0;
+ }
+ ptimer_run(s->timer,
+ (value & GPT_CR_FRR) && (s->ocr1 != TIMER_MAX));
+ } else {
+ ptimer_stop(s->timer);
+ }
+ }
+ return;
+ }
+
+ case 1: /* Prescaler */
+ s->pr = value & 0xfff;
+ imx_timerg_set_freq(s);
+ return;
+
+ case 2: /* SR */
+ /*
+ * No point in implementing the status register bits to do with
+ * external interrupt sources.
+ */
+ value &= GPT_SR_OF1 | GPT_SR_ROV;
+ s->sr &= ~value;
+ imx_timerg_update(s);
+ return;
+
+ case 3: /* IR -- interrupt register */
+ s->ir = value & 0x3f;
+ imx_timerg_update(s);
+ return;
+
+ case 4: /* OCR1 -- output compare register */
+ /* In non-freerun mode, reset count when this register is written */
+ if (!(s->cr & GPT_CR_FRR)) {
+ s->waiting_rov = 0;
+ ptimer_set_limit(s->timer, value, 1);
+ } else {
+ imx_timerg_update_counts(s);
+ if (value > s->cnt) {
+ s->waiting_rov = 0;
+ imx_timerg_reload(s, value);
+ } else {
+ s->waiting_rov = 1;
+ imx_timerg_reload(s, TIMER_MAX - s->cnt);
+ }
+ }
+ s->ocr1 = value;
+ return;
+
+ default:
+ IPRINTF("imx_timerg_write: Bad offset %x\n",
+ (int)offset >> 2);
+ }
+}
+
+static void imx_timerg_timeout(void *opaque)
+{
+ IMXTimerGState *s = (IMXTimerGState *)opaque;
+
+ DPRINTF("imx_timerg_timeout, waiting rov=%d\n", s->waiting_rov);
+ if (s->cr & GPT_CR_FRR) {
+ /*
+ * Free running timer from 0 -> TIMER_MAX
+ * Generates interrupt at TIMER_MAX and at cnt==ocr1
+ * If ocr1 == TIMER_MAX, then no need to reload timer.
+ */
+ if (s->ocr1 == TIMER_MAX) {
+ DPRINTF("s->ocr1 == TIMER_MAX, FRR\n");
+ s->sr |= GPT_SR_OF1 | GPT_SR_ROV;
+ imx_timerg_update(s);
+ return;
+ }
+
+ if (s->waiting_rov) {
+ /*
+ * We were waiting for cnt==TIMER_MAX
+ */
+ s->sr |= GPT_SR_ROV;
+ s->waiting_rov = 0;
+ s->cnt = 0;
+ imx_timerg_reload(s, s->ocr1);
+ } else {
+ /* Must have got a cnt==ocr1 timeout. */
+ s->sr |= GPT_SR_OF1;
+ s->cnt = s->ocr1;
+ s->waiting_rov = 1;
+ imx_timerg_reload(s, TIMER_MAX);
+ }
+ imx_timerg_update(s);
+ return;
+ }
+
+ s->sr |= GPT_SR_OF1;
+ imx_timerg_update(s);
+}
+
+static const MemoryRegionOps imx_timerg_ops = {
+ .read = imx_timerg_read,
+ .write = imx_timerg_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+
+static int imx_timerg_init(SysBusDevice *dev)
+{
+ IMXTimerGState *s = FROM_SYSBUS(IMXTimerGState, dev);
+ QEMUBH *bh;
+
+ sysbus_init_irq(dev, &s->irq);
+ memory_region_init_io(&s->iomem, &imx_timerg_ops,
+ s, "imxg-timer",
+ 0x00001000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ bh = qemu_bh_new(imx_timerg_timeout, s);
+ s->timer = ptimer_init(bh);
+
+ /* Hard reset resets extra bits in CR */
+ s->cr = 0;
+ return 0;
+}
+
+
+
+/*
+ * EPIT: Enhanced periodic interrupt timer
+ */
+
+#define CR_EN (1 << 0)
+#define CR_ENMOD (1 << 1)
+#define CR_OCIEN (1 << 2)
+#define CR_RLD (1 << 3)
+#define CR_PRESCALE_SHIFT (4)
+#define CR_PRESCALE_MASK (0xfff)
+#define CR_SWR (1 << 16)
+#define CR_IOVW (1 << 17)
+#define CR_DBGEN (1 << 18)
+#define CR_EPIT (1 << 19)
+#define CR_DOZEN (1 << 20)
+#define CR_STOPEN (1 << 21)
+#define CR_CLKSRC_SHIFT (24)
+#define CR_CLKSRC_MASK (0x3 << CR_CLKSRC_SHIFT)
+
+
+/*
+ * Exact clock frequencies vary from board to board.
+ * These are typical.
+ */
+static const IMXClk imx_timerp_clocks[] = {
+ 0, /* disabled */
+ IPG, /* ipg_clk, ~532MHz */
+ IPG, /* ipg_clk_highfreq */
+ CLK_32k, /* ipg_clk_32k -- ~32kHz */
+};
+
+typedef struct {
+ SysBusDevice busdev;
+ ptimer_state *timer;
+ MemoryRegion iomem;
+ DeviceState *ccm;
+
+ uint32_t cr;
+ uint32_t lr;
+ uint32_t cmp;
+
+ uint32_t freq;
+ int int_level;
+ qemu_irq irq;
+} IMXTimerPState;
+
+/*
+ * Update interrupt status
+ */
+static void imx_timerp_update(IMXTimerPState *s)
+{
+ if (s->int_level && (s->cr & CR_OCIEN)) {
+ qemu_irq_raise(s->irq);
+ } else {
+ qemu_irq_lower(s->irq);
+ }
+}
+
+static void imx_timerp_reset(DeviceState *dev)
+{
+ IMXTimerPState *s = container_of(dev, IMXTimerPState, busdev.qdev);
+
+ s->cr = 0;
+ s->lr = TIMER_MAX;
+ s->int_level = 0;
+ s->cmp = 0;
+ ptimer_stop(s->timer);
+ ptimer_set_count(s->timer, TIMER_MAX);
+}
+
+static uint64_t imx_timerp_read(void *opaque, target_phys_addr_t offset,
+ unsigned size)
+{
+ IMXTimerPState *s = (IMXTimerPState *)opaque;
+
+ DPRINTF("p-read(offset=%x)", offset >> 2);
+ switch (offset >> 2) {
+ case 0: /* Control Register */
+ DPRINTF("cr %x\n", s->cr);
+ return s->cr;
+
+ case 1: /* Status Register */
+ DPRINTF("int_level %x\n", s->int_level);
+ return s->int_level;
+
+ case 2: /* LR - ticks*/
+ DPRINTF("lr %x\n", s->lr);
+ return s->lr;
+
+ case 3: /* CMP */
+ DPRINTF("cmp %x\n", s->cmp);
+ return s->cmp;
+
+ case 4: /* CNT */
+ return ptimer_get_count(s->timer);
+ }
+ IPRINTF("imx_timerp_read: Bad offset %x\n",
+ (int)offset >> 2);
+ return 0;
+}
+
+static void set_timerp_freq(IMXTimerPState *s)
+{
+ int clksrc;
+ unsigned prescaler;
+ uint32_t freq;
+
+ clksrc = (s->cr & CR_CLKSRC_MASK) >> CR_CLKSRC_SHIFT;
+ prescaler = 1 + ((s->cr >> CR_PRESCALE_SHIFT) & CR_PRESCALE_MASK);
+ freq = imx_clock_frequency(s->ccm, imx_timerp_clocks[clksrc]) / prescaler;
+
+ s->freq = freq;
+ DPRINTF("Setting ptimer frequency to %u\n", freq);
+
+ if (freq) {
+ ptimer_set_freq(s->timer, freq);
+ }
+}
+
+static void imx_timerp_write(void *opaque, target_phys_addr_t offset,
+ uint64_t value, unsigned size)
+{
+ IMXTimerPState *s = (IMXTimerPState *)opaque;
+ DPRINTF("p-write(offset=%x, value = %x)\n", (unsigned int)offset >> 2,
+ (unsigned int)value);
+
+ switch (offset >> 2) {
+ case 0: /* CR */
+ if (value & CR_SWR) {
+ imx_timerp_reset(&s->busdev.qdev);
+ value &= ~CR_SWR;
+ }
+ s->cr = value & 0x03ffffff;
+ set_timerp_freq(s);
+
+ if (s->freq && (s->cr & CR_EN)) {
+ if (!(s->cr & CR_ENMOD)) {
+ ptimer_set_count(s->timer, s->lr);
+ }
+ ptimer_run(s->timer, 0);
+ } else {
+ ptimer_stop(s->timer);
+ }
+ break;
+
+ case 1: /* SR - ACK*/
+ s->int_level = 0;
+ imx_timerp_update(s);
+ break;
+
+ case 2: /* LR - set ticks */
+ s->lr = value;
+ ptimer_set_limit(s->timer, value, !!(s->cr & CR_IOVW));
+ break;
+
+ case 3: /* CMP */
+ s->cmp = value;
+ if (value) {
+ IPRINTF(
+ "Values for EPIT comparison other than zero not supported\n"
+ );
+ }
+ break;
+
+ default:
+ IPRINTF("imx_timerp_write: Bad offset %x\n",
+ (int)offset >> 2);
+ }
+}
+
+static void imx_timerp_tick(void *opaque)
+{
+ IMXTimerPState *s = (IMXTimerPState *)opaque;
+
+ DPRINTF("imxp tick\n");
+ if (!(s->cr & CR_RLD)) {
+ ptimer_set_count(s->timer, TIMER_MAX);
+ }
+ s->int_level = 1;
+ imx_timerp_update(s);
+}
+
+void imx_timerp_create(const target_phys_addr_t addr,
+ qemu_irq irq,
+ DeviceState *ccm)
+{
+ IMXTimerPState *pp;
+ DeviceState *dev;
+
+ dev = sysbus_create_simple("imx_timerp", addr, irq);
+ pp = container_of(dev, IMXTimerPState, busdev.qdev);
+ pp->ccm = ccm;
+}
+
+static const MemoryRegionOps imx_timerp_ops = {
+ .read = imx_timerp_read,
+ .write = imx_timerp_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+static const VMStateDescription vmstate_imx_timerp = {
+ .name = "imx-timerp",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .minimum_version_id_old = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(cr, IMXTimerPState),
+ VMSTATE_UINT32(lr, IMXTimerPState),
+ VMSTATE_UINT32(cmp, IMXTimerPState),
+ VMSTATE_UINT32(freq, IMXTimerPState),
+ VMSTATE_INT32(int_level, IMXTimerPState),
+ VMSTATE_PTIMER(timer, IMXTimerPState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static int imx_timerp_init(SysBusDevice *dev)
+{
+ IMXTimerPState *s = FROM_SYSBUS(IMXTimerPState, dev);
+ QEMUBH *bh;
+
+ DPRINTF("imx_timerp_init\n");
+
+ sysbus_init_irq(dev, &s->irq);
+ memory_region_init_io(&s->iomem, &imx_timerp_ops,
+ s, "imxp-timer",
+ 0x00001000);
+ sysbus_init_mmio(dev, &s->iomem);
+
+ bh = qemu_bh_new(imx_timerp_tick, s);
+ s->timer = ptimer_init(bh);
+
+ return 0;
+}
+
+
+void imx_timerg_create(const target_phys_addr_t addr,
+ qemu_irq irq,
+ DeviceState *ccm)
+{
+ IMXTimerGState *pp;
+ DeviceState *dev;
+
+ dev = sysbus_create_simple("imx_timerg", addr, irq);
+ pp = container_of(dev, IMXTimerGState, busdev.qdev);
+ pp->ccm = ccm;
+}
+
+static void imx_timerg_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ k->init = imx_timerg_init;
+ dc->vmsd = &vmstate_imx_timerg;
+ dc->reset = imx_timerg_reset;
+ dc->desc = "i.MX general timer";
+}
+
+static void imx_timerp_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass);
+ k->init = imx_timerp_init;
+ dc->vmsd = &vmstate_imx_timerp;
+ dc->reset = imx_timerp_reset;
+ dc->desc = "i.MX periodic timer";
+}
+
+static const TypeInfo imx_timerp_info = {
+ .name = "imx_timerp",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXTimerPState),
+ .class_init = imx_timerp_class_init,
+};
+
+static const TypeInfo imx_timerg_info = {
+ .name = "imx_timerg",
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(IMXTimerGState),
+ .class_init = imx_timerg_class_init,
+};
+
+static void imx_timer_register_types(void)
+{
+ type_register_static(&imx_timerp_info);
+ type_register_static(&imx_timerg_info);
+}
+
+type_init(imx_timer_register_types)
diff --git a/hw/integratorcp.c b/hw/integratorcp.c
index deacbf4d0d..d0e2e9068e 100644
--- a/hw/integratorcp.c
+++ b/hw/integratorcp.c
@@ -493,7 +493,7 @@ static void integratorcp_init(ram_addr_t ram_size,
sysbus_create_simple("pl050_keyboard", 0x18000000, pic[3]);
sysbus_create_simple("pl050_mouse", 0x19000000, pic[4]);
sysbus_create_varargs("pl181", 0x1c000000, pic[23], pic[24], NULL);
- if (nd_table[0].vlan)
+ if (nd_table[0].used)
smc91c111_init(&nd_table[0], 0xc8000000, pic[27]);
sysbus_create_simple("pl110", 0xc0000000, pic[22]);
diff --git a/hw/intel-hda.c b/hw/intel-hda.c
index 31fe1c54f6..127e81888b 100644
--- a/hw/intel-hda.c
+++ b/hw/intel-hda.c
@@ -1149,13 +1149,12 @@ static int intel_hda_init(PCIDevice *pci)
return 0;
}
-static int intel_hda_exit(PCIDevice *pci)
+static void intel_hda_exit(PCIDevice *pci)
{
IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);
msi_uninit(&d->pci);
memory_region_destroy(&d->mmio);
- return 0;
}
static int intel_hda_post_load(void *opaque, int version)
diff --git a/hw/ioh3420.c b/hw/ioh3420.c
index 0a2601cac4..94a537c9b3 100644
--- a/hw/ioh3420.c
+++ b/hw/ioh3420.c
@@ -96,7 +96,6 @@ static int ioh3420_initfn(PCIDevice *d)
PCIEPort *p = DO_UPCAST(PCIEPort, br, br);
PCIESlot *s = DO_UPCAST(PCIESlot, port, p);
int rc;
- int tmp;
rc = pci_bridge_initfn(d);
if (rc < 0) {
@@ -144,12 +143,11 @@ err_pcie_cap:
err_msi:
msi_uninit(d);
err_bridge:
- tmp = pci_bridge_exitfn(d);
- assert(!tmp);
+ pci_bridge_exitfn(d);
return rc;
}
-static int ioh3420_exitfn(PCIDevice *d)
+static void ioh3420_exitfn(PCIDevice *d)
{
PCIBridge* br = DO_UPCAST(PCIBridge, dev, d);
PCIEPort *p = DO_UPCAST(PCIEPort, br, br);
@@ -159,7 +157,7 @@ static int ioh3420_exitfn(PCIDevice *d)
pcie_chassis_del_slot(s);
pcie_cap_exit(d);
msi_uninit(d);
- return pci_bridge_exitfn(d);
+ pci_bridge_exitfn(d);
}
PCIESlot *ioh3420_init(PCIBus *bus, int devfn, bool multifunction,
diff --git a/hw/ivshmem.c b/hw/ivshmem.c
index 05559b639c..0c58161565 100644
--- a/hw/ivshmem.c
+++ b/hw/ivshmem.c
@@ -23,6 +23,7 @@
#include "kvm.h"
#include "migration.h"
#include "qerror.h"
+#include "event_notifier.h"
#include <sys/mman.h>
#include <sys/types.h>
@@ -45,7 +46,7 @@
typedef struct Peer {
int nb_eventfds;
- int *eventfds;
+ EventNotifier *eventfds;
} Peer;
typedef struct EventfdEntry {
@@ -63,14 +64,12 @@ typedef struct IVShmemState {
CharDriverState *server_chr;
MemoryRegion ivshmem_mmio;
- pcibus_t mmio_addr;
/* We might need to register the BAR before we actually have the memory.
* So prepare a container MemoryRegion for the BAR immediately and
* add a subregion when we have the memory.
*/
MemoryRegion bar;
MemoryRegion ivshmem;
- MemoryRegion msix_bar;
uint64_t ivshmem_size; /* size of shared memory region */
int shm_fd; /* shared memory file descriptor */
@@ -168,7 +167,6 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
{
IVShmemState *s = opaque;
- uint64_t write_one = 1;
uint16_t dest = val >> 16;
uint16_t vector = val & 0xff;
@@ -194,12 +192,8 @@ static void ivshmem_io_write(void *opaque, target_phys_addr_t addr,
/* check doorbell range */
if (vector < s->peers[dest].nb_eventfds) {
- IVSHMEM_DPRINTF("Writing %" PRId64 " to VM %d on vector %d\n",
- write_one, dest, vector);
- if (write(s->peers[dest].eventfds[vector],
- &(write_one), 8) != 8) {
- IVSHMEM_DPRINTF("error writing to eventfd\n");
- }
+ IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
+ event_notifier_set(&s->peers[dest].eventfds[vector]);
}
break;
default:
@@ -279,12 +273,13 @@ static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
msix_notify(pdev, entry->vector);
}
-static CharDriverState* create_eventfd_chr_device(void * opaque, int eventfd,
- int vector)
+static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
+ int vector)
{
/* create a event character device based on the passed eventfd */
IVShmemState *s = opaque;
CharDriverState * chr;
+ int eventfd = event_notifier_get_fd(n);
chr = qemu_chr_open_eventfd(eventfd);
@@ -347,16 +342,39 @@ static void create_shared_memory_BAR(IVShmemState *s, int fd) {
pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->bar);
}
+static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_add_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
+static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
+{
+ memory_region_del_eventfd(&s->ivshmem_mmio,
+ DOORBELL,
+ 4,
+ true,
+ (posn << 16) | i,
+ &s->peers[posn].eventfds[i]);
+}
+
static void close_guest_eventfds(IVShmemState *s, int posn)
{
int i, guest_curr_max;
guest_curr_max = s->peers[posn].nb_eventfds;
+ memory_region_transaction_begin();
for (i = 0; i < guest_curr_max; i++) {
- kvm_set_ioeventfd_mmio(s->peers[posn].eventfds[i],
- s->mmio_addr + DOORBELL, (posn << 16) | i, 0, 4);
- close(s->peers[posn].eventfds[i]);
+ ivshmem_del_eventfd(s, posn, i);
+ }
+ memory_region_transaction_commit();
+ for (i = 0; i < guest_curr_max; i++) {
+ event_notifier_cleanup(&s->peers[posn].eventfds[i]);
}
g_free(s->peers[posn].eventfds);
@@ -369,12 +387,7 @@ static void setup_ioeventfds(IVShmemState *s) {
for (i = 0; i <= s->max_peer; i++) {
for (j = 0; j < s->peers[i].nb_eventfds; j++) {
- memory_region_add_eventfd(&s->ivshmem_mmio,
- DOORBELL,
- 4,
- true,
- (i << 16) | j,
- s->peers[i].eventfds[j]);
+ ivshmem_add_eventfd(s, i, j);
}
}
}
@@ -476,14 +489,14 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
if (guest_max_eventfd == 0) {
/* one eventfd per MSI vector */
- s->peers[incoming_posn].eventfds = (int *) g_malloc(s->vectors *
- sizeof(int));
+ s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
}
/* this is an eventfd for a particular guest VM */
IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
guest_max_eventfd, incoming_fd);
- s->peers[incoming_posn].eventfds[guest_max_eventfd] = incoming_fd;
+ event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
+ incoming_fd);
/* increment count for particular guest */
s->peers[incoming_posn].nb_eventfds++;
@@ -495,15 +508,12 @@ static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
if (incoming_posn == s->vm_id) {
s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
- s->peers[s->vm_id].eventfds[guest_max_eventfd],
+ &s->peers[s->vm_id].eventfds[guest_max_eventfd],
guest_max_eventfd);
}
if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
- if (kvm_set_ioeventfd_mmio(incoming_fd, s->mmio_addr + DOORBELL,
- (incoming_posn << 16) | guest_max_eventfd, 1, 4) < 0) {
- fprintf(stderr, "ivshmem: ioeventfd not available\n");
- }
+ ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
}
return;
@@ -563,16 +573,13 @@ static uint64_t ivshmem_get_size(IVShmemState * s) {
static void ivshmem_setup_msi(IVShmemState * s)
{
- memory_region_init(&s->msix_bar, "ivshmem-msix", 4096);
- if (!msix_init(&s->dev, s->vectors, &s->msix_bar, 1, 0)) {
- pci_register_bar(&s->dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
- &s->msix_bar);
- IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
- } else {
+ if (msix_init_exclusive_bar(&s->dev, s->vectors, 1)) {
IVSHMEM_DPRINTF("msix initialization failed\n");
exit(1);
}
+ IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
+
/* allocate QEMU char devices for receiving interrupts */
s->eventfd_table = g_malloc0(s->vectors * sizeof(EventfdEntry));
@@ -764,7 +771,7 @@ static int pci_ivshmem_init(PCIDevice *dev)
return 0;
}
-static int pci_ivshmem_uninit(PCIDevice *dev)
+static void pci_ivshmem_uninit(PCIDevice *dev)
{
IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
@@ -779,8 +786,6 @@ static int pci_ivshmem_uninit(PCIDevice *dev)
memory_region_destroy(&s->ivshmem);
memory_region_destroy(&s->bar);
unregister_savevm(&dev->qdev, "ivshmem", s);
-
- return 0;
}
static Property ivshmem_properties[] = {
diff --git a/hw/kzm.c b/hw/kzm.c
new file mode 100644
index 0000000000..6a5e9dfaca
--- /dev/null
+++ b/hw/kzm.c
@@ -0,0 +1,154 @@
+/*
+ * KZM Board System emulation.
+ *
+ * Copyright (c) 2008 OKL and 2011 NICTA
+ * Written by Hans at OK-Labs
+ * Updated by Peter Chubb.
+ *
+ * This code is licenced under the GPL, version 2 or later.
+ * See the file `COPYING' in the top level directory.
+ *
+ * It (partially) emulates a Kyoto Microcomputer
+ * KZM-ARM11-01 evaluation board, with a Freescale
+ * i.MX31 SoC
+ */
+
+#include "sysbus.h"
+#include "exec-memory.h"
+#include "hw.h"
+#include "arm-misc.h"
+#include "devices.h"
+#include "net.h"
+#include "sysemu.h"
+#include "boards.h"
+#include "pc.h" /* for the FPGA UART that emulates a 16550 */
+#include "imx.h"
+
+ /* Memory map for Kzm Emulation Baseboard:
+ * 0x00000000-0x00003fff 16k secure ROM IGNORED
+ * 0x00004000-0x00407fff Reserved IGNORED
+ * 0x00404000-0x00407fff ROM IGNORED
+ * 0x00408000-0x0fffffff Reserved IGNORED
+ * 0x10000000-0x1fffbfff RAM aliasing IGNORED
+ * 0x1fffc000-0x1fffffff RAM EMULATED
+ * 0x20000000-0x2fffffff Reserved IGNORED
+ * 0x30000000-0x7fffffff I.MX31 Internal Register Space
+ * 0x43f00000 IO_AREA0
+ * 0x43f90000 UART1 EMULATED
+ * 0x43f94000 UART2 EMULATED
+ * 0x68000000 AVIC EMULATED
+ * 0x53f80000 CCM EMULATED
+ * 0x53f94000 PIT 1 EMULATED
+ * 0x53f98000 PIT 2 EMULATED
+ * 0x53f90000 GPT EMULATED
+ * 0x80000000-0x87ffffff RAM EMULATED
+ * 0x88000000-0x8fffffff RAM Aliasing EMULATED
+ * 0xa0000000-0xafffffff NAND Flash IGNORED
+ * 0xb0000000-0xb3ffffff Unavailable IGNORED
+ * 0xb4000000-0xb4000fff 8-bit free space IGNORED
+ * 0xb4001000-0xb400100f Board control IGNORED
+ * 0xb4001003 DIP switch
+ * 0xb4001010-0xb400101f 7-segment LED IGNORED
+ * 0xb4001020-0xb400102f LED IGNORED
+ * 0xb4001030-0xb400103f LED IGNORED
+ * 0xb4001040-0xb400104f FPGA, UART EMULATED
+ * 0xb4001050-0xb400105f FPGA, UART EMULATED
+ * 0xb4001060-0xb40fffff FPGA IGNORED
+ * 0xb6000000-0xb61fffff LAN controller EMULATED
+ * 0xb6200000-0xb62fffff FPGA NAND Controller IGNORED
+ * 0xb6300000-0xb7ffffff Free IGNORED
+ * 0xb8000000-0xb8004fff Memory control registers IGNORED
+ * 0xc0000000-0xc3ffffff PCMCIA/CF IGNORED
+ * 0xc4000000-0xffffffff Reserved IGNORED
+ */
+
+#define KZM_RAMADDRESS (0x80000000)
+#define KZM_FPGA (0xb4001040)
+
+static struct arm_boot_info kzm_binfo = {
+ .loader_start = KZM_RAMADDRESS,
+ .board_id = 1722,
+};
+
+static void kzm_init(ram_addr_t ram_size,
+ const char *boot_device,
+ const char *kernel_filename, const char *kernel_cmdline,
+ const char *initrd_filename, const char *cpu_model)
+{
+ ARMCPU *cpu;
+ MemoryRegion *address_space_mem = get_system_memory();
+ MemoryRegion *ram = g_new(MemoryRegion, 1);
+ MemoryRegion *sram = g_new(MemoryRegion, 1);
+ MemoryRegion *ram_alias = g_new(MemoryRegion, 1);
+ qemu_irq *cpu_pic;
+ DeviceState *dev;
+ DeviceState *ccm;
+
+ if (!cpu_model) {
+ cpu_model = "arm1136";
+ }
+
+ cpu = cpu_arm_init(cpu_model);
+ if (!cpu) {
+ fprintf(stderr, "Unable to find CPU definition\n");
+ exit(1);
+ }
+
+ /* On a real system, the first 16k is a `secure boot rom' */
+
+ memory_region_init_ram(ram, "kzm.ram", ram_size);
+ vmstate_register_ram_global(ram);
+ memory_region_add_subregion(address_space_mem, KZM_RAMADDRESS, ram);
+
+ memory_region_init_alias(ram_alias, "ram.alias", ram, 0, ram_size);
+ memory_region_add_subregion(address_space_mem, 0x88000000, ram_alias);
+
+ memory_region_init_ram(sram, "kzm.sram", 0x4000);
+ memory_region_add_subregion(address_space_mem, 0x1FFFC000, sram);
+
+ cpu_pic = arm_pic_init_cpu(cpu);
+ dev = sysbus_create_varargs("imx_avic", 0x68000000,
+ cpu_pic[ARM_PIC_CPU_IRQ],
+ cpu_pic[ARM_PIC_CPU_FIQ], NULL);
+
+
+ imx_serial_create(0, 0x43f90000, qdev_get_gpio_in(dev, 45));
+ imx_serial_create(1, 0x43f94000, qdev_get_gpio_in(dev, 32));
+
+ ccm = sysbus_create_simple("imx_ccm", 0x53f80000, NULL);
+
+ imx_timerp_create(0x53f94000, qdev_get_gpio_in(dev, 28), ccm);
+ imx_timerp_create(0x53f98000, qdev_get_gpio_in(dev, 27), ccm);
+ imx_timerg_create(0x53f90000, qdev_get_gpio_in(dev, 29), ccm);
+
+ if (nd_table[0].used) {
+ lan9118_init(&nd_table[0], 0xb6000000, qdev_get_gpio_in(dev, 52));
+ }
+
+ if (serial_hds[2]) { /* touchscreen */
+ serial_mm_init(address_space_mem, KZM_FPGA+0x10, 0,
+ qdev_get_gpio_in(dev, 52),
+ 14745600, serial_hds[2],
+ DEVICE_NATIVE_ENDIAN);
+ }
+
+ kzm_binfo.ram_size = ram_size;
+ kzm_binfo.kernel_filename = kernel_filename;
+ kzm_binfo.kernel_cmdline = kernel_cmdline;
+ kzm_binfo.initrd_filename = initrd_filename;
+ kzm_binfo.nb_cpus = 1;
+ arm_load_kernel(cpu, &kzm_binfo);
+}
+
+static QEMUMachine kzm_machine = {
+ .name = "kzm",
+ .desc = "ARM KZM Emulation Baseboard (ARM1136)",
+ .init = kzm_init,
+};
+
+static void kzm_machine_init(void)
+{
+ qemu_register_machine(&kzm_machine);
+}
+
+machine_init(kzm_machine_init)
diff --git a/hw/lan9118.c b/hw/lan9118.c
index 7b4fe87fca..ff0a50be19 100644
--- a/hw/lan9118.c
+++ b/hw/lan9118.c
@@ -384,7 +384,7 @@ static void phy_update_link(lan9118_state *s)
phy_update_irq(s);
}
-static void lan9118_set_link(VLANClientState *nc)
+static void lan9118_set_link(NetClientState *nc)
{
phy_update_link(DO_UPCAST(NICState, nc, nc)->opaque);
}
@@ -456,7 +456,7 @@ static void lan9118_reset(DeviceState *d)
lan9118_reload_eeprom(s);
}
-static int lan9118_can_receive(VLANClientState *nc)
+static int lan9118_can_receive(NetClientState *nc)
{
return 1;
}
@@ -509,7 +509,7 @@ static int lan9118_filter(lan9118_state *s, const uint8_t *addr)
}
}
-static ssize_t lan9118_receive(VLANClientState *nc, const uint8_t *buf,
+static ssize_t lan9118_receive(NetClientState *nc, const uint8_t *buf,
size_t size)
{
lan9118_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -1166,9 +1166,11 @@ static void lan9118_16bit_mode_write(void *opaque, target_phys_addr_t offset,
{
switch (size) {
case 2:
- return lan9118_writew(opaque, offset, (uint32_t)val);
+ lan9118_writew(opaque, offset, (uint32_t)val);
+ return;
case 4:
- return lan9118_writel(opaque, offset, val, size);
+ lan9118_writel(opaque, offset, val, size);
+ return;
}
hw_error("lan9118_write: Bad size 0x%x\n", size);
@@ -1302,7 +1304,7 @@ static const MemoryRegionOps lan9118_16bit_mem_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void lan9118_cleanup(VLANClientState *nc)
+static void lan9118_cleanup(NetClientState *nc)
{
lan9118_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -1310,7 +1312,7 @@ static void lan9118_cleanup(VLANClientState *nc)
}
static NetClientInfo net_lan9118_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = lan9118_can_receive,
.receive = lan9118_receive,
diff --git a/hw/lance.c b/hw/lance.c
index ce3d46c17b..9b98bb849a 100644
--- a/hw/lance.c
+++ b/hw/lance.c
@@ -85,7 +85,7 @@ static const MemoryRegionOps lance_mem_ops = {
},
};
-static void lance_cleanup(VLANClientState *nc)
+static void lance_cleanup(NetClientState *nc)
{
PCNetState *d = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -93,7 +93,7 @@ static void lance_cleanup(VLANClientState *nc)
}
static NetClientInfo net_lance_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = pcnet_can_receive,
.receive = pcnet_receive,
diff --git a/hw/lsi53c895a.c b/hw/lsi53c895a.c
index 2fe141d24e..34afe96742 100644
--- a/hw/lsi53c895a.c
+++ b/hw/lsi53c895a.c
@@ -282,8 +282,6 @@ static inline int lsi_irq_on_rsl(LSIState *s)
static void lsi_soft_reset(LSIState *s)
{
- lsi_request *p;
-
DPRINTF("Reset\n");
s->carry = 0;
@@ -350,15 +348,8 @@ static void lsi_soft_reset(LSIState *s)
s->sbc = 0;
s->csbc = 0;
s->sbr = 0;
- while (!QTAILQ_EMPTY(&s->queue)) {
- p = QTAILQ_FIRST(&s->queue);
- QTAILQ_REMOVE(&s->queue, p, next);
- g_free(p);
- }
- if (s->current) {
- g_free(s->current);
- s->current = NULL;
- }
+ assert(QTAILQ_EMPTY(&s->queue));
+ assert(!s->current);
}
static int lsi_dma_40bit(LSIState *s)
@@ -650,23 +641,24 @@ static lsi_request *lsi_find_by_tag(LSIState *s, uint32_t tag)
return NULL;
}
+static void lsi_request_free(LSIState *s, lsi_request *p)
+{
+ if (p == s->current) {
+ s->current = NULL;
+ } else {
+ QTAILQ_REMOVE(&s->queue, p, next);
+ }
+ g_free(p);
+}
+
static void lsi_request_cancelled(SCSIRequest *req)
{
LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent);
lsi_request *p = req->hba_private;
- if (s->current && req == s->current->req) {
- scsi_req_unref(req);
- g_free(s->current);
- s->current = NULL;
- return;
- }
-
- if (p) {
- QTAILQ_REMOVE(&s->queue, p, next);
- scsi_req_unref(req);
- g_free(p);
- }
+ req->hba_private = NULL;
+ lsi_request_free(s, p);
+ scsi_req_unref(req);
}
/* Record that data is available for a queued command. Returns zero if
@@ -714,10 +706,10 @@ static void lsi_command_complete(SCSIRequest *req, uint32_t status, size_t resid
lsi_set_phase(s, PHASE_ST);
}
- if (s->current && req == s->current->req) {
- scsi_req_unref(s->current->req);
- g_free(s->current);
- s->current = NULL;
+ if (req->hba_private == s->current) {
+ req->hba_private = NULL;
+ lsi_request_free(s, s->current);
+ scsi_req_unref(req);
}
lsi_resume_script(s);
}
@@ -728,7 +720,8 @@ static void lsi_transfer_data(SCSIRequest *req, uint32_t len)
LSIState *s = DO_UPCAST(LSIState, dev.qdev, req->bus->qbus.parent);
int out;
- if (s->waiting == 1 || !s->current || req->hba_private != s->current ||
+ assert(req->hba_private);
+ if (s->waiting == 1 || req->hba_private != s->current ||
(lsi_irq_on_rsl(s) && !(s->scntl1 & LSI_SCNTL1_CON))) {
if (lsi_queue_req(s, req, len)) {
return;
@@ -1738,7 +1731,7 @@ static void lsi_reg_writeb(LSIState *s, int offset, uint8_t val)
lsi_execute_script(s);
}
if (val & LSI_ISTAT0_SRST) {
- lsi_soft_reset(s);
+ qdev_reset_all(&s->dev.qdev);
}
break;
case 0x16: /* MBOX0 */
@@ -2071,15 +2064,13 @@ static const VMStateDescription vmstate_lsi_scsi = {
}
};
-static int lsi_scsi_uninit(PCIDevice *d)
+static void lsi_scsi_uninit(PCIDevice *d)
{
LSIState *s = DO_UPCAST(LSIState, dev, d);
memory_region_destroy(&s->mmio_io);
memory_region_destroy(&s->ram_io);
memory_region_destroy(&s->io_io);
-
- return 0;
}
static const struct SCSIBusInfo lsi_scsi_info = {
diff --git a/hw/mcf5208.c b/hw/mcf5208.c
index d3ebe8d9ad..ee25b1b387 100644
--- a/hw/mcf5208.c
+++ b/hw/mcf5208.c
@@ -236,7 +236,7 @@ static void mcf5208evb_init(ram_addr_t ram_size,
fprintf(stderr, "Too many NICs\n");
exit(1);
}
- if (nd_table[0].vlan)
+ if (nd_table[0].used)
mcf_fec_init(address_space_mem, &nd_table[0],
0xfc030000, pic + 36);
diff --git a/hw/mcf_fec.c b/hw/mcf_fec.c
index ae37bef0f0..2fec5bc73e 100644
--- a/hw/mcf_fec.c
+++ b/hw/mcf_fec.c
@@ -351,13 +351,13 @@ static void mcf_fec_write(void *opaque, target_phys_addr_t addr,
mcf_fec_update(s);
}
-static int mcf_fec_can_receive(VLANClientState *nc)
+static int mcf_fec_can_receive(NetClientState *nc)
{
mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
return s->rx_enabled;
}
-static ssize_t mcf_fec_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t mcf_fec_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
mcf_fec_bd bd;
@@ -439,7 +439,7 @@ static const MemoryRegionOps mcf_fec_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void mcf_fec_cleanup(VLANClientState *nc)
+static void mcf_fec_cleanup(NetClientState *nc)
{
mcf_fec_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -450,7 +450,7 @@ static void mcf_fec_cleanup(VLANClientState *nc)
}
static NetClientInfo net_mcf_fec_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = mcf_fec_can_receive,
.receive = mcf_fec_receive,
@@ -472,7 +472,6 @@ void mcf_fec_init(MemoryRegion *sysmem, NICInfo *nd,
memory_region_add_subregion(sysmem, base, &s->iomem);
s->conf.macaddr = nd->macaddr;
- s->conf.vlan = nd->vlan;
s->conf.peer = nd->netdev;
s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, nd->model, nd->name, s);
diff --git a/hw/megasas.c b/hw/megasas.c
new file mode 100644
index 0000000000..c35a15db4f
--- /dev/null
+++ b/hw/megasas.c
@@ -0,0 +1,2209 @@
+/*
+ * QEMU MegaRAID SAS 8708EM2 Host Bus Adapter emulation
+ * Based on the linux driver code at drivers/scsi/megaraid
+ *
+ * Copyright (c) 2009-2012 Hannes Reinecke, SUSE Labs
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw.h"
+#include "pci.h"
+#include "dma.h"
+#include "msix.h"
+#include "iov.h"
+#include "scsi.h"
+#include "scsi-defs.h"
+#include "block_int.h"
+#include "trace.h"
+
+#include "mfi.h"
+
+#define MEGASAS_VERSION "1.70"
+#define MEGASAS_MAX_FRAMES 2048 /* Firmware limit at 65535 */
+#define MEGASAS_DEFAULT_FRAMES 1000 /* Windows requires this */
+#define MEGASAS_MAX_SGE 128 /* Firmware limit */
+#define MEGASAS_DEFAULT_SGE 80
+#define MEGASAS_MAX_SECTORS 0xFFFF /* No real limit */
+#define MEGASAS_MAX_ARRAYS 128
+
+#define NAA_LOCALLY_ASSIGNED_ID 0x3ULL
+#define IEEE_COMPANY_LOCALLY_ASSIGNED 0x525400
+
+#define MEGASAS_FLAG_USE_JBOD 0
+#define MEGASAS_MASK_USE_JBOD (1 << MEGASAS_FLAG_USE_JBOD)
+#define MEGASAS_FLAG_USE_MSIX 1
+#define MEGASAS_MASK_USE_MSIX (1 << MEGASAS_FLAG_USE_MSIX)
+#define MEGASAS_FLAG_USE_QUEUE64 2
+#define MEGASAS_MASK_USE_QUEUE64 (1 << MEGASAS_FLAG_USE_QUEUE64)
+
+static const char *mfi_frame_desc[] = {
+ "MFI init", "LD Read", "LD Write", "LD SCSI", "PD SCSI",
+ "MFI Doorbell", "MFI Abort", "MFI SMP", "MFI Stop"};
+
+typedef struct MegasasCmd {
+ uint32_t index;
+ uint16_t flags;
+ uint16_t count;
+ uint64_t context;
+
+ target_phys_addr_t pa;
+ target_phys_addr_t pa_size;
+ union mfi_frame *frame;
+ SCSIRequest *req;
+ QEMUSGList qsg;
+ void *iov_buf;
+ size_t iov_size;
+ size_t iov_offset;
+ struct MegasasState *state;
+} MegasasCmd;
+
+typedef struct MegasasState {
+ PCIDevice dev;
+ MemoryRegion mmio_io;
+ MemoryRegion port_io;
+ MemoryRegion queue_io;
+ uint32_t frame_hi;
+
+ int fw_state;
+ uint32_t fw_sge;
+ uint32_t fw_cmds;
+ uint32_t flags;
+ int fw_luns;
+ int intr_mask;
+ int doorbell;
+ int busy;
+
+ MegasasCmd *event_cmd;
+ int event_locale;
+ int event_class;
+ int event_count;
+ int shutdown_event;
+ int boot_event;
+
+ uint64_t sas_addr;
+
+ uint64_t reply_queue_pa;
+ void *reply_queue;
+ int reply_queue_len;
+ int reply_queue_head;
+ int reply_queue_tail;
+ uint64_t consumer_pa;
+ uint64_t producer_pa;
+
+ MegasasCmd frames[MEGASAS_MAX_FRAMES];
+
+ SCSIBus bus;
+} MegasasState;
+
+#define MEGASAS_INTR_DISABLED_MASK 0xFFFFFFFF
+
+static bool megasas_intr_enabled(MegasasState *s)
+{
+ if ((s->intr_mask & MEGASAS_INTR_DISABLED_MASK) !=
+ MEGASAS_INTR_DISABLED_MASK) {
+ return true;
+ }
+ return false;
+}
+
+static bool megasas_use_queue64(MegasasState *s)
+{
+ return s->flags & MEGASAS_MASK_USE_QUEUE64;
+}
+
+static bool megasas_use_msix(MegasasState *s)
+{
+ return s->flags & MEGASAS_MASK_USE_MSIX;
+}
+
+static bool megasas_is_jbod(MegasasState *s)
+{
+ return s->flags & MEGASAS_MASK_USE_JBOD;
+}
+
+static void megasas_frame_set_cmd_status(unsigned long frame, uint8_t v)
+{
+ stb_phys(frame + offsetof(struct mfi_frame_header, cmd_status), v);
+}
+
+static void megasas_frame_set_scsi_status(unsigned long frame, uint8_t v)
+{
+ stb_phys(frame + offsetof(struct mfi_frame_header, scsi_status), v);
+}
+
+/*
+ * Context is considered opaque, but the HBA firmware is running
+ * in little endian mode. So convert it to little endian, too.
+ */
+static uint64_t megasas_frame_get_context(unsigned long frame)
+{
+ return ldq_le_phys(frame + offsetof(struct mfi_frame_header, context));
+}
+
+static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd)
+{
+ return cmd->flags & MFI_FRAME_IEEE_SGL;
+}
+
+static bool megasas_frame_is_sgl64(MegasasCmd *cmd)
+{
+ return cmd->flags & MFI_FRAME_SGL64;
+}
+
+static bool megasas_frame_is_sense64(MegasasCmd *cmd)
+{
+ return cmd->flags & MFI_FRAME_SENSE64;
+}
+
+static uint64_t megasas_sgl_get_addr(MegasasCmd *cmd,
+ union mfi_sgl *sgl)
+{
+ uint64_t addr;
+
+ if (megasas_frame_is_ieee_sgl(cmd)) {
+ addr = le64_to_cpu(sgl->sg_skinny->addr);
+ } else if (megasas_frame_is_sgl64(cmd)) {
+ addr = le64_to_cpu(sgl->sg64->addr);
+ } else {
+ addr = le32_to_cpu(sgl->sg32->addr);
+ }
+ return addr;
+}
+
+static uint32_t megasas_sgl_get_len(MegasasCmd *cmd,
+ union mfi_sgl *sgl)
+{
+ uint32_t len;
+
+ if (megasas_frame_is_ieee_sgl(cmd)) {
+ len = le32_to_cpu(sgl->sg_skinny->len);
+ } else if (megasas_frame_is_sgl64(cmd)) {
+ len = le32_to_cpu(sgl->sg64->len);
+ } else {
+ len = le32_to_cpu(sgl->sg32->len);
+ }
+ return len;
+}
+
+static union mfi_sgl *megasas_sgl_next(MegasasCmd *cmd,
+ union mfi_sgl *sgl)
+{
+ uint8_t *next = (uint8_t *)sgl;
+
+ if (megasas_frame_is_ieee_sgl(cmd)) {
+ next += sizeof(struct mfi_sg_skinny);
+ } else if (megasas_frame_is_sgl64(cmd)) {
+ next += sizeof(struct mfi_sg64);
+ } else {
+ next += sizeof(struct mfi_sg32);
+ }
+
+ if (next >= (uint8_t *)cmd->frame + cmd->pa_size) {
+ return NULL;
+ }
+ return (union mfi_sgl *)next;
+}
+
+static void megasas_soft_reset(MegasasState *s);
+
+static int megasas_map_sgl(MegasasState *s, MegasasCmd *cmd, union mfi_sgl *sgl)
+{
+ int i;
+ int iov_count = 0;
+ size_t iov_size = 0;
+
+ cmd->flags = le16_to_cpu(cmd->frame->header.flags);
+ iov_count = cmd->frame->header.sge_count;
+ if (iov_count > MEGASAS_MAX_SGE) {
+ trace_megasas_iovec_sgl_overflow(cmd->index, iov_count,
+ MEGASAS_MAX_SGE);
+ return iov_count;
+ }
+ qemu_sglist_init(&cmd->qsg, iov_count, pci_dma_context(&s->dev));
+ for (i = 0; i < iov_count; i++) {
+ dma_addr_t iov_pa, iov_size_p;
+
+ if (!sgl) {
+ trace_megasas_iovec_sgl_underflow(cmd->index, i);
+ goto unmap;
+ }
+ iov_pa = megasas_sgl_get_addr(cmd, sgl);
+ iov_size_p = megasas_sgl_get_len(cmd, sgl);
+ if (!iov_pa || !iov_size_p) {
+ trace_megasas_iovec_sgl_invalid(cmd->index, i,
+ iov_pa, iov_size_p);
+ goto unmap;
+ }
+ qemu_sglist_add(&cmd->qsg, iov_pa, iov_size_p);
+ sgl = megasas_sgl_next(cmd, sgl);
+ iov_size += (size_t)iov_size_p;
+ }
+ if (cmd->iov_size > iov_size) {
+ trace_megasas_iovec_overflow(cmd->index, iov_size, cmd->iov_size);
+ } else if (cmd->iov_size < iov_size) {
+ trace_megasas_iovec_underflow(cmd->index, iov_size, cmd->iov_size);
+ }
+ cmd->iov_offset = 0;
+ return 0;
+unmap:
+ qemu_sglist_destroy(&cmd->qsg);
+ return iov_count - i;
+}
+
+static void megasas_unmap_sgl(MegasasCmd *cmd)
+{
+ qemu_sglist_destroy(&cmd->qsg);
+ cmd->iov_offset = 0;
+}
+
+/*
+ * passthrough sense and io sense are at the same offset
+ */
+static int megasas_build_sense(MegasasCmd *cmd, uint8_t *sense_ptr,
+ uint8_t sense_len)
+{
+ uint32_t pa_hi = 0, pa_lo;
+ target_phys_addr_t pa;
+
+ if (sense_len > cmd->frame->header.sense_len) {
+ sense_len = cmd->frame->header.sense_len;
+ }
+ if (sense_len) {
+ pa_lo = le32_to_cpu(cmd->frame->pass.sense_addr_lo);
+ if (megasas_frame_is_sense64(cmd)) {
+ pa_hi = le32_to_cpu(cmd->frame->pass.sense_addr_hi);
+ }
+ pa = ((uint64_t) pa_hi << 32) | pa_lo;
+ cpu_physical_memory_write(pa, sense_ptr, sense_len);
+ cmd->frame->header.sense_len = sense_len;
+ }
+ return sense_len;
+}
+
+static void megasas_write_sense(MegasasCmd *cmd, SCSISense sense)
+{
+ uint8_t sense_buf[SCSI_SENSE_BUF_SIZE];
+ uint8_t sense_len = 18;
+
+ memset(sense_buf, 0, sense_len);
+ sense_buf[0] = 0xf0;
+ sense_buf[2] = sense.key;
+ sense_buf[7] = 10;
+ sense_buf[12] = sense.asc;
+ sense_buf[13] = sense.ascq;
+ megasas_build_sense(cmd, sense_buf, sense_len);
+}
+
+static void megasas_copy_sense(MegasasCmd *cmd)
+{
+ uint8_t sense_buf[SCSI_SENSE_BUF_SIZE];
+ uint8_t sense_len;
+
+ sense_len = scsi_req_get_sense(cmd->req, sense_buf,
+ SCSI_SENSE_BUF_SIZE);
+ megasas_build_sense(cmd, sense_buf, sense_len);
+}
+
+/*
+ * Format an INQUIRY CDB
+ */
+static int megasas_setup_inquiry(uint8_t *cdb, int pg, int len)
+{
+ memset(cdb, 0, 6);
+ cdb[0] = INQUIRY;
+ if (pg > 0) {
+ cdb[1] = 0x1;
+ cdb[2] = pg;
+ }
+ cdb[3] = (len >> 8) & 0xff;
+ cdb[4] = (len & 0xff);
+ return len;
+}
+
+/*
+ * Encode lba and len into a READ_16/WRITE_16 CDB
+ */
+static void megasas_encode_lba(uint8_t *cdb, uint64_t lba,
+ uint32_t len, bool is_write)
+{
+ memset(cdb, 0x0, 16);
+ if (is_write) {
+ cdb[0] = WRITE_16;
+ } else {
+ cdb[0] = READ_16;
+ }
+ cdb[2] = (lba >> 56) & 0xff;
+ cdb[3] = (lba >> 48) & 0xff;
+ cdb[4] = (lba >> 40) & 0xff;
+ cdb[5] = (lba >> 32) & 0xff;
+ cdb[6] = (lba >> 24) & 0xff;
+ cdb[7] = (lba >> 16) & 0xff;
+ cdb[8] = (lba >> 8) & 0xff;
+ cdb[9] = (lba) & 0xff;
+ cdb[10] = (len >> 24) & 0xff;
+ cdb[11] = (len >> 16) & 0xff;
+ cdb[12] = (len >> 8) & 0xff;
+ cdb[13] = (len) & 0xff;
+}
+
+/*
+ * Utility functions
+ */
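+/*
+ * Pack the current time into the firmware format: one byte each for
+ * seconds, minutes, hours, day and month in the upper bytes, and the
+ * full year in the low 16 bits.
+ */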
+static uint64_t megasas_fw_time(void)
+{
+ struct tm curtime;
+ uint64_t bcd_time;
+
+ qemu_get_timedate(&curtime, 0);
+ bcd_time = ((uint64_t)curtime.tm_sec & 0xff) << 48 |
+ ((uint64_t)curtime.tm_min & 0xff) << 40 |
+ ((uint64_t)curtime.tm_hour & 0xff) << 32 |
+ ((uint64_t)curtime.tm_mday & 0xff) << 24 |
+ ((uint64_t)curtime.tm_mon & 0xff) << 16 |
+ ((uint64_t)(curtime.tm_year + 1900) & 0xffff);
+
+ return bcd_time;
+}
+
+/*
+ * Build the default disk SATA address.
+ * 0x1221 is the magic prefix found in real hardware,
+ * so use it here, too.
+ */
+static uint64_t megasas_get_sata_addr(uint16_t id)
+{
+ uint64_t addr = (0x1221ULL << 48);
+ return addr | ((uint64_t)id << 24);
+}
+
+/*
+ * Frame handling
+ */
+static int megasas_next_index(MegasasState *s, int index, int limit)
+{
+ index++;
+ if (index == limit) {
+ index = 0;
+ }
+ return index;
+}
+
+static MegasasCmd *megasas_lookup_frame(MegasasState *s,
+ target_phys_addr_t frame)
+{
+ MegasasCmd *cmd = NULL;
+ int num = 0, index;
+
+ index = s->reply_queue_head;
+
+ while (num < s->fw_cmds) {
+ if (s->frames[index].pa && s->frames[index].pa == frame) {
+ cmd = &s->frames[index];
+ break;
+ }
+ index = megasas_next_index(s, index, s->fw_cmds);
+ num++;
+ }
+
+ return cmd;
+}
+
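+/*
+ * Find a command slot for a frame: reuse the existing mapping if the
+ * frame address is already known, otherwise pick the first free slot.
+ */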
+static MegasasCmd *megasas_next_frame(MegasasState *s,
+ target_phys_addr_t frame)
+{
+ MegasasCmd *cmd = NULL;
+ int num = 0, index;
+
+ cmd = megasas_lookup_frame(s, frame);
+ if (cmd) {
+ trace_megasas_qf_found(cmd->index, cmd->pa);
+ return cmd;
+ }
+ index = s->reply_queue_head;
+ num = 0;
+ while (num < s->fw_cmds) {
+ if (!s->frames[index].pa) {
+ cmd = &s->frames[index];
+ break;
+ }
+ index = megasas_next_index(s, index, s->fw_cmds);
+ num++;
+ }
+ if (!cmd) {
+ trace_megasas_qf_failed(frame);
+ }
+ trace_megasas_qf_new(index, cmd);
+ return cmd;
+}
+
+static MegasasCmd *megasas_enqueue_frame(MegasasState *s,
+ target_phys_addr_t frame, uint64_t context, int count)
+{
+ MegasasCmd *cmd = NULL;
+ int frame_size = MFI_FRAME_SIZE * 16;
+ target_phys_addr_t frame_size_p = frame_size;
+
+ cmd = megasas_next_frame(s, frame);
+ /* All frames busy */
+ if (!cmd) {
+ return NULL;
+ }
+ if (!cmd->pa) {
+ cmd->pa = frame;
+ /* Map all possible frames */
+ cmd->frame = cpu_physical_memory_map(frame, &frame_size_p, 0);
+ if (frame_size_p != frame_size) {
+ trace_megasas_qf_map_failed(cmd->index, (unsigned long)frame);
+ if (cmd->frame) {
+ cpu_physical_memory_unmap(cmd->frame, frame_size_p, 0, 0);
+ cmd->frame = NULL;
+ cmd->pa = 0;
+ }
+ s->event_count++;
+ return NULL;
+ }
+ cmd->pa_size = frame_size_p;
+ cmd->context = context;
+ if (!megasas_use_queue64(s)) {
+ cmd->context &= (uint64_t)0xFFFFFFFF;
+ }
+ }
+ cmd->count = count;
+ s->busy++;
+
+ trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context,
+ s->reply_queue_head, s->busy);
+
+ return cmd;
+}
+
+static void megasas_complete_frame(MegasasState *s, uint64_t context)
+{
+ int tail, queue_offset;
+
+ /* Decrement busy count */
+ s->busy--;
+
+ if (s->reply_queue_pa) {
+ /*
+ * Put command on the reply queue.
+ * Context is opaque, but emulation is running in
+ * little endian. So convert it.
+ */
+ tail = s->reply_queue_head;
+ if (megasas_use_queue64(s)) {
+ queue_offset = tail * sizeof(uint64_t);
+ stq_le_phys(s->reply_queue_pa + queue_offset, context);
+ } else {
+ queue_offset = tail * sizeof(uint32_t);
+ stl_le_phys(s->reply_queue_pa + queue_offset, context);
+ }
+ s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds);
+ trace_megasas_qf_complete(context, tail, queue_offset,
+ s->busy, s->doorbell);
+ }
+
+ if (megasas_intr_enabled(s)) {
+ /* Notify HBA */
+ s->doorbell++;
+ if (s->doorbell == 1) {
+ if (msix_enabled(&s->dev)) {
+ trace_megasas_msix_raise(0);
+ msix_notify(&s->dev, 0);
+ } else {
+ trace_megasas_irq_raise();
+ qemu_irq_raise(s->dev.irq[0]);
+ }
+ }
+ } else {
+ trace_megasas_qf_complete_noirq(context);
+ }
+}
+
+static void megasas_reset_frames(MegasasState *s)
+{
+ int i;
+ MegasasCmd *cmd;
+
+ for (i = 0; i < s->fw_cmds; i++) {
+ cmd = &s->frames[i];
+ if (cmd->pa) {
+ cpu_physical_memory_unmap(cmd->frame, cmd->pa_size, 0, 0);
+ cmd->frame = NULL;
+ cmd->pa = 0;
+ }
+ }
+}
+
+static void megasas_abort_command(MegasasCmd *cmd)
+{
+ if (cmd->req) {
+ scsi_req_cancel(cmd->req);
+ cmd->req = NULL;
+ }
+}
+
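+/*
+ * Handle an MFI INIT frame: map the queue init structure supplied by
+ * the guest and record the reply queue, producer and consumer addresses.
+ */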
+static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd)
+{
+ uint32_t pa_hi, pa_lo;
+ target_phys_addr_t iq_pa, initq_size;
+ struct mfi_init_qinfo *initq;
+ uint32_t flags;
+ int ret = MFI_STAT_OK;
+
+ pa_lo = le32_to_cpu(cmd->frame->init.qinfo_new_addr_lo);
+ pa_hi = le32_to_cpu(cmd->frame->init.qinfo_new_addr_hi);
+ iq_pa = (((uint64_t) pa_hi << 32) | pa_lo);
+ trace_megasas_init_firmware((uint64_t)iq_pa);
+ initq_size = sizeof(*initq);
+ initq = cpu_physical_memory_map(iq_pa, &initq_size, 0);
+ if (!initq || initq_size != sizeof(*initq)) {
+ trace_megasas_initq_map_failed(cmd->index);
+ s->event_count++;
+ ret = MFI_STAT_MEMORY_NOT_AVAILABLE;
+ goto out;
+ }
+ s->reply_queue_len = le32_to_cpu(initq->rq_entries) & 0xFFFF;
+ if (s->reply_queue_len > s->fw_cmds) {
+ trace_megasas_initq_mismatch(s->reply_queue_len, s->fw_cmds);
+ s->event_count++;
+ ret = MFI_STAT_INVALID_PARAMETER;
+ goto out;
+ }
+ pa_lo = le32_to_cpu(initq->rq_addr_lo);
+ pa_hi = le32_to_cpu(initq->rq_addr_hi);
+ s->reply_queue_pa = ((uint64_t) pa_hi << 32) | pa_lo;
+ pa_lo = le32_to_cpu(initq->ci_addr_lo);
+ pa_hi = le32_to_cpu(initq->ci_addr_hi);
+ s->consumer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
+ pa_lo = le32_to_cpu(initq->pi_addr_lo);
+ pa_hi = le32_to_cpu(initq->pi_addr_hi);
+ s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo;
+ s->reply_queue_head = ldl_le_phys(s->producer_pa);
+ s->reply_queue_tail = ldl_le_phys(s->consumer_pa);
+ flags = le32_to_cpu(initq->flags);
+ if (flags & MFI_QUEUE_FLAG_CONTEXT64) {
+ s->flags |= MEGASAS_MASK_USE_QUEUE64;
+ }
+ trace_megasas_init_queue((unsigned long)s->reply_queue_pa,
+ s->reply_queue_len, s->reply_queue_head,
+ s->reply_queue_tail, flags);
+ megasas_reset_frames(s);
+ s->fw_state = MFI_FWSTATE_OPERATIONAL;
+out:
+ if (initq) {
+ cpu_physical_memory_unmap(initq, initq_size, 0, 0);
+ }
+ return ret;
+}
+
+static int megasas_map_dcmd(MegasasState *s, MegasasCmd *cmd)
+{
+ dma_addr_t iov_pa, iov_size;
+
+ cmd->flags = le16_to_cpu(cmd->frame->header.flags);
+ if (!cmd->frame->header.sge_count) {
+ trace_megasas_dcmd_zero_sge(cmd->index);
+ cmd->iov_size = 0;
+ return 0;
+ } else if (cmd->frame->header.sge_count > 1) {
+ trace_megasas_dcmd_invalid_sge(cmd->index,
+ cmd->frame->header.sge_count);
+ cmd->iov_size = 0;
+ return -1;
+ }
+ iov_pa = megasas_sgl_get_addr(cmd, &cmd->frame->dcmd.sgl);
+ iov_size = megasas_sgl_get_len(cmd, &cmd->frame->dcmd.sgl);
+ qemu_sglist_init(&cmd->qsg, 1, pci_dma_context(&s->dev));
+ qemu_sglist_add(&cmd->qsg, iov_pa, iov_size);
+ cmd->iov_size = iov_size;
+ return cmd->iov_size;
+}
+
+static void megasas_finish_dcmd(MegasasCmd *cmd, uint32_t iov_size)
+{
+ trace_megasas_finish_dcmd(cmd->index, iov_size);
+
+ if (cmd->frame->header.sge_count) {
+ qemu_sglist_destroy(&cmd->qsg);
+ }
+ if (iov_size > cmd->iov_size) {
+ if (megasas_frame_is_ieee_sgl(cmd)) {
+ cmd->frame->dcmd.sgl.sg_skinny->len = cpu_to_le32(iov_size);
+ } else if (megasas_frame_is_sgl64(cmd)) {
+ cmd->frame->dcmd.sgl.sg64->len = cpu_to_le32(iov_size);
+ } else {
+ cmd->frame->dcmd.sgl.sg32->len = cpu_to_le32(iov_size);
+ }
+ }
+ cmd->iov_size = 0;
+ return;
+}
+
+static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_ctrl_info info;
+ size_t dcmd_size = sizeof(info);
+ BusChild *kid;
+ int num_ld_disks = 0;
+ uint16_t sdev_id;
+
+ memset(&info, 0x0, dcmd_size);
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ info.pci.vendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC);
+ info.pci.device = cpu_to_le16(PCI_DEVICE_ID_LSI_SAS1078);
+ info.pci.subvendor = cpu_to_le16(PCI_VENDOR_ID_LSI_LOGIC);
+ info.pci.subdevice = cpu_to_le16(0x1013);
+
+ /*
+ * The firmware reports only up to 8 device ports, even though
+ * it supports a far larger number of physical devices. So list
+ * just the first 8 devices in the device port list, regardless
+ * of how many logical devices are actually present.
+ */
+ info.host.type = MFI_INFO_HOST_PCIE;
+ info.device.type = MFI_INFO_DEV_SAS3G;
+ info.device.port_count = 8;
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child);
+
+ if (num_ld_disks < 8) {
+ sdev_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF);
+ info.device.port_addr[num_ld_disks] =
+ cpu_to_le64(megasas_get_sata_addr(sdev_id));
+ }
+ num_ld_disks++;
+ }
+
+ memcpy(info.product_name, "MegaRAID SAS 8708EM2", 20);
+ snprintf(info.serial_number, 32, "QEMU%08lx",
+ (unsigned long)s & 0xFFFFFFFF);
+ snprintf(info.package_version, 0x60, "%s-QEMU", QEMU_VERSION);
+ memcpy(info.image_component[0].name, "APP", 3);
+ memcpy(info.image_component[0].version, MEGASAS_VERSION "-QEMU", 9);
+ memcpy(info.image_component[0].build_date, __DATE__, 11);
+ memcpy(info.image_component[0].build_time, __TIME__, 8);
+ info.image_component_count = 1;
+ if (s->dev.has_rom) {
+ uint8_t biosver[32];
+ uint8_t *ptr;
+
+ ptr = memory_region_get_ram_ptr(&s->dev.rom);
+ memcpy(biosver, ptr + 0x41, 31);
+ qemu_put_ram_ptr(ptr);
+ memcpy(info.image_component[1].name, "BIOS", 4);
+ memcpy(info.image_component[1].version, biosver,
+ strlen((const char *)biosver));
+ info.image_component_count++;
+ }
+ info.current_fw_time = cpu_to_le32(megasas_fw_time());
+ info.max_arms = 32;
+ info.max_spans = 8;
+ info.max_arrays = MEGASAS_MAX_ARRAYS;
+ info.max_lds = s->fw_luns;
+ info.max_cmds = cpu_to_le16(s->fw_cmds);
+ info.max_sg_elements = cpu_to_le16(s->fw_sge);
+ info.max_request_size = cpu_to_le32(MEGASAS_MAX_SECTORS);
+ info.lds_present = cpu_to_le16(num_ld_disks);
+ info.pd_present = cpu_to_le16(num_ld_disks);
+ info.pd_disks_present = cpu_to_le16(num_ld_disks);
+ info.hw_present = cpu_to_le32(MFI_INFO_HW_NVRAM |
+ MFI_INFO_HW_MEM |
+ MFI_INFO_HW_FLASH);
+ info.memory_size = cpu_to_le16(512);
+ info.nvram_size = cpu_to_le16(32);
+ info.flash_size = cpu_to_le16(16);
+ info.raid_levels = cpu_to_le32(MFI_INFO_RAID_0);
+ info.adapter_ops = cpu_to_le32(MFI_INFO_AOPS_RBLD_RATE |
+ MFI_INFO_AOPS_SELF_DIAGNOSTIC |
+ MFI_INFO_AOPS_MIXED_ARRAY);
+ info.ld_ops = cpu_to_le32(MFI_INFO_LDOPS_DISK_CACHE_POLICY |
+ MFI_INFO_LDOPS_ACCESS_POLICY |
+ MFI_INFO_LDOPS_IO_POLICY |
+ MFI_INFO_LDOPS_WRITE_POLICY |
+ MFI_INFO_LDOPS_READ_POLICY);
+ info.max_strips_per_io = cpu_to_le16(s->fw_sge);
+ info.stripe_sz_ops.min = 3;
+ info.stripe_sz_ops.max = ffs(MEGASAS_MAX_SECTORS + 1) - 1;
+ info.properties.pred_fail_poll_interval = cpu_to_le16(300);
+ info.properties.intr_throttle_cnt = cpu_to_le16(16);
+ info.properties.intr_throttle_timeout = cpu_to_le16(50);
+ info.properties.rebuild_rate = 30;
+ info.properties.patrol_read_rate = 30;
+ info.properties.bgi_rate = 30;
+ info.properties.cc_rate = 30;
+ info.properties.recon_rate = 30;
+ info.properties.cache_flush_interval = 4;
+ info.properties.spinup_drv_cnt = 2;
+ info.properties.spinup_delay = 6;
+ info.properties.ecc_bucket_size = 15;
+ info.properties.ecc_bucket_leak_rate = cpu_to_le16(1440);
+ info.properties.expose_encl_devices = 1;
+ info.properties.OnOffProperties = cpu_to_le32(MFI_CTRL_PROP_EnableJBOD);
+ info.pd_ops = cpu_to_le32(MFI_INFO_PDOPS_FORCE_ONLINE |
+ MFI_INFO_PDOPS_FORCE_OFFLINE);
+ info.pd_mix_support = cpu_to_le32(MFI_INFO_PDMIX_SAS |
+ MFI_INFO_PDMIX_SATA |
+ MFI_INFO_PDMIX_LD);
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_defaults info;
+ size_t dcmd_size = sizeof(struct mfi_defaults);
+
+ memset(&info, 0x0, dcmd_size);
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ info.sas_addr = cpu_to_le64(s->sas_addr);
+ info.stripe_size = 3;
+ info.flush_time = 4;
+ info.background_rate = 30;
+ info.allow_mix_in_enclosure = 1;
+ info.allow_mix_in_ld = 1;
+ info.direct_pd_mapping = 1;
+ /* Enable for BIOS support */
+ info.bios_enumerate_lds = 1;
+ info.disable_ctrl_r = 1;
+ info.expose_enclosure_devices = 1;
+ info.disable_preboot_cli = 1;
+ info.cluster_disable = 1;
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_bios_data info;
+ size_t dcmd_size = sizeof(info);
+
+ memset(&info, 0x0, dcmd_size);
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+ info.continue_on_error = 1;
+ info.verbose = 1;
+ if (megasas_is_jbod(s)) {
+ info.expose_all_drives = 1;
+ }
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd)
+{
+ uint64_t fw_time;
+ size_t dcmd_size = sizeof(fw_time);
+
+ fw_time = cpu_to_le64(megasas_fw_time());
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_set_fw_time(MegasasState *s, MegasasCmd *cmd)
+{
+ uint64_t fw_time;
+
+ /* This is a dummy; setting of firmware time is not allowed */
+ memcpy(&fw_time, cmd->frame->dcmd.mbox, sizeof(fw_time));
+
+ trace_megasas_dcmd_set_fw_time(cmd->index, fw_time);
+ fw_time = cpu_to_le64(megasas_fw_time());
+ return MFI_STAT_OK;
+}
+
+static int megasas_event_info(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_evt_log_state info;
+ size_t dcmd_size = sizeof(info);
+
+ memset(&info, 0, dcmd_size);
+
+ info.newest_seq_num = cpu_to_le32(s->event_count);
+ info.shutdown_seq_num = cpu_to_le32(s->shutdown_event);
+ info.boot_seq_num = cpu_to_le32(s->boot_event);
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_event_wait(MegasasState *s, MegasasCmd *cmd)
+{
+ union mfi_evt event;
+
+ if (cmd->iov_size < sizeof(struct mfi_evt_detail)) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ sizeof(struct mfi_evt_detail));
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+ s->event_count = cpu_to_le32(cmd->frame->dcmd.mbox[0]);
+ event.word = cpu_to_le32(cmd->frame->dcmd.mbox[4]);
+ s->event_locale = event.members.locale;
+ s->event_class = event.members.class;
+ s->event_cmd = cmd;
+ /* Decrease busy count; event frame doesn't count here */
+ s->busy--;
+ cmd->iov_size = sizeof(struct mfi_evt_detail);
+ return MFI_STAT_INVALID_STATUS;
+}
+
+static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_pd_list info;
+ size_t dcmd_size = sizeof(info);
+ BusChild *kid;
+ uint32_t offset, dcmd_limit, num_pd_disks = 0, max_pd_disks;
+ uint16_t sdev_id;
+
+ memset(&info, 0, dcmd_size);
+ offset = 8;
+ dcmd_limit = offset + sizeof(struct mfi_pd_address);
+ if (cmd->iov_size < dcmd_limit) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_limit);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ max_pd_disks = (cmd->iov_size - offset) / sizeof(struct mfi_pd_address);
+ if (max_pd_disks > s->fw_luns) {
+ max_pd_disks = s->fw_luns;
+ }
+
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child);
+
+ sdev_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF);
+ info.addr[num_pd_disks].device_id = cpu_to_le16(sdev_id);
+ info.addr[num_pd_disks].encl_device_id = 0xFFFF;
+ info.addr[num_pd_disks].encl_index = 0;
+ info.addr[num_pd_disks].slot_number = (sdev->id & 0xFF);
+ info.addr[num_pd_disks].scsi_dev_type = sdev->type;
+ info.addr[num_pd_disks].connect_port_bitmap = 0x1;
+ info.addr[num_pd_disks].sas_addr[0] =
+ cpu_to_le64(megasas_get_sata_addr(sdev_id));
+ num_pd_disks++;
+ offset += sizeof(struct mfi_pd_address);
+ }
+ trace_megasas_dcmd_pd_get_list(cmd->index, num_pd_disks,
+ max_pd_disks, offset);
+
+ info.size = cpu_to_le32(offset);
+ info.count = cpu_to_le32(num_pd_disks);
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_pd_list_query(MegasasState *s, MegasasCmd *cmd)
+{
+ uint16_t flags;
+
+ /* mbox0 contains flags */
+ flags = le16_to_cpu(cmd->frame->dcmd.mbox[0]);
+ trace_megasas_dcmd_pd_list_query(cmd->index, flags);
+ if (flags == MR_PD_QUERY_TYPE_ALL ||
+ megasas_is_jbod(s)) {
+ return megasas_dcmd_pd_get_list(s, cmd);
+ }
+
+ return MFI_STAT_OK;
+}
+
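+/*
+ * Fill in the physical disk info. The data is gathered via internal
+ * INQUIRY requests: the first call issues a standard INQUIRY, the
+ * second one the VPD page 0x83 INQUIRY, and the final call fills in
+ * the remaining fields and copies the result back to the guest.
+ */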
+static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun,
+ MegasasCmd *cmd)
+{
+ struct mfi_pd_info *info = cmd->iov_buf;
+ size_t dcmd_size = sizeof(struct mfi_pd_info);
+ BlockConf *conf = &sdev->conf;
+ uint64_t pd_size;
+ uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF);
+ uint8_t cmdbuf[6];
+ SCSIRequest *req;
+ size_t len, resid;
+
+ if (!cmd->iov_buf) {
+ cmd->iov_buf = g_malloc(dcmd_size);
+ memset(cmd->iov_buf, 0, dcmd_size);
+ info = cmd->iov_buf;
+ info->inquiry_data[0] = 0x7f; /* Force PQual 0x3, PType 0x1f */
+ info->vpd_page83[0] = 0x7f;
+ megasas_setup_inquiry(cmdbuf, 0, sizeof(info->inquiry_data));
+ req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd);
+ if (!req) {
+ trace_megasas_dcmd_req_alloc_failed(cmd->index,
+ "PD get info std inquiry");
+ g_free(cmd->iov_buf);
+ cmd->iov_buf = NULL;
+ return MFI_STAT_FLASH_ALLOC_FAIL;
+ }
+ trace_megasas_dcmd_internal_submit(cmd->index,
+ "PD get info std inquiry", lun);
+ len = scsi_req_enqueue(req);
+ if (len > 0) {
+ cmd->iov_size = len;
+ scsi_req_continue(req);
+ }
+ return MFI_STAT_INVALID_STATUS;
+ } else if (info->inquiry_data[0] != 0x7f && info->vpd_page83[0] == 0x7f) {
+ megasas_setup_inquiry(cmdbuf, 0x83, sizeof(info->vpd_page83));
+ req = scsi_req_new(sdev, cmd->index, lun, cmdbuf, cmd);
+ if (!req) {
+ trace_megasas_dcmd_req_alloc_failed(cmd->index,
+ "PD get info vpd inquiry");
+ return MFI_STAT_FLASH_ALLOC_FAIL;
+ }
+ trace_megasas_dcmd_internal_submit(cmd->index,
+ "PD get info vpd inquiry", lun);
+ len = scsi_req_enqueue(req);
+ if (len > 0) {
+ cmd->iov_size = len;
+ scsi_req_continue(req);
+ }
+ return MFI_STAT_INVALID_STATUS;
+ }
+ /* Finished, set FW state */
+ if ((info->inquiry_data[0] >> 5) == 0) {
+ if (megasas_is_jbod(cmd->state)) {
+ info->fw_state = cpu_to_le16(MFI_PD_STATE_SYSTEM);
+ } else {
+ info->fw_state = cpu_to_le16(MFI_PD_STATE_ONLINE);
+ }
+ } else {
+ info->fw_state = cpu_to_le16(MFI_PD_STATE_OFFLINE);
+ }
+
+ info->ref.v.device_id = cpu_to_le16(sdev_id);
+ info->state.ddf.pd_type = cpu_to_le16(MFI_PD_DDF_TYPE_IN_VD|
+ MFI_PD_DDF_TYPE_INTF_SAS);
+ bdrv_get_geometry(conf->bs, &pd_size);
+ info->raw_size = cpu_to_le64(pd_size);
+ info->non_coerced_size = cpu_to_le64(pd_size);
+ info->coerced_size = cpu_to_le64(pd_size);
+ info->encl_device_id = 0xFFFF;
+ info->slot_number = (sdev->id & 0xFF);
+ info->path_info.count = 1;
+ info->path_info.sas_addr[0] =
+ cpu_to_le64(megasas_get_sata_addr(sdev_id));
+ info->connected_port_bitmap = 0x1;
+ info->device_speed = 1;
+ info->link_speed = 1;
+ resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg);
+ g_free(cmd->iov_buf);
+ cmd->iov_size = dcmd_size - resid;
+ cmd->iov_buf = NULL;
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_pd_get_info(MegasasState *s, MegasasCmd *cmd)
+{
+ size_t dcmd_size = sizeof(struct mfi_pd_info);
+ uint16_t pd_id;
+ SCSIDevice *sdev = NULL;
+ int retval = MFI_STAT_DEVICE_NOT_FOUND;
+
+ if (cmd->iov_size < dcmd_size) {
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ /* mbox0 has the ID */
+ pd_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]);
+ sdev = scsi_device_find(&s->bus, 0, pd_id, 0);
+ trace_megasas_dcmd_pd_get_info(cmd->index, pd_id);
+
+ if (sdev) {
+ /* Submit inquiry */
+ retval = megasas_pd_get_info_submit(sdev, pd_id, cmd);
+ }
+
+ return retval;
+}
+
+static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_ld_list info;
+ size_t dcmd_size = sizeof(info), resid;
+ uint32_t num_ld_disks = 0, max_ld_disks = s->fw_luns;
+ uint64_t ld_size;
+ BusChild *kid;
+
+ memset(&info, 0, dcmd_size);
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ if (megasas_is_jbod(s)) {
+ max_ld_disks = 0;
+ }
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child);
+ BlockConf *conf = &sdev->conf;
+
+ if (num_ld_disks >= max_ld_disks) {
+ break;
+ }
+ /* Logical device size is in blocks */
+ bdrv_get_geometry(conf->bs, &ld_size);
+ info.ld_list[num_ld_disks].ld.v.target_id = sdev->id;
+ info.ld_list[num_ld_disks].state = MFI_LD_STATE_OPTIMAL;
+ info.ld_list[num_ld_disks].size = cpu_to_le64(ld_size);
+ num_ld_disks++;
+ }
+ info.ld_count = cpu_to_le32(num_ld_disks);
+ trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks);
+
+ resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ cmd->iov_size = dcmd_size - resid;
+ return MFI_STAT_OK;
+}
+
+static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun,
+ MegasasCmd *cmd)
+{
+ struct mfi_ld_info *info = cmd->iov_buf;
+ size_t dcmd_size = sizeof(struct mfi_ld_info);
+ uint8_t cdb[6];
+ SCSIRequest *req;
+ ssize_t len, resid;
+ BlockConf *conf = &sdev->conf;
+ uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (lun & 0xFF);
+ uint64_t ld_size;
+
+ if (!cmd->iov_buf) {
+ cmd->iov_buf = g_malloc(dcmd_size);
+ memset(cmd->iov_buf, 0x0, dcmd_size);
+ info = cmd->iov_buf;
+ megasas_setup_inquiry(cdb, 0x83, sizeof(info->vpd_page83));
+ req = scsi_req_new(sdev, cmd->index, lun, cdb, cmd);
+ if (!req) {
+ trace_megasas_dcmd_req_alloc_failed(cmd->index,
+ "LD get info vpd inquiry");
+ g_free(cmd->iov_buf);
+ cmd->iov_buf = NULL;
+ return MFI_STAT_FLASH_ALLOC_FAIL;
+ }
+ trace_megasas_dcmd_internal_submit(cmd->index,
+ "LD get info vpd inquiry", lun);
+ len = scsi_req_enqueue(req);
+ if (len > 0) {
+ cmd->iov_size = len;
+ scsi_req_continue(req);
+ }
+ return MFI_STAT_INVALID_STATUS;
+ }
+
+ info->ld_config.params.state = MFI_LD_STATE_OPTIMAL;
+ info->ld_config.properties.ld.v.target_id = lun;
+ info->ld_config.params.stripe_size = 3;
+ info->ld_config.params.num_drives = 1;
+ info->ld_config.params.is_consistent = 1;
+ /* Logical device size is in blocks */
+ bdrv_get_geometry(conf->bs, &ld_size);
+ info->size = cpu_to_le64(ld_size);
+ memset(info->ld_config.span, 0, sizeof(info->ld_config.span));
+ info->ld_config.span[0].start_block = 0;
+ info->ld_config.span[0].num_blocks = info->size;
+ info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id);
+
+ resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg);
+ g_free(cmd->iov_buf);
+ cmd->iov_size = dcmd_size - resid;
+ cmd->iov_buf = NULL;
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_ld_get_info(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_ld_info info;
+ size_t dcmd_size = sizeof(info);
+ uint16_t ld_id;
+ uint32_t max_ld_disks = s->fw_luns;
+ SCSIDevice *sdev = NULL;
+ int retval = MFI_STAT_DEVICE_NOT_FOUND;
+
+ if (cmd->iov_size < dcmd_size) {
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ /* mbox0 has the ID */
+ ld_id = le16_to_cpu(cmd->frame->dcmd.mbox[0]);
+ trace_megasas_dcmd_ld_get_info(cmd->index, ld_id);
+
+ if (megasas_is_jbod(s)) {
+ return MFI_STAT_DEVICE_NOT_FOUND;
+ }
+
+ if (ld_id < max_ld_disks) {
+ sdev = scsi_device_find(&s->bus, 0, ld_id, 0);
+ }
+
+ if (sdev) {
+ retval = megasas_ld_get_info_submit(sdev, ld_id, cmd);
+ }
+
+ return retval;
+}
+
+static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd)
+{
+ uint8_t data[4096];
+ struct mfi_config_data *info;
+ int num_pd_disks = 0, array_offset, ld_offset;
+ BusChild *kid;
+
+ if (cmd->iov_size > 4096) {
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ num_pd_disks++;
+ }
+ info = (struct mfi_config_data *)&data;
+ /*
+ * Array mapping:
+ * - One array per SCSI device
+ * - One logical drive per SCSI device
+ * spanning the entire device
+ */
+ info->array_count = num_pd_disks;
+ info->array_size = sizeof(struct mfi_array) * num_pd_disks;
+ info->log_drv_count = num_pd_disks;
+ info->log_drv_size = sizeof(struct mfi_ld_config) * num_pd_disks;
+ info->spares_count = 0;
+ info->spares_size = sizeof(struct mfi_spare);
+ info->size = sizeof(struct mfi_config_data) + info->array_size +
+ info->log_drv_size;
+ if (info->size > 4096) {
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+
+ array_offset = sizeof(struct mfi_config_data);
+ ld_offset = array_offset + sizeof(struct mfi_array) * num_pd_disks;
+
+ QTAILQ_FOREACH(kid, &s->bus.qbus.children, sibling) {
+ SCSIDevice *sdev = DO_UPCAST(SCSIDevice, qdev, kid->child);
+ BlockConf *conf = &sdev->conf;
+ uint16_t sdev_id = ((sdev->id & 0xFF) << 8) | (sdev->lun & 0xFF);
+ struct mfi_array *array;
+ struct mfi_ld_config *ld;
+ uint64_t pd_size;
+ int i;
+
+ array = (struct mfi_array *)(data + array_offset);
+ bdrv_get_geometry(conf->bs, &pd_size);
+ array->size = cpu_to_le64(pd_size);
+ array->num_drives = 1;
+ array->array_ref = cpu_to_le16(sdev_id);
+ array->pd[0].ref.v.device_id = cpu_to_le16(sdev_id);
+ array->pd[0].ref.v.seq_num = 0;
+ array->pd[0].fw_state = MFI_PD_STATE_ONLINE;
+ array->pd[0].encl.pd = 0xFF;
+ array->pd[0].encl.slot = (sdev->id & 0xFF);
+ for (i = 1; i < MFI_MAX_ROW_SIZE; i++) {
+ array->pd[i].ref.v.device_id = 0xFFFF;
+ array->pd[i].ref.v.seq_num = 0;
+ array->pd[i].fw_state = MFI_PD_STATE_UNCONFIGURED_GOOD;
+ array->pd[i].encl.pd = 0xFF;
+ array->pd[i].encl.slot = 0xFF;
+ }
+ array_offset += sizeof(struct mfi_array);
+ ld = (struct mfi_ld_config *)(data + ld_offset);
+ memset(ld, 0, sizeof(struct mfi_ld_config));
+ ld->properties.ld.v.target_id = (sdev->id & 0xFF);
+ ld->properties.default_cache_policy = MR_LD_CACHE_READ_AHEAD |
+ MR_LD_CACHE_READ_ADAPTIVE;
+ ld->properties.current_cache_policy = MR_LD_CACHE_READ_AHEAD |
+ MR_LD_CACHE_READ_ADAPTIVE;
+ ld->params.state = MFI_LD_STATE_OPTIMAL;
+ ld->params.stripe_size = 3;
+ ld->params.num_drives = 1;
+ ld->params.span_depth = 1;
+ ld->params.is_consistent = 1;
+ ld->span[0].start_block = 0;
+ ld->span[0].num_blocks = cpu_to_le64(pd_size);
+ ld->span[0].array_ref = cpu_to_le16(sdev_id);
+ ld_offset += sizeof(struct mfi_ld_config);
+ }
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_ctrl_props info;
+ size_t dcmd_size = sizeof(info);
+
+ memset(&info, 0x0, dcmd_size);
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+ info.pred_fail_poll_interval = cpu_to_le16(300);
+ info.intr_throttle_cnt = cpu_to_le16(16);
+ info.intr_throttle_timeout = cpu_to_le16(50);
+ info.rebuild_rate = 30;
+ info.patrol_read_rate = 30;
+ info.bgi_rate = 30;
+ info.cc_rate = 30;
+ info.recon_rate = 30;
+ info.cache_flush_interval = 4;
+ info.spinup_drv_cnt = 2;
+ info.spinup_delay = 6;
+ info.ecc_bucket_size = 15;
+ info.ecc_bucket_leak_rate = cpu_to_le16(1440);
+ info.expose_encl_devices = 1;
+
+ cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg);
+ return MFI_STAT_OK;
+}
+
+static int megasas_cache_flush(MegasasState *s, MegasasCmd *cmd)
+{
+ qemu_aio_flush();
+ return MFI_STAT_OK;
+}
+
+static int megasas_ctrl_shutdown(MegasasState *s, MegasasCmd *cmd)
+{
+ s->fw_state = MFI_FWSTATE_READY;
+ return MFI_STAT_OK;
+}
+
+static int megasas_cluster_reset_ld(MegasasState *s, MegasasCmd *cmd)
+{
+ return MFI_STAT_INVALID_DCMD;
+}
+
+static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd)
+{
+ struct mfi_ctrl_props info;
+ size_t dcmd_size = sizeof(info);
+
+ if (cmd->iov_size < dcmd_size) {
+ trace_megasas_dcmd_invalid_xfer_len(cmd->index, cmd->iov_size,
+ dcmd_size);
+ return MFI_STAT_INVALID_PARAMETER;
+ }
+ dma_buf_write((uint8_t *)&info, cmd->iov_size, &cmd->qsg);
+ trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size);
+ return MFI_STAT_OK;
+}
+
+static int megasas_dcmd_dummy(MegasasState *s, MegasasCmd *cmd)
+{
+ trace_megasas_dcmd_dummy(cmd->index, cmd->iov_size);
+ return MFI_STAT_OK;
+}
+
+static const struct dcmd_cmd_tbl_t {
+ int opcode;
+ const char *desc;
+ int (*func)(MegasasState *s, MegasasCmd *cmd);
+} dcmd_cmd_tbl[] = {
+ { MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC, "CTRL_HOST_MEM_ALLOC",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_GET_INFO, "CTRL_GET_INFO",
+ megasas_ctrl_get_info },
+ { MFI_DCMD_CTRL_GET_PROPERTIES, "CTRL_GET_PROPERTIES",
+ megasas_dcmd_get_properties },
+ { MFI_DCMD_CTRL_SET_PROPERTIES, "CTRL_SET_PROPERTIES",
+ megasas_dcmd_set_properties },
+ { MFI_DCMD_CTRL_ALARM_GET, "CTRL_ALARM_GET",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_ALARM_ENABLE, "CTRL_ALARM_ENABLE",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_ALARM_DISABLE, "CTRL_ALARM_DISABLE",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_ALARM_SILENCE, "CTRL_ALARM_SILENCE",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_ALARM_TEST, "CTRL_ALARM_TEST",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_EVENT_GETINFO, "CTRL_EVENT_GETINFO",
+ megasas_event_info },
+ { MFI_DCMD_CTRL_EVENT_GET, "CTRL_EVENT_GET",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_EVENT_WAIT, "CTRL_EVENT_WAIT",
+ megasas_event_wait },
+ { MFI_DCMD_CTRL_SHUTDOWN, "CTRL_SHUTDOWN",
+ megasas_ctrl_shutdown },
+ { MFI_DCMD_HIBERNATE_STANDBY, "CTRL_STANDBY",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_GET_TIME, "CTRL_GET_TIME",
+ megasas_dcmd_get_fw_time },
+ { MFI_DCMD_CTRL_SET_TIME, "CTRL_SET_TIME",
+ megasas_dcmd_set_fw_time },
+ { MFI_DCMD_CTRL_BIOS_DATA_GET, "CTRL_BIOS_DATA_GET",
+ megasas_dcmd_get_bios_info },
+ { MFI_DCMD_CTRL_FACTORY_DEFAULTS, "CTRL_FACTORY_DEFAULTS",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_MFC_DEFAULTS_GET, "CTRL_MFC_DEFAULTS_GET",
+ megasas_mfc_get_defaults },
+ { MFI_DCMD_CTRL_MFC_DEFAULTS_SET, "CTRL_MFC_DEFAULTS_SET",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CTRL_CACHE_FLUSH, "CTRL_CACHE_FLUSH",
+ megasas_cache_flush },
+ { MFI_DCMD_PD_GET_LIST, "PD_GET_LIST",
+ megasas_dcmd_pd_get_list },
+ { MFI_DCMD_PD_LIST_QUERY, "PD_LIST_QUERY",
+ megasas_dcmd_pd_list_query },
+ { MFI_DCMD_PD_GET_INFO, "PD_GET_INFO",
+ megasas_dcmd_pd_get_info },
+ { MFI_DCMD_PD_STATE_SET, "PD_STATE_SET",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_PD_REBUILD, "PD_REBUILD",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_PD_BLINK, "PD_BLINK",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_PD_UNBLINK, "PD_UNBLINK",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_LD_GET_LIST, "LD_GET_LIST",
+ megasas_dcmd_ld_get_list },
+ { MFI_DCMD_LD_GET_INFO, "LD_GET_INFO",
+ megasas_dcmd_ld_get_info },
+ { MFI_DCMD_LD_GET_PROP, "LD_GET_PROP",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_LD_SET_PROP, "LD_SET_PROP",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_LD_DELETE, "LD_DELETE",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CFG_READ, "CFG_READ",
+ megasas_dcmd_cfg_read },
+ { MFI_DCMD_CFG_ADD, "CFG_ADD",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CFG_CLEAR, "CFG_CLEAR",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CFG_FOREIGN_READ, "CFG_FOREIGN_READ",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CFG_FOREIGN_IMPORT, "CFG_FOREIGN_IMPORT",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_BBU_STATUS, "BBU_STATUS",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_BBU_CAPACITY_INFO, "BBU_CAPACITY_INFO",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_BBU_DESIGN_INFO, "BBU_DESIGN_INFO",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_BBU_PROP_GET, "BBU_PROP_GET",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CLUSTER, "CLUSTER",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CLUSTER_RESET_ALL, "CLUSTER_RESET_ALL",
+ megasas_dcmd_dummy },
+ { MFI_DCMD_CLUSTER_RESET_LD, "CLUSTER_RESET_LD",
+ megasas_cluster_reset_ld },
+ { -1, NULL, NULL }
+};
+
+static int megasas_handle_dcmd(MegasasState *s, MegasasCmd *cmd)
+{
+ int opcode, len;
+ int retval = 0;
+ const struct dcmd_cmd_tbl_t *cmdptr = dcmd_cmd_tbl;
+
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ trace_megasas_handle_dcmd(cmd->index, opcode);
+ len = megasas_map_dcmd(s, cmd);
+ if (len < 0) {
+ return MFI_STAT_MEMORY_NOT_AVAILABLE;
+ }
+ while (cmdptr->opcode != -1 && cmdptr->opcode != opcode) {
+ cmdptr++;
+ }
+ if (cmdptr->opcode == -1) {
+ trace_megasas_dcmd_unhandled(cmd->index, opcode, len);
+ retval = megasas_dcmd_dummy(s, cmd);
+ } else {
+ trace_megasas_dcmd_enter(cmd->index, cmdptr->desc, len);
+ retval = cmdptr->func(s, cmd);
+ }
+ if (retval != MFI_STAT_INVALID_STATUS) {
+ megasas_finish_dcmd(cmd, len);
+ }
+ return retval;
+}
+
+static int megasas_finish_internal_dcmd(MegasasCmd *cmd,
+ SCSIRequest *req)
+{
+ int opcode;
+ int retval = MFI_STAT_OK;
+ int lun = req->lun;
+
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ scsi_req_unref(req);
+ trace_megasas_dcmd_internal_finish(cmd->index, opcode, lun);
+ switch (opcode) {
+ case MFI_DCMD_PD_GET_INFO:
+ retval = megasas_pd_get_info_submit(req->dev, lun, cmd);
+ break;
+ case MFI_DCMD_LD_GET_INFO:
+ retval = megasas_ld_get_info_submit(req->dev, lun, cmd);
+ break;
+ default:
+ trace_megasas_dcmd_internal_invalid(cmd->index, opcode);
+ retval = MFI_STAT_INVALID_DCMD;
+ break;
+ }
+ if (retval != MFI_STAT_INVALID_STATUS) {
+ megasas_finish_dcmd(cmd, cmd->iov_size);
+ }
+ return retval;
+}
+
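+/*
+ * Enqueue the SCSI request and reconcile the expected transfer length
+ * with the size of the mapped scatter-gather list, tracing any
+ * overflow or underflow.
+ */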
+static int megasas_enqueue_req(MegasasCmd *cmd, bool is_write)
+{
+ int len;
+
+ len = scsi_req_enqueue(cmd->req);
+ if (len < 0) {
+ len = -len;
+ }
+ if (len > 0) {
+ if (len > cmd->iov_size) {
+ if (is_write) {
+ trace_megasas_iov_write_overflow(cmd->index, len,
+ cmd->iov_size);
+ } else {
+ trace_megasas_iov_read_overflow(cmd->index, len,
+ cmd->iov_size);
+ }
+ }
+ if (len < cmd->iov_size) {
+ if (is_write) {
+ trace_megasas_iov_write_underflow(cmd->index, len,
+ cmd->iov_size);
+ } else {
+ trace_megasas_iov_read_underflow(cmd->index, len,
+ cmd->iov_size);
+ }
+ cmd->iov_size = len;
+ }
+ scsi_req_continue(cmd->req);
+ }
+ return len;
+}
+
+static int megasas_handle_scsi(MegasasState *s, MegasasCmd *cmd,
+ bool is_logical)
+{
+ uint8_t *cdb;
+ int len;
+ bool is_write;
+ struct SCSIDevice *sdev = NULL;
+
+ cdb = cmd->frame->pass.cdb;
+
+ if (cmd->frame->header.target_id < s->fw_luns) {
+ sdev = scsi_device_find(&s->bus, 0, cmd->frame->header.target_id,
+ cmd->frame->header.lun_id);
+ }
+ cmd->iov_size = le32_to_cpu(cmd->frame->header.data_len);
+ trace_megasas_handle_scsi(mfi_frame_desc[cmd->frame->header.frame_cmd],
+ is_logical, cmd->frame->header.target_id,
+ cmd->frame->header.lun_id, sdev, cmd->iov_size);
+
+ if (!sdev || (megasas_is_jbod(s) && is_logical)) {
+ trace_megasas_scsi_target_not_present(
+ mfi_frame_desc[cmd->frame->header.frame_cmd], is_logical,
+ cmd->frame->header.target_id, cmd->frame->header.lun_id);
+ return MFI_STAT_DEVICE_NOT_FOUND;
+ }
+
+ if (cmd->frame->header.cdb_len > 16) {
+ trace_megasas_scsi_invalid_cdb_len(
+ mfi_frame_desc[cmd->frame->header.frame_cmd], is_logical,
+ cmd->frame->header.target_id, cmd->frame->header.lun_id,
+ cmd->frame->header.cdb_len);
+ megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE));
+ cmd->frame->header.scsi_status = CHECK_CONDITION;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+
+ if (megasas_map_sgl(s, cmd, &cmd->frame->pass.sgl)) {
+ megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE));
+ cmd->frame->header.scsi_status = CHECK_CONDITION;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+
+ cmd->req = scsi_req_new(sdev, cmd->index,
+ cmd->frame->header.lun_id, cdb, cmd);
+ if (!cmd->req) {
+ trace_megasas_scsi_req_alloc_failed(
+ mfi_frame_desc[cmd->frame->header.frame_cmd],
+ cmd->frame->header.target_id, cmd->frame->header.lun_id);
+ megasas_write_sense(cmd, SENSE_CODE(NO_SENSE));
+ cmd->frame->header.scsi_status = BUSY;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+
+ is_write = (cmd->req->cmd.mode == SCSI_XFER_TO_DEV);
+ len = megasas_enqueue_req(cmd, is_write);
+ if (len > 0) {
+ if (is_write) {
+ trace_megasas_scsi_write_start(cmd->index, len);
+ } else {
+ trace_megasas_scsi_read_start(cmd->index, len);
+ }
+ } else {
+ trace_megasas_scsi_nodata(cmd->index);
+ }
+ return MFI_STAT_INVALID_STATUS;
+}
+
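+/*
+ * Handle LD_READ/LD_WRITE frames by translating them into
+ * READ_16/WRITE_16 CDBs for the SCSI layer.
+ */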
+static int megasas_handle_io(MegasasState *s, MegasasCmd *cmd)
+{
+ uint32_t lba_count, lba_start_hi, lba_start_lo;
+ uint64_t lba_start;
+ bool is_write = (cmd->frame->header.frame_cmd == MFI_CMD_LD_WRITE);
+ uint8_t cdb[16];
+ int len;
+ struct SCSIDevice *sdev = NULL;
+
+ lba_count = le32_to_cpu(cmd->frame->io.header.data_len);
+ lba_start_lo = le32_to_cpu(cmd->frame->io.lba_lo);
+ lba_start_hi = le32_to_cpu(cmd->frame->io.lba_hi);
+ lba_start = ((uint64_t)lba_start_hi << 32) | lba_start_lo;
+
+ if (cmd->frame->header.target_id < s->fw_luns) {
+ sdev = scsi_device_find(&s->bus, 0, cmd->frame->header.target_id,
+ cmd->frame->header.lun_id);
+ }
+
+ trace_megasas_handle_io(cmd->index,
+ mfi_frame_desc[cmd->frame->header.frame_cmd],
+ cmd->frame->header.target_id,
+ cmd->frame->header.lun_id,
+ (unsigned long)lba_start, (unsigned long)lba_count);
+ if (!sdev) {
+ trace_megasas_io_target_not_present(cmd->index,
+ mfi_frame_desc[cmd->frame->header.frame_cmd],
+ cmd->frame->header.target_id, cmd->frame->header.lun_id);
+ return MFI_STAT_DEVICE_NOT_FOUND;
+ }
+
+ if (cmd->frame->header.cdb_len > 16) {
+ trace_megasas_scsi_invalid_cdb_len(
+ mfi_frame_desc[cmd->frame->header.frame_cmd], 1,
+ cmd->frame->header.target_id, cmd->frame->header.lun_id,
+ cmd->frame->header.cdb_len);
+ megasas_write_sense(cmd, SENSE_CODE(INVALID_OPCODE));
+ cmd->frame->header.scsi_status = CHECK_CONDITION;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+
+ cmd->iov_size = lba_count * sdev->blocksize;
+ if (megasas_map_sgl(s, cmd, &cmd->frame->io.sgl)) {
+ megasas_write_sense(cmd, SENSE_CODE(TARGET_FAILURE));
+ cmd->frame->header.scsi_status = CHECK_CONDITION;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+
+ megasas_encode_lba(cdb, lba_start, lba_count, is_write);
+ cmd->req = scsi_req_new(sdev, cmd->index,
+ cmd->frame->header.lun_id, cdb, cmd);
+ if (!cmd->req) {
+ trace_megasas_scsi_req_alloc_failed(
+ mfi_frame_desc[cmd->frame->header.frame_cmd],
+ cmd->frame->header.target_id, cmd->frame->header.lun_id);
+ megasas_write_sense(cmd, SENSE_CODE(NO_SENSE));
+ cmd->frame->header.scsi_status = BUSY;
+ s->event_count++;
+ return MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+ len = megasas_enqueue_req(cmd, is_write);
+ if (len > 0) {
+ if (is_write) {
+ trace_megasas_io_write_start(cmd->index, lba_start, lba_count, len);
+ } else {
+ trace_megasas_io_read_start(cmd->index, lba_start, lba_count, len);
+ }
+ }
+ return MFI_STAT_INVALID_STATUS;
+}
+
+static int megasas_finish_internal_command(MegasasCmd *cmd,
+ SCSIRequest *req, size_t resid)
+{
+ int retval = MFI_STAT_INVALID_CMD;
+
+ if (cmd->frame->header.frame_cmd == MFI_CMD_DCMD) {
+ cmd->iov_size -= resid;
+ retval = megasas_finish_internal_dcmd(cmd, req);
+ }
+ return retval;
+}
+
+static QEMUSGList *megasas_get_sg_list(SCSIRequest *req)
+{
+ MegasasCmd *cmd = req->hba_private;
+
+ if (cmd->frame->header.frame_cmd == MFI_CMD_DCMD) {
+ return NULL;
+ } else {
+ return &cmd->qsg;
+ }
+}
+
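+/*
+ * Data phase completion. For internal DCMD inquiries, copy the
+ * returned INQUIRY/VPD data into the staging buffer; for all other
+ * requests just continue the SCSI request.
+ */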
+static void megasas_xfer_complete(SCSIRequest *req, uint32_t len)
+{
+ MegasasCmd *cmd = req->hba_private;
+ uint8_t *buf;
+ uint32_t opcode;
+
+ trace_megasas_io_complete(cmd->index, len);
+
+ if (cmd->frame->header.frame_cmd != MFI_CMD_DCMD) {
+ scsi_req_continue(req);
+ return;
+ }
+
+ buf = scsi_req_get_buf(req);
+ opcode = le32_to_cpu(cmd->frame->dcmd.opcode);
+ if (opcode == MFI_DCMD_PD_GET_INFO && cmd->iov_buf) {
+ struct mfi_pd_info *info = cmd->iov_buf;
+
+ if (info->inquiry_data[0] == 0x7f) {
+ memset(info->inquiry_data, 0, sizeof(info->inquiry_data));
+ memcpy(info->inquiry_data, buf, len);
+ } else if (info->vpd_page83[0] == 0x7f) {
+ memset(info->vpd_page83, 0, sizeof(info->vpd_page83));
+ memcpy(info->vpd_page83, buf, len);
+ }
+ scsi_req_continue(req);
+ } else if (opcode == MFI_DCMD_LD_GET_INFO) {
+ struct mfi_ld_info *info = cmd->iov_buf;
+
+ if (cmd->iov_buf) {
+ memcpy(info->vpd_page83, buf, sizeof(info->vpd_page83));
+ scsi_req_continue(req);
+ }
+ }
+}
+
+static void megasas_command_complete(SCSIRequest *req, uint32_t status,
+ size_t resid)
+{
+ MegasasCmd *cmd = req->hba_private;
+ uint8_t cmd_status = MFI_STAT_OK;
+
+ trace_megasas_command_complete(cmd->index, status, resid);
+
+ if (cmd->req != req) {
+ /*
+ * Internal command complete
+ */
+ cmd_status = megasas_finish_internal_command(cmd, req, resid);
+ if (cmd_status == MFI_STAT_INVALID_STATUS) {
+ return;
+ }
+ } else {
+ req->status = status;
+ trace_megasas_scsi_complete(cmd->index, req->status,
+ cmd->iov_size, req->cmd.xfer);
+ if (req->status != GOOD) {
+ cmd_status = MFI_STAT_SCSI_DONE_WITH_ERROR;
+ }
+ if (req->status == CHECK_CONDITION) {
+ megasas_copy_sense(cmd);
+ }
+
+ megasas_unmap_sgl(cmd);
+ cmd->frame->header.scsi_status = req->status;
+ scsi_req_unref(cmd->req);
+ cmd->req = NULL;
+ }
+ cmd->frame->header.cmd_status = cmd_status;
+ megasas_complete_frame(cmd->state, cmd->context);
+}
+
+static void megasas_command_cancel(SCSIRequest *req)
+{
+ MegasasCmd *cmd = req->hba_private;
+
+ if (cmd) {
+ megasas_abort_command(cmd);
+ } else {
+ scsi_req_unref(req);
+ }
+}
+
+static int megasas_handle_abort(MegasasState *s, MegasasCmd *cmd)
+{
+ uint64_t abort_ctx = le64_to_cpu(cmd->frame->abort.abort_context);
+ target_phys_addr_t abort_addr, addr_hi, addr_lo;
+ MegasasCmd *abort_cmd;
+
+ addr_hi = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_hi);
+ addr_lo = le32_to_cpu(cmd->frame->abort.abort_mfi_addr_lo);
+ abort_addr = ((uint64_t)addr_hi << 32) | addr_lo;
+
+ abort_cmd = megasas_lookup_frame(s, abort_addr);
+ if (!abort_cmd) {
+ trace_megasas_abort_no_cmd(cmd->index, abort_ctx);
+ s->event_count++;
+ return MFI_STAT_OK;
+ }
+ if (!megasas_use_queue64(s)) {
+ abort_ctx &= (uint64_t)0xFFFFFFFF;
+ }
+ if (abort_cmd->context != abort_ctx) {
+ trace_megasas_abort_invalid_context(cmd->index, abort_cmd->index,
+ abort_cmd->context);
+ s->event_count++;
+ return MFI_STAT_ABORT_NOT_POSSIBLE;
+ }
+ trace_megasas_abort_frame(cmd->index, abort_cmd->index);
+ megasas_abort_command(abort_cmd);
+ if (s->event_cmd && abort_cmd == s->event_cmd) {
+ s->event_cmd = NULL;
+ }
+ s->event_count++;
+ return MFI_STAT_OK;
+}
+
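+/*
+ * Dispatch an incoming MFI frame according to the command type
+ * in its header.
+ */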
+static void megasas_handle_frame(MegasasState *s, uint64_t frame_addr,
+ uint32_t frame_count)
+{
+ uint8_t frame_status = MFI_STAT_INVALID_CMD;
+ uint64_t frame_context;
+ MegasasCmd *cmd;
+
+ /*
+ * Always read 64bit context, top bits will be
+ * masked out if required in megasas_enqueue_frame()
+ */
+ frame_context = megasas_frame_get_context(frame_addr);
+
+ cmd = megasas_enqueue_frame(s, frame_addr, frame_context, frame_count);
+ if (!cmd) {
+ /* reply queue full */
+ trace_megasas_frame_busy(frame_addr);
+ megasas_frame_set_scsi_status(frame_addr, BUSY);
+ megasas_frame_set_cmd_status(frame_addr, MFI_STAT_SCSI_DONE_WITH_ERROR);
+ megasas_complete_frame(s, frame_context);
+ s->event_count++;
+ return;
+ }
+ switch (cmd->frame->header.frame_cmd) {
+ case MFI_CMD_INIT:
+ frame_status = megasas_init_firmware(s, cmd);
+ break;
+ case MFI_CMD_DCMD:
+ frame_status = megasas_handle_dcmd(s, cmd);
+ break;
+ case MFI_CMD_ABORT:
+ frame_status = megasas_handle_abort(s, cmd);
+ break;
+ case MFI_CMD_PD_SCSI_IO:
+ frame_status = megasas_handle_scsi(s, cmd, 0);
+ break;
+ case MFI_CMD_LD_SCSI_IO:
+ frame_status = megasas_handle_scsi(s, cmd, 1);
+ break;
+ case MFI_CMD_LD_READ:
+ case MFI_CMD_LD_WRITE:
+ frame_status = megasas_handle_io(s, cmd);
+ break;
+ default:
+ trace_megasas_unhandled_frame_cmd(cmd->index,
+ cmd->frame->header.frame_cmd);
+ s->event_count++;
+ break;
+ }
+ if (frame_status != MFI_STAT_INVALID_STATUS) {
+ if (cmd->frame) {
+ cmd->frame->header.cmd_status = frame_status;
+ } else {
+ megasas_frame_set_cmd_status(frame_addr, frame_status);
+ }
+ megasas_complete_frame(s, cmd->context);
+ }
+}
+
+static uint64_t megasas_mmio_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
+{
+ MegasasState *s = opaque;
+ uint32_t retval = 0;
+
+ switch (addr) {
+ case MFI_IDB:
+ retval = 0;
+ break;
+ case MFI_OMSG0:
+ case MFI_OSP0:
+ retval = (megasas_use_msix(s) ? MFI_FWSTATE_MSIX_SUPPORTED : 0) |
+ (s->fw_state & MFI_FWSTATE_MASK) |
+ ((s->fw_sge & 0xff) << 16) |
+ (s->fw_cmds & 0xFFFF);
+ break;
+ case MFI_OSTS:
+ if (megasas_intr_enabled(s) && s->doorbell) {
+ retval = MFI_1078_RM | 1;
+ }
+ break;
+ case MFI_OMSK:
+ retval = s->intr_mask;
+ break;
+ case MFI_ODCR0:
+ retval = s->doorbell;
+ break;
+ default:
+ trace_megasas_mmio_invalid_readl(addr);
+ break;
+ }
+ trace_megasas_mmio_readl(addr, retval);
+ return retval;
+}
+
+static void megasas_mmio_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
+{
+ MegasasState *s = opaque;
+ uint64_t frame_addr;
+ uint32_t frame_count;
+ int i;
+
+ trace_megasas_mmio_writel(addr, val);
+ switch (addr) {
+ case MFI_IDB:
+ if (val & MFI_FWINIT_ABORT) {
+ /* Abort all pending cmds */
+ for (i = 0; i < s->fw_cmds; i++) {
+ megasas_abort_command(&s->frames[i]);
+ }
+ }
+ if (val & MFI_FWINIT_READY) {
+ /* move to FW READY */
+ megasas_soft_reset(s);
+ }
+ if (val & MFI_FWINIT_MFIMODE) {
+ /* discard MFIs */
+ }
+ break;
+ case MFI_OMSK:
+ s->intr_mask = val;
+ if (!megasas_intr_enabled(s) && !msix_enabled(&s->dev)) {
+ trace_megasas_irq_lower();
+ qemu_irq_lower(s->dev.irq[0]);
+ }
+ if (megasas_intr_enabled(s)) {
+ trace_megasas_intr_enabled();
+ } else {
+ trace_megasas_intr_disabled();
+ }
+ break;
+ case MFI_ODCR0:
+ s->doorbell = 0;
+ if (s->producer_pa && megasas_intr_enabled(s)) {
+ /* Update reply queue pointer */
+ trace_megasas_qf_update(s->reply_queue_head, s->busy);
+ stl_le_phys(s->producer_pa, s->reply_queue_head);
+ if (!msix_enabled(&s->dev)) {
+ trace_megasas_irq_lower();
+ qemu_irq_lower(s->dev.irq[0]);
+ }
+ }
+ break;
+ case MFI_IQPH:
+ /* Received high 32 bits of a 64 bit MFI frame address */
+ s->frame_hi = val;
+ break;
+ case MFI_IQPL:
+ /* Received low 32 bits of a 64 bit MFI frame address */
+ case MFI_IQP:
+ /* Received 32 bit MFI frame address */
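+ /* The low 5 bits are not address bits; bits 1-4 hold the frame count */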
+ frame_addr = (val & ~0x1F);
+ /* Add possible 64 bit offset */
+ frame_addr |= ((uint64_t)s->frame_hi << 32);
+ s->frame_hi = 0;
+ frame_count = (val >> 1) & 0xF;
+ megasas_handle_frame(s, frame_addr, frame_count);
+ break;
+ default:
+ trace_megasas_mmio_invalid_writel(addr, val);
+ break;
+ }
+}
+
+static const MemoryRegionOps megasas_mmio_ops = {
+ .read = megasas_mmio_read,
+ .write = megasas_mmio_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ }
+};
+
+static uint64_t megasas_port_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
+{
+ return megasas_mmio_read(opaque, addr & 0xff, size);
+}
+
+static void megasas_port_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
+{
+ megasas_mmio_write(opaque, addr & 0xff, val, size);
+}
+
+static const MemoryRegionOps megasas_port_ops = {
+ .read = megasas_port_read,
+ .write = megasas_port_write,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ }
+};
+
+static uint64_t megasas_queue_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
+{
+ return 0;
+}
+
+static const MemoryRegionOps megasas_queue_ops = {
+ .read = megasas_queue_read,
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ }
+};
+
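+/*
+ * Return the HBA to the FW_READY state: abort all outstanding
+ * commands, unmap the frames and clear the reply queue registers.
+ */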
+static void megasas_soft_reset(MegasasState *s)
+{
+ int i;
+ MegasasCmd *cmd;
+
+ trace_megasas_reset();
+ for (i = 0; i < s->fw_cmds; i++) {
+ cmd = &s->frames[i];
+ megasas_abort_command(cmd);
+ }
+ megasas_reset_frames(s);
+ s->reply_queue_len = s->fw_cmds;
+ s->reply_queue_pa = 0;
+ s->consumer_pa = 0;
+ s->producer_pa = 0;
+ s->fw_state = MFI_FWSTATE_READY;
+ s->doorbell = 0;
+ s->intr_mask = MEGASAS_INTR_DISABLED_MASK;
+ s->frame_hi = 0;
+ s->flags &= ~MEGASAS_MASK_USE_QUEUE64;
+ s->event_count++;
+ s->boot_event = s->event_count;
+}
+
+static void megasas_scsi_reset(DeviceState *dev)
+{
+ MegasasState *s = DO_UPCAST(MegasasState, dev.qdev, dev);
+
+ megasas_soft_reset(s);
+}
+
+static const VMStateDescription vmstate_megasas = {
+ .name = "megasas",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .minimum_version_id_old = 0,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, MegasasState),
+
+ VMSTATE_INT32(fw_state, MegasasState),
+ VMSTATE_INT32(intr_mask, MegasasState),
+ VMSTATE_INT32(doorbell, MegasasState),
+ VMSTATE_UINT64(reply_queue_pa, MegasasState),
+ VMSTATE_UINT64(consumer_pa, MegasasState),
+ VMSTATE_UINT64(producer_pa, MegasasState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void megasas_scsi_uninit(PCIDevice *d)
+{
+ MegasasState *s = DO_UPCAST(MegasasState, dev, d);
+
+#ifdef USE_MSIX
+ msix_uninit(&s->dev, &s->mmio_io);
+#endif
+ memory_region_destroy(&s->mmio_io);
+ memory_region_destroy(&s->port_io);
+ memory_region_destroy(&s->queue_io);
+}
+
+static const struct SCSIBusInfo megasas_scsi_info = {
+ .tcq = true,
+ .max_target = MFI_MAX_LD,
+ .max_lun = 255,
+
+ .transfer_data = megasas_xfer_complete,
+ .get_sg_list = megasas_get_sg_list,
+ .complete = megasas_command_complete,
+ .cancel = megasas_command_cancel,
+};
+
+static int megasas_scsi_init(PCIDevice *dev)
+{
+ MegasasState *s = DO_UPCAST(MegasasState, dev, dev);
+ uint8_t *pci_conf;
+ int i, bar_type;
+
+ pci_conf = s->dev.config;
+
+ /* PCI latency timer = 0 */
+ pci_conf[PCI_LATENCY_TIMER] = 0;
+ /* Interrupt pin 1 */
+ pci_conf[PCI_INTERRUPT_PIN] = 0x01;
+
+ memory_region_init_io(&s->mmio_io, &megasas_mmio_ops, s,
+ "megasas-mmio", 0x4000);
+ memory_region_init_io(&s->port_io, &megasas_port_ops, s,
+ "megasas-io", 256);
+ memory_region_init_io(&s->queue_io, &megasas_queue_ops, s,
+ "megasas-queue", 0x40000);
+
+#ifdef USE_MSIX
+ /* MSI-X support is currently broken */
+ if (megasas_use_msix(s) &&
+ msix_init(&s->dev, 15, &s->mmio_io, 0, 0x2000)) {
+ s->flags &= ~MEGASAS_MASK_USE_MSIX;
+ }
+#else
+ s->flags &= ~MEGASAS_MASK_USE_MSIX;
+#endif
+
+ bar_type = PCI_BASE_ADDRESS_SPACE_MEMORY | PCI_BASE_ADDRESS_MEM_TYPE_64;
+ pci_register_bar(&s->dev, 0, bar_type, &s->mmio_io);
+ pci_register_bar(&s->dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &s->port_io);
+ pci_register_bar(&s->dev, 3, bar_type, &s->queue_io);
+
+ if (megasas_use_msix(s)) {
+ msix_vector_use(&s->dev, 0);
+ }
+
+ if (!s->sas_addr) {
+ s->sas_addr = ((NAA_LOCALLY_ASSIGNED_ID << 24) |
+ IEEE_COMPANY_LOCALLY_ASSIGNED) << 36;
+ s->sas_addr |= (pci_bus_num(dev->bus) << 16);
+ s->sas_addr |= (PCI_SLOT(dev->devfn) << 8);
+ s->sas_addr |= PCI_FUNC(dev->devfn);
+ }
+ if (s->fw_sge >= MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE) {
+ s->fw_sge = MEGASAS_MAX_SGE - MFI_PASS_FRAME_SIZE;
+ } else if (s->fw_sge >= 128 - MFI_PASS_FRAME_SIZE) {
+ s->fw_sge = 128 - MFI_PASS_FRAME_SIZE;
+ } else {
+ s->fw_sge = 64 - MFI_PASS_FRAME_SIZE;
+ }
+ if (s->fw_cmds > MEGASAS_MAX_FRAMES) {
+ s->fw_cmds = MEGASAS_MAX_FRAMES;
+ }
+ trace_megasas_init(s->fw_sge, s->fw_cmds,
+ megasas_use_msix(s) ? "MSI-X" : "INTx",
+ megasas_is_jbod(s) ? "jbod" : "raid");
+ s->fw_luns = (MFI_MAX_LD > MAX_SCSI_DEVS) ?
+ MAX_SCSI_DEVS : MFI_MAX_LD;
+ s->producer_pa = 0;
+ s->consumer_pa = 0;
+ for (i = 0; i < s->fw_cmds; i++) {
+ s->frames[i].index = i;
+ s->frames[i].context = -1;
+ s->frames[i].pa = 0;
+ s->frames[i].state = s;
+ }
+
+ scsi_bus_new(&s->bus, &dev->qdev, &megasas_scsi_info);
+ scsi_bus_legacy_handle_cmdline(&s->bus);
+ return 0;
+}
+
+static Property megasas_properties[] = {
+ DEFINE_PROP_UINT32("max_sge", MegasasState, fw_sge,
+ MEGASAS_DEFAULT_SGE),
+ DEFINE_PROP_UINT32("max_cmds", MegasasState, fw_cmds,
+ MEGASAS_DEFAULT_FRAMES),
+ DEFINE_PROP_HEX64("sas_address", MegasasState, sas_addr, 0),
+#ifdef USE_MSIX
+ DEFINE_PROP_BIT("use_msix", MegasasState, flags,
+ MEGASAS_FLAG_USE_MSIX, false),
+#endif
+ DEFINE_PROP_BIT("use_jbod", MegasasState, flags,
+ MEGASAS_FLAG_USE_JBOD, false),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void megasas_class_init(ObjectClass *oc, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(oc);
+ PCIDeviceClass *pc = PCI_DEVICE_CLASS(oc);
+
+ pc->init = megasas_scsi_init;
+ pc->exit = megasas_scsi_uninit;
+ pc->vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
+ pc->device_id = PCI_DEVICE_ID_LSI_SAS1078;
+ pc->subsystem_vendor_id = PCI_VENDOR_ID_LSI_LOGIC;
+ pc->subsystem_id = 0x1013;
+ pc->class_id = PCI_CLASS_STORAGE_RAID;
+ dc->props = megasas_properties;
+ dc->reset = megasas_scsi_reset;
+ dc->vmsd = &vmstate_megasas;
+ dc->desc = "LSI MegaRAID SAS 1078";
+}
+
+static const TypeInfo megasas_info = {
+ .name = "megasas",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(MegasasState),
+ .class_init = megasas_class_init,
+};
+
+static void megasas_register_types(void)
+{
+ type_register_static(&megasas_info);
+}
+
+type_init(megasas_register_types)
diff --git a/hw/mfi.h b/hw/mfi.h
new file mode 100644
index 0000000000..436b6906b1
--- /dev/null
+++ b/hw/mfi.h
@@ -0,0 +1,1249 @@
+/*
+ * NetBSD header file, copied from
+ * http://gitorious.org/freebsd/freebsd/blobs/HEAD/sys/dev/mfi/mfireg.h
+ */
+/*-
+ * Copyright (c) 2006 IronPort Systems
+ * Copyright (c) 2007 LSI Corp.
+ * Copyright (c) 2007 Rajesh Prabhakaran.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef MFI_REG_H
+#define MFI_REG_H
+
+/*
+ * MegaRAID SAS MFI firmware definitions
+ */
+
+/*
+ * Start with the register set. All registers are 32 bits wide.
+ * The usual Intel IOP style setup.
+ */
+#define MFI_IMSG0 0x10 /* Inbound message 0 */
+#define MFI_IMSG1 0x14 /* Inbound message 1 */
+#define MFI_OMSG0 0x18 /* Outbound message 0 */
+#define MFI_OMSG1 0x1c /* Outbound message 1 */
+#define MFI_IDB 0x20 /* Inbound doorbell */
+#define MFI_ISTS 0x24 /* Inbound interrupt status */
+#define MFI_IMSK 0x28 /* Inbound interrupt mask */
+#define MFI_ODB 0x2c /* Outbound doorbell */
+#define MFI_OSTS 0x30 /* Outbound interrupt status */
+#define MFI_OMSK 0x34 /* Outbound interrupt mask */
+#define MFI_IQP 0x40 /* Inbound queue port */
+#define MFI_OQP 0x44 /* Outbound queue port */
+
+/*
+ * 1078-specific registers
+ */
+#define MFI_ODR0 0x9c /* outbound doorbell register0 */
+#define MFI_ODCR0 0xa0 /* outbound doorbell clear register0 */
+#define MFI_OSP0 0xb0 /* outbound scratch pad0 */
+#define MFI_IQPL 0xc0 /* Inbound queue port (low bytes) */
+#define MFI_IQPH 0xc4 /* Inbound queue port (high bytes) */
+#define MFI_DIAG 0xf8 /* Host diag */
+#define MFI_SEQ 0xfc /* Sequencer offset */
+#define MFI_1078_EIM 0x80000004 /* 1078 enable interrupt mask */
+#define MFI_RMI 0x2 /* reply message interrupt */
+#define MFI_1078_RM 0x80000000 /* reply 1078 message interrupt */
+#define MFI_ODC 0x4 /* outbound doorbell change interrupt */
+
+/*
+ * gen2 specific changes
+ */
+#define MFI_GEN2_EIM 0x00000005 /* gen2 enable interrupt mask */
+#define MFI_GEN2_RM 0x00000001 /* reply gen2 message interrupt */
+
+/*
+ * skinny specific changes
+ */
+#define MFI_SKINNY_IDB 0x00 /* Inbound doorbell is at 0x00 for skinny */
+#define MFI_SKINNY_RM 0x00000001 /* reply skinny message interrupt */
+
+/* Bits for MFI_OSTS */
+#define MFI_OSTS_INTR_VALID 0x00000002
+
+/*
+ * Firmware state values. Found in OMSG0 during initialization.
+ */
+#define MFI_FWSTATE_MASK 0xf0000000
+#define MFI_FWSTATE_UNDEFINED 0x00000000
+#define MFI_FWSTATE_BB_INIT 0x10000000
+#define MFI_FWSTATE_FW_INIT 0x40000000
+#define MFI_FWSTATE_WAIT_HANDSHAKE 0x60000000
+#define MFI_FWSTATE_FW_INIT_2 0x70000000
+#define MFI_FWSTATE_DEVICE_SCAN 0x80000000
+#define MFI_FWSTATE_BOOT_MSG_PENDING 0x90000000
+#define MFI_FWSTATE_FLUSH_CACHE 0xa0000000
+#define MFI_FWSTATE_READY 0xb0000000
+#define MFI_FWSTATE_OPERATIONAL 0xc0000000
+#define MFI_FWSTATE_FAULT 0xf0000000
+#define MFI_FWSTATE_MAXSGL_MASK 0x00ff0000
+#define MFI_FWSTATE_MAXCMD_MASK 0x0000ffff
+#define MFI_FWSTATE_MSIX_SUPPORTED 0x04000000
+#define MFI_FWSTATE_HOSTMEMREQD_MASK 0x08000000
+
+/*
+ * Control bits to drive the card to ready state. These go into the IDB
+ * register.
+ */
+#define MFI_FWINIT_ABORT 0x00000001 /* Abort all pending commands */
+#define MFI_FWINIT_READY 0x00000002 /* Move from operational to ready */
+#define MFI_FWINIT_MFIMODE 0x00000004 /* unknown */
+#define MFI_FWINIT_CLEAR_HANDSHAKE 0x00000008 /* Respond to WAIT_HANDSHAKE */
+#define MFI_FWINIT_HOTPLUG 0x00000010
+#define MFI_FWINIT_STOP_ADP 0x00000020 /* Move to operational, stop */
+#define MFI_FWINIT_ADP_RESET 0x00000040 /* Reset ADP */
+
+/* MFI Commands */
+typedef enum {
+ MFI_CMD_INIT = 0x00,
+ MFI_CMD_LD_READ,
+ MFI_CMD_LD_WRITE,
+ MFI_CMD_LD_SCSI_IO,
+ MFI_CMD_PD_SCSI_IO,
+ MFI_CMD_DCMD,
+ MFI_CMD_ABORT,
+ MFI_CMD_SMP,
+ MFI_CMD_STP
+} mfi_cmd_t;
+
+/* Direct commands */
+typedef enum {
+ MFI_DCMD_CTRL_MFI_HOST_MEM_ALLOC = 0x0100e100,
+ MFI_DCMD_CTRL_GET_INFO = 0x01010000,
+ MFI_DCMD_CTRL_GET_PROPERTIES = 0x01020100,
+ MFI_DCMD_CTRL_SET_PROPERTIES = 0x01020200,
+ MFI_DCMD_CTRL_ALARM = 0x01030000,
+ MFI_DCMD_CTRL_ALARM_GET = 0x01030100,
+ MFI_DCMD_CTRL_ALARM_ENABLE = 0x01030200,
+ MFI_DCMD_CTRL_ALARM_DISABLE = 0x01030300,
+ MFI_DCMD_CTRL_ALARM_SILENCE = 0x01030400,
+ MFI_DCMD_CTRL_ALARM_TEST = 0x01030500,
+ MFI_DCMD_CTRL_EVENT_GETINFO = 0x01040100,
+ MFI_DCMD_CTRL_EVENT_CLEAR = 0x01040200,
+ MFI_DCMD_CTRL_EVENT_GET = 0x01040300,
+ MFI_DCMD_CTRL_EVENT_COUNT = 0x01040400,
+ MFI_DCMD_CTRL_EVENT_WAIT = 0x01040500,
+ MFI_DCMD_CTRL_SHUTDOWN = 0x01050000,
+ MFI_DCMD_HIBERNATE_STANDBY = 0x01060000,
+ MFI_DCMD_CTRL_GET_TIME = 0x01080101,
+ MFI_DCMD_CTRL_SET_TIME = 0x01080102,
+ MFI_DCMD_CTRL_BIOS_DATA_GET = 0x010c0100,
+ MFI_DCMD_CTRL_BIOS_DATA_SET = 0x010c0200,
+ MFI_DCMD_CTRL_FACTORY_DEFAULTS = 0x010d0000,
+ MFI_DCMD_CTRL_MFC_DEFAULTS_GET = 0x010e0201,
+ MFI_DCMD_CTRL_MFC_DEFAULTS_SET = 0x010e0202,
+ MFI_DCMD_CTRL_CACHE_FLUSH = 0x01101000,
+ MFI_DCMD_PD_GET_LIST = 0x02010000,
+ MFI_DCMD_PD_LIST_QUERY = 0x02010100,
+ MFI_DCMD_PD_GET_INFO = 0x02020000,
+ MFI_DCMD_PD_STATE_SET = 0x02030100,
+ MFI_DCMD_PD_REBUILD = 0x02040100,
+ MFI_DCMD_PD_BLINK = 0x02070100,
+ MFI_DCMD_PD_UNBLINK = 0x02070200,
+ MFI_DCMD_LD_GET_LIST = 0x03010000,
+ MFI_DCMD_LD_GET_INFO = 0x03020000,
+ MFI_DCMD_LD_GET_PROP = 0x03030000,
+ MFI_DCMD_LD_SET_PROP = 0x03040000,
+ MFI_DCMD_LD_DELETE = 0x03090000,
+ MFI_DCMD_CFG_READ = 0x04010000,
+ MFI_DCMD_CFG_ADD = 0x04020000,
+ MFI_DCMD_CFG_CLEAR = 0x04030000,
+ MFI_DCMD_CFG_FOREIGN_READ = 0x04060100,
+ MFI_DCMD_CFG_FOREIGN_IMPORT = 0x04060400,
+ MFI_DCMD_BBU_STATUS = 0x05010000,
+ MFI_DCMD_BBU_CAPACITY_INFO = 0x05020000,
+ MFI_DCMD_BBU_DESIGN_INFO = 0x05030000,
+ MFI_DCMD_BBU_PROP_GET = 0x05050100,
+ MFI_DCMD_CLUSTER = 0x08000000,
+ MFI_DCMD_CLUSTER_RESET_ALL = 0x08010100,
+ MFI_DCMD_CLUSTER_RESET_LD = 0x08010200
+} mfi_dcmd_t;
+
+/* Modifiers for MFI_DCMD_CTRL_CACHE_FLUSH */
+#define MFI_FLUSHCACHE_CTRL 0x01
+#define MFI_FLUSHCACHE_DISK 0x02
+
+/* Modifiers for MFI_DCMD_CTRL_SHUTDOWN */
+#define MFI_SHUTDOWN_SPINDOWN 0x01
+
+/*
+ * MFI Frame flags
+ */
+typedef enum {
+ MFI_FRAME_DONT_POST_IN_REPLY_QUEUE = 0x0001,
+ MFI_FRAME_SGL64 = 0x0002,
+ MFI_FRAME_SENSE64 = 0x0004,
+ MFI_FRAME_DIR_WRITE = 0x0008,
+ MFI_FRAME_DIR_READ = 0x0010,
+ MFI_FRAME_IEEE_SGL = 0x0020,
+} mfi_frame_flags;
+
+/* MFI Status codes */
+typedef enum {
+ MFI_STAT_OK = 0x00,
+ MFI_STAT_INVALID_CMD,
+ MFI_STAT_INVALID_DCMD,
+ MFI_STAT_INVALID_PARAMETER,
+ MFI_STAT_INVALID_SEQUENCE_NUMBER,
+ MFI_STAT_ABORT_NOT_POSSIBLE,
+ MFI_STAT_APP_HOST_CODE_NOT_FOUND,
+ MFI_STAT_APP_IN_USE,
+ MFI_STAT_APP_NOT_INITIALIZED,
+ MFI_STAT_ARRAY_INDEX_INVALID,
+ MFI_STAT_ARRAY_ROW_NOT_EMPTY,
+ MFI_STAT_CONFIG_RESOURCE_CONFLICT,
+ MFI_STAT_DEVICE_NOT_FOUND,
+ MFI_STAT_DRIVE_TOO_SMALL,
+ MFI_STAT_FLASH_ALLOC_FAIL,
+ MFI_STAT_FLASH_BUSY,
+ MFI_STAT_FLASH_ERROR = 0x10,
+ MFI_STAT_FLASH_IMAGE_BAD,
+ MFI_STAT_FLASH_IMAGE_INCOMPLETE,
+ MFI_STAT_FLASH_NOT_OPEN,
+ MFI_STAT_FLASH_NOT_STARTED,
+ MFI_STAT_FLUSH_FAILED,
+ MFI_STAT_HOST_CODE_NOT_FOUNT,
+ MFI_STAT_LD_CC_IN_PROGRESS,
+ MFI_STAT_LD_INIT_IN_PROGRESS,
+ MFI_STAT_LD_LBA_OUT_OF_RANGE,
+ MFI_STAT_LD_MAX_CONFIGURED,
+ MFI_STAT_LD_NOT_OPTIMAL,
+ MFI_STAT_LD_RBLD_IN_PROGRESS,
+ MFI_STAT_LD_RECON_IN_PROGRESS,
+ MFI_STAT_LD_WRONG_RAID_LEVEL,
+ MFI_STAT_MAX_SPARES_EXCEEDED,
+ MFI_STAT_MEMORY_NOT_AVAILABLE = 0x20,
+ MFI_STAT_MFC_HW_ERROR,
+ MFI_STAT_NO_HW_PRESENT,
+ MFI_STAT_NOT_FOUND,
+ MFI_STAT_NOT_IN_ENCL,
+ MFI_STAT_PD_CLEAR_IN_PROGRESS,
+ MFI_STAT_PD_TYPE_WRONG,
+ MFI_STAT_PR_DISABLED,
+ MFI_STAT_ROW_INDEX_INVALID,
+ MFI_STAT_SAS_CONFIG_INVALID_ACTION,
+ MFI_STAT_SAS_CONFIG_INVALID_DATA,
+ MFI_STAT_SAS_CONFIG_INVALID_PAGE,
+ MFI_STAT_SAS_CONFIG_INVALID_TYPE,
+ MFI_STAT_SCSI_DONE_WITH_ERROR,
+ MFI_STAT_SCSI_IO_FAILED,
+ MFI_STAT_SCSI_RESERVATION_CONFLICT,
+ MFI_STAT_SHUTDOWN_FAILED = 0x30,
+ MFI_STAT_TIME_NOT_SET,
+ MFI_STAT_WRONG_STATE,
+ MFI_STAT_LD_OFFLINE,
+ MFI_STAT_PEER_NOTIFICATION_REJECTED,
+ MFI_STAT_PEER_NOTIFICATION_FAILED,
+ MFI_STAT_RESERVATION_IN_PROGRESS,
+ MFI_STAT_I2C_ERRORS_DETECTED,
+ MFI_STAT_PCI_ERRORS_DETECTED,
+ MFI_STAT_DIAG_FAILED,
+ MFI_STAT_BOOT_MSG_PENDING,
+ MFI_STAT_FOREIGN_CONFIG_INCOMPLETE,
+ MFI_STAT_INVALID_SGL,
+ MFI_STAT_UNSUPPORTED_HW,
+ MFI_STAT_CC_SCHEDULE_DISABLED,
+ MFI_STAT_PD_COPYBACK_IN_PROGRESS,
+ MFI_STAT_MULTIPLE_PDS_IN_ARRAY = 0x40,
+ MFI_STAT_FW_DOWNLOAD_ERROR,
+ MFI_STAT_FEATURE_SECURITY_NOT_ENABLED,
+ MFI_STAT_LOCK_KEY_ALREADY_EXISTS,
+ MFI_STAT_LOCK_KEY_BACKUP_NOT_ALLOWED,
+ MFI_STAT_LOCK_KEY_VERIFY_NOT_ALLOWED,
+ MFI_STAT_LOCK_KEY_VERIFY_FAILED,
+ MFI_STAT_LOCK_KEY_REKEY_NOT_ALLOWED,
+ MFI_STAT_LOCK_KEY_INVALID,
+ MFI_STAT_LOCK_KEY_ESCROW_INVALID,
+ MFI_STAT_LOCK_KEY_BACKUP_REQUIRED,
+ MFI_STAT_SECURE_LD_EXISTS,
+ MFI_STAT_LD_SECURE_NOT_ALLOWED,
+ MFI_STAT_REPROVISION_NOT_ALLOWED,
+ MFI_STAT_PD_SECURITY_TYPE_WRONG,
+ MFI_STAT_LD_ENCRYPTION_TYPE_INVALID,
+ MFI_STAT_CONFIG_FDE_NON_FDE_MIX_NOT_ALLOWED = 0x50,
+ MFI_STAT_CONFIG_LD_ENCRYPTION_TYPE_MIX_NOT_ALLOWED,
+ MFI_STAT_SECRET_KEY_NOT_ALLOWED,
+ MFI_STAT_PD_HW_ERRORS_DETECTED,
+ MFI_STAT_LD_CACHE_PINNED,
+ MFI_STAT_POWER_STATE_SET_IN_PROGRESS,
+ MFI_STAT_POWER_STATE_SET_BUSY,
+ MFI_STAT_POWER_STATE_WRONG,
+ MFI_STAT_PR_NO_AVAILABLE_PD_FOUND,
+ MFI_STAT_CTRL_RESET_REQUIRED,
+ MFI_STAT_LOCK_KEY_EKM_NO_BOOT_AGENT,
+ MFI_STAT_SNAP_NO_SPACE,
+ MFI_STAT_SNAP_PARTIAL_FAILURE,
+ MFI_STAT_UPGRADE_KEY_INCOMPATIBLE,
+ MFI_STAT_PFK_INCOMPATIBLE,
+ MFI_STAT_PD_MAX_UNCONFIGURED,
+ MFI_STAT_IO_METRICS_DISABLED = 0x60,
+ MFI_STAT_AEC_NOT_STOPPED,
+ MFI_STAT_PI_TYPE_WRONG,
+ MFI_STAT_LD_PD_PI_INCOMPATIBLE,
+ MFI_STAT_PI_NOT_ENABLED,
+ MFI_STAT_LD_BLOCK_SIZE_MISMATCH,
+ MFI_STAT_INVALID_STATUS = 0xFF
+} mfi_status_t;
+
+/* Event classes */
+typedef enum {
+ MFI_EVT_CLASS_DEBUG = -2,
+ MFI_EVT_CLASS_PROGRESS = -1,
+ MFI_EVT_CLASS_INFO = 0,
+ MFI_EVT_CLASS_WARNING = 1,
+ MFI_EVT_CLASS_CRITICAL = 2,
+ MFI_EVT_CLASS_FATAL = 3,
+ MFI_EVT_CLASS_DEAD = 4
+} mfi_evt_class_t;
+
+/* Event locales */
+typedef enum {
+ MFI_EVT_LOCALE_LD = 0x0001,
+ MFI_EVT_LOCALE_PD = 0x0002,
+ MFI_EVT_LOCALE_ENCL = 0x0004,
+ MFI_EVT_LOCALE_BBU = 0x0008,
+ MFI_EVT_LOCALE_SAS = 0x0010,
+ MFI_EVT_LOCALE_CTRL = 0x0020,
+ MFI_EVT_LOCALE_CONFIG = 0x0040,
+ MFI_EVT_LOCALE_CLUSTER = 0x0080,
+ MFI_EVT_LOCALE_ALL = 0xffff
+} mfi_evt_locale_t;
+
+/* Event args */
+typedef enum {
+ MR_EVT_ARGS_NONE = 0x00,
+ MR_EVT_ARGS_CDB_SENSE,
+ MR_EVT_ARGS_LD,
+ MR_EVT_ARGS_LD_COUNT,
+ MR_EVT_ARGS_LD_LBA,
+ MR_EVT_ARGS_LD_OWNER,
+ MR_EVT_ARGS_LD_LBA_PD_LBA,
+ MR_EVT_ARGS_LD_PROG,
+ MR_EVT_ARGS_LD_STATE,
+ MR_EVT_ARGS_LD_STRIP,
+ MR_EVT_ARGS_PD,
+ MR_EVT_ARGS_PD_ERR,
+ MR_EVT_ARGS_PD_LBA,
+ MR_EVT_ARGS_PD_LBA_LD,
+ MR_EVT_ARGS_PD_PROG,
+ MR_EVT_ARGS_PD_STATE,
+ MR_EVT_ARGS_PCI,
+ MR_EVT_ARGS_RATE,
+ MR_EVT_ARGS_STR,
+ MR_EVT_ARGS_TIME,
+ MR_EVT_ARGS_ECC,
+ MR_EVT_ARGS_LD_PROP,
+ MR_EVT_ARGS_PD_SPARE,
+ MR_EVT_ARGS_PD_INDEX,
+ MR_EVT_ARGS_DIAG_PASS,
+ MR_EVT_ARGS_DIAG_FAIL,
+ MR_EVT_ARGS_PD_LBA_LBA,
+ MR_EVT_ARGS_PORT_PHY,
+ MR_EVT_ARGS_PD_MISSING,
+ MR_EVT_ARGS_PD_ADDRESS,
+ MR_EVT_ARGS_BITMAP,
+ MR_EVT_ARGS_CONNECTOR,
+ MR_EVT_ARGS_PD_PD,
+ MR_EVT_ARGS_PD_FRU,
+ MR_EVT_ARGS_PD_PATHINFO,
+ MR_EVT_ARGS_PD_POWER_STATE,
+ MR_EVT_ARGS_GENERIC,
+} mfi_evt_args;
+
+/* Event codes */
+#define MR_EVT_CFG_CLEARED 0x0004
+#define MR_EVT_CTRL_SHUTDOWN 0x002a
+#define MR_EVT_LD_STATE_CHANGE 0x0051
+#define MR_EVT_PD_INSERTED 0x005b
+#define MR_EVT_PD_REMOVED 0x0070
+#define MR_EVT_PD_STATE_CHANGED 0x0072
+#define MR_EVT_LD_CREATED 0x008a
+#define MR_EVT_LD_DELETED 0x008b
+#define MR_EVT_FOREIGN_CFG_IMPORTED 0x00db
+#define MR_EVT_LD_OFFLINE 0x00fc
+#define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED 0x0152
+
+typedef enum {
+ MR_LD_CACHE_WRITE_BACK = 0x01,
+ MR_LD_CACHE_WRITE_ADAPTIVE = 0x02,
+ MR_LD_CACHE_READ_AHEAD = 0x04,
+ MR_LD_CACHE_READ_ADAPTIVE = 0x08,
+ MR_LD_CACHE_WRITE_CACHE_BAD_BBU = 0x10,
+ MR_LD_CACHE_ALLOW_WRITE_CACHE = 0x20,
+ MR_LD_CACHE_ALLOW_READ_CACHE = 0x40
+} mfi_ld_cache;
+
+typedef enum {
+ MR_PD_CACHE_UNCHANGED = 0,
+ MR_PD_CACHE_ENABLE = 1,
+ MR_PD_CACHE_DISABLE = 2
+} mfi_pd_cache;
+
+typedef enum {
+ MR_PD_QUERY_TYPE_ALL = 0,
+ MR_PD_QUERY_TYPE_STATE = 1,
+ MR_PD_QUERY_TYPE_POWER_STATE = 2,
+ MR_PD_QUERY_TYPE_MEDIA_TYPE = 3,
+ MR_PD_QUERY_TYPE_SPEED = 4,
+ MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, /* query for system drives */
+} mfi_pd_query_type;
+
+/*
+ * Other properties and definitions
+ */
+#define MFI_MAX_PD_CHANNELS 2
+#define MFI_MAX_LD_CHANNELS 2
+#define MFI_MAX_CHANNELS (MFI_MAX_PD_CHANNELS + MFI_MAX_LD_CHANNELS)
+#define MFI_MAX_CHANNEL_DEVS 128
+#define MFI_DEFAULT_ID -1
+#define MFI_MAX_LUN 8
+#define MFI_MAX_LD 64
+
+#define MFI_FRAME_SIZE 64
+#define MFI_MBOX_SIZE 12
+
+/* Firmware flashing can take 40s */
+#define MFI_POLL_TIMEOUT_SECS 50
+
+/* Allow for speedier math calculations */
+#define MFI_SECTOR_LEN 512
+
+/* Scatter Gather elements */
+struct mfi_sg32 {
+ uint32_t addr;
+ uint32_t len;
+} QEMU_PACKED;
+
+struct mfi_sg64 {
+ uint64_t addr;
+ uint32_t len;
+} QEMU_PACKED;
+
+struct mfi_sg_skinny {
+ uint64_t addr;
+ uint32_t len;
+ uint32_t flag;
+} QEMU_PACKED;
+
+union mfi_sgl {
+ struct mfi_sg32 sg32[1];
+ struct mfi_sg64 sg64[1];
+ struct mfi_sg_skinny sg_skinny[1];
+} QEMU_PACKED;
+
+/* Message frames. All messages have a common header */
+struct mfi_frame_header {
+ uint8_t frame_cmd;
+ uint8_t sense_len;
+ uint8_t cmd_status;
+ uint8_t scsi_status;
+ uint8_t target_id;
+ uint8_t lun_id;
+ uint8_t cdb_len;
+ uint8_t sge_count;
+ uint64_t context;
+ uint16_t flags;
+ uint16_t timeout;
+ uint32_t data_len;
+} QEMU_PACKED;
+
+struct mfi_init_frame {
+ struct mfi_frame_header header;
+ uint32_t qinfo_new_addr_lo;
+ uint32_t qinfo_new_addr_hi;
+ uint32_t qinfo_old_addr_lo;
+ uint32_t qinfo_old_addr_hi;
+ uint32_t reserved[6];
+};
+
+#define MFI_IO_FRAME_SIZE 40
+struct mfi_io_frame {
+ struct mfi_frame_header header;
+ uint32_t sense_addr_lo;
+ uint32_t sense_addr_hi;
+ uint32_t lba_lo;
+ uint32_t lba_hi;
+ union mfi_sgl sgl;
+} QEMU_PACKED;
+
+#define MFI_PASS_FRAME_SIZE 48
+struct mfi_pass_frame {
+ struct mfi_frame_header header;
+ uint32_t sense_addr_lo;
+ uint32_t sense_addr_hi;
+ uint8_t cdb[16];
+ union mfi_sgl sgl;
+} QEMU_PACKED;
+
+#define MFI_DCMD_FRAME_SIZE 40
+struct mfi_dcmd_frame {
+ struct mfi_frame_header header;
+ uint32_t opcode;
+ uint8_t mbox[MFI_MBOX_SIZE];
+ union mfi_sgl sgl;
+} QEMU_PACKED;
+
+struct mfi_abort_frame {
+ struct mfi_frame_header header;
+ uint64_t abort_context;
+ uint32_t abort_mfi_addr_lo;
+ uint32_t abort_mfi_addr_hi;
+ uint32_t reserved1[6];
+} QEMU_PACKED;
+
+struct mfi_smp_frame {
+ struct mfi_frame_header header;
+ uint64_t sas_addr;
+ union {
+ struct mfi_sg32 sg32[2];
+ struct mfi_sg64 sg64[2];
+ } sgl;
+} QEMU_PACKED;
+
+struct mfi_stp_frame {
+ struct mfi_frame_header header;
+ uint16_t fis[10];
+ uint32_t stp_flags;
+ union {
+ struct mfi_sg32 sg32[2];
+ struct mfi_sg64 sg64[2];
+ } sgl;
+} QEMU_PACKED;
+
+union mfi_frame {
+ struct mfi_frame_header header;
+ struct mfi_init_frame init;
+ struct mfi_io_frame io;
+ struct mfi_pass_frame pass;
+ struct mfi_dcmd_frame dcmd;
+ struct mfi_abort_frame abort;
+ struct mfi_smp_frame smp;
+ struct mfi_stp_frame stp;
+ uint64_t raw[8];
+ uint8_t bytes[MFI_FRAME_SIZE];
+};
+
+#define MFI_SENSE_LEN 128
+struct mfi_sense {
+ uint8_t data[MFI_SENSE_LEN];
+};
+
+#define MFI_QUEUE_FLAG_CONTEXT64 0x00000002
+
+/* The queue init structure that is passed with the init message */
+struct mfi_init_qinfo {
+ uint32_t flags;
+ uint32_t rq_entries;
+ uint32_t rq_addr_lo;
+ uint32_t rq_addr_hi;
+ uint32_t pi_addr_lo;
+ uint32_t pi_addr_hi;
+ uint32_t ci_addr_lo;
+ uint32_t ci_addr_hi;
+} QEMU_PACKED;
+
+/* Controller properties */
+struct mfi_ctrl_props {
+ uint16_t seq_num;
+ uint16_t pred_fail_poll_interval;
+ uint16_t intr_throttle_cnt;
+ uint16_t intr_throttle_timeout;
+ uint8_t rebuild_rate;
+ uint8_t patrol_read_rate;
+ uint8_t bgi_rate;
+ uint8_t cc_rate;
+ uint8_t recon_rate;
+ uint8_t cache_flush_interval;
+ uint8_t spinup_drv_cnt;
+ uint8_t spinup_delay;
+ uint8_t cluster_enable;
+ uint8_t coercion_mode;
+ uint8_t alarm_enable;
+ uint8_t disable_auto_rebuild;
+ uint8_t disable_battery_warn;
+ uint8_t ecc_bucket_size;
+ uint16_t ecc_bucket_leak_rate;
+ uint8_t restore_hotspare_on_insertion;
+ uint8_t expose_encl_devices;
+ uint8_t maintainPdFailHistory;
+ uint8_t disallowHostRequestReordering;
+ uint8_t abortCCOnError;
+ uint8_t loadBalanceMode;
+ uint8_t disableAutoDetectBackplane;
+ uint8_t snapVDSpace;
+ uint32_t OnOffProperties;
+/* set TRUE to disable copyBack (0=copyback enabled) */
+#define MFI_CTRL_PROP_CopyBackDisabled (1 << 0)
+#define MFI_CTRL_PROP_SMARTerEnabled (1 << 1)
+#define MFI_CTRL_PROP_PRCorrectUnconfiguredAreas (1 << 2)
+#define MFI_CTRL_PROP_UseFdeOnly (1 << 3)
+#define MFI_CTRL_PROP_DisableNCQ (1 << 4)
+#define MFI_CTRL_PROP_SSDSMARTerEnabled (1 << 5)
+#define MFI_CTRL_PROP_SSDPatrolReadEnabled (1 << 6)
+#define MFI_CTRL_PROP_EnableSpinDownUnconfigured (1 << 7)
+#define MFI_CTRL_PROP_AutoEnhancedImport (1 << 8)
+#define MFI_CTRL_PROP_EnableSecretKeyControl (1 << 9)
+#define MFI_CTRL_PROP_DisableOnlineCtrlReset (1 << 10)
+#define MFI_CTRL_PROP_AllowBootWithPinnedCache (1 << 11)
+#define MFI_CTRL_PROP_DisableSpinDownHS (1 << 12)
+#define MFI_CTRL_PROP_EnableJBOD (1 << 13)
+
+ uint8_t autoSnapVDSpace; /* % of source LD to be
+ * reserved for auto snapshot
+ * in snapshot repository, for
+ * metadata and user data
+ * 1=5%, 2=10%, 3=15% and so on
+ */
+ uint8_t viewSpace; /* snapshot writeable VIEWs
+ * capacity as a % of source LD
+ * capacity. 0=READ only
+ * 1=5%, 2=10%, 3=15% and so on
+ */
+ uint16_t spinDownTime; /* # of idle minutes before device
+ * is spun down (0=use FW defaults)
+ */
+ uint8_t reserved[24];
+} QEMU_PACKED;
+
+/* PCI information about the card. */
+struct mfi_info_pci {
+ uint16_t vendor;
+ uint16_t device;
+ uint16_t subvendor;
+ uint16_t subdevice;
+ uint8_t reserved[24];
+} QEMU_PACKED;
+
+/* Host (front end) interface information */
+struct mfi_info_host {
+ uint8_t type;
+#define MFI_INFO_HOST_PCIX 0x01
+#define MFI_INFO_HOST_PCIE 0x02
+#define MFI_INFO_HOST_ISCSI 0x04
+#define MFI_INFO_HOST_SAS3G 0x08
+ uint8_t reserved[6];
+ uint8_t port_count;
+ uint64_t port_addr[8];
+} QEMU_PACKED;
+
+/* Device (back end) interface information */
+struct mfi_info_device {
+ uint8_t type;
+#define MFI_INFO_DEV_SPI 0x01
+#define MFI_INFO_DEV_SAS3G 0x02
+#define MFI_INFO_DEV_SATA1 0x04
+#define MFI_INFO_DEV_SATA3G 0x08
+#define MFI_INFO_DEV_PCIE 0x10
+ uint8_t reserved[6];
+ uint8_t port_count;
+ uint64_t port_addr[8];
+} QEMU_PACKED;
+
+/* Firmware component information */
+struct mfi_info_component {
+ char name[8];
+ char version[32];
+ char build_date[16];
+ char build_time[16];
+} QEMU_PACKED;
+
+/* Controller default settings */
+struct mfi_defaults {
+ uint64_t sas_addr;
+ uint8_t phy_polarity;
+ uint8_t background_rate;
+ uint8_t stripe_size;
+ uint8_t flush_time;
+ uint8_t write_back;
+ uint8_t read_ahead;
+ uint8_t cache_when_bbu_bad;
+ uint8_t cached_io;
+ uint8_t smart_mode;
+ uint8_t alarm_disable;
+ uint8_t coercion;
+ uint8_t zrc_config;
+ uint8_t dirty_led_shows_drive_activity;
+ uint8_t bios_continue_on_error;
+ uint8_t spindown_mode;
+ uint8_t allowed_device_types;
+ uint8_t allow_mix_in_enclosure;
+ uint8_t allow_mix_in_ld;
+ uint8_t allow_sata_in_cluster;
+ uint8_t max_chained_enclosures;
+ uint8_t disable_ctrl_r;
+ uint8_t enable_web_bios;
+ uint8_t phy_polarity_split;
+ uint8_t direct_pd_mapping;
+ uint8_t bios_enumerate_lds;
+ uint8_t restored_hot_spare_on_insertion;
+ uint8_t expose_enclosure_devices;
+ uint8_t maintain_pd_fail_history;
+ uint8_t disable_puncture;
+ uint8_t zero_based_enumeration;
+ uint8_t disable_preboot_cli;
+ uint8_t show_drive_led_on_activity;
+ uint8_t cluster_disable;
+ uint8_t sas_disable;
+ uint8_t auto_detect_backplane;
+ uint8_t fde_only;
+ uint8_t delay_during_post;
+ uint8_t resv[19];
+} QEMU_PACKED;
+
+/* Controller BIOS settings */
+struct mfi_bios_data {
+ uint16_t boot_target_id;
+ uint8_t do_not_int_13;
+ uint8_t continue_on_error;
+ uint8_t verbose;
+ uint8_t geometry;
+ uint8_t expose_all_drives;
+ uint8_t reserved[56];
+ uint8_t check_sum;
+} QEMU_PACKED;
+
+/* SAS (?) controller info, returned from MFI_DCMD_CTRL_GET_INFO. */
+struct mfi_ctrl_info {
+ struct mfi_info_pci pci;
+ struct mfi_info_host host;
+ struct mfi_info_device device;
+
+ /* Firmware components that are present and active. */
+ uint32_t image_check_word;
+ uint32_t image_component_count;
+ struct mfi_info_component image_component[8];
+
+ /* Firmware components that have been flashed but are inactive */
+ uint32_t pending_image_component_count;
+ struct mfi_info_component pending_image_component[8];
+
+ uint8_t max_arms;
+ uint8_t max_spans;
+ uint8_t max_arrays;
+ uint8_t max_lds;
+ char product_name[80];
+ char serial_number[32];
+ uint32_t hw_present;
+#define MFI_INFO_HW_BBU 0x01
+#define MFI_INFO_HW_ALARM 0x02
+#define MFI_INFO_HW_NVRAM 0x04
+#define MFI_INFO_HW_UART 0x08
+#define MFI_INFO_HW_MEM 0x10
+#define MFI_INFO_HW_FLASH 0x20
+ uint32_t current_fw_time;
+ uint16_t max_cmds;
+ uint16_t max_sg_elements;
+ uint32_t max_request_size;
+ uint16_t lds_present;
+ uint16_t lds_degraded;
+ uint16_t lds_offline;
+ uint16_t pd_present;
+ uint16_t pd_disks_present;
+ uint16_t pd_disks_pred_failure;
+ uint16_t pd_disks_failed;
+ uint16_t nvram_size;
+ uint16_t memory_size;
+ uint16_t flash_size;
+ uint16_t ram_correctable_errors;
+ uint16_t ram_uncorrectable_errors;
+ uint8_t cluster_allowed;
+ uint8_t cluster_active;
+ uint16_t max_strips_per_io;
+
+ uint32_t raid_levels;
+#define MFI_INFO_RAID_0 0x01
+#define MFI_INFO_RAID_1 0x02
+#define MFI_INFO_RAID_5 0x04
+#define MFI_INFO_RAID_1E 0x08
+#define MFI_INFO_RAID_6 0x10
+
+ uint32_t adapter_ops;
+#define MFI_INFO_AOPS_RBLD_RATE 0x0001
+#define MFI_INFO_AOPS_CC_RATE 0x0002
+#define MFI_INFO_AOPS_BGI_RATE 0x0004
+#define MFI_INFO_AOPS_RECON_RATE 0x0008
+#define MFI_INFO_AOPS_PATROL_RATE 0x0010
+#define MFI_INFO_AOPS_ALARM_CONTROL 0x0020
+#define MFI_INFO_AOPS_CLUSTER_SUPPORTED 0x0040
+#define MFI_INFO_AOPS_BBU 0x0080
+#define MFI_INFO_AOPS_SPANNING_ALLOWED 0x0100
+#define MFI_INFO_AOPS_DEDICATED_SPARES 0x0200
+#define MFI_INFO_AOPS_REVERTIBLE_SPARES 0x0400
+#define MFI_INFO_AOPS_FOREIGN_IMPORT 0x0800
+#define MFI_INFO_AOPS_SELF_DIAGNOSTIC 0x1000
+#define MFI_INFO_AOPS_MIXED_ARRAY 0x2000
+#define MFI_INFO_AOPS_GLOBAL_SPARES 0x4000
+
+ uint32_t ld_ops;
+#define MFI_INFO_LDOPS_READ_POLICY 0x01
+#define MFI_INFO_LDOPS_WRITE_POLICY 0x02
+#define MFI_INFO_LDOPS_IO_POLICY 0x04
+#define MFI_INFO_LDOPS_ACCESS_POLICY 0x08
+#define MFI_INFO_LDOPS_DISK_CACHE_POLICY 0x10
+
+ struct {
+ uint8_t min;
+ uint8_t max;
+ uint8_t reserved[2];
+ } QEMU_PACKED stripe_sz_ops;
+
+ uint32_t pd_ops;
+#define MFI_INFO_PDOPS_FORCE_ONLINE 0x01
+#define MFI_INFO_PDOPS_FORCE_OFFLINE 0x02
+#define MFI_INFO_PDOPS_FORCE_REBUILD 0x04
+
+ uint32_t pd_mix_support;
+#define MFI_INFO_PDMIX_SAS 0x01
+#define MFI_INFO_PDMIX_SATA 0x02
+#define MFI_INFO_PDMIX_ENCL 0x04
+#define MFI_INFO_PDMIX_LD 0x08
+#define MFI_INFO_PDMIX_SATA_CLUSTER 0x10
+
+ uint8_t ecc_bucket_count;
+ uint8_t reserved2[11];
+ struct mfi_ctrl_props properties;
+ char package_version[0x60];
+ uint8_t pad[0x800 - 0x6a0];
+} QEMU_PACKED;
+
+/* keep track of an event. */
+union mfi_evt {
+ struct {
+ uint16_t locale;
+ uint8_t reserved;
+ int8_t class;
+ } members;
+ uint32_t word;
+} QEMU_PACKED;
+
+/* event log state. */
+struct mfi_evt_log_state {
+ uint32_t newest_seq_num;
+ uint32_t oldest_seq_num;
+ uint32_t clear_seq_num;
+ uint32_t shutdown_seq_num;
+ uint32_t boot_seq_num;
+} QEMU_PACKED;
+
+struct mfi_progress {
+ uint16_t progress;
+ uint16_t elapsed_seconds;
+} QEMU_PACKED;
+
+struct mfi_evt_ld {
+ uint16_t target_id;
+ uint8_t ld_index;
+ uint8_t reserved;
+} QEMU_PACKED;
+
+struct mfi_evt_pd {
+ uint16_t device_id;
+ uint8_t enclosure_index;
+ uint8_t slot_number;
+} QEMU_PACKED;
+
+/* event detail, returned from MFI_DCMD_CTRL_EVENT_WAIT. */
+struct mfi_evt_detail {
+ uint32_t seq;
+ uint32_t time;
+ uint32_t code;
+ union mfi_evt class;
+ uint8_t arg_type;
+ uint8_t reserved1[15];
+
+ union {
+ struct {
+ struct mfi_evt_pd pd;
+ uint8_t cdb_len;
+ uint8_t sense_len;
+ uint8_t reserved[2];
+ uint8_t cdb[16];
+ uint8_t sense[64];
+ } cdb_sense;
+
+ struct mfi_evt_ld ld;
+
+ struct {
+ struct mfi_evt_ld ld;
+ uint64_t count;
+ } ld_count;
+
+ struct {
+ uint64_t lba;
+ struct mfi_evt_ld ld;
+ } ld_lba;
+
+ struct {
+ struct mfi_evt_ld ld;
+ uint32_t pre_owner;
+ uint32_t new_owner;
+ } ld_owner;
+
+ struct {
+ uint64_t ld_lba;
+ uint64_t pd_lba;
+ struct mfi_evt_ld ld;
+ struct mfi_evt_pd pd;
+ } ld_lba_pd_lba;
+
+ struct {
+ struct mfi_evt_ld ld;
+ struct mfi_progress prog;
+ } ld_prog;
+
+ struct {
+ struct mfi_evt_ld ld;
+ uint32_t prev_state;
+ uint32_t new_state;
+ } ld_state;
+
+ struct {
+ uint64_t strip;
+ struct mfi_evt_ld ld;
+ } ld_strip;
+
+ struct mfi_evt_pd pd;
+
+ struct {
+ struct mfi_evt_pd pd;
+ uint32_t err;
+ } pd_err;
+
+ struct {
+ uint64_t lba;
+ struct mfi_evt_pd pd;
+ } pd_lba;
+
+ struct {
+ uint64_t lba;
+ struct mfi_evt_pd pd;
+ struct mfi_evt_ld ld;
+ } pd_lba_ld;
+
+ struct {
+ struct mfi_evt_pd pd;
+ struct mfi_progress prog;
+ } pd_prog;
+
+ struct {
+ struct mfi_evt_pd ld;
+ uint32_t prev_state;
+ uint32_t new_state;
+ } pd_state;
+
+ struct {
+ uint16_t venderId;
+ uint16_t deviceId;
+ uint16_t subVenderId;
+ uint16_t subDeviceId;
+ } pci;
+
+ uint32_t rate;
+
+ char str[96];
+
+ struct {
+ uint32_t rtc;
+ uint16_t elapsedSeconds;
+ } time;
+
+ struct {
+ uint32_t ecar;
+ uint32_t elog;
+ char str[64];
+ } ecc;
+
+ uint8_t b[96];
+ uint16_t s[48];
+ uint32_t w[24];
+ uint64_t d[12];
+ } args;
+
+ char description[128];
+} QEMU_PACKED;
+
+struct mfi_evt_list {
+ uint32_t count;
+ uint32_t reserved;
+ struct mfi_evt_detail event[1];
+} QEMU_PACKED;
+
+union mfi_pd_ref {
+ struct {
+ uint16_t device_id;
+ uint16_t seq_num;
+ } v;
+ uint32_t ref;
+} QEMU_PACKED;
+
+union mfi_pd_ddf_type {
+ struct {
+ uint16_t pd_type;
+#define MFI_PD_DDF_TYPE_FORCED_PD_GUID (1 << 0)
+#define MFI_PD_DDF_TYPE_IN_VD (1 << 1)
+#define MFI_PD_DDF_TYPE_IS_GLOBAL_SPARE (1 << 2)
+#define MFI_PD_DDF_TYPE_IS_SPARE (1 << 3)
+#define MFI_PD_DDF_TYPE_IS_FOREIGN (1 << 4)
+#define MFI_PD_DDF_TYPE_INTF_SPI (1 << 12)
+#define MFI_PD_DDF_TYPE_INTF_SAS (1 << 13)
+#define MFI_PD_DDF_TYPE_INTF_SATA1 (1 << 14)
+#define MFI_PD_DDF_TYPE_INTF_SATA3G (1 << 15)
+ uint16_t reserved;
+ } ddf;
+ struct {
+ uint32_t reserved;
+ } non_disk;
+ uint32_t type;
+} QEMU_PACKED;
+
+struct mfi_pd_progress {
+ uint32_t active;
+#define PD_PROGRESS_ACTIVE_REBUILD (1 << 0)
+#define PD_PROGRESS_ACTIVE_PATROL (1 << 1)
+#define PD_PROGRESS_ACTIVE_CLEAR (1 << 2)
+ struct mfi_progress rbld;
+ struct mfi_progress patrol;
+ struct mfi_progress clear;
+ struct mfi_progress reserved[4];
+} QEMU_PACKED;
+
+struct mfi_pd_info {
+ union mfi_pd_ref ref;
+ uint8_t inquiry_data[96];
+ uint8_t vpd_page83[64];
+ uint8_t not_supported;
+ uint8_t scsi_dev_type;
+ uint8_t connected_port_bitmap;
+ uint8_t device_speed;
+ uint32_t media_err_count;
+ uint32_t other_err_count;
+ uint32_t pred_fail_count;
+ uint32_t last_pred_fail_event_seq_num;
+ uint16_t fw_state;
+ uint8_t disable_for_removal;
+ uint8_t link_speed;
+ union mfi_pd_ddf_type state;
+ struct {
+ uint8_t count;
+ uint8_t is_path_broken;
+ uint8_t reserved[6];
+ uint64_t sas_addr[4];
+ } path_info;
+ uint64_t raw_size;
+ uint64_t non_coerced_size;
+ uint64_t coerced_size;
+ uint16_t encl_device_id;
+ uint8_t encl_index;
+ uint8_t slot_number;
+ struct mfi_pd_progress prog_info;
+ uint8_t bad_block_table_full;
+ uint8_t unusable_in_current_config;
+ uint8_t vpd_page83_ext[64];
+ uint8_t reserved[512-358];
+} QEMU_PACKED;
+
+struct mfi_pd_address {
+ uint16_t device_id;
+ uint16_t encl_device_id;
+ uint8_t encl_index;
+ uint8_t slot_number;
+ uint8_t scsi_dev_type;
+ uint8_t connect_port_bitmap;
+ uint64_t sas_addr[2];
+} QEMU_PACKED;
+
+#define MFI_MAX_SYS_PDS 240
+struct mfi_pd_list {
+ uint32_t size;
+ uint32_t count;
+ struct mfi_pd_address addr[MFI_MAX_SYS_PDS];
+} QEMU_PACKED;
+
+union mfi_ld_ref {
+ struct {
+ uint8_t target_id;
+ uint8_t reserved;
+ uint16_t seq;
+ } v;
+ uint32_t ref;
+} QEMU_PACKED;
+
+struct mfi_ld_list {
+ uint32_t ld_count;
+ uint32_t reserved1;
+ struct {
+ union mfi_ld_ref ld;
+ uint8_t state;
+ uint8_t reserved2[3];
+ uint64_t size;
+ } ld_list[MFI_MAX_LD];
+} QEMU_PACKED;
+
+enum mfi_ld_access {
+ MFI_LD_ACCESS_RW = 0,
+ MFI_LD_ACCSSS_RO = 2,
+ MFI_LD_ACCESS_BLOCKED = 3,
+};
+#define MFI_LD_ACCESS_MASK 3
+
+enum mfi_ld_state {
+ MFI_LD_STATE_OFFLINE = 0,
+ MFI_LD_STATE_PARTIALLY_DEGRADED = 1,
+ MFI_LD_STATE_DEGRADED = 2,
+ MFI_LD_STATE_OPTIMAL = 3
+};
+
+enum mfi_syspd_state {
+ MFI_PD_STATE_UNCONFIGURED_GOOD = 0x00,
+ MFI_PD_STATE_UNCONFIGURED_BAD = 0x01,
+ MFI_PD_STATE_HOT_SPARE = 0x02,
+ MFI_PD_STATE_OFFLINE = 0x10,
+ MFI_PD_STATE_FAILED = 0x11,
+ MFI_PD_STATE_REBUILD = 0x14,
+ MFI_PD_STATE_ONLINE = 0x18,
+ MFI_PD_STATE_COPYBACK = 0x20,
+ MFI_PD_STATE_SYSTEM = 0x40
+};
+
+struct mfi_ld_props {
+ union mfi_ld_ref ld;
+ char name[16];
+ uint8_t default_cache_policy;
+ uint8_t access_policy;
+ uint8_t disk_cache_policy;
+ uint8_t current_cache_policy;
+ uint8_t no_bgi;
+ uint8_t reserved[7];
+} QEMU_PACKED;
+
+struct mfi_ld_params {
+ uint8_t primary_raid_level;
+ uint8_t raid_level_qualifier;
+ uint8_t secondary_raid_level;
+ uint8_t stripe_size;
+ uint8_t num_drives;
+ uint8_t span_depth;
+ uint8_t state;
+ uint8_t init_state;
+ uint8_t is_consistent;
+ uint8_t reserved[23];
+} QEMU_PACKED;
+
+struct mfi_ld_progress {
+ uint32_t active;
+#define MFI_LD_PROGRESS_CC (1<<0)
+#define MFI_LD_PROGRESS_BGI (1<<1)
+#define MFI_LD_PROGRESS_FGI (1<<2)
+#define MFI_LD_PORGRESS_RECON (1<<3)
+ struct mfi_progress cc;
+ struct mfi_progress bgi;
+ struct mfi_progress fgi;
+ struct mfi_progress recon;
+ struct mfi_progress reserved[4];
+} QEMU_PACKED;
+
+struct mfi_span {
+ uint64_t start_block;
+ uint64_t num_blocks;
+ uint16_t array_ref;
+ uint8_t reserved[6];
+} QEMU_PACKED;
+
+#define MFI_MAX_SPAN_DEPTH 8
+struct mfi_ld_config {
+ struct mfi_ld_props properties;
+ struct mfi_ld_params params;
+ struct mfi_span span[MFI_MAX_SPAN_DEPTH];
+} QEMU_PACKED;
+
+struct mfi_ld_info {
+ struct mfi_ld_config ld_config;
+ uint64_t size;
+ struct mfi_ld_progress progress;
+ uint16_t cluster_owner;
+ uint8_t reconstruct_active;
+ uint8_t reserved1[1];
+ uint8_t vpd_page83[64];
+ uint8_t reserved2[16];
+} QEMU_PACKED;
+
+union mfi_spare_type {
+ uint8_t flags;
+#define MFI_SPARE_IS_DEDICATED (1 << 0)
+#define MFI_SPARE_IS_REVERTABLE (1 << 1)
+#define MFI_SPARE_IS_ENCL_AFFINITY (1 << 2)
+ uint8_t type;
+} QEMU_PACKED;
+
+#define MFI_MAX_ARRAYS 16
+struct mfi_spare {
+ union mfi_pd_ref ref;
+ union mfi_spare_type spare_type;
+ uint8_t reserved[2];
+ uint8_t array_count;
+ uint16_t array_refd[MFI_MAX_ARRAYS];
+} QEMU_PACKED;
+
+#define MFI_MAX_ROW_SIZE 32
+struct mfi_array {
+ uint64_t size;
+ uint8_t num_drives;
+ uint8_t reserved;
+ uint16_t array_ref;
+ uint8_t pad[20];
+ struct {
+ union mfi_pd_ref ref;
+ uint16_t fw_state; /* enum mfi_syspd_state */
+ struct {
+ uint8_t pd;
+ uint8_t slot;
+ } encl;
+ } pd[MFI_MAX_ROW_SIZE];
+} QEMU_PACKED;
+
+struct mfi_config_data {
+ uint32_t size;
+ uint16_t array_count;
+ uint16_t array_size;
+ uint16_t log_drv_count;
+ uint16_t log_drv_size;
+ uint16_t spares_count;
+ uint16_t spares_size;
+ uint8_t reserved[16];
+ /*
+ struct mfi_array array[];
+ struct mfi_ld_config ld[];
+ struct mfi_spare spare[];
+ */
+} QEMU_PACKED;
+
+#define MFI_SCSI_MAX_TARGETS 128
+#define MFI_SCSI_MAX_LUNS 8
+#define MFI_SCSI_INITIATOR_ID 255
+#define MFI_SCSI_MAX_CMDS 8
+#define MFI_SCSI_MAX_CDB_LEN 16
+
+#endif /* MFI_REG_H */
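
The structures above describe MFI frames as they sit in guest memory; the emulation in hw/megasas.c reads them via DMA. As a rough illustration only (not part of the patch), a consumer that already has a pass-through frame copied into host memory could walk its scatter-gather list as sketched below. The helper name and the standalone includes are invented for the sketch, it assumes QEMU_PACKED is defined by the build environment, and the SGL is indexed with the usual trailing-array idiom even though the union nominally declares a single element.

    #include <stdbool.h>
    #include <stdint.h>
    #include "mfi.h"   /* the header added above */

    /* Sum the element lengths of a pass-through frame's scatter-gather list.
     * The header flags say whether 32-bit or 64-bit SG elements follow;
     * IEEE/skinny SGLs (MFI_FRAME_IEEE_SGL) are ignored here for brevity. */
    static uint32_t mfi_pass_frame_xfer_len(const struct mfi_pass_frame *pass)
    {
        const struct mfi_frame_header *hdr = &pass->header;
        bool sgl64 = hdr->flags & MFI_FRAME_SGL64;
        uint32_t total = 0;
        int i;

        for (i = 0; i < hdr->sge_count; i++) {
            /* sg32[]/sg64[] are declared with one element, but the frame is
             * laid out with sge_count of them in a row after the CDB. */
            total += sgl64 ? pass->sgl.sg64[i].len : pass->sgl.sg32[i].len;
        }
        return total;
    }
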
diff --git a/hw/milkymist-minimac2.c b/hw/milkymist-minimac2.c
index 70bf336add..b483a02f21 100644
--- a/hw/milkymist-minimac2.c
+++ b/hw/milkymist-minimac2.c
@@ -278,7 +278,7 @@ static void update_rx_interrupt(MilkymistMinimac2State *s)
}
}
-static ssize_t minimac2_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t minimac2_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
MilkymistMinimac2State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -408,7 +408,7 @@ static const MemoryRegionOps minimac2_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static int minimac2_can_rx(VLANClientState *nc)
+static int minimac2_can_rx(NetClientState *nc)
{
MilkymistMinimac2State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -422,7 +422,7 @@ static int minimac2_can_rx(VLANClientState *nc)
return 0;
}
-static void minimac2_cleanup(VLANClientState *nc)
+static void minimac2_cleanup(NetClientState *nc)
{
MilkymistMinimac2State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -448,7 +448,7 @@ static void milkymist_minimac2_reset(DeviceState *d)
}
static NetClientInfo net_milkymist_minimac2_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = minimac2_can_rx,
.receive = minimac2_rx,
diff --git a/hw/mips_jazz.c b/hw/mips_jazz.c
index bf1b799c4d..db927f14d0 100644
--- a/hw/mips_jazz.c
+++ b/hw/mips_jazz.c
@@ -239,7 +239,7 @@ static void mips_jazz_init(MemoryRegion *address_space,
dp83932_init(nd, 0x80001000, 2, get_system_memory(), rc4030[4],
rc4030_opaque, rc4030_dma_memory_rw);
break;
- } else if (strcmp(nd->model, "?") == 0) {
+ } else if (is_help_option(nd->model)) {
fprintf(stderr, "qemu: Supported NICs: dp83932\n");
exit(1);
} else {
diff --git a/hw/mips_mipssim.c b/hw/mips_mipssim.c
index eb03047433..830f635597 100644
--- a/hw/mips_mipssim.c
+++ b/hw/mips_mipssim.c
@@ -217,7 +217,7 @@ mips_mipssim_init (ram_addr_t ram_size,
if (serial_hds[0])
serial_init(0x3f8, env->irq[4], 115200, serial_hds[0]);
- if (nd_table[0].vlan)
+ if (nd_table[0].used)
/* MIPSnet uses the MIPS CPU INT0, which is interrupt 2. */
mipsnet_init(0x4200, env->irq[2], &nd_table[0]);
}
diff --git a/hw/mips_r4k.c b/hw/mips_r4k.c
index d68599965a..967a76e533 100644
--- a/hw/mips_r4k.c
+++ b/hw/mips_r4k.c
@@ -283,7 +283,7 @@ void mips_r4k_init (ram_addr_t ram_size,
isa_vga_init(isa_bus);
- if (nd_table[0].vlan)
+ if (nd_table[0].used)
isa_ne2000_init(isa_bus, 0x300, 9, &nd_table[0]);
ide_drive_get(hd, MAX_IDE_BUS);
diff --git a/hw/mipsnet.c b/hw/mipsnet.c
index 31072463f4..28063b1106 100644
--- a/hw/mipsnet.c
+++ b/hw/mipsnet.c
@@ -62,7 +62,7 @@ static int mipsnet_buffer_full(MIPSnetState *s)
return 0;
}
-static int mipsnet_can_receive(VLANClientState *nc)
+static int mipsnet_can_receive(NetClientState *nc)
{
MIPSnetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -71,7 +71,7 @@ static int mipsnet_can_receive(VLANClientState *nc)
return !mipsnet_buffer_full(s);
}
-static ssize_t mipsnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t mipsnet_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
MIPSnetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -209,7 +209,7 @@ static const VMStateDescription vmstate_mipsnet = {
}
};
-static void mipsnet_cleanup(VLANClientState *nc)
+static void mipsnet_cleanup(NetClientState *nc)
{
MIPSnetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -217,7 +217,7 @@ static void mipsnet_cleanup(VLANClientState *nc)
}
static NetClientInfo net_mipsnet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = mipsnet_can_receive,
.receive = mipsnet_receive,
diff --git a/hw/msi.c b/hw/msi.c
index 52332041e7..e2273a09ae 100644
--- a/hw/msi.c
+++ b/hw/msi.c
@@ -105,6 +105,23 @@ static inline uint8_t msi_pending_off(const PCIDevice* dev, bool msi64bit)
return dev->msi_cap + (msi64bit ? PCI_MSI_PENDING_64 : PCI_MSI_PENDING_32);
}
+/*
+ * Special API for POWER to configure the vectors through
+ * a side channel. Should never be used by devices.
+ */
+void msi_set_message(PCIDevice *dev, MSIMessage msg)
+{
+ uint16_t flags = pci_get_word(dev->config + msi_flags_off(dev));
+ bool msi64bit = flags & PCI_MSI_FLAGS_64BIT;
+
+ if (msi64bit) {
+ pci_set_quad(dev->config + msi_address_lo_off(dev), msg.address);
+ } else {
+ pci_set_long(dev->config + msi_address_lo_off(dev), msg.address);
+ }
+ pci_set_word(dev->config + msi_data_off(dev, msi64bit), msg.data);
+}
+
bool msi_enabled(const PCIDevice *dev)
{
return msi_present(dev) &&
diff --git a/hw/msi.h b/hw/msi.h
index 75747abc25..6ec1f99f80 100644
--- a/hw/msi.h
+++ b/hw/msi.h
@@ -31,6 +31,7 @@ struct MSIMessage {
extern bool msi_supported;
+void msi_set_message(PCIDevice *dev, MSIMessage msg);
bool msi_enabled(const PCIDevice *dev);
int msi_init(struct PCIDevice *dev, uint8_t offset,
unsigned int nr_vectors, bool msi64bit, bool msi_per_vector_mask);
diff --git a/hw/msix.c b/hw/msix.c
index ded3c55b92..800fc32f0b 100644
--- a/hw/msix.c
+++ b/hw/msix.c
@@ -27,17 +27,9 @@
#define MSIX_ENABLE_MASK (PCI_MSIX_FLAGS_ENABLE >> 8)
#define MSIX_MASKALL_MASK (PCI_MSIX_FLAGS_MASKALL >> 8)
-/* How much space does an MSIX table need. */
-/* The spec requires giving the table structure
- * a 4K aligned region all by itself. */
-#define MSIX_PAGE_SIZE 0x1000
-/* Reserve second half of the page for pending bits */
-#define MSIX_PAGE_PENDING (MSIX_PAGE_SIZE / 2)
-#define MSIX_MAX_ENTRIES 32
-
static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
{
- uint8_t *table_entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;
+ uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
MSIMessage msg;
msg.address = pci_get_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR);
@@ -45,62 +37,17 @@ static MSIMessage msix_get_message(PCIDevice *dev, unsigned vector)
return msg;
}
-/* Add MSI-X capability to the config space for the device. */
-/* Given a bar and its size, add MSI-X table on top of it
- * and fill MSI-X capability in the config space.
- * Original bar size must be a power of 2 or 0.
- * New bar size is returned. */
-static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
- unsigned bar_nr, unsigned bar_size)
-{
- int config_offset;
- uint8_t *config;
- uint32_t new_size;
-
- if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
- return -EINVAL;
- if (bar_size > 0x80000000)
- return -ENOSPC;
-
- /* Add space for MSI-X structures */
- if (!bar_size) {
- new_size = MSIX_PAGE_SIZE;
- } else if (bar_size < MSIX_PAGE_SIZE) {
- bar_size = MSIX_PAGE_SIZE;
- new_size = MSIX_PAGE_SIZE * 2;
- } else {
- new_size = bar_size * 2;
- }
-
- pdev->msix_bar_size = new_size;
- config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
- 0, MSIX_CAP_LENGTH);
- if (config_offset < 0)
- return config_offset;
- config = pdev->config + config_offset;
-
- pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
- /* Table on top of BAR */
- pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr);
- /* Pending bits on top of that */
- pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) |
- bar_nr);
- pdev->msix_cap = config_offset;
- /* Make flags bit writable. */
- pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
- MSIX_MASKALL_MASK;
- pdev->msix_function_masked = true;
- return 0;
-}
-
-static uint64_t msix_mmio_read(void *opaque, target_phys_addr_t addr,
- unsigned size)
+/*
+ * Special API for POWER to configure the vectors through
+ * a side channel. Should never be used by devices.
+ */
+void msix_set_message(PCIDevice *dev, int vector, struct MSIMessage msg)
{
- PCIDevice *dev = opaque;
- unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
- void *page = dev->msix_table_page;
+ uint8_t *table_entry = dev->msix_table + vector * PCI_MSIX_ENTRY_SIZE;
- return pci_get_long(page + offset);
+ pci_set_quad(table_entry + PCI_MSIX_ENTRY_LOWER_ADDR, msg.address);
+ pci_set_long(table_entry + PCI_MSIX_ENTRY_DATA, msg.data);
+ table_entry[PCI_MSIX_ENTRY_VECTOR_CTRL] &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
static uint8_t msix_pending_mask(int vector)
@@ -110,7 +57,7 @@ static uint8_t msix_pending_mask(int vector)
static uint8_t *msix_pending_byte(PCIDevice *dev, int vector)
{
- return dev->msix_table_page + MSIX_PAGE_PENDING + vector / 8;
+ return dev->msix_pba + vector / 8;
}
static int msix_is_pending(PCIDevice *dev, int vector)
@@ -131,7 +78,7 @@ static void msix_clr_pending(PCIDevice *dev, int vector)
static bool msix_vector_masked(PCIDevice *dev, int vector, bool fmask)
{
unsigned offset = vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
- return fmask || dev->msix_table_page[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ return fmask || dev->msix_table[offset] & PCI_MSIX_ENTRY_CTRL_MASKBIT;
}
static bool msix_is_masked(PCIDevice *dev, int vector)
@@ -210,27 +157,30 @@ void msix_write_config(PCIDevice *dev, uint32_t addr,
}
}
-static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
- uint64_t val, unsigned size)
+static uint64_t msix_table_mmio_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
PCIDevice *dev = opaque;
- unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
- int vector = offset / PCI_MSIX_ENTRY_SIZE;
- bool was_masked;
- /* MSI-X page includes a read-only PBA and a writeable Vector Control. */
- if (vector >= dev->msix_entries_nr) {
- return;
- }
+ return pci_get_long(dev->msix_table + addr);
+}
+
+static void msix_table_mmio_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
+{
+ PCIDevice *dev = opaque;
+ int vector = addr / PCI_MSIX_ENTRY_SIZE;
+ bool was_masked;
was_masked = msix_is_masked(dev, vector);
- pci_set_long(dev->msix_table_page + offset, val);
+ pci_set_long(dev->msix_table + addr, val);
msix_handle_mask_update(dev, vector, was_masked);
}
-static const MemoryRegionOps msix_mmio_ops = {
- .read = msix_mmio_read,
- .write = msix_mmio_write,
+static const MemoryRegionOps msix_table_mmio_ops = {
+ .read = msix_table_mmio_read,
+ .write = msix_table_mmio_write,
+ /* TODO: MSIX should be LITTLE_ENDIAN. */
.endianness = DEVICE_NATIVE_ENDIAN,
.valid = {
.min_access_size = 4,
@@ -238,17 +188,24 @@ static const MemoryRegionOps msix_mmio_ops = {
},
};
-static void msix_mmio_setup(PCIDevice *d, MemoryRegion *bar)
+static uint64_t msix_pba_mmio_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
{
- uint8_t *config = d->config + d->msix_cap;
- uint32_t table = pci_get_long(config + PCI_MSIX_TABLE);
- uint32_t offset = table & ~(MSIX_PAGE_SIZE - 1);
- /* TODO: for assigned devices, we'll want to make it possible to map
- * pending bits separately in case they are in a separate bar. */
+ PCIDevice *dev = opaque;
- memory_region_add_subregion(bar, offset, &d->msix_mmio);
+ return pci_get_long(dev->msix_pba + addr);
}
+static const MemoryRegionOps msix_pba_mmio_ops = {
+ .read = msix_pba_mmio_read,
+ /* TODO: MSIX should be LITTLE_ENDIAN. */
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ },
+};
+
static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
{
int vector;
@@ -258,52 +215,119 @@ static void msix_mask_all(struct PCIDevice *dev, unsigned nentries)
vector * PCI_MSIX_ENTRY_SIZE + PCI_MSIX_ENTRY_VECTOR_CTRL;
bool was_masked = msix_is_masked(dev, vector);
- dev->msix_table_page[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ dev->msix_table[offset] |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
msix_handle_mask_update(dev, vector, was_masked);
}
}
-/* Initialize the MSI-X structures. Note: if MSI-X is supported, BAR size is
- * modified, it should be retrieved with msix_bar_size. */
+/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
- MemoryRegion *bar,
- unsigned bar_nr, unsigned bar_size)
+ MemoryRegion *table_bar, uint8_t table_bar_nr,
+ unsigned table_offset, MemoryRegion *pba_bar,
+ uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
- int ret;
+ int cap;
+ unsigned table_size, pba_size;
+ uint8_t *config;
/* Nothing to do if MSI is not supported by interrupt controller */
if (!msi_supported) {
return -ENOTSUP;
}
- if (nentries > MSIX_MAX_ENTRIES)
+
+ if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
return -EINVAL;
+ }
- dev->msix_entry_used = g_malloc0(MSIX_MAX_ENTRIES *
- sizeof *dev->msix_entry_used);
+ table_size = nentries * PCI_MSIX_ENTRY_SIZE;
+ pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;
- dev->msix_table_page = g_malloc0(MSIX_PAGE_SIZE);
- msix_mask_all(dev, nentries);
+ /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
+ if ((table_bar_nr == pba_bar_nr &&
+ ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
+ table_offset + table_size > memory_region_size(table_bar) ||
+ pba_offset + pba_size > memory_region_size(pba_bar) ||
+ (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
+ return -EINVAL;
+ }
- memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
- "msix", MSIX_PAGE_SIZE);
+ cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
+ if (cap < 0) {
+ return cap;
+ }
+
+ dev->msix_cap = cap;
+ dev->cap_present |= QEMU_PCI_CAP_MSIX;
+ config = dev->config + cap;
+ pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
dev->msix_entries_nr = nentries;
- ret = msix_add_config(dev, nentries, bar_nr, bar_size);
- if (ret)
- goto err_config;
+ dev->msix_function_masked = true;
+
+ pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
+ pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);
+
+ /* Make flags bit writable. */
+ dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
+ MSIX_MASKALL_MASK;
+
+ dev->msix_table = g_malloc0(table_size);
+ dev->msix_pba = g_malloc0(pba_size);
+ dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);
+
+ msix_mask_all(dev, nentries);
+
+ memory_region_init_io(&dev->msix_table_mmio, &msix_table_mmio_ops, dev,
+ "msix-table", table_size);
+ memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
+ memory_region_init_io(&dev->msix_pba_mmio, &msix_pba_mmio_ops, dev,
+ "msix-pba", pba_size);
+ memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);
- dev->cap_present |= QEMU_PCI_CAP_MSIX;
- msix_mmio_setup(dev, bar);
return 0;
+}
-err_config:
- dev->msix_entries_nr = 0;
- memory_region_destroy(&dev->msix_mmio);
- g_free(dev->msix_table_page);
- dev->msix_table_page = NULL;
- g_free(dev->msix_entry_used);
- dev->msix_entry_used = NULL;
- return ret;
+int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
+ uint8_t bar_nr)
+{
+ int ret;
+ char *name;
+
+ /*
+ * Migration compatibility dictates that this remains a 4k
+ * BAR with the vector table in the lower half and PBA in
+ * the upper half. Do not use these elsewhere!
+ */
+#define MSIX_EXCLUSIVE_BAR_SIZE 4096
+#define MSIX_EXCLUSIVE_BAR_TABLE_OFFSET 0
+#define MSIX_EXCLUSIVE_BAR_PBA_OFFSET (MSIX_EXCLUSIVE_BAR_SIZE / 2)
+#define MSIX_EXCLUSIVE_CAP_OFFSET 0
+
+ if (nentries * PCI_MSIX_ENTRY_SIZE > MSIX_EXCLUSIVE_BAR_PBA_OFFSET) {
+ return -EINVAL;
+ }
+
+ if (asprintf(&name, "%s-msix", dev->name) == -1) {
+ return -ENOMEM;
+ }
+
+ memory_region_init(&dev->msix_exclusive_bar, name, MSIX_EXCLUSIVE_BAR_SIZE);
+
+ free(name);
+
+ ret = msix_init(dev, nentries, &dev->msix_exclusive_bar, bar_nr,
+ MSIX_EXCLUSIVE_BAR_TABLE_OFFSET, &dev->msix_exclusive_bar,
+ bar_nr, MSIX_EXCLUSIVE_BAR_PBA_OFFSET,
+ MSIX_EXCLUSIVE_CAP_OFFSET);
+ if (ret) {
+ memory_region_destroy(&dev->msix_exclusive_bar);
+ return ret;
+ }
+
+ pci_register_bar(dev, bar_nr, PCI_BASE_ADDRESS_SPACE_MEMORY,
+ &dev->msix_exclusive_bar);
+
+ return 0;
}
static void msix_free_irq_entries(PCIDevice *dev)
@@ -317,23 +341,35 @@ static void msix_free_irq_entries(PCIDevice *dev)
}
/* Clean up resources for the device. */
-int msix_uninit(PCIDevice *dev, MemoryRegion *bar)
+void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar, MemoryRegion *pba_bar)
{
if (!msix_present(dev)) {
- return 0;
+ return;
}
pci_del_capability(dev, PCI_CAP_ID_MSIX, MSIX_CAP_LENGTH);
dev->msix_cap = 0;
msix_free_irq_entries(dev);
dev->msix_entries_nr = 0;
- memory_region_del_subregion(bar, &dev->msix_mmio);
- memory_region_destroy(&dev->msix_mmio);
- g_free(dev->msix_table_page);
- dev->msix_table_page = NULL;
+ memory_region_del_subregion(pba_bar, &dev->msix_pba_mmio);
+ memory_region_destroy(&dev->msix_pba_mmio);
+ g_free(dev->msix_pba);
+ dev->msix_pba = NULL;
+ memory_region_del_subregion(table_bar, &dev->msix_table_mmio);
+ memory_region_destroy(&dev->msix_table_mmio);
+ g_free(dev->msix_table);
+ dev->msix_table = NULL;
g_free(dev->msix_entry_used);
dev->msix_entry_used = NULL;
dev->cap_present &= ~QEMU_PCI_CAP_MSIX;
- return 0;
+ return;
+}
+
+void msix_uninit_exclusive_bar(PCIDevice *dev)
+{
+ if (msix_present(dev)) {
+ msix_uninit(dev, &dev->msix_exclusive_bar, &dev->msix_exclusive_bar);
+ memory_region_destroy(&dev->msix_exclusive_bar);
+ }
}
void msix_save(PCIDevice *dev, QEMUFile *f)
@@ -344,8 +380,8 @@ void msix_save(PCIDevice *dev, QEMUFile *f)
return;
}
- qemu_put_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
- qemu_put_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
+ qemu_put_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
+ qemu_put_buffer(f, dev->msix_pba, (n + 7) / 8);
}
/* Should be called after restoring the config space. */
@@ -359,8 +395,8 @@ void msix_load(PCIDevice *dev, QEMUFile *f)
}
msix_free_irq_entries(dev);
- qemu_get_buffer(f, dev->msix_table_page, n * PCI_MSIX_ENTRY_SIZE);
- qemu_get_buffer(f, dev->msix_table_page + MSIX_PAGE_PENDING, (n + 7) / 8);
+ qemu_get_buffer(f, dev->msix_table, n * PCI_MSIX_ENTRY_SIZE);
+ qemu_get_buffer(f, dev->msix_pba, (n + 7) / 8);
msix_update_function_masked(dev);
for (vector = 0; vector < n; vector++) {
@@ -382,13 +418,6 @@ int msix_enabled(PCIDevice *dev)
MSIX_ENABLE_MASK);
}
-/* Size of bar where MSI-X table resides, or 0 if MSI-X not supported. */
-uint32_t msix_bar_size(PCIDevice *dev)
-{
- return (dev->cap_present & QEMU_PCI_CAP_MSIX) ?
- dev->msix_bar_size : 0;
-}
-
/* Send an MSI-X message */
void msix_notify(PCIDevice *dev, unsigned vector)
{
@@ -414,7 +443,8 @@ void msix_reset(PCIDevice *dev)
msix_free_irq_entries(dev);
dev->config[dev->msix_cap + MSIX_CONTROL_OFFSET] &=
~dev->wmask[dev->msix_cap + MSIX_CONTROL_OFFSET];
- memset(dev->msix_table_page, 0, MSIX_PAGE_SIZE);
+ memset(dev->msix_table, 0, dev->msix_entries_nr * PCI_MSIX_ENTRY_SIZE);
+ memset(dev->msix_pba, 0, QEMU_ALIGN_UP(dev->msix_entries_nr, 64) / 8);
msix_mask_all(dev, dev->msix_entries_nr);
}
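
For callers converting to the reworked interface, the new msix_init() takes the vector table and PBA locations explicitly instead of carving a fixed 4K page out of one BAR. A hypothetical caller is sketched below; the device name, BAR number and offsets are made up for illustration, only the msix_init() signature and its -ENOTSUP behaviour come from the change above, and the sketch assumes the tree's hw/pci.h and hw/msix.h.

    /* Place a 16-entry vector table at offset 0 and the PBA at offset 0x800 of
     * the same memory BAR (BAR 1), letting pci_add_capability() pick the
     * config-space position (cap_pos == 0). Assumes the BAR's MemoryRegion is
     * at least 0x808 bytes long. */
    static int mydev_init_msix(PCIDevice *dev, MemoryRegion *bar)
    {
        int ret = msix_init(dev, 16,
                            bar, 1, 0x000,   /* table BAR, BAR nr, offset */
                            bar, 1, 0x800,   /* PBA BAR, BAR nr, offset */
                            0);              /* cap_pos */
        if (ret == -ENOTSUP) {
            return 0;   /* interrupt controller has no MSI: fall back to INTx */
        }
        return ret;
    }
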
diff --git a/hw/msix.h b/hw/msix.h
index 50aee8221a..15211cb592 100644
--- a/hw/msix.h
+++ b/hw/msix.h
@@ -4,14 +4,19 @@
#include "qemu-common.h"
#include "pci.h"
-int msix_init(PCIDevice *pdev, unsigned short nentries,
- MemoryRegion *bar,
- unsigned bar_nr, unsigned bar_size);
+void msix_set_message(PCIDevice *dev, int vector, MSIMessage msg);
+int msix_init(PCIDevice *dev, unsigned short nentries,
+ MemoryRegion *table_bar, uint8_t table_bar_nr,
+ unsigned table_offset, MemoryRegion *pba_bar,
+ uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos);
+int msix_init_exclusive_bar(PCIDevice *dev, unsigned short nentries,
+ uint8_t bar_nr);
-void msix_write_config(PCIDevice *pci_dev, uint32_t address,
- uint32_t val, int len);
+void msix_write_config(PCIDevice *dev, uint32_t address, uint32_t val, int len);
-int msix_uninit(PCIDevice *d, MemoryRegion *bar);
+void msix_uninit(PCIDevice *dev, MemoryRegion *table_bar,
+ MemoryRegion *pba_bar);
+void msix_uninit_exclusive_bar(PCIDevice *dev);
unsigned int msix_nr_vectors_allocated(const PCIDevice *dev);
@@ -21,8 +26,6 @@ void msix_load(PCIDevice *dev, QEMUFile *f);
int msix_enabled(PCIDevice *dev);
int msix_present(PCIDevice *dev);
-uint32_t msix_bar_size(PCIDevice *dev);
-
int msix_vector_use(PCIDevice *dev, unsigned vector);
void msix_vector_unuse(PCIDevice *dev, unsigned vector);
void msix_unuse_all_vectors(PCIDevice *dev);
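
Devices that are happy to dedicate a whole BAR to MSI-X can instead use the new msix_init_exclusive_bar()/msix_uninit_exclusive_bar() pair, which keeps the pre-existing 4K layout (table in the lower half, PBA in the upper half) and registers the BAR itself. A minimal, hypothetical sketch (device name and vector count invented, signatures from the header above):

    static int mydev_pci_init(PCIDevice *dev)
    {
        /* 8 vectors in a dedicated BAR 1; the helper calls pci_register_bar()
         * internally, so the device must not register BAR 1 again. */
        int ret = msix_init_exclusive_bar(dev, 8, 1);
        if (ret && ret != -ENOTSUP) {
            return ret;
        }
        return 0;
    }

    static void mydev_pci_exit(PCIDevice *dev)
    {
        msix_uninit_exclusive_bar(dev);
    }
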
diff --git a/hw/musicpal.c b/hw/musicpal.c
index f14f20d689..ad725b5599 100644
--- a/hw/musicpal.c
+++ b/hw/musicpal.c
@@ -182,12 +182,12 @@ static void eth_rx_desc_get(uint32_t addr, mv88w8618_rx_desc *desc)
le32_to_cpus(&desc->next);
}
-static int eth_can_receive(VLANClientState *nc)
+static int eth_can_receive(NetClientState *nc)
{
return 1;
}
-static ssize_t eth_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
mv88w8618_eth_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
uint32_t desc_addr;
@@ -366,7 +366,7 @@ static const MemoryRegionOps mv88w8618_eth_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void eth_cleanup(VLANClientState *nc)
+static void eth_cleanup(NetClientState *nc)
{
mv88w8618_eth_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -374,7 +374,7 @@ static void eth_cleanup(VLANClientState *nc)
}
static NetClientInfo net_mv88w8618_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_receive,
.receive = eth_receive,
diff --git a/hw/ne2000-isa.c b/hw/ne2000-isa.c
index a4a783ab89..69982a9abb 100644
--- a/hw/ne2000-isa.c
+++ b/hw/ne2000-isa.c
@@ -36,7 +36,7 @@ typedef struct ISANE2000State {
NE2000State ne2000;
} ISANE2000State;
-static void isa_ne2000_cleanup(VLANClientState *nc)
+static void isa_ne2000_cleanup(NetClientState *nc)
{
NE2000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -44,7 +44,7 @@ static void isa_ne2000_cleanup(VLANClientState *nc)
}
static NetClientInfo net_ne2000_isa_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = ne2000_can_receive,
.receive = ne2000_receive,
diff --git a/hw/ne2000.c b/hw/ne2000.c
index d02e60c4a6..15605c478f 100644
--- a/hw/ne2000.c
+++ b/hw/ne2000.c
@@ -165,7 +165,7 @@ static int ne2000_buffer_full(NE2000State *s)
return 0;
}
-int ne2000_can_receive(VLANClientState *nc)
+int ne2000_can_receive(NetClientState *nc)
{
NE2000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -176,7 +176,7 @@ int ne2000_can_receive(VLANClientState *nc)
#define MIN_BUF_SIZE 60
-ssize_t ne2000_receive(VLANClientState *nc, const uint8_t *buf, size_t size_)
+ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
{
NE2000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
int size = size_;
@@ -677,15 +677,15 @@ static void ne2000_write(void *opaque, target_phys_addr_t addr,
NE2000State *s = opaque;
if (addr < 0x10 && size == 1) {
- return ne2000_ioport_write(s, addr, data);
+ ne2000_ioport_write(s, addr, data);
} else if (addr == 0x10) {
if (size <= 2) {
- return ne2000_asic_ioport_write(s, addr, data);
+ ne2000_asic_ioport_write(s, addr, data);
} else {
- return ne2000_asic_ioport_writel(s, addr, data);
+ ne2000_asic_ioport_writel(s, addr, data);
}
} else if (addr == 0x1f && size == 1) {
- return ne2000_reset_ioport_write(s, addr, data);
+ ne2000_reset_ioport_write(s, addr, data);
}
}
@@ -703,7 +703,7 @@ void ne2000_setup_io(NE2000State *s, unsigned size)
memory_region_init_io(&s->io, &ne2000_ops, s, "ne2000", size);
}
-static void ne2000_cleanup(VLANClientState *nc)
+static void ne2000_cleanup(NetClientState *nc)
{
NE2000State *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -711,7 +711,7 @@ static void ne2000_cleanup(VLANClientState *nc)
}
static NetClientInfo net_ne2000_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = ne2000_can_receive,
.receive = ne2000_receive,
@@ -744,14 +744,13 @@ static int pci_ne2000_init(PCIDevice *pci_dev)
return 0;
}
-static int pci_ne2000_exit(PCIDevice *pci_dev)
+static void pci_ne2000_exit(PCIDevice *pci_dev)
{
PCINE2000State *d = DO_UPCAST(PCINE2000State, dev, pci_dev);
NE2000State *s = &d->ne2000;
memory_region_destroy(&s->io);
- qemu_del_vlan_client(&s->nic->nc);
- return 0;
+ qemu_del_net_client(&s->nic->nc);
}
static Property ne2000_properties[] = {
diff --git a/hw/ne2000.h b/hw/ne2000.h
index 5fee052194..1e7ab073e3 100644
--- a/hw/ne2000.h
+++ b/hw/ne2000.h
@@ -31,5 +31,5 @@ typedef struct NE2000State {
void ne2000_setup_io(NE2000State *s, unsigned size);
extern const VMStateDescription vmstate_ne2000;
void ne2000_reset(NE2000State *s);
-int ne2000_can_receive(VLANClientState *vc);
-ssize_t ne2000_receive(VLANClientState *vc, const uint8_t *buf, size_t size_);
+int ne2000_can_receive(NetClientState *nc);
+ssize_t ne2000_receive(NetClientState *nc, const uint8_t *buf, size_t size_);
diff --git a/hw/omap.h b/hw/omap.h
index 3d98941b72..413851bc34 100644
--- a/hw/omap.h
+++ b/hw/omap.h
@@ -942,13 +942,7 @@ struct omap_mpu_state_s *omap2420_mpu_init(MemoryRegion *sysmem,
unsigned long sdram_size,
const char *core);
-# if TARGET_PHYS_ADDR_BITS == 32
-# define OMAP_FMT_plx "%#08x"
-# elif TARGET_PHYS_ADDR_BITS == 64
-# define OMAP_FMT_plx "%#08" PRIx64
-# else
-# error TARGET_PHYS_ADDR_BITS undefined
-# endif
+#define OMAP_FMT_plx "%#08" TARGET_PRIxPHYS
uint32_t omap_badwidth_read8(void *opaque, target_phys_addr_t addr);
void omap_badwidth_write8(void *opaque, target_phys_addr_t addr,
diff --git a/hw/opencores_eth.c b/hw/opencores_eth.c
index 350f73173a..8c15969e2b 100644
--- a/hw/opencores_eth.c
+++ b/hw/opencores_eth.c
@@ -311,7 +311,7 @@ static void open_eth_int_source_write(OpenEthState *s,
s->regs[INT_SOURCE] & s->regs[INT_MASK]);
}
-static void open_eth_set_link_status(VLANClientState *nc)
+static void open_eth_set_link_status(NetClientState *nc)
{
OpenEthState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -342,7 +342,7 @@ static void open_eth_reset(void *opaque)
open_eth_set_link_status(&s->nic->nc);
}
-static int open_eth_can_receive(VLANClientState *nc)
+static int open_eth_can_receive(NetClientState *nc)
{
OpenEthState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -351,7 +351,7 @@ static int open_eth_can_receive(VLANClientState *nc)
(rx_desc(s)->len_flags & RXD_E);
}
-static ssize_t open_eth_receive(VLANClientState *nc,
+static ssize_t open_eth_receive(NetClientState *nc,
const uint8_t *buf, size_t size)
{
OpenEthState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -462,12 +462,12 @@ static ssize_t open_eth_receive(VLANClientState *nc,
return size;
}
-static void open_eth_cleanup(VLANClientState *nc)
+static void open_eth_cleanup(NetClientState *nc)
{
}
static NetClientInfo net_open_eth_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = open_eth_can_receive,
.receive = open_eth_receive,
diff --git a/hw/openrisc/Makefile.objs b/hw/openrisc/Makefile.objs
new file mode 100644
index 0000000000..38ff8f5d6d
--- /dev/null
+++ b/hw/openrisc/Makefile.objs
@@ -0,0 +1,3 @@
+obj-y = openrisc_pic.o openrisc_sim.o openrisc_timer.o
+
+obj-y := $(addprefix ../,$(obj-y))
diff --git a/hw/openrisc_pic.c b/hw/openrisc_pic.c
new file mode 100644
index 0000000000..aaeb9a9171
--- /dev/null
+++ b/hw/openrisc_pic.c
@@ -0,0 +1,60 @@
+/*
+ * OpenRISC Programmable Interrupt Controller support.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw.h"
+#include "cpu.h"
+
+/* OpenRISC pic handler */
+static void openrisc_pic_cpu_handler(void *opaque, int irq, int level)
+{
+ OpenRISCCPU *cpu = (OpenRISCCPU *)opaque;
+ int i;
+ uint32_t irq_bit = 1 << irq;
+
+ if (irq > 31 || irq < 0) {
+ return;
+ }
+
+ if (level) {
+ cpu->env.picsr |= irq_bit;
+ } else {
+ cpu->env.picsr &= ~irq_bit;
+ }
+
+ for (i = 0; i < 32; i++) {
+ if ((cpu->env.picsr & (1 << i)) && (cpu->env.picmr & (1 << i))) {
+ cpu_interrupt(&cpu->env, CPU_INTERRUPT_HARD);
+ } else {
+ cpu_reset_interrupt(&cpu->env, CPU_INTERRUPT_HARD);
+ cpu->env.picsr &= ~(1 << i);
+ }
+ }
+}
+
+void cpu_openrisc_pic_init(OpenRISCCPU *cpu)
+{
+ int i;
+ qemu_irq *qi;
+ qi = qemu_allocate_irqs(openrisc_pic_cpu_handler, cpu, NR_IRQS);
+
+ for (i = 0; i < NR_IRQS; i++) {
+ cpu->env.irq[i] = qi[i];
+ }
+}
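The per-line check in openrisc_pic_cpu_handler() above is a bitwise test: an interrupt is delivered only when a line's bit is set in both PICSR (pending) and PICMR (mask). A minimal standalone sketch of that check, outside QEMU and assuming plain 32-bit pending/mask values:

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: true when at least one pending line (picsr) is also
 * enabled in the mask register (picmr).  Bitwise AND, not logical &&. */
static bool openrisc_pic_pending(uint32_t picsr, uint32_t picmr)
{
    return (picsr & picmr) != 0;
}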
diff --git a/hw/openrisc_sim.c b/hw/openrisc_sim.c
new file mode 100644
index 0000000000..55e97f0959
--- /dev/null
+++ b/hw/openrisc_sim.c
@@ -0,0 +1,150 @@
+/*
+ * OpenRISC simulator for use as an IIS.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw.h"
+#include "boards.h"
+#include "elf.h"
+#include "pc.h"
+#include "loader.h"
+#include "exec-memory.h"
+#include "sysemu.h"
+#include "sysbus.h"
+#include "qtest.h"
+
+#define KERNEL_LOAD_ADDR 0x100
+
+static void main_cpu_reset(void *opaque)
+{
+ OpenRISCCPU *cpu = opaque;
+
+ cpu_reset(CPU(cpu));
+}
+
+static void openrisc_sim_net_init(MemoryRegion *address_space,
+ target_phys_addr_t base,
+ target_phys_addr_t descriptors,
+ qemu_irq irq, NICInfo *nd)
+{
+ DeviceState *dev;
+ SysBusDevice *s;
+
+ dev = qdev_create(NULL, "open_eth");
+ qdev_set_nic_properties(dev, nd);
+ qdev_init_nofail(dev);
+
+ s = sysbus_from_qdev(dev);
+ sysbus_connect_irq(s, 0, irq);
+ memory_region_add_subregion(address_space, base,
+ sysbus_mmio_get_region(s, 0));
+ memory_region_add_subregion(address_space, descriptors,
+ sysbus_mmio_get_region(s, 1));
+}
+
+static void cpu_openrisc_load_kernel(ram_addr_t ram_size,
+ const char *kernel_filename,
+ OpenRISCCPU *cpu)
+{
+ long kernel_size;
+ uint64_t elf_entry;
+ target_phys_addr_t entry;
+
+ if (kernel_filename && !qtest_enabled()) {
+ kernel_size = load_elf(kernel_filename, NULL, NULL,
+ &elf_entry, NULL, NULL, 1, ELF_MACHINE, 1);
+ entry = elf_entry;
+ if (kernel_size < 0) {
+ kernel_size = load_uimage(kernel_filename,
+ &entry, NULL, NULL);
+ }
+ if (kernel_size < 0) {
+ kernel_size = load_image_targphys(kernel_filename,
+ KERNEL_LOAD_ADDR,
+ ram_size - KERNEL_LOAD_ADDR);
+ entry = KERNEL_LOAD_ADDR;
+ }
+
+ if (kernel_size < 0) {
+ qemu_log("QEMU: couldn't load the kernel '%s'\n",
+ kernel_filename);
+ exit(1);
+ }
+
+ cpu->env.pc = entry;
+ }
+}
+
+static void openrisc_sim_init(ram_addr_t ram_size,
+ const char *boot_device,
+ const char *kernel_filename,
+ const char *kernel_cmdline,
+ const char *initrd_filename,
+ const char *cpu_model)
+{
+ OpenRISCCPU *cpu = NULL;
+ MemoryRegion *ram;
+ int n;
+
+ if (!cpu_model) {
+ cpu_model = "or1200";
+ }
+
+ for (n = 0; n < smp_cpus; n++) {
+ cpu = cpu_openrisc_init(cpu_model);
+ if (cpu == NULL) {
+ qemu_log("Unable to find CPU definition!\n");
+ exit(1);
+ }
+ qemu_register_reset(main_cpu_reset, cpu);
+ main_cpu_reset(cpu);
+ }
+
+ ram = g_malloc(sizeof(*ram));
+ memory_region_init_ram(ram, "openrisc.ram", ram_size);
+ vmstate_register_ram_global(ram);
+ memory_region_add_subregion(get_system_memory(), 0, ram);
+
+ cpu_openrisc_pic_init(cpu);
+ cpu_openrisc_clock_init(cpu);
+
+ serial_mm_init(get_system_memory(), 0x90000000, 0, cpu->env.irq[2],
+ 115200, serial_hds[0], DEVICE_NATIVE_ENDIAN);
+
+ if (nd_table[0].used) {
+ openrisc_sim_net_init(get_system_memory(), 0x92000000,
+ 0x92000400, cpu->env.irq[4], nd_table);
+ }
+
+ cpu_openrisc_load_kernel(ram_size, kernel_filename, cpu);
+}
+
+static QEMUMachine openrisc_sim_machine = {
+ .name = "or32-sim",
+ .desc = "or32 simulation",
+ .init = openrisc_sim_init,
+ .max_cpus = 1,
+ .is_default = 1,
+};
+
+static void openrisc_sim_machine_init(void)
+{
+ qemu_register_machine(&openrisc_sim_machine);
+}
+
+machine_init(openrisc_sim_machine_init);
diff --git a/hw/openrisc_timer.c b/hw/openrisc_timer.c
new file mode 100644
index 0000000000..7916e61d24
--- /dev/null
+++ b/hw/openrisc_timer.c
@@ -0,0 +1,101 @@
+/*
+ * QEMU OpenRISC timer support
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "hw.h"
+#include "qemu-timer.h"
+
+#define TIMER_FREQ (20 * 1000 * 1000) /* 20MHz */
+
+/* The time when TTCR changes */
+static uint64_t last_clk;
+static int is_counting;
+
+void cpu_openrisc_count_update(OpenRISCCPU *cpu)
+{
+ uint64_t now, next;
+ uint32_t wait;
+
+ now = qemu_get_clock_ns(vm_clock);
+ if (!is_counting) {
+ qemu_del_timer(cpu->env.timer);
+ last_clk = now;
+ return;
+ }
+
+ cpu->env.ttcr += (uint32_t)muldiv64(now - last_clk, TIMER_FREQ,
+ get_ticks_per_sec());
+ last_clk = now;
+
+ if ((cpu->env.ttmr & TTMR_TP) <= (cpu->env.ttcr & TTMR_TP)) {
+ wait = TTMR_TP - (cpu->env.ttcr & TTMR_TP) + 1;
+ wait += cpu->env.ttmr & TTMR_TP;
+ } else {
+ wait = (cpu->env.ttmr & TTMR_TP) - (cpu->env.ttcr & TTMR_TP);
+ }
+
+ next = now + muldiv64(wait, get_ticks_per_sec(), TIMER_FREQ);
+ qemu_mod_timer(cpu->env.timer, next);
+}
+
+void cpu_openrisc_count_start(OpenRISCCPU *cpu)
+{
+ is_counting = 1;
+ cpu_openrisc_count_update(cpu);
+}
+
+void cpu_openrisc_count_stop(OpenRISCCPU *cpu)
+{
+ is_counting = 0;
+ cpu_openrisc_count_update(cpu);
+}
+
+static void openrisc_timer_cb(void *opaque)
+{
+ OpenRISCCPU *cpu = opaque;
+
+ if ((cpu->env.ttmr & TTMR_IE) &&
+ qemu_timer_expired(cpu->env.timer, qemu_get_clock_ns(vm_clock))) {
+ cpu->env.ttmr |= TTMR_IP;
+ cpu->env.interrupt_request |= CPU_INTERRUPT_TIMER;
+ }
+
+ switch (cpu->env.ttmr & TTMR_M) {
+ case TIMER_NONE:
+ break;
+ case TIMER_INTR:
+ cpu->env.ttcr = 0;
+ cpu_openrisc_count_start(cpu);
+ break;
+ case TIMER_SHOT:
+ cpu_openrisc_count_stop(cpu);
+ break;
+ case TIMER_CONT:
+ cpu_openrisc_count_start(cpu);
+ break;
+ }
+}
+
+void cpu_openrisc_clock_init(OpenRISCCPU *cpu)
+{
+ cpu->env.timer = qemu_new_timer_ns(vm_clock, &openrisc_timer_cb, cpu);
+ cpu->env.ttmr = 0x00000000;
+ cpu->env.ttcr = 0x00000000;
+}
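cpu_openrisc_count_update() above converts elapsed vm_clock nanoseconds into 20 MHz timer ticks and the remaining ticks back into a deadline for qemu_mod_timer(), both via muldiv64(). A standalone sketch of that arithmetic (assumption: a compiler with 128-bit integers for the intermediate product, which muldiv64() provides internally):

#include <stdint.h>

#define TIMER_FREQ  20000000ULL      /* 20 MHz, as above             */
#define NS_PER_SEC  1000000000ULL    /* get_ticks_per_sec() in QEMU  */

/* Elapsed nanoseconds -> ticks added to TTCR. */
static uint64_t ns_to_ticks(uint64_t ns)
{
    return (uint64_t)((unsigned __int128)ns * TIMER_FREQ / NS_PER_SEC);
}

/* Ticks until the next TTMR_TP match -> nanoseconds until the alarm. */
static uint64_t ticks_to_ns(uint64_t ticks)
{
    return (uint64_t)((unsigned __int128)ticks * NS_PER_SEC / TIMER_FREQ);
}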
diff --git a/hw/pc.c b/hw/pc.c
index c7e9ab3ee1..81c391cd6a 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -44,10 +44,12 @@
#include "kvm.h"
#include "xen.h"
#include "blockdev.h"
+#include "hw/block-common.h"
#include "ui/qemu-spice.h"
#include "memory.h"
#include "exec-memory.h"
#include "arch_init.h"
+#include "bitmap.h"
/* output Bochs bios info messages */
//#define DEBUG_BIOS
@@ -216,11 +218,9 @@ static int cmos_get_fd_drive_type(FDriveType fd0)
return val;
}
-static void cmos_init_hd(int type_ofs, int info_ofs, BlockDriverState *hd,
- ISADevice *s)
+static void cmos_init_hd(ISADevice *s, int type_ofs, int info_ofs,
+ int16_t cylinders, int8_t heads, int8_t sectors)
{
- int cylinders, heads, sectors;
- bdrv_get_geometry_hint(hd, &cylinders, &heads, &sectors);
rtc_set_memory(s, type_ofs, 47);
rtc_set_memory(s, info_ofs, cylinders);
rtc_set_memory(s, info_ofs + 1, cylinders >> 8);
@@ -281,48 +281,42 @@ static int pc_boot_set(void *opaque, const char *boot_device)
typedef struct pc_cmos_init_late_arg {
ISADevice *rtc_state;
- BusState *idebus0, *idebus1;
+ BusState *idebus[2];
} pc_cmos_init_late_arg;
static void pc_cmos_init_late(void *opaque)
{
pc_cmos_init_late_arg *arg = opaque;
ISADevice *s = arg->rtc_state;
+ int16_t cylinders;
+ int8_t heads, sectors;
int val;
- BlockDriverState *hd_table[4];
- int i;
-
- ide_get_bs(hd_table, arg->idebus0);
- ide_get_bs(hd_table + 2, arg->idebus1);
+ int i, trans;
- rtc_set_memory(s, 0x12, (hd_table[0] ? 0xf0 : 0) | (hd_table[1] ? 0x0f : 0));
- if (hd_table[0])
- cmos_init_hd(0x19, 0x1b, hd_table[0], s);
- if (hd_table[1])
- cmos_init_hd(0x1a, 0x24, hd_table[1], s);
+ val = 0;
+ if (ide_get_geometry(arg->idebus[0], 0,
+ &cylinders, &heads, &sectors) >= 0) {
+ cmos_init_hd(s, 0x19, 0x1b, cylinders, heads, sectors);
+ val |= 0xf0;
+ }
+ if (ide_get_geometry(arg->idebus[0], 1,
+ &cylinders, &heads, &sectors) >= 0) {
+ cmos_init_hd(s, 0x1a, 0x24, cylinders, heads, sectors);
+ val |= 0x0f;
+ }
+ rtc_set_memory(s, 0x12, val);
val = 0;
for (i = 0; i < 4; i++) {
- if (hd_table[i]) {
- int cylinders, heads, sectors, translation;
- /* NOTE: bdrv_get_geometry_hint() returns the physical
- geometry. It is always such that: 1 <= sects <= 63, 1
- <= heads <= 16, 1 <= cylinders <= 16383. The BIOS
- geometry can be different if a translation is done. */
- translation = bdrv_get_translation_hint(hd_table[i]);
- if (translation == BIOS_ATA_TRANSLATION_AUTO) {
- bdrv_get_geometry_hint(hd_table[i], &cylinders, &heads, &sectors);
- if (cylinders <= 1024 && heads <= 16 && sectors <= 63) {
- /* No translation. */
- translation = 0;
- } else {
- /* LBA translation. */
- translation = 1;
- }
- } else {
- translation--;
- }
- val |= translation << (i * 2);
+ /* NOTE: ide_get_geometry() returns the physical
+ geometry. It is always such that: 1 <= sects <= 63, 1
+ <= heads <= 16, 1 <= cylinders <= 16383. The BIOS
+ geometry can be different if a translation is done. */
+ if (ide_get_geometry(arg->idebus[i / 2], i % 2,
+ &cylinders, &heads, &sectors) >= 0) {
+ trans = ide_get_bios_chs_trans(arg->idebus[i / 2], i % 2) - 1;
+ assert((trans & ~3) == 0);
+ val |= trans << (i * 2);
}
}
rtc_set_memory(s, 0x39, val);
@@ -335,10 +329,8 @@ void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size,
ISADevice *floppy, BusState *idebus0, BusState *idebus1,
ISADevice *s)
{
- int val, nb, nb_heads, max_track, last_sect, i;
+ int val, nb, i;
FDriveType fd_type[2] = { FDRIVE_DRV_NONE, FDRIVE_DRV_NONE };
- FDriveRate rate;
- BlockDriverState *fd[MAX_FD];
static pc_cmos_init_late_arg arg;
/* various important CMOS locations needed by PC/Bochs bios */
@@ -381,13 +373,8 @@ void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size,
/* floppy type */
if (floppy) {
- fdc_get_bs(fd, floppy);
for (i = 0; i < 2; i++) {
- if (fd[i]) {
- bdrv_get_floppy_geometry_hint(fd[i], &nb_heads, &max_track,
- &last_sect, FDRIVE_DRV_NONE,
- &fd_type[i], &rate);
- }
+ fd_type[i] = isa_fdc_get_drive_type(floppy, i);
}
}
val = (cmos_get_fd_drive_type(fd_type[0]) << 4) |
@@ -418,8 +405,8 @@ void pc_cmos_init(ram_addr_t ram_size, ram_addr_t above_4g_mem_size,
/* hard drives */
arg.rtc_state = s;
- arg.idebus0 = idebus0;
- arg.idebus1 = idebus1;
+ arg.idebus[0] = idebus0;
+ arg.idebus[1] = idebus1;
qemu_register_reset(pc_cmos_init_late, &arg);
}
@@ -639,7 +626,7 @@ static void *bochs_bios_init(void)
numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes);
for (i = 0; i < max_cpus; i++) {
for (j = 0; j < nb_numa_nodes; j++) {
- if (node_cpumask[j] & (1 << i)) {
+ if (test_bit(i, node_cpumask[j])) {
numa_fw_cfg[i + 1] = cpu_to_le64(j);
break;
}
@@ -871,12 +858,6 @@ void pc_init_ne2k_isa(ISABus *bus, NICInfo *nd)
nb_ne2k++;
}
-int cpu_is_bsp(CPUX86State *env)
-{
- /* We hard-wire the BSP to the first CPU. */
- return env->cpu_index == 0;
-}
-
DeviceState *cpu_get_current_apic(void)
{
if (cpu_single_env) {
@@ -924,15 +905,6 @@ void pc_acpi_smi_interrupt(void *opaque, int irq, int level)
}
}
-static void pc_cpu_reset(void *opaque)
-{
- X86CPU *cpu = opaque;
- CPUX86State *env = &cpu->env;
-
- cpu_reset(CPU(cpu));
- env->halted = !cpu_is_bsp(env);
-}
-
static X86CPU *pc_new_cpu(const char *cpu_model)
{
X86CPU *cpu;
@@ -947,8 +919,7 @@ static X86CPU *pc_new_cpu(const char *cpu_model)
if ((env->cpuid_features & CPUID_APIC) || smp_cpus > 1) {
env->apic_state = apic_init(env, env->cpuid_apic_id);
}
- qemu_register_reset(pc_cpu_reset, cpu);
- pc_cpu_reset(cpu);
+ cpu_reset(CPU(cpu));
return cpu;
}
diff --git a/hw/pc_piix.c b/hw/pc_piix.c
index eae258cefd..0c0096fd7e 100644
--- a/hw/pc_piix.c
+++ b/hw/pc_piix.c
@@ -349,8 +349,8 @@ static void pc_xen_hvm_init(ram_addr_t ram_size,
}
#endif
-static QEMUMachine pc_machine_v1_1 = {
- .name = "pc-1.1",
+static QEMUMachine pc_machine_v1_2 = {
+ .name = "pc-1.2",
.alias = "pc",
.desc = "Standard PC",
.init = pc_init_pci,
@@ -358,7 +358,38 @@ static QEMUMachine pc_machine_v1_1 = {
.is_default = 1,
};
+#define PC_COMPAT_1_1 \
+ {\
+ .driver = "VGA",\
+ .property = "vgamem_mb",\
+ .value = stringify(8),\
+ },{\
+ .driver = "vmware-svga",\
+ .property = "vgamem_mb",\
+ .value = stringify(8),\
+ },{\
+ .driver = "qxl-vga",\
+ .property = "vgamem_mb",\
+ .value = stringify(8),\
+ },{\
+ .driver = "qxl",\
+ .property = "vgamem_mb",\
+ .value = stringify(8),\
+ }
+
+static QEMUMachine pc_machine_v1_1 = {
+ .name = "pc-1.1",
+ .desc = "Standard PC",
+ .init = pc_init_pci,
+ .max_cpus = 255,
+ .compat_props = (GlobalProperty[]) {
+ PC_COMPAT_1_1,
+ { /* end of list */ }
+ },
+};
+
#define PC_COMPAT_1_0 \
+ PC_COMPAT_1_1,\
{\
.driver = "pc-sysfw",\
.property = "rom_only",\
@@ -612,6 +643,7 @@ static QEMUMachine xenfv_machine = {
static void pc_machine_init(void)
{
+ qemu_register_machine(&pc_machine_v1_2);
qemu_register_machine(&pc_machine_v1_1);
qemu_register_machine(&pc_machine_v1_0);
qemu_register_machine(&pc_machine_v0_15);
diff --git a/hw/pci.c b/hw/pci.c
index bdfb3d6540..4d95984807 100644
--- a/hw/pci.c
+++ b/hw/pci.c
@@ -775,6 +775,9 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, PCIBus *bus,
return NULL;
}
pci_dev->bus = bus;
+ if (bus->dma_context_fn) {
+ pci_dev->dma = bus->dma_context_fn(bus, bus->dma_context_opaque, devfn);
+ }
pci_dev->devfn = devfn;
pstrcpy(pci_dev->name, sizeof(pci_dev->name), name);
pci_dev->irq_state = 0;
@@ -846,15 +849,14 @@ static int pci_unregister_device(DeviceState *dev)
{
PCIDevice *pci_dev = PCI_DEVICE(dev);
PCIDeviceClass *pc = PCI_DEVICE_GET_CLASS(pci_dev);
- int ret = 0;
-
- if (pc->exit)
- ret = pc->exit(pci_dev);
- if (ret)
- return ret;
pci_unregister_io_regions(pci_dev);
pci_del_option_rom(pci_dev);
+
+ if (pc->exit) {
+ pc->exit(pci_dev);
+ }
+
do_pci_unregister_device(pci_dev);
return 0;
}
@@ -1076,6 +1078,49 @@ static void pci_set_irq(void *opaque, int irq_num, int level)
pci_change_irq_level(pci_dev, irq_num, change);
}
+/* Special hooks used by device assignment */
+void pci_bus_set_route_irq_fn(PCIBus *bus, pci_route_irq_fn route_intx_to_irq)
+{
+ assert(!bus->parent_dev);
+ bus->route_intx_to_irq = route_intx_to_irq;
+}
+
+PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin)
+{
+ PCIBus *bus;
+
+ do {
+ bus = dev->bus;
+ pin = bus->map_irq(dev, pin);
+ dev = bus->parent_dev;
+ } while (dev);
+ assert(bus->route_intx_to_irq);
+ return bus->route_intx_to_irq(bus->irq_opaque, pin);
+}
+
+void pci_bus_fire_intx_routing_notifier(PCIBus *bus)
+{
+ PCIDevice *dev;
+ PCIBus *sec;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(bus->devices); ++i) {
+ dev = bus->devices[i];
+ if (dev && dev->intx_routing_notifier) {
+ dev->intx_routing_notifier(dev);
+ }
+ }
+ QLIST_FOREACH(sec, &bus->child, sibling) {
+ pci_bus_fire_intx_routing_notifier(sec);
+ }
+}
+
+void pci_device_set_intx_routing_notifier(PCIDevice *dev,
+ PCIINTxRoutingNotifier notifier)
+{
+ dev->intx_routing_notifier = notifier;
+}
+
/***********************************************************/
/* monitor info on PCI */
@@ -1144,7 +1189,9 @@ static const pci_class_desc pci_class_descriptions[] =
};
static void pci_for_each_device_under_bus(PCIBus *bus,
- void (*fn)(PCIBus *b, PCIDevice *d))
+ void (*fn)(PCIBus *b, PCIDevice *d,
+ void *opaque),
+ void *opaque)
{
PCIDevice *d;
int devfn;
@@ -1152,18 +1199,19 @@ static void pci_for_each_device_under_bus(PCIBus *bus,
for(devfn = 0; devfn < ARRAY_SIZE(bus->devices); devfn++) {
d = bus->devices[devfn];
if (d) {
- fn(bus, d);
+ fn(bus, d, opaque);
}
}
}
void pci_for_each_device(PCIBus *bus, int bus_num,
- void (*fn)(PCIBus *b, PCIDevice *d))
+ void (*fn)(PCIBus *b, PCIDevice *d, void *opaque),
+ void *opaque)
{
bus = pci_find_bus_nr(bus, bus_num);
if (bus) {
- pci_for_each_device_under_bus(bus, fn);
+ pci_for_each_device_under_bus(bus, fn, opaque);
}
}
@@ -2021,6 +2069,12 @@ static void pci_device_class_init(ObjectClass *klass, void *data)
k->props = pci_props;
}
+void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque)
+{
+ bus->dma_context_fn = fn;
+ bus->dma_context_opaque = opaque;
+}
+
static TypeInfo pci_device_type_info = {
.name = TYPE_PCI_DEVICE,
.parent = TYPE_DEVICE,
diff --git a/hw/pci.h b/hw/pci.h
index 7f223c01e1..4b6ab3d190 100644
--- a/hw/pci.h
+++ b/hw/pci.h
@@ -85,7 +85,7 @@ typedef uint32_t PCIConfigReadFunc(PCIDevice *pci_dev,
uint32_t address, int len);
typedef void PCIMapIORegionFunc(PCIDevice *pci_dev, int region_num,
pcibus_t addr, pcibus_t size, int type);
-typedef int PCIUnregisterFunc(PCIDevice *pci_dev);
+typedef void PCIUnregisterFunc(PCIDevice *pci_dev);
typedef struct PCIIORegion {
pcibus_t addr; /* current PCI mapping address. -1 means not mapped */
@@ -141,6 +141,15 @@ enum {
#define PCI_DEVICE_GET_CLASS(obj) \
OBJECT_GET_CLASS(PCIDeviceClass, (obj), TYPE_PCI_DEVICE)
+typedef struct PCIINTxRoute {
+ enum {
+ PCI_INTX_ENABLED,
+ PCI_INTX_INVERTED,
+ PCI_INTX_DISABLED,
+ } mode;
+ int irq;
+} PCIINTxRoute;
+
typedef struct PCIDeviceClass {
DeviceClass parent_class;
@@ -173,12 +182,14 @@ typedef struct PCIDeviceClass {
const char *romfile;
} PCIDeviceClass;
+typedef void (*PCIINTxRoutingNotifier)(PCIDevice *dev);
typedef int (*MSIVectorUseNotifier)(PCIDevice *dev, unsigned int vector,
MSIMessage msg);
typedef void (*MSIVectorReleaseNotifier)(PCIDevice *dev, unsigned int vector);
struct PCIDevice {
DeviceState qdev;
+
/* PCI config space */
uint8_t *config;
@@ -200,6 +211,7 @@ struct PCIDevice {
int32_t devfn;
char name[64];
PCIIORegion io_regions[PCI_NUM_REGIONS];
+ DMAContext *dma;
/* do not access the following fields */
PCIConfigReadFunc *config_read;
@@ -220,14 +232,16 @@ struct PCIDevice {
/* MSI-X entries */
int msix_entries_nr;
- /* Space to store MSIX table */
- uint8_t *msix_table_page;
- /* MMIO index used to map MSIX table and pending bit entries. */
- MemoryRegion msix_mmio;
+ /* Space to store MSIX table & pending bit array */
+ uint8_t *msix_table;
+ uint8_t *msix_pba;
+ /* MemoryRegion container for msix exclusive BAR setup */
+ MemoryRegion msix_exclusive_bar;
+ /* Memory Regions for MSIX table and pending bit entries. */
+ MemoryRegion msix_table_mmio;
+ MemoryRegion msix_pba_mmio;
/* Reference-count for entries actually in use by driver. */
unsigned *msix_entry_used;
- /* Region including the MSI-X table */
- uint32_t msix_bar_size;
/* MSIX function mask set or MSIX disabled */
bool msix_function_masked;
/* Version id needed for VMState */
@@ -248,6 +262,9 @@ struct PCIDevice {
MemoryRegion rom;
uint32_t rom_bar;
+ /* INTx routing notifier */
+ PCIINTxRoutingNotifier intx_routing_notifier;
+
/* MSI-X notifiers */
MSIVectorUseNotifier msix_vector_use_notifier;
MSIVectorReleaseNotifier msix_vector_release_notifier;
@@ -276,6 +293,7 @@ MemoryRegion *pci_address_space_io(PCIDevice *dev);
typedef void (*pci_set_irq_fn)(void *opaque, int irq_num, int level);
typedef int (*pci_map_irq_fn)(PCIDevice *pci_dev, int irq_num);
+typedef PCIINTxRoute (*pci_route_irq_fn)(void *opaque, int pin);
typedef enum {
PCI_HOTPLUG_DISABLED,
@@ -304,6 +322,11 @@ PCIBus *pci_register_bus(DeviceState *parent, const char *name,
MemoryRegion *address_space_mem,
MemoryRegion *address_space_io,
uint8_t devfn_min, int nirq);
+void pci_bus_set_route_irq_fn(PCIBus *, pci_route_irq_fn);
+PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin);
+void pci_bus_fire_intx_routing_notifier(PCIBus *bus);
+void pci_device_set_intx_routing_notifier(PCIDevice *dev,
+ PCIINTxRoutingNotifier notifier);
void pci_device_reset(PCIDevice *dev);
void pci_bus_reset(PCIBus *bus);
@@ -312,7 +335,9 @@ PCIDevice *pci_nic_init(NICInfo *nd, const char *default_model,
PCIDevice *pci_nic_init_nofail(NICInfo *nd, const char *default_model,
const char *default_devaddr);
int pci_bus_num(PCIBus *s);
-void pci_for_each_device(PCIBus *bus, int bus_num, void (*fn)(PCIBus *bus, PCIDevice *d));
+void pci_for_each_device(PCIBus *bus, int bus_num,
+ void (*fn)(PCIBus *bus, PCIDevice *d, void *opaque),
+ void *opaque);
PCIBus *pci_find_root_bus(int domain);
int pci_find_domain(const PCIBus *bus);
PCIDevice *pci_find_device(PCIBus *bus, int bus_num, uint8_t devfn);
@@ -324,6 +349,10 @@ int pci_read_devaddr(Monitor *mon, const char *addr, int *domp, int *busp,
void pci_device_deassert_intx(PCIDevice *dev);
+typedef DMAContext *(*PCIDMAContextFunc)(PCIBus *, void *, int);
+
+void pci_setup_iommu(PCIBus *bus, PCIDMAContextFunc fn, void *opaque);
+
static inline void
pci_set_byte(uint8_t *config, uint8_t val)
{
@@ -558,10 +587,15 @@ static inline uint32_t pci_config_size(const PCIDevice *d)
}
/* DMA access functions */
+static inline DMAContext *pci_dma_context(PCIDevice *dev)
+{
+ return dev->dma;
+}
+
static inline int pci_dma_rw(PCIDevice *dev, dma_addr_t addr,
void *buf, dma_addr_t len, DMADirection dir)
{
- cpu_physical_memory_rw(addr, buf, len, dir == DMA_DIRECTION_FROM_DEVICE);
+ dma_memory_rw(pci_dma_context(dev), addr, buf, len, dir);
return 0;
}
@@ -581,12 +615,12 @@ static inline int pci_dma_write(PCIDevice *dev, dma_addr_t addr,
static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \
dma_addr_t addr) \
{ \
- return ld##_l##_phys(addr); \
+ return ld##_l##_dma(pci_dma_context(dev), addr); \
} \
static inline void st##_s##_pci_dma(PCIDevice *dev, \
- dma_addr_t addr, uint##_bits##_t val) \
+ dma_addr_t addr, uint##_bits##_t val) \
{ \
- st##_s##_phys(addr, val); \
+ st##_s##_dma(pci_dma_context(dev), addr, val); \
}
PCI_DMA_DEFINE_LDST(ub, b, 8);
@@ -602,25 +636,22 @@ PCI_DMA_DEFINE_LDST(q_be, q_be, 64);
static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr,
dma_addr_t *plen, DMADirection dir)
{
- target_phys_addr_t len = *plen;
void *buf;
- buf = cpu_physical_memory_map(addr, &len, dir == DMA_DIRECTION_FROM_DEVICE);
- *plen = len;
+ buf = dma_memory_map(pci_dma_context(dev), addr, plen, dir);
return buf;
}
static inline void pci_dma_unmap(PCIDevice *dev, void *buffer, dma_addr_t len,
DMADirection dir, dma_addr_t access_len)
{
- cpu_physical_memory_unmap(buffer, len, dir == DMA_DIRECTION_FROM_DEVICE,
- access_len);
+ dma_memory_unmap(pci_dma_context(dev), buffer, len, dir, access_len);
}
static inline void pci_dma_sglist_init(QEMUSGList *qsg, PCIDevice *dev,
int alloc_hint)
{
- qemu_sglist_init(qsg, alloc_hint);
+ qemu_sglist_init(qsg, alloc_hint, pci_dma_context(dev));
}
extern const VMStateDescription vmstate_pci_device;
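For reference, roughly what one instantiation of the PCI_DMA_DEFINE_LDST() macro above now expands to, assuming the ld*_dma()/st*_dma() helpers declared in dma.h (hand expansion, not generated output):

/* PCI_DMA_DEFINE_LDST(ub, b, 8) */
static inline uint8_t ldub_pci_dma(PCIDevice *dev, dma_addr_t addr)
{
    return ldub_dma(pci_dma_context(dev), addr);
}

static inline void stb_pci_dma(PCIDevice *dev, dma_addr_t addr, uint8_t val)
{
    stb_dma(pci_dma_context(dev), addr, val);
}

Every per-device access now goes through the device's DMAContext instead of straight to physical memory.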
diff --git a/hw/pci_bridge.c b/hw/pci_bridge.c
index 0916276c4d..5c6455f6fa 100644
--- a/hw/pci_bridge.c
+++ b/hw/pci_bridge.c
@@ -333,7 +333,7 @@ int pci_bridge_initfn(PCIDevice *dev)
}
/* default qdev clean up function for PCI-to-PCI bridge */
-int pci_bridge_exitfn(PCIDevice *pci_dev)
+void pci_bridge_exitfn(PCIDevice *pci_dev)
{
PCIBridge *s = DO_UPCAST(PCIBridge, dev, pci_dev);
assert(QLIST_EMPTY(&s->sec_bus.child));
@@ -342,7 +342,6 @@ int pci_bridge_exitfn(PCIDevice *pci_dev)
memory_region_destroy(&s->address_space_mem);
memory_region_destroy(&s->address_space_io);
/* qbus_free() is called automatically by qdev_free() */
- return 0;
}
/*
diff --git a/hw/pci_bridge.h b/hw/pci_bridge.h
index 84411a69dc..a00accc172 100644
--- a/hw/pci_bridge.h
+++ b/hw/pci_bridge.h
@@ -44,7 +44,7 @@ void pci_bridge_reset_reg(PCIDevice *dev);
void pci_bridge_reset(DeviceState *qdev);
int pci_bridge_initfn(PCIDevice *pci_dev);
-int pci_bridge_exitfn(PCIDevice *pci_dev);
+void pci_bridge_exitfn(PCIDevice *pci_dev);
/*
diff --git a/hw/pci_bridge_dev.c b/hw/pci_bridge_dev.c
index 1cc1d2049c..f7063961a0 100644
--- a/hw/pci_bridge_dev.c
+++ b/hw/pci_bridge_dev.c
@@ -52,7 +52,8 @@ static int pci_bridge_dev_initfn(PCIDevice *dev)
{
PCIBridge *br = DO_UPCAST(PCIBridge, dev, dev);
PCIBridgeDev *bridge_dev = DO_UPCAST(PCIBridgeDev, bridge, br);
- int err, ret;
+ int err;
+
pci_bridge_map_irq(br, NULL, pci_bridge_dev_map_irq_fn);
err = pci_bridge_initfn(dev);
if (err) {
@@ -86,26 +87,22 @@ slotid_error:
shpc_cleanup(dev, &bridge_dev->bar);
shpc_error:
memory_region_destroy(&bridge_dev->bar);
- ret = pci_bridge_exitfn(dev);
- assert(!ret);
+ pci_bridge_exitfn(dev);
bridge_error:
return err;
}
-static int pci_bridge_dev_exitfn(PCIDevice *dev)
+static void pci_bridge_dev_exitfn(PCIDevice *dev)
{
PCIBridge *br = DO_UPCAST(PCIBridge, dev, dev);
PCIBridgeDev *bridge_dev = DO_UPCAST(PCIBridgeDev, bridge, br);
- int ret;
if (msi_present(dev)) {
msi_uninit(dev);
}
slotid_cap_cleanup(dev);
shpc_cleanup(dev, &bridge_dev->bar);
memory_region_destroy(&bridge_dev->bar);
- ret = pci_bridge_exitfn(dev);
- assert(!ret);
- return 0;
+ pci_bridge_exitfn(dev);
}
static void pci_bridge_dev_write_config(PCIDevice *d,
diff --git a/hw/pci_ids.h b/hw/pci_ids.h
index e8235a7d05..301bf1cd86 100644
--- a/hw/pci_ids.h
+++ b/hw/pci_ids.h
@@ -15,6 +15,7 @@
#define PCI_CLASS_STORAGE_SCSI 0x0100
#define PCI_CLASS_STORAGE_IDE 0x0101
+#define PCI_CLASS_STORAGE_RAID 0x0104
#define PCI_CLASS_STORAGE_SATA 0x0106
#define PCI_CLASS_STORAGE_OTHER 0x0180
@@ -47,6 +48,7 @@
#define PCI_VENDOR_ID_LSI_LOGIC 0x1000
#define PCI_DEVICE_ID_LSI_53C895A 0x0012
+#define PCI_DEVICE_ID_LSI_SAS1078 0x0060
#define PCI_VENDOR_ID_DEC 0x1011
#define PCI_DEVICE_ID_DEC_21154 0x0026
@@ -57,6 +59,7 @@
#define PCI_VENDOR_ID_AMD 0x1022
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
+#define PCI_DEVICE_ID_AMD_SCSI 0x2020
#define PCI_VENDOR_ID_TI 0x104c
@@ -118,6 +121,7 @@
#define PCI_DEVICE_ID_INTEL_82801I_UHCI6 0x2939
#define PCI_DEVICE_ID_INTEL_82801I_EHCI1 0x293a
#define PCI_DEVICE_ID_INTEL_82801I_EHCI2 0x293c
+#define PCI_DEVICE_ID_INTEL_82599_SFP_VF 0x10ed
#define PCI_VENDOR_ID_XEN 0x5853
#define PCI_DEVICE_ID_XEN_PLATFORM 0x0001
diff --git a/hw/pci_internals.h b/hw/pci_internals.h
index 399c6d475c..c931b64b46 100644
--- a/hw/pci_internals.h
+++ b/hw/pci_internals.h
@@ -17,9 +17,12 @@
struct PCIBus {
BusState qbus;
+ PCIDMAContextFunc dma_context_fn;
+ void *dma_context_opaque;
uint8_t devfn_min;
pci_set_irq_fn set_irq;
pci_map_irq_fn map_irq;
+ pci_route_irq_fn route_intx_to_irq;
pci_hotplug_fn hotplug;
DeviceState *hotplug_qdev;
void *irq_opaque;
diff --git a/hw/pcnet-pci.c b/hw/pcnet-pci.c
index 34d73aaea1..48fd447996 100644
--- a/hw/pcnet-pci.c
+++ b/hw/pcnet-pci.c
@@ -264,14 +264,14 @@ static void pci_physical_memory_read(void *dma_opaque, target_phys_addr_t addr,
pci_dma_read(dma_opaque, addr, buf, len);
}
-static void pci_pcnet_cleanup(VLANClientState *nc)
+static void pci_pcnet_cleanup(NetClientState *nc)
{
PCNetState *d = DO_UPCAST(NICState, nc, nc)->opaque;
pcnet_common_cleanup(d);
}
-static int pci_pcnet_uninit(PCIDevice *dev)
+static void pci_pcnet_uninit(PCIDevice *dev)
{
PCIPCNetState *d = DO_UPCAST(PCIPCNetState, pci_dev, dev);
@@ -279,12 +279,11 @@ static int pci_pcnet_uninit(PCIDevice *dev)
memory_region_destroy(&d->io_bar);
qemu_del_timer(d->state.poll_timer);
qemu_free_timer(d->state.poll_timer);
- qemu_del_vlan_client(&d->state.nic->nc);
- return 0;
+ qemu_del_net_client(&d->state.nic->nc);
}
static NetClientInfo net_pci_pcnet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = pcnet_can_receive,
.receive = pcnet_receive,
diff --git a/hw/pcnet.c b/hw/pcnet.c
index d769b08b78..40820b3632 100644
--- a/hw/pcnet.c
+++ b/hw/pcnet.c
@@ -1004,7 +1004,7 @@ static int pcnet_tdte_poll(PCNetState *s)
return !!(CSR_CXST(s) & 0x8000);
}
-int pcnet_can_receive(VLANClientState *nc)
+int pcnet_can_receive(NetClientState *nc)
{
PCNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
if (CSR_STOP(s) || CSR_SPND(s))
@@ -1015,7 +1015,7 @@ int pcnet_can_receive(VLANClientState *nc)
#define MIN_BUF_SIZE 60
-ssize_t pcnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size_)
+ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_)
{
PCNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
int is_padr = 0, is_bcast = 0, is_ladr = 0;
@@ -1197,7 +1197,7 @@ ssize_t pcnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size_)
return size_;
}
-void pcnet_set_link_status(VLANClientState *nc)
+void pcnet_set_link_status(NetClientState *nc)
{
PCNetState *d = DO_UPCAST(NICState, nc, nc)->opaque;
diff --git a/hw/pcnet.h b/hw/pcnet.h
index 803a2cc1ec..d0af54a46a 100644
--- a/hw/pcnet.h
+++ b/hw/pcnet.h
@@ -57,9 +57,9 @@ uint32_t pcnet_ioport_readw(void *opaque, uint32_t addr);
void pcnet_ioport_writel(void *opaque, uint32_t addr, uint32_t val);
uint32_t pcnet_ioport_readl(void *opaque, uint32_t addr);
uint32_t pcnet_bcr_readw(PCNetState *s, uint32_t rap);
-int pcnet_can_receive(VLANClientState *nc);
-ssize_t pcnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size_);
-void pcnet_set_link_status(VLANClientState *nc);
+int pcnet_can_receive(NetClientState *nc);
+ssize_t pcnet_receive(NetClientState *nc, const uint8_t *buf, size_t size_);
+void pcnet_set_link_status(NetClientState *nc);
void pcnet_common_cleanup(PCNetState *d);
int pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info);
extern const VMStateDescription vmstate_pcnet;
diff --git a/hw/piix_pci.c b/hw/piix_pci.c
index 09e84f59b6..c497a014af 100644
--- a/hw/piix_pci.c
+++ b/hw/piix_pci.c
@@ -89,6 +89,7 @@ struct PCII440FXState {
#define I440FX_SMRAM 0x72
static void piix3_set_irq(void *opaque, int pirq, int level);
+static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pci_intx);
static void piix3_write_config_xen(PCIDevice *dev,
uint32_t address, uint32_t val, int len);
@@ -315,6 +316,7 @@ static PCIBus *i440fx_common_init(const char *device_name,
pci_create_simple_multifunction(b, -1, true, "PIIX3"));
pci_bus_irqs(b, piix3_set_irq, pci_slot_get_pirq, piix3,
PIIX_NUM_PIRQS);
+ pci_bus_set_route_irq_fn(b, piix3_route_intx_pin_to_irq);
}
piix3->pic = pic;
*isa_bus = DO_UPCAST(ISABus, qbus,
@@ -386,6 +388,22 @@ static void piix3_set_irq(void *opaque, int pirq, int level)
piix3_set_irq_level(piix3, pirq, level);
}
+static PCIINTxRoute piix3_route_intx_pin_to_irq(void *opaque, int pin)
+{
+ PIIX3State *piix3 = opaque;
+ int irq = piix3->dev.config[PIIX_PIRQC + pin];
+ PCIINTxRoute route;
+
+ if (irq < PIIX_NUM_PIC_IRQS) {
+ route.mode = PCI_INTX_ENABLED;
+ route.irq = irq;
+ } else {
+ route.mode = PCI_INTX_DISABLED;
+ route.irq = -1;
+ }
+ return route;
+}
+
/* irq routing is changed. so rebuild bitmap */
static void piix3_update_irq_levels(PIIX3State *piix3)
{
@@ -405,6 +423,8 @@ static void piix3_write_config(PCIDevice *dev,
if (ranges_overlap(address, len, PIIX_PIRQC, 4)) {
PIIX3State *piix3 = DO_UPCAST(PIIX3State, dev, dev);
int pic_irq;
+
+ pci_bus_fire_intx_routing_notifier(piix3->dev.bus);
piix3_update_irq_levels(piix3);
for (pic_irq = 0; pic_irq < PIIX_NUM_PIC_IRQS; pic_irq++) {
piix3_set_irq_pic(piix3, pic_irq);
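piix3_route_intx_pin_to_irq() above is a table lookup in config space: bytes 0x60-0x63 (PIIX_PIRQC) hold the ISA IRQ each PIRQx line is steered to. A standalone sketch of that lookup (not QEMU code; anything >= 16, e.g. the 0x80 "routing disabled" value, means the pin is not routed):

#include <stdint.h>

#define PIIX_NUM_PIC_IRQS 16   /* ISA IRQs reachable from a PIRQx line */

static int piix3_pirq_to_isa_irq(const uint8_t pirqc[4], int pin)
{
    uint8_t irq = pirqc[pin];

    return irq < PIIX_NUM_PIC_IRQS ? irq : -1;
}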
diff --git a/hw/pl011.c b/hw/pl011.c
index 8a5a8f554a..3245702df0 100644
--- a/hw/pl011.c
+++ b/hw/pl011.c
@@ -78,7 +78,9 @@ static uint64_t pl011_read(void *opaque, target_phys_addr_t offset,
if (s->read_count == s->read_trigger - 1)
s->int_level &= ~ PL011_INT_RX;
pl011_update(s);
- qemu_chr_accept_input(s->chr);
+ if (s->chr) {
+ qemu_chr_accept_input(s->chr);
+ }
return c;
case 1: /* UARTCR */
return 0;
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index d18dbaf6cc..aa4bbeb664 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -10,7 +10,7 @@ obj-y += ppc_newworld.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_hcall.o spapr_rtas.o spapr_vio.o
obj-$(CONFIG_PSERIES) += xics.o spapr_vty.o spapr_llan.o spapr_vscsi.o
-obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o
+obj-$(CONFIG_PSERIES) += spapr_pci.o pci-hotplug.o spapr_iommu.o
# PowerPC 4xx boards
obj-y += ppc4xx_devs.o ppc4xx_pci.o ppc405_uc.o ppc405_boards.o
obj-y += ppc440_bamboo.o
diff --git a/hw/ppce500_spin.c b/hw/ppce500_spin.c
index fddf2197a9..c5b8e051ec 100644
--- a/hw/ppce500_spin.c
+++ b/hw/ppce500_spin.c
@@ -40,7 +40,7 @@ typedef struct spin_info {
uint32_t resv;
uint32_t pir;
uint64_t reserved;
-} __attribute__ ((packed)) SpinInfo;
+} QEMU_PACKED SpinInfo;
typedef struct spin_state {
SysBusDevice busdev;
diff --git a/hw/qdev-dma.h b/hw/qdev-dma.h
new file mode 100644
index 0000000000..6812735e3d
--- /dev/null
+++ b/hw/qdev-dma.h
@@ -0,0 +1,10 @@
+/*
+ * Support for dma_addr_t typed properties
+ *
+ * Copyright (C) 2012 David Gibson, IBM Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#define DEFINE_PROP_DMAADDR(_n, _s, _f, _d) \
+ DEFINE_PROP_HEX64(_n, _s, _f, _d)
diff --git a/hw/qdev-monitor.c b/hw/qdev-monitor.c
index 7915b4500d..b22a37a00c 100644
--- a/hw/qdev-monitor.c
+++ b/hw/qdev-monitor.c
@@ -138,13 +138,13 @@ int qdev_device_help(QemuOpts *opts)
ObjectClass *klass;
driver = qemu_opt_get(opts, "driver");
- if (driver && !strcmp(driver, "?")) {
+ if (driver && is_help_option(driver)) {
bool show_no_user = false;
object_class_foreach(qdev_print_devinfo, TYPE_DEVICE, false, &show_no_user);
return 1;
}
- if (!driver || !qemu_opt_get(opts, "?")) {
+ if (!driver || !qemu_opt_has_help_opt(opts)) {
return 0;
}
diff --git a/hw/qdev-properties.c b/hw/qdev-properties.c
index 099a7aa96f..8aca0d43fe 100644
--- a/hw/qdev-properties.c
+++ b/hw/qdev-properties.c
@@ -2,6 +2,8 @@
#include "qdev.h"
#include "qerror.h"
#include "blockdev.h"
+#include "hw/block-common.h"
+#include "net/hub.h"
void *qdev_get_prop_ptr(DeviceState *dev, Property *prop)
{
@@ -10,6 +12,78 @@ void *qdev_get_prop_ptr(DeviceState *dev, Property *prop)
return ptr;
}
+static void get_pointer(Object *obj, Visitor *v, Property *prop,
+ const char *(*print)(void *ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *p;
+
+ p = (char *) (*ptr ? print(*ptr) : "");
+ visit_type_str(v, &p, name, errp);
+}
+
+static void set_pointer(Object *obj, Visitor *v, Property *prop,
+ int (*parse)(DeviceState *dev, const char *str,
+ void **ptr),
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Error *local_err = NULL;
+ void **ptr = qdev_get_prop_ptr(dev, prop);
+ char *str;
+ int ret;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_str(v, &str, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (!*str) {
+ g_free(str);
+ *ptr = NULL;
+ return;
+ }
+ ret = parse(dev, str, ptr);
+ error_set_from_qdev_prop_error(errp, ret, dev, prop, str);
+ g_free(str);
+}
+
+static void get_enum(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ int *ptr = qdev_get_prop_ptr(dev, prop);
+
+ visit_type_enum(v, ptr, prop->info->enum_table,
+ prop->info->name, prop->name, errp);
+}
+
+static void set_enum(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ int *ptr = qdev_get_prop_ptr(dev, prop);
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_enum(v, ptr, prop->info->enum_table,
+ prop->info->name, prop->name, errp);
+}
+
+/* Bit */
+
static uint32_t qdev_get_prop_mask(Property *prop)
{
assert(prop->info == &qdev_prop_bit);
@@ -26,8 +100,6 @@ static void bit_prop_set(DeviceState *dev, Property *props, bool val)
*p &= ~mask;
}
-/* Bit */
-
static int print_bit(DeviceState *dev, Property *prop, char *dest, size_t len)
{
uint32_t *p = qdev_get_prop_ptr(dev, prop);
@@ -435,48 +507,6 @@ static const char *print_drive(void *ptr)
return bdrv_get_device_name(ptr);
}
-static void get_pointer(Object *obj, Visitor *v, Property *prop,
- const char *(*print)(void *ptr),
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- void **ptr = qdev_get_prop_ptr(dev, prop);
- char *p;
-
- p = (char *) (*ptr ? print(*ptr) : "");
- visit_type_str(v, &p, name, errp);
-}
-
-static void set_pointer(Object *obj, Visitor *v, Property *prop,
- int (*parse)(DeviceState *dev, const char *str, void **ptr),
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Error *local_err = NULL;
- void **ptr = qdev_get_prop_ptr(dev, prop);
- char *str;
- int ret;
-
- if (dev->state != DEV_STATE_CREATED) {
- error_set(errp, QERR_PERMISSION_DENIED);
- return;
- }
-
- visit_type_str(v, &str, name, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- return;
- }
- if (!*str) {
- g_free(str);
- *ptr = NULL;
- return;
- }
- ret = parse(dev, str, ptr);
- error_set_from_qdev_prop_error(errp, ret, dev, prop, str);
- g_free(str);
-}
-
static void get_drive(Object *obj, Visitor *v, void *opaque,
const char *name, Error **errp)
{
@@ -554,7 +584,7 @@ PropertyInfo qdev_prop_chr = {
static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
{
- VLANClientState *netdev = qemu_find_netdev(str);
+ NetClientState *netdev = qemu_find_netdev(str);
if (netdev == NULL) {
return -ENOENT;
@@ -568,7 +598,7 @@ static int parse_netdev(DeviceState *dev, const char *str, void **ptr)
static const char *print_netdev(void *ptr)
{
- VLANClientState *netdev = ptr;
+ NetClientState *netdev = ptr;
return netdev->name ? netdev->name : "";
}
@@ -595,13 +625,16 @@ PropertyInfo qdev_prop_netdev = {
static int print_vlan(DeviceState *dev, Property *prop, char *dest, size_t len)
{
- VLANState **ptr = qdev_get_prop_ptr(dev, prop);
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
if (*ptr) {
- return snprintf(dest, len, "%d", (*ptr)->id);
- } else {
- return snprintf(dest, len, "<null>");
+ int id;
+ if (!net_hub_id_for_client(*ptr, &id)) {
+ return snprintf(dest, len, "%d", id);
+ }
}
+
+ return snprintf(dest, len, "<null>");
}
static void get_vlan(Object *obj, Visitor *v, void *opaque,
@@ -609,11 +642,17 @@ static void get_vlan(Object *obj, Visitor *v, void *opaque,
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
- VLANState **ptr = qdev_get_prop_ptr(dev, prop);
- int64_t id;
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
+ int32_t id = -1;
+
+ if (*ptr) {
+ int hub_id;
+ if (!net_hub_id_for_client(*ptr, &hub_id)) {
+ id = hub_id;
+ }
+ }
- id = *ptr ? (*ptr)->id : -1;
- visit_type_int64(v, &id, name, errp);
+ visit_type_int32(v, &id, name, errp);
}
static void set_vlan(Object *obj, Visitor *v, void *opaque,
@@ -621,17 +660,17 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
{
DeviceState *dev = DEVICE(obj);
Property *prop = opaque;
- VLANState **ptr = qdev_get_prop_ptr(dev, prop);
+ NetClientState **ptr = qdev_get_prop_ptr(dev, prop);
Error *local_err = NULL;
- int64_t id;
- VLANState *vlan;
+ int32_t id;
+ NetClientState *hubport;
if (dev->state != DEV_STATE_CREATED) {
error_set(errp, QERR_PERMISSION_DENIED);
return;
}
- visit_type_int64(v, &id, name, &local_err);
+ visit_type_int32(v, &id, name, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
@@ -640,13 +679,14 @@ static void set_vlan(Object *obj, Visitor *v, void *opaque,
*ptr = NULL;
return;
}
- vlan = qemu_find_vlan(id, 1);
- if (!vlan) {
+
+ hubport = net_hub_port_find(id);
+ if (!hubport) {
error_set(errp, QERR_INVALID_PARAMETER_VALUE,
name, prop->info->name);
return;
}
- *ptr = vlan;
+ *ptr = hubport;
}
PropertyInfo qdev_prop_vlan = {
@@ -735,7 +775,6 @@ PropertyInfo qdev_prop_macaddr = {
.set = set_mac,
};
-
/* --- lost tick policy --- */
static const char *lost_tick_policy_table[LOST_TICK_MAX+1] = {
@@ -748,33 +787,6 @@ static const char *lost_tick_policy_table[LOST_TICK_MAX+1] = {
QEMU_BUILD_BUG_ON(sizeof(LostTickPolicy) != sizeof(int));
-static void get_enum(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- int *ptr = qdev_get_prop_ptr(dev, prop);
-
- visit_type_enum(v, ptr, prop->info->enum_table,
- prop->info->name, prop->name, errp);
-}
-
-static void set_enum(Object *obj, Visitor *v, void *opaque,
- const char *name, Error **errp)
-{
- DeviceState *dev = DEVICE(obj);
- Property *prop = opaque;
- int *ptr = qdev_get_prop_ptr(dev, prop);
-
- if (dev->state != DEV_STATE_CREATED) {
- error_set(errp, QERR_PERMISSION_DENIED);
- return;
- }
-
- visit_type_enum(v, ptr, prop->info->enum_table,
- prop->info->name, prop->name, errp);
-}
-
PropertyInfo qdev_prop_losttickpolicy = {
.name = "LostTickPolicy",
.enum_table = lost_tick_policy_table,
@@ -782,6 +794,21 @@ PropertyInfo qdev_prop_losttickpolicy = {
.set = set_enum,
};
+/* --- BIOS CHS translation */
+
+static const char *bios_chs_trans_table[] = {
+ [BIOS_ATA_TRANSLATION_AUTO] = "auto",
+ [BIOS_ATA_TRANSLATION_NONE] = "none",
+ [BIOS_ATA_TRANSLATION_LBA] = "lba",
+};
+
+PropertyInfo qdev_prop_bios_chs_trans = {
+ .name = "bios-chs-trans",
+ .enum_table = bios_chs_trans_table,
+ .get = get_enum,
+ .set = set_enum,
+};
+
/* --- pci address --- */
/*
@@ -899,6 +926,113 @@ PropertyInfo qdev_prop_blocksize = {
.set = set_blocksize,
};
+/* --- pci host address --- */
+
+static void get_pci_host_devaddr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ PCIHostDeviceAddress *addr = qdev_get_prop_ptr(dev, prop);
+ char buffer[] = "xxxx:xx:xx.x";
+ char *p = buffer;
+ int rc = 0;
+
+ rc = snprintf(buffer, sizeof(buffer), "%04x:%02x:%02x.%d",
+ addr->domain, addr->bus, addr->slot, addr->function);
+ assert(rc == sizeof(buffer) - 1);
+
+ visit_type_str(v, &p, name, errp);
+}
+
+/*
+ * Parse [<domain>:]<bus>:<slot>.<func>
+ * if <domain> is not supplied, it's assumed to be 0.
+ */
+static void set_pci_host_devaddr(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ DeviceState *dev = DEVICE(obj);
+ Property *prop = opaque;
+ PCIHostDeviceAddress *addr = qdev_get_prop_ptr(dev, prop);
+ Error *local_err = NULL;
+ char *str, *p;
+ char *e;
+ unsigned long val;
+ unsigned long dom = 0, bus = 0;
+ unsigned int slot = 0, func = 0;
+
+ if (dev->state != DEV_STATE_CREATED) {
+ error_set(errp, QERR_PERMISSION_DENIED);
+ return;
+ }
+
+ visit_type_str(v, &str, name, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ p = str;
+ val = strtoul(p, &e, 16);
+ if (e == p || *e != ':') {
+ goto inval;
+ }
+ bus = val;
+
+ p = e + 1;
+ val = strtoul(p, &e, 16);
+ if (e == p) {
+ goto inval;
+ }
+ if (*e == ':') {
+ dom = bus;
+ bus = val;
+ p = e + 1;
+ val = strtoul(p, &e, 16);
+ if (e == p) {
+ goto inval;
+ }
+ }
+ slot = val;
+
+ if (*e != '.') {
+ goto inval;
+ }
+ p = e + 1;
+ val = strtoul(p, &e, 10);
+ if (e == p) {
+ goto inval;
+ }
+ func = val;
+
+ if (dom > 0xffff || bus > 0xff || slot > 0x1f || func > 7) {
+ goto inval;
+ }
+
+ if (*e) {
+ goto inval;
+ }
+
+ addr->domain = dom;
+ addr->bus = bus;
+ addr->slot = slot;
+ addr->function = func;
+
+ g_free(str);
+ return;
+
+inval:
+ error_set_from_qdev_prop_error(errp, EINVAL, dev, prop, str);
+ g_free(str);
+}
+
+PropertyInfo qdev_prop_pci_host_devaddr = {
+ .name = "pci-host-devaddr",
+ .get = get_pci_host_devaddr,
+ .set = set_pci_host_devaddr,
+};
+
/* --- public helpers --- */
static Property *qdev_prop_walk(Property *props, const char *name)
@@ -1016,7 +1150,7 @@ void qdev_prop_set_uint64(DeviceState *dev, const char *name, uint64_t value)
assert_no_error(errp);
}
-void qdev_prop_set_string(DeviceState *dev, const char *name, char *value)
+void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value)
{
Error *errp = NULL;
object_property_set_str(OBJECT(dev), value, name, &errp);
@@ -1052,7 +1186,7 @@ void qdev_prop_set_chr(DeviceState *dev, const char *name, CharDriverState *valu
assert_no_error(errp);
}
-void qdev_prop_set_netdev(DeviceState *dev, const char *name, VLANClientState *value)
+void qdev_prop_set_netdev(DeviceState *dev, const char *name, NetClientState *value)
{
Error *errp = NULL;
assert(!value || value->name);
@@ -1061,13 +1195,6 @@ void qdev_prop_set_netdev(DeviceState *dev, const char *name, VLANClientState *v
assert_no_error(errp);
}
-void qdev_prop_set_vlan(DeviceState *dev, const char *name, VLANState *value)
-{
- Error *errp = NULL;
- object_property_set_int(OBJECT(dev), value ? value->id : -1, name, &errp);
- assert_no_error(errp);
-}
-
void qdev_prop_set_macaddr(DeviceState *dev, const char *name, uint8_t *value)
{
Error *errp = NULL;
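set_pci_host_devaddr() above accepts [<domain>:]<bus>:<slot>.<func>, with the domain optional (defaulting to 0), hex fields except the decimal function number, and strict range checks. A standalone sketch of that grammar (hypothetical helper, not QEMU code; whitespace handling differs slightly from the strtoul()-based original):

#include <stdbool.h>
#include <stdio.h>

static bool parse_host_devaddr(const char *str, unsigned *dom,
                               unsigned *bus, unsigned *slot,
                               unsigned *func)
{
    char dummy;

    if (sscanf(str, "%x:%x:%x.%u%c", dom, bus, slot, func, &dummy) == 4) {
        /* full form: <domain>:<bus>:<slot>.<func> */
    } else if (sscanf(str, "%x:%x.%u%c", bus, slot, func, &dummy) == 3) {
        *dom = 0;                       /* domain omitted, defaults to 0 */
    } else {
        return false;
    }
    return *dom <= 0xffff && *bus <= 0xff && *slot <= 0x1f && *func <= 7;
}

For example, "01:02.3" parses as domain 0, bus 1, slot 2, function 3.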
diff --git a/hw/qdev.c b/hw/qdev.c
index a6c4c02947..b5b74b9135 100644
--- a/hw/qdev.c
+++ b/hw/qdev.c
@@ -258,9 +258,10 @@ int qdev_simple_unplug_cb(DeviceState *dev)
way is somewhat unclean, and best avoided. */
void qdev_init_nofail(DeviceState *dev)
{
+ const char *typename = object_get_typename(OBJECT(dev));
+
if (qdev_init(dev) < 0) {
- error_report("Initialization of device %s failed",
- object_get_typename(OBJECT(dev)));
+ error_report("Initialization of device %s failed", typename);
exit(1);
}
}
@@ -319,8 +320,6 @@ void qdev_connect_gpio_out(DeviceState * dev, int n, qemu_irq pin)
void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd)
{
qdev_prop_set_macaddr(dev, "mac", nd->macaddr.a);
- if (nd->vlan)
- qdev_prop_set_vlan(dev, "vlan", nd->vlan);
if (nd->netdev)
qdev_prop_set_netdev(dev, "netdev", nd->netdev);
if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED &&
diff --git a/hw/qdev.h b/hw/qdev.h
index ae1d2812bf..d699194418 100644
--- a/hw/qdev.h
+++ b/hw/qdev.h
@@ -78,12 +78,6 @@ struct DeviceState {
int alias_required_for_version;
};
-/*
- * This callback is used to create Open Firmware device path in accordance with
- * OF spec http://forthworks.com/standards/of1275.pdf. Indicidual bus bindings
- * can be found here http://playground.sun.com/1275/bindings/.
- */
-
#define TYPE_BUS "bus"
#define BUS(obj) OBJECT_CHECK(BusState, (obj), TYPE_BUS)
#define BUS_CLASS(klass) OBJECT_CLASS_CHECK(BusClass, (klass), TYPE_BUS)
@@ -95,6 +89,11 @@ struct BusClass {
/* FIXME first arg should be BusState */
void (*print_dev)(Monitor *mon, DeviceState *dev, int indent);
char *(*get_dev_path)(DeviceState *dev);
+ /*
+ * This callback is used to create Open Firmware device path in accordance
+ * with OF spec http://forthworks.com/standards/of1275.pdf. Individual bus
+ * bindings can be found at http://playground.sun.com/1275/bindings/.
+ */
char *(*get_fw_dev_path)(DeviceState *dev);
int (*reset)(BusState *bus);
};
@@ -232,11 +231,13 @@ extern PropertyInfo qdev_prop_chr;
extern PropertyInfo qdev_prop_ptr;
extern PropertyInfo qdev_prop_macaddr;
extern PropertyInfo qdev_prop_losttickpolicy;
+extern PropertyInfo qdev_prop_bios_chs_trans;
extern PropertyInfo qdev_prop_drive;
extern PropertyInfo qdev_prop_netdev;
extern PropertyInfo qdev_prop_vlan;
extern PropertyInfo qdev_prop_pci_devfn;
extern PropertyInfo qdev_prop_blocksize;
+extern PropertyInfo qdev_prop_pci_host_devaddr;
#define DEFINE_PROP(_name, _state, _field, _prop, _type) { \
.name = (_name), \
@@ -288,9 +289,9 @@ extern PropertyInfo qdev_prop_blocksize;
#define DEFINE_PROP_STRING(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_string, char*)
#define DEFINE_PROP_NETDEV(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, VLANClientState*)
+ DEFINE_PROP(_n, _s, _f, qdev_prop_netdev, NetClientState*)
#define DEFINE_PROP_VLAN(_n, _s, _f) \
- DEFINE_PROP(_n, _s, _f, qdev_prop_vlan, VLANState*)
+ DEFINE_PROP(_n, _s, _f, qdev_prop_vlan, NetClientState*)
#define DEFINE_PROP_DRIVE(_n, _s, _f) \
DEFINE_PROP(_n, _s, _f, qdev_prop_drive, BlockDriverState *)
#define DEFINE_PROP_MACADDR(_n, _s, _f) \
@@ -298,8 +299,12 @@ extern PropertyInfo qdev_prop_blocksize;
#define DEFINE_PROP_LOSTTICKPOLICY(_n, _s, _f, _d) \
DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_losttickpolicy, \
LostTickPolicy)
+#define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \
+ DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int)
#define DEFINE_PROP_BLOCKSIZE(_n, _s, _f, _d) \
DEFINE_PROP_DEFAULT(_n, _s, _f, _d, qdev_prop_blocksize, uint16_t)
+#define DEFINE_PROP_PCI_HOST_DEVADDR(_n, _s, _f) \
+ DEFINE_PROP(_n, _s, _f, qdev_prop_pci_host_devaddr, PCIHostDeviceAddress)
#define DEFINE_PROP_END_OF_LIST() \
{}
@@ -313,10 +318,9 @@ void qdev_prop_set_uint16(DeviceState *dev, const char *name, uint16_t value);
void qdev_prop_set_uint32(DeviceState *dev, const char *name, uint32_t value);
void qdev_prop_set_int32(DeviceState *dev, const char *name, int32_t value);
void qdev_prop_set_uint64(DeviceState *dev, const char *name, uint64_t value);
-void qdev_prop_set_string(DeviceState *dev, const char *name, char *value);
+void qdev_prop_set_string(DeviceState *dev, const char *name, const char *value);
void qdev_prop_set_chr(DeviceState *dev, const char *name, CharDriverState *value);
-void qdev_prop_set_netdev(DeviceState *dev, const char *name, VLANClientState *value);
-void qdev_prop_set_vlan(DeviceState *dev, const char *name, VLANState *value);
+void qdev_prop_set_netdev(DeviceState *dev, const char *name, NetClientState *value);
int qdev_prop_set_drive(DeviceState *dev, const char *name, BlockDriverState *value) QEMU_WARN_UNUSED_RESULT;
void qdev_prop_set_drive_nofail(DeviceState *dev, const char *name, BlockDriverState *value);
void qdev_prop_set_macaddr(DeviceState *dev, const char *name, uint8_t *value);
diff --git a/hw/qxl.c b/hw/qxl.c
index 3da3399934..c2dd3b471b 100644
--- a/hw/qxl.c
+++ b/hw/qxl.c
@@ -30,7 +30,7 @@
/*
* NOTE: SPICE_RING_PROD_ITEM accesses memory on the pci bar and as
* such can be changed by the guest, so to avoid a guest trigerrable
- * abort we just set qxl_guest_bug and set the return to NULL. Still
+ * abort we just call qxl_set_guest_bug() and set the return to NULL. Still
* it may happen as a result of emulator bug as well.
*/
#undef SPICE_RING_PROD_ITEM
@@ -40,7 +40,7 @@
uint32_t prod = (r)->prod & SPICE_RING_INDEX_MASK(r); \
typeof(&(r)->items[prod]) m_item = &(r)->items[prod]; \
if (!((uint8_t*)m_item >= (uint8_t*)(start) && (uint8_t*)(m_item + 1) <= (uint8_t*)(end))) { \
- qxl_guest_bug(qxl, "SPICE_RING_PROD_ITEM indices mismatch " \
+ qxl_set_guest_bug(qxl, "SPICE_RING_PROD_ITEM indices mismatch " \
"! %p <= %p < %p", (uint8_t *)start, \
(uint8_t *)m_item, (uint8_t *)end); \
ret = NULL; \
@@ -56,7 +56,7 @@
uint32_t cons = (r)->cons & SPICE_RING_INDEX_MASK(r); \
typeof(&(r)->items[cons]) m_item = &(r)->items[cons]; \
if (!((uint8_t*)m_item >= (uint8_t*)(start) && (uint8_t*)(m_item + 1) <= (uint8_t*)(end))) { \
- qxl_guest_bug(qxl, "SPICE_RING_CONS_ITEM indices mismatch " \
+ qxl_set_guest_bug(qxl, "SPICE_RING_CONS_ITEM indices mismatch " \
"! %p <= %p < %p", (uint8_t *)start, \
(uint8_t *)m_item, (uint8_t *)end); \
ret = NULL; \
@@ -114,20 +114,16 @@ static QXLMode qxl_modes[] = {
QXL_MODE_EX(1600, 1200),
QXL_MODE_EX(1680, 1050),
QXL_MODE_EX(1920, 1080),
-#if VGA_RAM_SIZE >= (16 * 1024 * 1024)
/* these modes need more than 8 MB video memory */
QXL_MODE_EX(1920, 1200),
QXL_MODE_EX(1920, 1440),
QXL_MODE_EX(2048, 1536),
QXL_MODE_EX(2560, 1440),
QXL_MODE_EX(2560, 1600),
-#endif
-#if VGA_RAM_SIZE >= (32 * 1024 * 1024)
/* these modes need more than 16 MB video memory */
QXL_MODE_EX(2560, 2048),
QXL_MODE_EX(2800, 2100),
QXL_MODE_EX(3200, 2400),
-#endif
};
static PCIQXLDevice *qxl0;
@@ -138,9 +134,10 @@ static void qxl_reset_memslots(PCIQXLDevice *d);
static void qxl_reset_surfaces(PCIQXLDevice *d);
static void qxl_ring_set_dirty(PCIQXLDevice *qxl);
-void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
{
qxl_send_events(qxl, QXL_INTERRUPT_ERROR);
+ qxl->guest_bug = 1;
if (qxl->guestdebug) {
va_list ap;
va_start(ap, msg);
@@ -151,6 +148,10 @@ void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
}
}
+static void qxl_clear_guest_bug(PCIQXLDevice *qxl)
+{
+ qxl->guest_bug = 0;
+}
void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id,
struct QXLRect *area, struct QXLRect *dirty_rects,
@@ -279,6 +280,7 @@ static inline uint32_t msb_mask(uint32_t val)
static ram_addr_t qxl_rom_size(void)
{
uint32_t rom_size = sizeof(QXLRom) + sizeof(QXLModes) + sizeof(qxl_modes);
+
rom_size = MAX(rom_size, TARGET_PAGE_SIZE);
rom_size = msb_mask(rom_size * 2 - 1);
return rom_size;
@@ -291,8 +293,8 @@ static void init_qxl_rom(PCIQXLDevice *d)
uint32_t ram_header_size;
uint32_t surface0_area_size;
uint32_t num_pages;
- uint32_t fb, maxfb = 0;
- int i;
+ uint32_t fb;
+ int i, n;
memset(rom, 0, d->rom_size);
@@ -307,26 +309,25 @@ static void init_qxl_rom(PCIQXLDevice *d)
rom->slots_end = NUM_MEMSLOTS - 1;
rom->n_surfaces = cpu_to_le32(NUM_SURFACES);
- modes->n_modes = cpu_to_le32(ARRAY_SIZE(qxl_modes));
- for (i = 0; i < modes->n_modes; i++) {
+ for (i = 0, n = 0; i < ARRAY_SIZE(qxl_modes); i++) {
fb = qxl_modes[i].y_res * qxl_modes[i].stride;
- if (maxfb < fb) {
- maxfb = fb;
+ if (fb > d->vgamem_size) {
+ continue;
}
- modes->modes[i].id = cpu_to_le32(i);
- modes->modes[i].x_res = cpu_to_le32(qxl_modes[i].x_res);
- modes->modes[i].y_res = cpu_to_le32(qxl_modes[i].y_res);
- modes->modes[i].bits = cpu_to_le32(qxl_modes[i].bits);
- modes->modes[i].stride = cpu_to_le32(qxl_modes[i].stride);
- modes->modes[i].x_mili = cpu_to_le32(qxl_modes[i].x_mili);
- modes->modes[i].y_mili = cpu_to_le32(qxl_modes[i].y_mili);
- modes->modes[i].orientation = cpu_to_le32(qxl_modes[i].orientation);
- }
- if (maxfb < VGA_RAM_SIZE && d->id == 0)
- maxfb = VGA_RAM_SIZE;
+ modes->modes[n].id = cpu_to_le32(i);
+ modes->modes[n].x_res = cpu_to_le32(qxl_modes[i].x_res);
+ modes->modes[n].y_res = cpu_to_le32(qxl_modes[i].y_res);
+ modes->modes[n].bits = cpu_to_le32(qxl_modes[i].bits);
+ modes->modes[n].stride = cpu_to_le32(qxl_modes[i].stride);
+ modes->modes[n].x_mili = cpu_to_le32(qxl_modes[i].x_mili);
+ modes->modes[n].y_mili = cpu_to_le32(qxl_modes[i].y_mili);
+ modes->modes[n].orientation = cpu_to_le32(qxl_modes[i].orientation);
+ n++;
+ }
+ modes->n_modes = cpu_to_le32(n);
ram_header_size = ALIGN(sizeof(QXLRam), 4096);
- surface0_area_size = ALIGN(maxfb, 4096);
+ surface0_area_size = ALIGN(d->vgamem_size, 4096);
num_pages = d->vga.vram_size;
num_pages -= ram_header_size;
num_pages -= surface0_area_size;
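
To make the new runtime filter concrete: with vgamem at its 8 MB minimum, a 32 bpp 1920x1200 mode needs stride * y_res = (1920 * 4) * 1200 = 9216000 bytes (about 8.8 MB) and is skipped, matching the "these modes need more than 8 MB video memory" comment above, while 1920x1080 needs 8294400 bytes (about 7.9 MB) and stays in the mode list. A minimal sketch of that arithmetic (the stride = x_res * 4 assumption holds only for the 32 bpp entries):

    #include <stdint.h>

    /* Framebuffer bytes required by a 32 bpp mode. */
    static inline uint32_t mode_fb_bytes(uint32_t x_res, uint32_t y_res)
    {
        return x_res * 4 * y_res;   /* 1920x1200 -> 9216000 > 8 MiB minimum */
    }
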
@@ -411,7 +412,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext)
uint32_t id = le32_to_cpu(cmd->surface_id);
if (id >= NUM_SURFACES) {
- qxl_guest_bug(qxl, "QXL_CMD_SURFACE id %d >= %d", id, NUM_SURFACES);
+ qxl_set_guest_bug(qxl, "QXL_CMD_SURFACE id %d >= %d", id,
+ NUM_SURFACES);
return 1;
}
qemu_mutex_lock(&qxl->track_lock);
@@ -571,7 +573,7 @@ static int interface_get_command(QXLInstance *sin, struct QXLCommandExt *ext)
case QXL_MODE_NATIVE:
case QXL_MODE_UNDEFINED:
ring = &qxl->ram->cmd_ring;
- if (SPICE_RING_IS_EMPTY(ring)) {
+ if (qxl->guest_bug || SPICE_RING_IS_EMPTY(ring)) {
return false;
}
SPICE_RING_CONS_ITEM(qxl, ring, cmd);
@@ -931,6 +933,7 @@ static void qxl_enter_vga_mode(PCIQXLDevice *d)
qemu_spice_create_host_primary(&d->ssd);
d->mode = QXL_MODE_VGA;
memset(&d->ssd.dirty, 0, sizeof(d->ssd.dirty));
+ vga_dirty_log_start(&d->vga);
}
static void qxl_exit_vga_mode(PCIQXLDevice *d)
@@ -939,6 +942,7 @@ static void qxl_exit_vga_mode(PCIQXLDevice *d)
return;
}
trace_qxl_exit_vga_mode(d->id);
+ vga_dirty_log_stop(&d->vga);
qxl_destroy_primary(d, QXL_SYNC);
}
@@ -977,6 +981,8 @@ static void qxl_soft_reset(PCIQXLDevice *d)
{
trace_qxl_soft_reset(d->id);
qxl_check_state(d);
+ qxl_clear_guest_bug(d);
+ d->current_async = QXL_UNDEFINED_IO;
if (d->id == 0) {
qxl_enter_vga_mode(d);
@@ -1061,12 +1067,12 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
trace_qxl_memslot_add_guest(d->id, slot_id, guest_start, guest_end);
if (slot_id >= NUM_MEMSLOTS) {
- qxl_guest_bug(d, "%s: slot_id >= NUM_MEMSLOTS %d >= %d", __func__,
+ qxl_set_guest_bug(d, "%s: slot_id >= NUM_MEMSLOTS %d >= %d", __func__,
slot_id, NUM_MEMSLOTS);
return 1;
}
if (guest_start > guest_end) {
- qxl_guest_bug(d, "%s: guest_start > guest_end 0x%" PRIx64
+ qxl_set_guest_bug(d, "%s: guest_start > guest_end 0x%" PRIx64
" > 0x%" PRIx64, __func__, guest_start, guest_end);
return 1;
}
@@ -1091,7 +1097,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
break;
}
if (i == ARRAY_SIZE(regions)) {
- qxl_guest_bug(d, "%s: finished loop without match", __func__);
+ qxl_set_guest_bug(d, "%s: finished loop without match", __func__);
return 1;
}
@@ -1105,7 +1111,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta,
break;
default:
/* should not happen */
- qxl_guest_bug(d, "%s: pci_region = %d", __func__, pci_region);
+ qxl_set_guest_bug(d, "%s: pci_region = %d", __func__, pci_region);
return 1;
}
@@ -1156,21 +1162,24 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id)
return (void *)(intptr_t)offset;
case MEMSLOT_GROUP_GUEST:
if (slot >= NUM_MEMSLOTS) {
- qxl_guest_bug(qxl, "slot too large %d >= %d", slot, NUM_MEMSLOTS);
+ qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot,
+ NUM_MEMSLOTS);
return NULL;
}
if (!qxl->guest_slots[slot].active) {
- qxl_guest_bug(qxl, "inactive slot %d\n", slot);
+ qxl_set_guest_bug(qxl, "inactive slot %d\n", slot);
return NULL;
}
if (offset < qxl->guest_slots[slot].delta) {
- qxl_guest_bug(qxl, "slot %d offset %"PRIu64" < delta %"PRIu64"\n",
+ qxl_set_guest_bug(qxl,
+ "slot %d offset %"PRIu64" < delta %"PRIu64"\n",
slot, offset, qxl->guest_slots[slot].delta);
return NULL;
}
offset -= qxl->guest_slots[slot].delta;
if (offset > qxl->guest_slots[slot].size) {
- qxl_guest_bug(qxl, "slot %d offset %"PRIu64" > size %"PRIu64"\n",
+ qxl_set_guest_bug(qxl,
+ "slot %d offset %"PRIu64" > size %"PRIu64"\n",
slot, offset, qxl->guest_slots[slot].size);
return NULL;
}
@@ -1190,9 +1199,19 @@ static void qxl_create_guest_primary(PCIQXLDevice *qxl, int loadvm,
{
QXLDevSurfaceCreate surface;
QXLSurfaceCreate *sc = &qxl->guest_primary.surface;
+ int size;
+ int requested_height = le32_to_cpu(sc->height);
+ int requested_stride = le32_to_cpu(sc->stride);
+
+ size = abs(requested_stride) * requested_height;
+ if (size > qxl->vgamem_size) {
+ qxl_set_guest_bug(qxl, "%s: requested primary larger than framebuffer"
+ " size", __func__);
+ return;
+ }
if (qxl->mode == QXL_MODE_NATIVE) {
- qxl_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE",
+ qxl_set_guest_bug(qxl, "%s: nop since already in QXL_MODE_NATIVE",
__func__);
}
qxl_exit_vga_mode(qxl);
@@ -1291,6 +1310,10 @@ static void ioport_write(void *opaque, target_phys_addr_t addr,
qxl_async_io async = QXL_SYNC;
uint32_t orig_io_port = io_port;
+ if (d->guest_bug && io_port != QXL_IO_RESET) {
+ return;
+ }
+
switch (io_port) {
case QXL_IO_RESET:
case QXL_IO_SET_MODE:
@@ -1342,7 +1365,7 @@ async_common:
async = QXL_ASYNC;
qemu_mutex_lock(&d->async_lock);
if (d->current_async != QXL_UNDEFINED_IO) {
- qxl_guest_bug(d, "%d async started before last (%d) complete",
+ qxl_set_guest_bug(d, "%d async started before last (%d) complete",
io_port, d->current_async);
qemu_mutex_unlock(&d->async_lock);
return;
@@ -1403,11 +1426,12 @@ async_common:
break;
case QXL_IO_MEMSLOT_ADD:
if (val >= NUM_MEMSLOTS) {
- qxl_guest_bug(d, "QXL_IO_MEMSLOT_ADD: val out of range");
+ qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_ADD: val out of range");
break;
}
if (d->guest_slots[val].active) {
- qxl_guest_bug(d, "QXL_IO_MEMSLOT_ADD: memory slot already active");
+ qxl_set_guest_bug(d,
+ "QXL_IO_MEMSLOT_ADD: memory slot already active");
break;
}
d->guest_slots[val].slot = d->ram->mem_slot;
@@ -1415,14 +1439,14 @@ async_common:
break;
case QXL_IO_MEMSLOT_DEL:
if (val >= NUM_MEMSLOTS) {
- qxl_guest_bug(d, "QXL_IO_MEMSLOT_DEL: val out of range");
+ qxl_set_guest_bug(d, "QXL_IO_MEMSLOT_DEL: val out of range");
break;
}
qxl_del_memslot(d, val);
break;
case QXL_IO_CREATE_PRIMARY:
if (val != 0) {
- qxl_guest_bug(d, "QXL_IO_CREATE_PRIMARY (async=%d): val != 0",
+ qxl_set_guest_bug(d, "QXL_IO_CREATE_PRIMARY (async=%d): val != 0",
async);
goto cancel_async;
}
@@ -1431,7 +1455,7 @@ async_common:
break;
case QXL_IO_DESTROY_PRIMARY:
if (val != 0) {
- qxl_guest_bug(d, "QXL_IO_DESTROY_PRIMARY (async=%d): val != 0",
+ qxl_set_guest_bug(d, "QXL_IO_DESTROY_PRIMARY (async=%d): val != 0",
async);
goto cancel_async;
}
@@ -1443,7 +1467,7 @@ async_common:
break;
case QXL_IO_DESTROY_SURFACE_WAIT:
if (val >= NUM_SURFACES) {
- qxl_guest_bug(d, "QXL_IO_DESTROY_SURFACE (async=%d):"
+ qxl_set_guest_bug(d, "QXL_IO_DESTROY_SURFACE (async=%d):"
"%" PRIu64 " >= NUM_SURFACES", async, val);
goto cancel_async;
}
@@ -1467,7 +1491,7 @@ async_common:
qxl_spice_destroy_surfaces(d, async);
break;
default:
- qxl_guest_bug(d, "%s: unexpected ioport=0x%x\n", __func__, io_port);
+ qxl_set_guest_bug(d, "%s: unexpected ioport=0x%x\n", __func__, io_port);
}
return;
cancel_async:
@@ -1694,14 +1718,20 @@ static DisplayChangeListener display_listener = {
.dpy_refresh = display_refresh,
};
-static void qxl_init_ramsize(PCIQXLDevice *qxl, uint32_t ram_min_mb)
+static void qxl_init_ramsize(PCIQXLDevice *qxl)
{
- /* vga ram (bar 0) */
+ /* vga mode framebuffer / primary surface (bar 0, first part) */
+ if (qxl->vgamem_size_mb < 8) {
+ qxl->vgamem_size_mb = 8;
+ }
+ qxl->vgamem_size = qxl->vgamem_size_mb * 1024 * 1024;
+
+ /* vga ram (bar 0, total) */
if (qxl->ram_size_mb != -1) {
qxl->vga.vram_size = qxl->ram_size_mb * 1024 * 1024;
}
- if (qxl->vga.vram_size < ram_min_mb * 1024 * 1024) {
- qxl->vga.vram_size = ram_min_mb * 1024 * 1024;
+ if (qxl->vga.vram_size < qxl->vgamem_size * 2) {
+ qxl->vga.vram_size = qxl->vgamem_size * 2;
}
/* vram32 (surfaces, 32bit, bar 1) */
@@ -1724,6 +1754,7 @@ static void qxl_init_ramsize(PCIQXLDevice *qxl, uint32_t ram_min_mb)
qxl->vram32_size = 4096;
qxl->vram_size = 4096;
}
+ qxl->vgamem_size = msb_mask(qxl->vgamem_size * 2 - 1);
qxl->vga.vram_size = msb_mask(qxl->vga.vram_size * 2 - 1);
qxl->vram32_size = msb_mask(qxl->vram32_size * 2 - 1);
qxl->vram_size = msb_mask(qxl->vram_size * 2 - 1);
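
All four sizes end up rounded to a power of two here: msb_mask() keeps only the most significant set bit, so msb_mask(size * 2 - 1) is the smallest power of two that is >= size. A self-contained sketch of the same rounding (the loop is illustrative, not the actual msb_mask() implementation):

    #include <stdint.h>

    /* Round a non-zero 32-bit size up to the next power of two. */
    static uint32_t round_up_pow2(uint32_t size)
    {
        uint32_t val = size * 2 - 1;   /* same trick as msb_mask(size * 2 - 1) */
        uint32_t bit = 1u << 31;

        while (bit && !(val & bit)) {
            bit >>= 1;
        }
        return bit;                    /* e.g. 12 MiB -> 16 MiB, 8 MiB -> 8 MiB */
    }
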
@@ -1742,6 +1773,7 @@ static int qxl_init_common(PCIQXLDevice *qxl)
qemu_mutex_init(&qxl->track_lock);
qemu_mutex_init(&qxl->async_lock);
qxl->current_async = QXL_UNDEFINED_IO;
+ qxl->guest_bug = 0;
switch (qxl->revision) {
case 1: /* spice 0.4 -- qxl-1 */
@@ -1834,8 +1866,9 @@ static int qxl_init_primary(PCIDevice *dev)
PortioList *qxl_vga_port_list = g_new(PortioList, 1);
qxl->id = 0;
- qxl_init_ramsize(qxl, 32);
- vga_common_init(vga, qxl->vga.vram_size);
+ qxl_init_ramsize(qxl);
+ vga->vram_size_mb = qxl->vga.vram_size >> 20;
+ vga_common_init(vga);
vga_init(vga, pci_address_space(dev), pci_address_space_io(dev), false);
portio_list_init(qxl_vga_port_list, qxl_vga_portio_list, vga, "vga");
portio_list_add(qxl_vga_port_list, pci_address_space_io(dev), 0x3b0);
@@ -1856,7 +1889,7 @@ static int qxl_init_secondary(PCIDevice *dev)
PCIQXLDevice *qxl = DO_UPCAST(PCIQXLDevice, pci, dev);
qxl->id = device_id++;
- qxl_init_ramsize(qxl, 16);
+ qxl_init_ramsize(qxl);
memory_region_init_ram(&qxl->vga.vram, "qxl.vgavram", qxl->vga.vram_size);
vmstate_register_ram(&qxl->vga.vram, &qxl->pci.qdev);
qxl->vga.vram_ptr = memory_region_get_ram_ptr(&qxl->vga.vram);
@@ -2034,6 +2067,7 @@ static Property qxl_properties[] = {
DEFINE_PROP_UINT32("ram_size_mb", PCIQXLDevice, ram_size_mb, -1),
DEFINE_PROP_UINT32("vram_size_mb", PCIQXLDevice, vram32_size_mb, -1),
DEFINE_PROP_UINT32("vram64_size_mb", PCIQXLDevice, vram_size_mb, -1),
+ DEFINE_PROP_UINT32("vgamem_mb", PCIQXLDevice, vgamem_size_mb, 16),
DEFINE_PROP_END_OF_LIST(),
};
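
With the new property the VGA-mode framebuffer / primary surface size can be tuned from the command line independently of the other bars. A possible invocation (the qxl-vga device name and the x86 binary are assumptions of this example; only vgamem_mb is introduced by the hunk above):

    qemu-system-x86_64 -vga none -device qxl-vga,vgamem_mb=32,ram_size_mb=128
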
diff --git a/hw/qxl.h b/hw/qxl.h
index 31029503fe..172baf6cc2 100644
--- a/hw/qxl.h
+++ b/hw/qxl.h
@@ -31,6 +31,9 @@ typedef struct PCIQXLDevice {
uint32_t debug;
uint32_t guestdebug;
uint32_t cmdlog;
+
+ uint32_t guest_bug;
+
enum qxl_mode mode;
uint32_t cmdflags;
int generation;
@@ -81,6 +84,7 @@ typedef struct PCIQXLDevice {
QXLReleaseInfo *last_release;
uint32_t last_release_offset;
uint32_t oom_running;
+ uint32_t vgamem_size;
/* rom pci bar */
QXLRom shadow_rom;
@@ -102,6 +106,7 @@ typedef struct PCIQXLDevice {
uint32_t ram_size_mb;
uint32_t vram_size_mb;
uint32_t vram32_size_mb;
+ uint32_t vgamem_size_mb;
/* qxl_render_update state */
int render_update_cookie_num;
@@ -127,7 +132,8 @@ typedef struct PCIQXLDevice {
/* qxl.c */
void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id);
-void qxl_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) GCC_FMT_ATTR(2, 3);
+void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...)
+ GCC_FMT_ATTR(2, 3);
void qxl_spice_update_area(PCIQXLDevice *qxl, uint32_t surface_id,
struct QXLRect *area, struct QXLRect *dirty_rects,
diff --git a/hw/rtl8139.c b/hw/rtl8139.c
index f6f144b525..844f1b8c3f 100644
--- a/hw/rtl8139.c
+++ b/hw/rtl8139.c
@@ -781,7 +781,14 @@ static inline dma_addr_t rtl8139_addr64(uint32_t low, uint32_t high)
#endif
}
-static int rtl8139_can_receive(VLANClientState *nc)
+/* Workaround for buggy guest drivers such as Linux, which allocate the rx
+ * ring only after the receiver has been enabled. */
+static bool rtl8139_cp_rx_valid(RTL8139State *s)
+{
+ return !(s->RxRingAddrLO == 0 && s->RxRingAddrHI == 0);
+}
+
+static int rtl8139_can_receive(NetClientState *nc)
{
RTL8139State *s = DO_UPCAST(NICState, nc, nc)->opaque;
int avail;
@@ -791,11 +798,8 @@ static int rtl8139_can_receive(VLANClientState *nc)
return 1;
if (!rtl8139_receiver_enabled(s))
return 1;
- /* network/host communication happens only in normal mode */
- if ((s->Cfg9346 & Chip9346_op_mask) != Cfg9346_Normal)
- return 0;
- if (rtl8139_cp_receiver_enabled(s)) {
+ if (rtl8139_cp_receiver_enabled(s) && rtl8139_cp_rx_valid(s)) {
/* ??? Flow control not implemented in c+ mode.
This is a hack to work around slirp deficiencies anyway. */
return 1;
@@ -806,7 +810,7 @@ static int rtl8139_can_receive(VLANClientState *nc)
}
}
-static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_t size_, int do_interrupt)
+static ssize_t rtl8139_do_receive(NetClientState *nc, const uint8_t *buf, size_t size_, int do_interrupt)
{
RTL8139State *s = DO_UPCAST(NICState, nc, nc)->opaque;
/* size is the length of the buffer passed to the driver */
@@ -836,12 +840,6 @@ static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_
return -1;
}
- /* check whether we are in normal mode */
- if ((s->Cfg9346 & Chip9346_op_mask) != Cfg9346_Normal) {
- DPRINTF("not in normal op mode\n");
- return -1;
- }
-
/* XXX: check this */
if (s->RxConfig & AcceptAllPhys) {
/* promiscuous: receive all */
@@ -946,6 +944,10 @@ static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_
if (rtl8139_cp_receiver_enabled(s))
{
+ if (!rtl8139_cp_rx_valid(s)) {
+ return size;
+ }
+
DPRINTF("in C+ Rx mode ================\n");
/* begin C+ receiver mode */
@@ -1185,7 +1187,7 @@ static ssize_t rtl8139_do_receive(VLANClientState *nc, const uint8_t *buf, size_
return size_;
}
-static ssize_t rtl8139_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t rtl8139_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
return rtl8139_do_receive(nc, buf, size, 1);
}
@@ -1783,7 +1785,7 @@ static void rtl8139_transfer_frame(RTL8139State *s, uint8_t *buf, int size,
if (iov) {
buf2_size = iov_size(iov, 3);
buf2 = g_malloc(buf2_size);
- iov_to_buf(iov, 3, buf2, 0, buf2_size);
+ iov_to_buf(iov, 3, 0, buf2, buf2_size);
buf = buf2;
}
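
The swapped arguments above follow the reworked iovec helper API, in which the source offset now comes before the destination buffer. The prototype assumed by this call is:

    size_t iov_to_buf(const struct iovec *iov, unsigned int iov_cnt,
                      size_t offset, void *buf, size_t bytes);
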
@@ -3429,14 +3431,14 @@ static void rtl8139_timer(void *opaque)
rtl8139_set_next_tctr_time(s, qemu_get_clock_ns(vm_clock));
}
-static void rtl8139_cleanup(VLANClientState *nc)
+static void rtl8139_cleanup(NetClientState *nc)
{
RTL8139State *s = DO_UPCAST(NICState, nc, nc)->opaque;
s->nic = NULL;
}
-static int pci_rtl8139_uninit(PCIDevice *dev)
+static void pci_rtl8139_uninit(PCIDevice *dev)
{
RTL8139State *s = DO_UPCAST(RTL8139State, dev, dev);
@@ -3448,12 +3450,11 @@ static int pci_rtl8139_uninit(PCIDevice *dev)
}
qemu_del_timer(s->timer);
qemu_free_timer(s->timer);
- qemu_del_vlan_client(&s->nic->nc);
- return 0;
+ qemu_del_net_client(&s->nic->nc);
}
static NetClientInfo net_rtl8139_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = rtl8139_can_receive,
.receive = rtl8139_receive,
diff --git a/hw/s390-virtio-bus.c b/hw/s390-virtio-bus.c
index 4d49b96f94..a245684692 100644
--- a/hw/s390-virtio-bus.c
+++ b/hw/s390-virtio-bus.c
@@ -402,6 +402,7 @@ static TypeInfo s390_virtio_net = {
static Property s390_virtio_blk_properties[] = {
DEFINE_BLOCK_PROPERTIES(VirtIOS390Device, blk.conf),
+ DEFINE_BLOCK_CHS_PROPERTIES(VirtIOS390Device, blk.conf),
DEFINE_PROP_STRING("serial", VirtIOS390Device, blk.serial),
#ifdef __linux__
DEFINE_PROP_BIT("scsi", VirtIOS390Device, blk.scsi, 0, true),
diff --git a/hw/scsi-bus.c b/hw/scsi-bus.c
index 14e2f730b8..b8a857d145 100644
--- a/hw/scsi-bus.c
+++ b/hw/scsi-bus.c
@@ -186,6 +186,10 @@ static int scsi_qdev_init(DeviceState *qdev)
dev);
}
+ if (bus->info->hotplug) {
+ bus->info->hotplug(bus, dev);
+ }
+
err:
return rc;
}
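
The hotplug callback used here (and the hot_unplug one further down) is a new SCSIBusInfo member; the hw/scsi.h hunk is not part of this excerpt. An HBA that wants the notification would wire it up roughly like this (struct layout and callback signatures are assumptions based on the call sites in this file):

    #include "hw/scsi.h"

    static void my_hba_hotplug(SCSIBus *bus, SCSIDevice *dev)
    {
        /* e.g. tell the guest-visible controller that a new LUN appeared */
    }

    static void my_hba_hot_unplug(SCSIBus *bus, SCSIDevice *dev)
    {
        /* e.g. raise an event/unit attention so the guest rescans the bus */
    }

    static const struct SCSIBusInfo my_hba_scsi_info = {
        .tcq        = true,
        .max_target = 8,
        .max_lun    = 1,
        .hotplug    = my_hba_hotplug,
        .hot_unplug = my_hba_hot_unplug,
        /* a real HBA also fills in .complete, .get_sg_list, ... */
    };
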
@@ -729,25 +733,87 @@ static int scsi_get_performance_length(int num_desc, int type, int data_type)
}
}
+static int ata_passthrough_xfer_unit(SCSIDevice *dev, uint8_t *buf)
+{
+ int byte_block = (buf[2] >> 2) & 0x1;
+ int type = (buf[2] >> 4) & 0x1;
+ int xfer_unit;
+
+ if (byte_block) {
+ if (type) {
+ xfer_unit = dev->blocksize;
+ } else {
+ xfer_unit = 512;
+ }
+ } else {
+ xfer_unit = 1;
+ }
+
+ return xfer_unit;
+}
+
+static int ata_passthrough_12_xfer_size(SCSIDevice *dev, uint8_t *buf)
+{
+ int length = buf[2] & 0x3;
+ int xfer;
+ int unit = ata_passthrough_xfer_unit(dev, buf);
+
+ switch (length) {
+ case 0:
+ case 3: /* USB-specific. */
+ xfer = 0;
+ break;
+ case 1:
+ xfer = buf[3];
+ break;
+ case 2:
+ xfer = buf[4];
+ break;
+ }
+
+ return xfer * unit;
+}
+
+static int ata_passthrough_16_xfer_size(SCSIDevice *dev, uint8_t *buf)
+{
+ int extend = buf[1] & 0x1;
+ int length = buf[2] & 0x3;
+ int xfer;
+ int unit = ata_passthrough_xfer_unit(dev, buf);
+
+ switch (length) {
+ case 0:
+ case 3: /* USB-specific. */
+ xfer = 0;
+ break;
+ case 1:
+ xfer = buf[4];
+ xfer |= (extend ? buf[3] << 8 : 0);
+ break;
+ case 2:
+ xfer = buf[6];
+ xfer |= (extend ? buf[5] << 8 : 0);
+ break;
+ }
+
+ return xfer * unit;
+}
+
static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
switch (buf[0] >> 5) {
case 0:
cmd->xfer = buf[4];
- cmd->len = 6;
break;
case 1:
case 2:
cmd->xfer = lduw_be_p(&buf[7]);
- cmd->len = 10;
break;
case 4:
cmd->xfer = ldl_be_p(&buf[10]) & 0xffffffffULL;
- cmd->len = 16;
break;
case 5:
cmd->xfer = ldl_be_p(&buf[6]) & 0xffffffffULL;
- cmd->len = 12;
break;
default:
return -1;
@@ -771,11 +837,9 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
case SYNCHRONIZE_CACHE_16:
case LOCATE_16:
case LOCK_UNLOCK_CACHE:
- case LOAD_UNLOAD:
case SET_CD_SPEED:
case SET_LIMITS:
case WRITE_LONG_10:
- case MOVE_MEDIUM:
case UPDATE_BLOCK:
case RESERVE_TRACK:
case SET_READ_AHEAD:
@@ -869,6 +933,17 @@ static int scsi_req_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
cmd->xfer = buf[9] | (buf[8] << 8);
}
break;
+ case ATA_PASSTHROUGH_12:
+ if (dev->type == TYPE_ROM) {
+ /* BLANK command of MMC */
+ cmd->xfer = 0;
+ } else {
+ cmd->xfer = ata_passthrough_12_xfer_size(dev, buf);
+ }
+ break;
+ case ATA_PASSTHROUGH_16:
+ cmd->xfer = ata_passthrough_16_xfer_size(dev, buf);
+ break;
}
return 0;
}
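
Worked example for the helpers above: an ATA PASS-THROUGH (12) CDB with BYTE_BLOCK = 1, T_TYPE = 0 and T_LENGTH = 2 transfers buf[4] sectors of 512 bytes each, so a SECTOR_COUNT of 8 yields cmd->xfer = 8 * 512 = 4096 bytes. A sketch of such a CDB (field values are illustrative only):

    #include <stdint.h>

    static const uint8_t ata_pt12_cdb[12] = {
        [0] = 0xa1, /* ATA_PASSTHROUGH_12                                      */
        [2] = 0x06, /* BYTE_BLOCK = 1, T_TYPE = 0, T_LENGTH = 2 (sector count) */
        [4] = 8,    /* SECTOR_COUNT: 8 sectors -> xfer = 8 * 512 = 4096 bytes  */
    };
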
@@ -885,7 +960,6 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu
case READ_REVERSE:
case RECOVER_BUFFERED_DATA:
case WRITE_6:
- cmd->len = 6;
cmd->xfer = buf[4] | (buf[3] << 8) | (buf[2] << 16);
if (buf[1] & 0x01) { /* fixed */
cmd->xfer *= dev->blocksize;
@@ -895,22 +969,34 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu
case READ_REVERSE_16:
case VERIFY_16:
case WRITE_16:
- cmd->len = 16;
cmd->xfer = buf[14] | (buf[13] << 8) | (buf[12] << 16);
if (buf[1] & 0x01) { /* fixed */
cmd->xfer *= dev->blocksize;
}
break;
case REWIND:
- case START_STOP:
- cmd->len = 6;
+ case LOAD_UNLOAD:
cmd->xfer = 0;
break;
case SPACE_16:
cmd->xfer = buf[13] | (buf[12] << 8);
break;
case READ_POSITION:
- cmd->xfer = buf[8] | (buf[7] << 8);
+ switch (buf[1] & 0x1f) /* service action */ {
+ case SHORT_FORM_BLOCK_ID:
+ case SHORT_FORM_VENDOR_SPECIFIC:
+ cmd->xfer = 20;
+ break;
+ case LONG_FORM:
+ cmd->xfer = 32;
+ break;
+ case EXTENDED_FORM:
+ cmd->xfer = buf[8] | (buf[7] << 8);
+ break;
+ default:
+ return -1;
+ }
+
break;
case FORMAT_UNIT:
cmd->xfer = buf[4] | (buf[3] << 8);
@@ -922,6 +1008,29 @@ static int scsi_req_stream_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *bu
return 0;
}
+static int scsi_req_medium_changer_length(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
+{
+ switch (buf[0]) {
+ /* medium changer commands */
+ case EXCHANGE_MEDIUM:
+ case INITIALIZE_ELEMENT_STATUS:
+ case INITIALIZE_ELEMENT_STATUS_WITH_RANGE:
+ case MOVE_MEDIUM:
+ case POSITION_TO_ELEMENT:
+ cmd->xfer = 0;
+ break;
+ case READ_ELEMENT_STATUS:
+ cmd->xfer = buf[9] | (buf[8] << 8) | (buf[7] << 16);
+ break;
+
+ /* generic commands */
+ default:
+ return scsi_req_length(cmd, dev, buf);
+ }
+ return 0;
+}
+
+
static void scsi_cmd_xfer_mode(SCSICommand *cmd)
{
if (!cmd->xfer) {
@@ -964,9 +1073,14 @@ static void scsi_cmd_xfer_mode(SCSICommand *cmd)
case SEND_DVD_STRUCTURE:
case PERSISTENT_RESERVE_OUT:
case MAINTENANCE_OUT:
- case ATA_PASSTHROUGH:
cmd->mode = SCSI_XFER_TO_DEV;
break;
+ case ATA_PASSTHROUGH_12:
+ case ATA_PASSTHROUGH_16:
+ /* T_DIR */
+ cmd->mode = (cmd->buf[2] & 0x8) ?
+ SCSI_XFER_FROM_DEV : SCSI_XFER_TO_DEV;
+ break;
default:
cmd->mode = SCSI_XFER_FROM_DEV;
break;
@@ -1001,11 +1115,36 @@ int scsi_req_parse(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
{
int rc;
- if (dev->type == TYPE_TAPE) {
+ switch (buf[0] >> 5) {
+ case 0:
+ cmd->len = 6;
+ break;
+ case 1:
+ case 2:
+ cmd->len = 10;
+ break;
+ case 4:
+ cmd->len = 16;
+ break;
+ case 5:
+ cmd->len = 12;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (dev->type) {
+ case TYPE_TAPE:
rc = scsi_req_stream_length(cmd, dev, buf);
- } else {
+ break;
+ case TYPE_MEDIUM_CHANGER:
+ rc = scsi_req_medium_changer_length(cmd, dev, buf);
+ break;
+ default:
rc = scsi_req_length(cmd, dev, buf);
+ break;
}
+
if (rc != 0)
return rc;
@@ -1015,6 +1154,16 @@ int scsi_req_parse(SCSICommand *cmd, SCSIDevice *dev, uint8_t *buf)
return 0;
}
+void scsi_device_report_change(SCSIDevice *dev, SCSISense sense)
+{
+ SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
+
+ scsi_device_set_ua(dev, sense);
+ if (bus->info->change) {
+ bus->info->change(bus, dev, sense);
+ }
+}
+
/*
* Predefined sense codes
*/
@@ -1036,7 +1185,7 @@ const struct SCSISense sense_code_NO_MEDIUM = {
/* LUN not ready, medium removal prevented */
const struct SCSISense sense_code_NOT_READY_REMOVAL_PREVENTED = {
- .key = NOT_READY, .asc = 0x53, .ascq = 0x00
+ .key = NOT_READY, .asc = 0x53, .ascq = 0x02
};
/* Hardware error, internal target failure */
@@ -1059,6 +1208,16 @@ const struct SCSISense sense_code_INVALID_FIELD = {
.key = ILLEGAL_REQUEST, .asc = 0x24, .ascq = 0x00
};
+/* Illegal request, Invalid field in parameter list */
+const struct SCSISense sense_code_INVALID_PARAM = {
+ .key = ILLEGAL_REQUEST, .asc = 0x26, .ascq = 0x00
+};
+
+/* Illegal request, Parameter list length error */
+const struct SCSISense sense_code_INVALID_PARAM_LEN = {
+ .key = ILLEGAL_REQUEST, .asc = 0x1a, .ascq = 0x00
+};
+
/* Illegal request, LUN not supported */
const struct SCSISense sense_code_LUN_NOT_SUPPORTED = {
.key = ILLEGAL_REQUEST, .asc = 0x25, .ascq = 0x00
@@ -1076,7 +1235,7 @@ const struct SCSISense sense_code_INCOMPATIBLE_FORMAT = {
/* Illegal request, medium removal prevented */
const struct SCSISense sense_code_ILLEGAL_REQ_REMOVAL_PREVENTED = {
- .key = ILLEGAL_REQUEST, .asc = 0x53, .ascq = 0x00
+ .key = ILLEGAL_REQUEST, .asc = 0x53, .ascq = 0x02
};
/* Command aborted, I/O process terminated */
@@ -1094,6 +1253,11 @@ const struct SCSISense sense_code_LUN_FAILURE = {
.key = ABORTED_COMMAND, .asc = 0x3e, .ascq = 0x01
};
+/* Unit attention, Capacity data has changed */
+const struct SCSISense sense_code_CAPACITY_CHANGED = {
+ .key = UNIT_ATTENTION, .asc = 0x2a, .ascq = 0x09
+};
+
/* Unit attention, Power on, reset or bus device reset occurred */
const struct SCSISense sense_code_RESET = {
.key = UNIT_ATTENTION, .asc = 0x29, .ascq = 0x00
@@ -1119,6 +1283,11 @@ const struct SCSISense sense_code_DEVICE_INTERNAL_RESET = {
.key = UNIT_ATTENTION, .asc = 0x29, .ascq = 0x04
};
+/* Data Protection, Write Protected */
+const struct SCSISense sense_code_WRITE_PROTECTED = {
+ .key = DATA_PROTECT, .asc = 0x27, .ascq = 0x00
+};
+
/*
* scsi_build_sense
*
@@ -1183,7 +1352,8 @@ static const char *scsi_command_name(uint8_t cmd)
[ REQUEST_SENSE ] = "REQUEST_SENSE",
[ FORMAT_UNIT ] = "FORMAT_UNIT",
[ READ_BLOCK_LIMITS ] = "READ_BLOCK_LIMITS",
- [ REASSIGN_BLOCKS ] = "REASSIGN_BLOCKS",
+ [ REASSIGN_BLOCKS ] = "REASSIGN_BLOCKS/INITIALIZE_ELEMENT_STATUS",
+ /* REASSIGN_BLOCKS and INITIALIZE_ELEMENT_STATUS use the same operation code */
[ READ_6 ] = "READ_6",
[ WRITE_6 ] = "WRITE_6",
[ SET_CAPACITY ] = "SET_CAPACITY",
@@ -1200,14 +1370,16 @@ static const char *scsi_command_name(uint8_t cmd)
[ COPY ] = "COPY",
[ ERASE ] = "ERASE",
[ MODE_SENSE ] = "MODE_SENSE",
- [ START_STOP ] = "START_STOP",
+ [ START_STOP ] = "START_STOP/LOAD_UNLOAD",
+ /* LOAD_UNLOAD and START_STOP use the same operation code */
[ RECEIVE_DIAGNOSTIC ] = "RECEIVE_DIAGNOSTIC",
[ SEND_DIAGNOSTIC ] = "SEND_DIAGNOSTIC",
[ ALLOW_MEDIUM_REMOVAL ] = "ALLOW_MEDIUM_REMOVAL",
[ READ_CAPACITY_10 ] = "READ_CAPACITY_10",
[ READ_10 ] = "READ_10",
[ WRITE_10 ] = "WRITE_10",
- [ SEEK_10 ] = "SEEK_10",
+ [ SEEK_10 ] = "SEEK_10/POSITION_TO_ELEMENT",
+ /* SEEK_10 and POSITION_TO_ELEMENT use the same operation code */
[ WRITE_VERIFY_10 ] = "WRITE_VERIFY_10",
[ VERIFY_10 ] = "VERIFY_10",
[ SEARCH_HIGH ] = "SEARCH_HIGH",
@@ -1218,7 +1390,8 @@ static const char *scsi_command_name(uint8_t cmd)
/* READ_POSITION and PRE_FETCH use the same operation code */
[ SYNCHRONIZE_CACHE ] = "SYNCHRONIZE_CACHE",
[ LOCK_UNLOCK_CACHE ] = "LOCK_UNLOCK_CACHE",
- [ READ_DEFECT_DATA ] = "READ_DEFECT_DATA",
+ [ READ_DEFECT_DATA ] = "READ_DEFECT_DATA/INITIALIZE_ELEMENT_STATUS_WITH_RANGE",
+ /* READ_DEFECT_DATA and INITIALIZE_ELEMENT_STATUS_WITH_RANGE use the same operation code */
[ MEDIUM_SCAN ] = "MEDIUM_SCAN",
[ COMPARE ] = "COMPARE",
[ COPY_VERIFY ] = "COPY_VERIFY",
@@ -1244,7 +1417,7 @@ static const char *scsi_command_name(uint8_t cmd)
[ PERSISTENT_RESERVE_OUT ] = "PERSISTENT_RESERVE_OUT",
[ WRITE_FILEMARKS_16 ] = "WRITE_FILEMARKS_16",
[ EXTENDED_COPY ] = "EXTENDED_COPY",
- [ ATA_PASSTHROUGH ] = "ATA_PASSTHROUGH",
+ [ ATA_PASSTHROUGH_16 ] = "ATA_PASSTHROUGH_16",
[ ACCESS_CONTROL_IN ] = "ACCESS_CONTROL_IN",
[ ACCESS_CONTROL_OUT ] = "ACCESS_CONTROL_OUT",
[ READ_16 ] = "READ_16",
@@ -1261,9 +1434,9 @@ static const char *scsi_command_name(uint8_t cmd)
[ SERVICE_ACTION_IN_16 ] = "SERVICE_ACTION_IN_16",
[ WRITE_LONG_16 ] = "WRITE_LONG_16",
[ REPORT_LUNS ] = "REPORT_LUNS",
- [ BLANK ] = "BLANK",
+ [ ATA_PASSTHROUGH_12 ] = "BLANK/ATA_PASSTHROUGH_12",
[ MOVE_MEDIUM ] = "MOVE_MEDIUM",
- [ LOAD_UNLOAD ] = "LOAD_UNLOAD",
+ [ EXCHANGE_MEDIUM ] = "EXCHANGE_MEDIUM",
[ READ_12 ] = "READ_12",
[ WRITE_12 ] = "WRITE_12",
[ ERASE_12 ] = "ERASE_12/GET_PERFORMANCE",
@@ -1296,6 +1469,7 @@ static const char *scsi_command_name(uint8_t cmd)
SCSIRequest *scsi_req_ref(SCSIRequest *req)
{
+ assert(req->refcount > 0);
req->refcount++;
return req;
}
@@ -1304,6 +1478,10 @@ void scsi_req_unref(SCSIRequest *req)
{
assert(req->refcount > 0);
if (--req->refcount == 0) {
+ SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, req->dev->qdev.parent_bus);
+ if (bus->info->free_request && req->hba_private) {
+ bus->info->free_request(bus, req->hba_private);
+ }
if (req->ops->free_req) {
req->ops->free_req(req);
}
@@ -1389,7 +1567,7 @@ void scsi_req_complete(SCSIRequest *req, int status)
assert(req->status == -1);
req->status = status;
- assert(req->sense_len < sizeof(req->sense));
+ assert(req->sense_len <= sizeof(req->sense));
if (status == GOOD) {
req->sense_len = 0;
}
@@ -1418,6 +1596,7 @@ void scsi_req_complete(SCSIRequest *req, int status)
void scsi_req_cancel(SCSIRequest *req)
{
+ trace_scsi_req_cancel(req->dev->id, req->lun, req->tag);
if (!req->enqueued) {
return;
}
@@ -1448,6 +1627,55 @@ void scsi_req_abort(SCSIRequest *req, int status)
scsi_req_unref(req);
}
+static int scsi_ua_precedence(SCSISense sense)
+{
+ if (sense.key != UNIT_ATTENTION) {
+ return INT_MAX;
+ }
+ if (sense.asc == 0x29 && sense.ascq == 0x04) {
+ /* DEVICE INTERNAL RESET goes with POWER ON OCCURRED */
+ return 1;
+ } else if (sense.asc == 0x3F && sense.ascq == 0x01) {
+ /* MICROCODE HAS BEEN CHANGED goes with SCSI BUS RESET OCCURRED */
+ return 2;
+ } else if (sense.asc == 0x29 && (sense.ascq == 0x05 || sense.ascq == 0x06)) {
+ /* These two go with "all others". */
+ ;
+ } else if (sense.asc == 0x29 && sense.ascq <= 0x07) {
+ /* POWER ON, RESET OR BUS DEVICE RESET OCCURRED = 0
+ * POWER ON OCCURRED = 1
+ * SCSI BUS RESET OCCURRED = 2
+ * BUS DEVICE RESET FUNCTION OCCURRED = 3
+ * I_T NEXUS LOSS OCCURRED = 7
+ */
+ return sense.ascq;
+ } else if (sense.asc == 0x2F && sense.ascq == 0x01) {
+ /* COMMANDS CLEARED BY POWER LOSS NOTIFICATION */
+ return 8;
+ }
+ return (sense.asc << 8) | sense.ascq;
+}
+
+void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense)
+{
+ int prec1, prec2;
+ if (sense.key != UNIT_ATTENTION) {
+ return;
+ }
+ trace_scsi_device_set_ua(sdev->id, sdev->lun, sense.key,
+ sense.asc, sense.ascq);
+
+ /*
+ * Override a pre-existing unit attention condition, except for a more
+ * important reset condition.
+ */
+ prec1 = scsi_ua_precedence(sdev->unit_attention);
+ prec2 = scsi_ua_precedence(sense);
+ if (prec2 < prec1) {
+ sdev->unit_attention = sense;
+ }
+}
+
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
{
SCSIRequest *req;
@@ -1456,7 +1684,8 @@ void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense)
req = QTAILQ_FIRST(&sdev->requests);
scsi_req_cancel(req);
}
- sdev->unit_attention = sense;
+
+ scsi_device_set_ua(sdev, sense);
}
static char *scsibus_get_dev_path(DeviceState *dev)
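
In practice the precedence check means a pending reset-class unit attention is never downgraded by a later, less important one. A minimal usage sketch (SENSE_CODE() and the sense definitions appear earlier in this file):

    scsi_device_set_ua(sdev, SENSE_CODE(RESET));            /* 0x29/0x00 -> precedence 0  */
    scsi_device_set_ua(sdev, SENSE_CODE(CAPACITY_CHANGED)); /* 0x2a/0x09 -> 0x2a09, lower
                                                             * priority, so the pending
                                                             * RESET UA is kept           */
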
@@ -1571,6 +1800,17 @@ static int get_scsi_requests(QEMUFile *f, void *pv, size_t size)
return 0;
}
+static int scsi_qdev_unplug(DeviceState *qdev)
+{
+ SCSIDevice *dev = SCSI_DEVICE(qdev);
+ SCSIBus *bus = DO_UPCAST(SCSIBus, qbus, dev->qdev.parent_bus);
+
+ if (bus->info->hot_unplug) {
+ bus->info->hot_unplug(bus, dev);
+ }
+ return qdev_simple_unplug_cb(qdev);
+}
+
static const VMStateInfo vmstate_info_scsi_requests = {
.name = "scsi-requests",
.get = get_scsi_requests,
@@ -1607,7 +1847,7 @@ static void scsi_device_class_init(ObjectClass *klass, void *data)
DeviceClass *k = DEVICE_CLASS(klass);
k->bus_type = TYPE_SCSI_BUS;
k->init = scsi_qdev_init;
- k->unplug = qdev_simple_unplug_cb;
+ k->unplug = scsi_qdev_unplug;
k->exit = scsi_qdev_exit;
k->props = scsi_props;
}
diff --git a/hw/scsi-defs.h b/hw/scsi-defs.h
index 219c84dfb1..d7a401912b 100644
--- a/hw/scsi-defs.h
+++ b/hw/scsi-defs.h
@@ -29,6 +29,7 @@
#define REQUEST_SENSE 0x03
#define FORMAT_UNIT 0x04
#define READ_BLOCK_LIMITS 0x05
+#define INITIALIZE_ELEMENT_STATUS 0x07
#define REASSIGN_BLOCKS 0x07
#define READ_6 0x08
#define WRITE_6 0x0a
@@ -44,6 +45,7 @@
#define COPY 0x18
#define ERASE 0x19
#define MODE_SENSE 0x1a
+#define LOAD_UNLOAD 0x1b
#define START_STOP 0x1b
#define RECEIVE_DIAGNOSTIC 0x1c
#define SEND_DIAGNOSTIC 0x1d
@@ -53,6 +55,7 @@
#define WRITE_10 0x2a
#define SEEK_10 0x2b
#define LOCATE_10 0x2b
+#define POSITION_TO_ELEMENT 0x2b
#define WRITE_VERIFY_10 0x2e
#define VERIFY_10 0x2f
#define SEARCH_HIGH 0x30
@@ -63,6 +66,7 @@
#define READ_POSITION 0x34
#define SYNCHRONIZE_CACHE 0x35
#define LOCK_UNLOCK_CACHE 0x36
+#define INITIALIZE_ELEMENT_STATUS_WITH_RANGE 0x37
#define READ_DEFECT_DATA 0x37
#define MEDIUM_SCAN 0x38
#define COMPARE 0x39
@@ -82,6 +86,7 @@
#define GET_EVENT_STATUS_NOTIFICATION 0x4a
#define LOG_SELECT 0x4c
#define LOG_SENSE 0x4d
+#define READ_DISC_INFORMATION 0x51
#define RESERVE_TRACK 0x53
#define MODE_SELECT_10 0x55
#define RESERVE_10 0x56
@@ -95,7 +100,7 @@
#define READ_REVERSE_16 0x81
#define ALLOW_OVERWRITE 0x82
#define EXTENDED_COPY 0x83
-#define ATA_PASSTHROUGH 0x85
+#define ATA_PASSTHROUGH_16 0x85
#define ACCESS_CONTROL_IN 0x86
#define ACCESS_CONTROL_OUT 0x87
#define READ_16 0x88
@@ -112,11 +117,11 @@
#define SERVICE_ACTION_IN_16 0x9e
#define WRITE_LONG_16 0x9f
#define REPORT_LUNS 0xa0
-#define BLANK 0xa1
+#define ATA_PASSTHROUGH_12 0xa1
#define MAINTENANCE_IN 0xa3
#define MAINTENANCE_OUT 0xa4
#define MOVE_MEDIUM 0xa5
-#define LOAD_UNLOAD 0xa6
+#define EXCHANGE_MEDIUM 0xa6
#define SET_READ_AHEAD 0xa7
#define READ_12 0xa8
#define WRITE_12 0xaa
@@ -142,6 +147,14 @@
#define SAI_READ_CAPACITY_16 0x10
/*
+ * READ POSITION service action codes
+ */
+#define SHORT_FORM_BLOCK_ID 0x00
+#define SHORT_FORM_VENDOR_SPECIFIC 0x01
+#define LONG_FORM 0x06
+#define EXTENDED_FORM 0x08
+
+/*
* SAM Status codes
*/
diff --git a/hw/scsi-disk.c b/hw/scsi-disk.c
index ae2519458c..c8d5edd86e 100644
--- a/hw/scsi-disk.c
+++ b/hw/scsi-disk.c
@@ -34,6 +34,7 @@ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#include "scsi-defs.h"
#include "sysemu.h"
#include "blockdev.h"
+#include "hw/block-common.h"
#include "dma.h"
#ifdef __linux
@@ -42,6 +43,7 @@ do { printf("scsi-disk: " fmt , ## __VA_ARGS__); } while (0)
#define SCSI_DMA_BUF_SIZE 131072
#define SCSI_MAX_INQUIRY_LEN 256
+#define SCSI_MAX_MODE_LEN 256
typedef struct SCSIDiskState SCSIDiskState;
@@ -67,9 +69,12 @@ struct SCSIDiskState
bool media_changed;
bool media_event;
bool eject_request;
+ uint64_t wwn;
QEMUBH *bh;
char *version;
char *serial;
+ char *vendor;
+ char *product;
bool tray_open;
bool tray_locked;
};
@@ -165,7 +170,7 @@ static void scsi_disk_load_request(QEMUFile *f, SCSIRequest *req)
qemu_iovec_init_external(&r->qiov, &r->iov, 1);
}
-static void scsi_flush_complete(void * opaque, int ret)
+static void scsi_aio_complete(void *opaque, int ret)
{
SCSIDiskReq *r = (SCSIDiskReq *)opaque;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
@@ -218,7 +223,7 @@ static void scsi_write_do_fua(SCSIDiskReq *r)
if (scsi_is_cmd_fua(&r->req.cmd)) {
bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
- r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_flush_complete, r);
+ r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
return;
}
@@ -339,13 +344,6 @@ static void scsi_read_data(SCSIRequest *req)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
bool first;
- if (r->sector_count == (uint32_t)-1) {
- DPRINTF("Read buf_len=%zd\n", r->iov.iov_len);
- r->sector_count = 0;
- r->started = true;
- scsi_req_data(&r->req, r->iov.iov_len);
- return;
- }
DPRINTF("Read sector_count=%d\n", r->sector_count);
if (r->sector_count == 0) {
/* This also clears the sense buffer for REQUEST SENSE. */
@@ -449,7 +447,7 @@ static void scsi_write_complete(void * opaque, int ret)
return;
} else {
scsi_init_iovec(r, SCSI_DMA_BUF_SIZE);
- DPRINTF("Write complete tag=0x%x more=%d\n", r->req.tag, r->qiov.size);
+ DPRINTF("Write complete tag=0x%x more=%zd\n", r->req.tag, r->qiov.size);
scsi_req_data(&r->req, r->qiov.size);
}
@@ -522,6 +520,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
int buflen = 0;
+ int start;
if (req->cmd.buf[1] & 0x1) {
/* Vital product data */
@@ -530,14 +529,14 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
outbuf[buflen++] = s->qdev.type & 0x1f;
outbuf[buflen++] = page_code ; // this page
outbuf[buflen++] = 0x00;
+ outbuf[buflen++] = 0x00;
+ start = buflen;
switch (page_code) {
case 0x00: /* Supported page codes, mandatory */
{
- int pages;
DPRINTF("Inquiry EVPD[Supported pages] "
"buffer size %zd\n", req->cmd.xfer);
- pages = buflen++;
outbuf[buflen++] = 0x00; // list of supported pages (this page)
if (s->serial) {
outbuf[buflen++] = 0x80; // unit serial number
@@ -547,7 +546,6 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
outbuf[buflen++] = 0xb0; // block limits
outbuf[buflen++] = 0xb2; // thin provisioning
}
- outbuf[pages] = buflen - pages - 1; // number of pages
break;
}
case 0x80: /* Device serial number, optional */
@@ -566,7 +564,6 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
DPRINTF("Inquiry EVPD[Serial number] "
"buffer size %zd\n", req->cmd.xfer);
- outbuf[buflen++] = l;
memcpy(outbuf+buflen, s->serial, l);
buflen += l;
break;
@@ -584,14 +581,21 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
DPRINTF("Inquiry EVPD[Device identification] "
"buffer size %zd\n", req->cmd.xfer);
- outbuf[buflen++] = 4 + id_len;
outbuf[buflen++] = 0x2; // ASCII
outbuf[buflen++] = 0; // not officially assigned
outbuf[buflen++] = 0; // reserved
outbuf[buflen++] = id_len; // length of data following
-
memcpy(outbuf+buflen, str, id_len);
buflen += id_len;
+
+ if (s->wwn) {
+ outbuf[buflen++] = 0x1; // Binary
+ outbuf[buflen++] = 0x3; // NAA
+ outbuf[buflen++] = 0; // reserved
+ outbuf[buflen++] = 8;
+ stq_be_p(&outbuf[buflen], s->wwn);
+ buflen += 8;
+ }
break;
}
case 0xb0: /* block limits */
@@ -609,8 +613,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
return -1;
}
/* required VPD size with unmap support */
- outbuf[3] = buflen = 0x3c;
-
+ buflen = 0x40;
memset(outbuf + 4, 0, buflen - 4);
/* optimal transfer length granularity */
@@ -632,7 +635,7 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
}
case 0xb2: /* thin provisioning */
{
- outbuf[3] = buflen = 8;
+ buflen = 8;
outbuf[4] = 0;
outbuf[5] = 0x60; /* write_same 10/16 supported */
outbuf[6] = s->qdev.conf.discard_granularity ? 2 : 1;
@@ -643,6 +646,8 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
return -1;
}
/* done with EVPD */
+ assert(buflen - start <= 255);
+ outbuf[start - 1] = buflen - start;
return buflen;
}
@@ -660,12 +665,10 @@ static int scsi_disk_emulate_inquiry(SCSIRequest *req, uint8_t *outbuf)
outbuf[0] = s->qdev.type & 0x1f;
outbuf[1] = (s->features & (1 << SCSI_DISK_F_REMOVABLE)) ? 0x80 : 0;
- if (s->qdev.type == TYPE_ROM) {
- memcpy(&outbuf[16], "QEMU CD-ROM ", 16);
- } else {
- memcpy(&outbuf[16], "QEMU HARDDISK ", 16);
- }
- memcpy(&outbuf[8], "QEMU ", 8);
+
+ strpadcpy((char *) &outbuf[16], 16, s->product, ' ');
+ strpadcpy((char *) &outbuf[8], 8, s->vendor, ' ');
+
memset(&outbuf[32], 0, 4);
memcpy(&outbuf[32], s->version, MIN(4, strlen(s->version)));
/*
@@ -716,6 +719,39 @@ static inline bool media_is_cd(SCSIDiskState *s)
return nb_sectors <= CD_MAX_SECTORS;
}
+static int scsi_read_disc_information(SCSIDiskState *s, SCSIDiskReq *r,
+ uint8_t *outbuf)
+{
+ uint8_t type = r->req.cmd.buf[1] & 7;
+
+ if (s->qdev.type != TYPE_ROM) {
+ return -1;
+ }
+
+ /* Types 1/2 are only defined for Blu-Ray. */
+ if (type != 0) {
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+ return -1;
+ }
+
+ memset(outbuf, 0, 34);
+ outbuf[1] = 32;
+ outbuf[2] = 0xe; /* last session complete, disc finalized */
+ outbuf[3] = 1; /* first track on disc */
+ outbuf[4] = 1; /* # of sessions */
+ outbuf[5] = 1; /* first track of last session */
+ outbuf[6] = 1; /* last track of last session */
+ outbuf[7] = 0x20; /* unrestricted use */
+ outbuf[8] = 0x00; /* CD-ROM or DVD-ROM */
+ /* 9-10-11: most significant bytes corresponding to bytes 4-5-6 */
+ /* 12-23: not meaningful for CD-ROM or DVD-ROM */
+ /* 24-31: disc bar code */
+ /* 32: disc application code */
+ /* 33: number of OPC tables */
+
+ return 34;
+}
+
static int scsi_read_dvd_structure(SCSIDiskState *s, SCSIDiskReq *r,
uint8_t *outbuf)
{
@@ -925,152 +961,156 @@ static int mode_sense_page(SCSIDiskState *s, int page, uint8_t **p_outbuf,
[MODE_PAGE_CAPABILITIES] = (1 << TYPE_ROM),
};
- BlockDriverState *bdrv = s->qdev.conf.bs;
- int cylinders, heads, secs;
- uint8_t *p = *p_outbuf;
+ uint8_t *p = *p_outbuf + 2;
+ int length;
if ((mode_sense_valid[page] & (1 << s->qdev.type)) == 0) {
return -1;
}
- p[0] = page;
-
/*
* If Changeable Values are requested, a mask denoting those mode parameters
* that are changeable shall be returned. As we currently don't support
* parameter changes via MODE_SELECT all bits are returned set to zero.
* The buffer was already memset to zero by the caller of this function.
+ *
+ * The offsets here are off by two compared to the descriptions in the
+ * SCSI specs, because those include a 2-byte header. This is unfortunate,
+ * but it is done so that offsets are consistent within our implementation
+ * of MODE SENSE and MODE SELECT. MODE SELECT has to deal with both
+ * 2-byte and 4-byte headers.
*/
switch (page) {
case MODE_PAGE_HD_GEOMETRY:
- p[1] = 0x16;
+ length = 0x16;
if (page_control == 1) { /* Changeable Values */
break;
}
/* if a geometry hint is available, use it */
- bdrv_guess_geometry(bdrv, &cylinders, &heads, &secs);
- p[2] = (cylinders >> 16) & 0xff;
- p[3] = (cylinders >> 8) & 0xff;
- p[4] = cylinders & 0xff;
- p[5] = heads & 0xff;
+ p[0] = (s->qdev.conf.cyls >> 16) & 0xff;
+ p[1] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[2] = s->qdev.conf.cyls & 0xff;
+ p[3] = s->qdev.conf.heads & 0xff;
/* Write precomp start cylinder, disabled */
- p[6] = (cylinders >> 16) & 0xff;
- p[7] = (cylinders >> 8) & 0xff;
- p[8] = cylinders & 0xff;
+ p[4] = (s->qdev.conf.cyls >> 16) & 0xff;
+ p[5] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[6] = s->qdev.conf.cyls & 0xff;
/* Reduced current start cylinder, disabled */
- p[9] = (cylinders >> 16) & 0xff;
- p[10] = (cylinders >> 8) & 0xff;
- p[11] = cylinders & 0xff;
+ p[7] = (s->qdev.conf.cyls >> 16) & 0xff;
+ p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[9] = s->qdev.conf.cyls & 0xff;
/* Device step rate [ns], 200ns */
- p[12] = 0;
- p[13] = 200;
+ p[10] = 0;
+ p[11] = 200;
/* Landing zone cylinder */
+ p[12] = 0xff;
+ p[13] = 0xff;
p[14] = 0xff;
- p[15] = 0xff;
- p[16] = 0xff;
/* Medium rotation rate [rpm], 5400 rpm */
- p[20] = (5400 >> 8) & 0xff;
- p[21] = 5400 & 0xff;
+ p[18] = (5400 >> 8) & 0xff;
+ p[19] = 5400 & 0xff;
break;
case MODE_PAGE_FLEXIBLE_DISK_GEOMETRY:
- p[1] = 0x1e;
+ length = 0x1e;
if (page_control == 1) { /* Changeable Values */
break;
}
/* Transfer rate [kbit/s], 5Mbit/s */
- p[2] = 5000 >> 8;
- p[3] = 5000 & 0xff;
+ p[0] = 5000 >> 8;
+ p[1] = 5000 & 0xff;
/* if a geometry hint is available, use it */
- bdrv_guess_geometry(bdrv, &cylinders, &heads, &secs);
- p[4] = heads & 0xff;
- p[5] = secs & 0xff;
- p[6] = s->qdev.blocksize >> 8;
- p[8] = (cylinders >> 8) & 0xff;
- p[9] = cylinders & 0xff;
+ p[2] = s->qdev.conf.heads & 0xff;
+ p[3] = s->qdev.conf.secs & 0xff;
+ p[4] = s->qdev.blocksize >> 8;
+ p[6] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[7] = s->qdev.conf.cyls & 0xff;
/* Write precomp start cylinder, disabled */
- p[10] = (cylinders >> 8) & 0xff;
- p[11] = cylinders & 0xff;
+ p[8] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[9] = s->qdev.conf.cyls & 0xff;
/* Reduced current start cylinder, disabled */
- p[12] = (cylinders >> 8) & 0xff;
- p[13] = cylinders & 0xff;
+ p[10] = (s->qdev.conf.cyls >> 8) & 0xff;
+ p[11] = s->qdev.conf.cyls & 0xff;
/* Device step rate [100us], 100us */
- p[14] = 0;
- p[15] = 1;
+ p[12] = 0;
+ p[13] = 1;
/* Device step pulse width [us], 1us */
- p[16] = 1;
+ p[14] = 1;
/* Device head settle delay [100us], 100us */
- p[17] = 0;
- p[18] = 1;
+ p[15] = 0;
+ p[16] = 1;
/* Motor on delay [0.1s], 0.1s */
- p[19] = 1;
+ p[17] = 1;
/* Motor off delay [0.1s], 0.1s */
- p[20] = 1;
+ p[18] = 1;
/* Medium rotation rate [rpm], 5400 rpm */
- p[28] = (5400 >> 8) & 0xff;
- p[29] = 5400 & 0xff;
+ p[26] = (5400 >> 8) & 0xff;
+ p[27] = 5400 & 0xff;
break;
case MODE_PAGE_CACHING:
- p[0] = 8;
- p[1] = 0x12;
- if (page_control == 1) { /* Changeable Values */
- break;
- }
- if (bdrv_enable_write_cache(s->qdev.conf.bs)) {
- p[2] = 4; /* WCE */
+ length = 0x12;
+ if (page_control == 1 || /* Changeable Values */
+ bdrv_enable_write_cache(s->qdev.conf.bs)) {
+ p[0] = 4; /* WCE */
}
break;
case MODE_PAGE_R_W_ERROR:
- p[1] = 10;
- p[2] = 0x80; /* Automatic Write Reallocation Enabled */
+ length = 10;
+ if (page_control == 1) { /* Changeable Values */
+ break;
+ }
+ p[0] = 0x80; /* Automatic Write Reallocation Enabled */
if (s->qdev.type == TYPE_ROM) {
- p[3] = 0x20; /* Read Retry Count */
+ p[1] = 0x20; /* Read Retry Count */
}
break;
case MODE_PAGE_AUDIO_CTL:
- p[1] = 14;
+ length = 14;
break;
case MODE_PAGE_CAPABILITIES:
- p[1] = 0x14;
+ length = 0x14;
if (page_control == 1) { /* Changeable Values */
break;
}
- p[2] = 0x3b; /* CD-R & CD-RW read */
- p[3] = 0; /* Writing not supported */
- p[4] = 0x7f; /* Audio, composite, digital out,
+ p[0] = 0x3b; /* CD-R & CD-RW read */
+ p[1] = 0; /* Writing not supported */
+ p[2] = 0x7f; /* Audio, composite, digital out,
mode 2 form 1&2, multi session */
- p[5] = 0xff; /* CD DA, DA accurate, RW supported,
+ p[3] = 0xff; /* CD DA, DA accurate, RW supported,
RW corrected, C2 errors, ISRC,
UPC, Bar code */
- p[6] = 0x2d | (s->tray_locked ? 2 : 0);
+ p[4] = 0x2d | (s->tray_locked ? 2 : 0);
/* Locking supported, jumper present, eject, tray */
- p[7] = 0; /* no volume & mute control, no
+ p[5] = 0; /* no volume & mute control, no
changer */
- p[8] = (50 * 176) >> 8; /* 50x read speed */
- p[9] = (50 * 176) & 0xff;
- p[10] = 2 >> 8; /* Two volume levels */
- p[11] = 2 & 0xff;
- p[12] = 2048 >> 8; /* 2M buffer */
- p[13] = 2048 & 0xff;
- p[14] = (16 * 176) >> 8; /* 16x read speed current */
- p[15] = (16 * 176) & 0xff;
- p[18] = (16 * 176) >> 8; /* 16x write speed */
+ p[6] = (50 * 176) >> 8; /* 50x read speed */
+ p[7] = (50 * 176) & 0xff;
+ p[8] = 2 >> 8; /* Two volume levels */
+ p[9] = 2 & 0xff;
+ p[10] = 2048 >> 8; /* 2M buffer */
+ p[11] = 2048 & 0xff;
+ p[12] = (16 * 176) >> 8; /* 16x read speed current */
+ p[13] = (16 * 176) & 0xff;
+ p[16] = (16 * 176) >> 8; /* 16x write speed */
+ p[17] = (16 * 176) & 0xff;
+ p[18] = (16 * 176) >> 8; /* 16x write speed current */
p[19] = (16 * 176) & 0xff;
- p[20] = (16 * 176) >> 8; /* 16x write speed current */
- p[21] = (16 * 176) & 0xff;
break;
default:
return -1;
}
- *p_outbuf += p[1] + 2;
- return p[1] + 2;
+ assert(length < 256);
+ (*p_outbuf)[0] = page;
+ (*p_outbuf)[1] = length;
+ *p_outbuf += length + 2;
+ return length + 2;
}
static int scsi_disk_emulate_mode_sense(SCSIDiskReq *r, uint8_t *outbuf)
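
Even though mode_sense_page() now writes the payload at offset 2 and fills in the page code and length afterwards, the page the guest sees keeps the usual wire layout. For the caching page (0x08) with WCE set, the returned bytes end up as in this sketch:

    #include <stdint.h>

    static const uint8_t caching_page_example[2 + 0x12] = {
        [0] = 0x08, /* page code: MODE_PAGE_CACHING */
        [1] = 0x12, /* page length                  */
        [2] = 0x04, /* WCE (write cache enable)     */
        /* remaining bytes stay zero, as in mode_sense_page() */
    };
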
@@ -1207,8 +1247,14 @@ static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
bool start = req->cmd.buf[4] & 1;
bool loej = req->cmd.buf[4] & 2; /* load on start, eject on !start */
+ int pwrcnd = req->cmd.buf[4] & 0xf0;
+
+ if (pwrcnd) {
+ /* eject/load only happens for power condition == 0 */
+ return 0;
+ }
- if (s->qdev.type == TYPE_ROM && loej) {
+ if ((s->features & (1 << SCSI_DISK_F_REMOVABLE)) && loej) {
if (!start && !s->tray_open && s->tray_locked) {
scsi_check_condition(r,
bdrv_is_inserted(s->qdev.conf.bs)
@@ -1225,13 +1271,239 @@ static int scsi_disk_emulate_start_stop(SCSIDiskReq *r)
return 0;
}
-static int scsi_disk_emulate_command(SCSIDiskReq *r)
+static void scsi_disk_emulate_read_data(SCSIRequest *req)
{
- SCSIRequest *req = &r->req;
+ SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
+ int buflen = r->iov.iov_len;
+
+ if (buflen) {
+ DPRINTF("Read buf_len=%d\n", buflen);
+ r->iov.iov_len = 0;
+ r->started = true;
+ scsi_req_data(&r->req, buflen);
+ return;
+ }
+
+ /* This also clears the sense buffer for REQUEST SENSE. */
+ scsi_req_complete(&r->req, GOOD);
+}
+
+static int scsi_disk_check_mode_select(SCSIDiskState *s, int page,
+ uint8_t *inbuf, int inlen)
+{
+ uint8_t mode_current[SCSI_MAX_MODE_LEN];
+ uint8_t mode_changeable[SCSI_MAX_MODE_LEN];
+ uint8_t *p;
+ int len, expected_len, changeable_len, i;
+
+ /* The input buffer does not include the page header, so it is
+ * off by 2 bytes.
+ */
+ expected_len = inlen + 2;
+ if (expected_len > SCSI_MAX_MODE_LEN) {
+ return -1;
+ }
+
+ p = mode_current;
+ memset(mode_current, 0, inlen + 2);
+ len = mode_sense_page(s, page, &p, 0);
+ if (len < 0 || len != expected_len) {
+ return -1;
+ }
+
+ p = mode_changeable;
+ memset(mode_changeable, 0, inlen + 2);
+ changeable_len = mode_sense_page(s, page, &p, 1);
+ assert(changeable_len == len);
+
+ /* Check that unchangeable bits are the same as what MODE SENSE
+ * would return.
+ */
+ for (i = 2; i < len; i++) {
+ if (((mode_current[i] ^ inbuf[i - 2]) & ~mode_changeable[i]) != 0) {
+ return -1;
+ }
+ }
+ return 0;
+}
+
+static void scsi_disk_apply_mode_select(SCSIDiskState *s, int page, uint8_t *p)
+{
+ switch (page) {
+ case MODE_PAGE_CACHING:
+ bdrv_set_enable_write_cache(s->qdev.conf.bs, (p[0] & 4) != 0);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static int mode_select_pages(SCSIDiskReq *r, uint8_t *p, int len, bool change)
+{
+ SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev);
+
+ while (len > 0) {
+ int page, subpage, page_len;
+
+ /* Parse both possible formats for the mode page headers. */
+ page = p[0] & 0x3f;
+ if (p[0] & 0x40) {
+ if (len < 4) {
+ goto invalid_param_len;
+ }
+ subpage = p[1];
+ page_len = lduw_be_p(&p[2]);
+ p += 4;
+ len -= 4;
+ } else {
+ if (len < 2) {
+ goto invalid_param_len;
+ }
+ subpage = 0;
+ page_len = p[1];
+ p += 2;
+ len -= 2;
+ }
+
+ if (subpage) {
+ goto invalid_param;
+ }
+ if (page_len > len) {
+ goto invalid_param_len;
+ }
+
+ if (!change) {
+ if (scsi_disk_check_mode_select(s, page, p, page_len) < 0) {
+ goto invalid_param;
+ }
+ } else {
+ scsi_disk_apply_mode_select(s, page, p);
+ }
+
+ p += page_len;
+ len -= page_len;
+ }
+ return 0;
+
+invalid_param:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+ return -1;
+
+invalid_param_len:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+ return -1;
+}
+
+static void scsi_disk_emulate_mode_select(SCSIDiskReq *r, uint8_t *inbuf)
+{
+ uint8_t *p = inbuf;
+ int cmd = r->req.cmd.buf[0];
+ int len = r->req.cmd.xfer;
+ int hdr_len = (cmd == MODE_SELECT ? 4 : 8);
+ int bd_len;
+ int pass;
+
+ /* We only support PF=1, SP=0. */
+ if ((r->req.cmd.buf[1] & 0x11) != 0x10) {
+ goto invalid_field;
+ }
+
+ if (len < hdr_len) {
+ goto invalid_param_len;
+ }
+
+ bd_len = (cmd == MODE_SELECT ? p[3] : lduw_be_p(&p[6]));
+ len -= hdr_len;
+ p += hdr_len;
+ if (len < bd_len) {
+ goto invalid_param_len;
+ }
+ if (bd_len != 0 && bd_len != 8) {
+ goto invalid_param;
+ }
+
+ len -= bd_len;
+ p += bd_len;
+
+ /* Ensure no change is made if there is an error! */
+ for (pass = 0; pass < 2; pass++) {
+ if (mode_select_pages(r, p, len, pass == 1) < 0) {
+ assert(pass == 0);
+ return;
+ }
+ }
+ scsi_req_complete(&r->req, GOOD);
+ return;
+
+invalid_param:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM));
+ return;
+
+invalid_param_len:
+ scsi_check_condition(r, SENSE_CODE(INVALID_PARAM_LEN));
+ return;
+
+invalid_field:
+ scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
+ return;
+}
+
+static void scsi_disk_emulate_write_data(SCSIRequest *req)
+{
+ SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
+
+ if (r->iov.iov_len) {
+ int buflen = r->iov.iov_len;
+ DPRINTF("Write buf_len=%d\n", buflen);
+ r->iov.iov_len = 0;
+ scsi_req_data(&r->req, buflen);
+ return;
+ }
+
+ switch (req->cmd.buf[0]) {
+ case MODE_SELECT:
+ case MODE_SELECT_10:
+ /* This also clears the sense buffer for REQUEST SENSE. */
+ scsi_disk_emulate_mode_select(r, r->iov.iov_base);
+ break;
+
+ default:
+ abort();
+ }
+}
+
+static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf)
+{
+ SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
uint64_t nb_sectors;
uint8_t *outbuf;
- int buflen = 0;
+ int buflen;
+
+ switch (req->cmd.buf[0]) {
+ case INQUIRY:
+ case MODE_SENSE:
+ case MODE_SENSE_10:
+ case RESERVE:
+ case RESERVE_10:
+ case RELEASE:
+ case RELEASE_10:
+ case START_STOP:
+ case ALLOW_MEDIUM_REMOVAL:
+ case GET_CONFIGURATION:
+ case GET_EVENT_STATUS_NOTIFICATION:
+ case MECHANISM_STATUS:
+ case REQUEST_SENSE:
+ break;
+
+ default:
+ if (s->tray_open || !bdrv_is_inserted(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+ return 0;
+ }
+ break;
+ }
if (!r->iov.iov_base) {
/*
@@ -1249,6 +1521,7 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r)
r->iov.iov_base = qemu_blockalign(s->qdev.conf.bs, r->buflen);
}
+ buflen = req->cmd.xfer;
outbuf = r->iov.iov_base;
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
@@ -1295,7 +1568,7 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r)
break;
case START_STOP:
if (scsi_disk_emulate_start_stop(r) < 0) {
- return -1;
+ return 0;
}
break;
case ALLOW_MEDIUM_REMOVAL:
@@ -1355,6 +1628,12 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r)
goto illegal_request;
}
break;
+ case READ_DISC_INFORMATION:
+ buflen = scsi_read_disc_information(s, r, outbuf);
+ if (buflen < 0) {
+ goto illegal_request;
+ }
+ break;
case READ_DVD_STRUCTURE:
buflen = scsi_read_dvd_structure(s, r, outbuf);
if (buflen < 0) {
@@ -1405,18 +1684,78 @@ static int scsi_disk_emulate_command(SCSIDiskReq *r)
}
DPRINTF("Unsupported Service Action In\n");
goto illegal_request;
+ case SYNCHRONIZE_CACHE:
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
+ r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_aio_complete, r);
+ return 0;
+ case SEEK_10:
+ DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
+ if (r->req.cmd.lba > s->qdev.max_lba) {
+ goto illegal_lba;
+ }
+ break;
+ case MODE_SELECT:
+ DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
+ break;
+ case MODE_SELECT_10:
+ DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
+ break;
+ case WRITE_SAME_10:
+ nb_sectors = lduw_be_p(&req->cmd.buf[7]);
+ goto write_same;
+ case WRITE_SAME_16:
+ nb_sectors = ldl_be_p(&req->cmd.buf[10]) & 0xffffffffULL;
+ write_same:
+ if (bdrv_is_read_only(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+ return 0;
+ }
+ if (r->req.cmd.lba > s->qdev.max_lba) {
+ goto illegal_lba;
+ }
+
+ /*
+ * We only support WRITE SAME with the unmap bit set for now.
+ */
+ if (!(req->cmd.buf[1] & 0x8)) {
+ goto illegal_request;
+ }
+
+ /* The request is used as the AIO opaque value, so add a ref. */
+ scsi_req_ref(&r->req);
+ r->req.aiocb = bdrv_aio_discard(s->qdev.conf.bs,
+ r->req.cmd.lba * (s->qdev.blocksize / 512),
+ nb_sectors * (s->qdev.blocksize / 512),
+ scsi_aio_complete, r);
+ return 0;
default:
+ DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
- return -1;
+ return 0;
+ }
+ assert(!r->req.aiocb);
+ r->iov.iov_len = MIN(buflen, req->cmd.xfer);
+ if (r->iov.iov_len == 0) {
+ scsi_req_complete(&r->req, GOOD);
+ }
+ if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
+ assert(r->iov.iov_len == req->cmd.xfer);
+ return -r->iov.iov_len;
+ } else {
+ return r->iov.iov_len;
}
- buflen = MIN(buflen, req->cmd.xfer);
- return buflen;
illegal_request:
if (r->req.status == -1) {
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
}
- return -1;
+ return 0;
+
+illegal_lba:
+ scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
+ return 0;
}
/* Execute a scsi command. Returns the length of the data expected by the
@@ -1424,98 +1763,37 @@ illegal_request:
(eg. disk reads), negative for transfers to the device (eg. disk writes),
and zero if the command does not transfer any data. */
-static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf)
+static int32_t scsi_disk_dma_command(SCSIRequest *req, uint8_t *buf)
{
SCSIDiskReq *r = DO_UPCAST(SCSIDiskReq, req, req);
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
int32_t len;
uint8_t command;
- int rc;
command = buf[0];
- DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", req->lun, req->tag, buf[0]);
-
-#ifdef DEBUG_SCSI
- {
- int i;
- for (i = 1; i < r->req.cmd.len; i++) {
- printf(" 0x%02x", buf[i]);
- }
- printf("\n");
- }
-#endif
-
- switch (command) {
- case INQUIRY:
- case MODE_SENSE:
- case MODE_SENSE_10:
- case RESERVE:
- case RESERVE_10:
- case RELEASE:
- case RELEASE_10:
- case START_STOP:
- case ALLOW_MEDIUM_REMOVAL:
- case GET_CONFIGURATION:
- case GET_EVENT_STATUS_NOTIFICATION:
- case MECHANISM_STATUS:
- case REQUEST_SENSE:
- break;
- default:
- if (s->tray_open || !bdrv_is_inserted(s->qdev.conf.bs)) {
- scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
- return 0;
- }
- break;
+ if (s->tray_open || !bdrv_is_inserted(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
+ return 0;
}
switch (command) {
- case TEST_UNIT_READY:
- case INQUIRY:
- case MODE_SENSE:
- case MODE_SENSE_10:
- case RESERVE:
- case RESERVE_10:
- case RELEASE:
- case RELEASE_10:
- case START_STOP:
- case ALLOW_MEDIUM_REMOVAL:
- case READ_CAPACITY_10:
- case READ_TOC:
- case READ_DVD_STRUCTURE:
- case GET_CONFIGURATION:
- case GET_EVENT_STATUS_NOTIFICATION:
- case MECHANISM_STATUS:
- case SERVICE_ACTION_IN_16:
- case REQUEST_SENSE:
- rc = scsi_disk_emulate_command(r);
- if (rc < 0) {
- return 0;
- }
-
- r->iov.iov_len = rc;
- break;
- case SYNCHRONIZE_CACHE:
- /* The request is used as the AIO opaque value, so add a ref. */
- scsi_req_ref(&r->req);
- bdrv_acct_start(s->qdev.conf.bs, &r->acct, 0, BDRV_ACCT_FLUSH);
- r->req.aiocb = bdrv_aio_flush(s->qdev.conf.bs, scsi_flush_complete, r);
- return 0;
case READ_6:
case READ_10:
case READ_12:
case READ_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Read (sector %" PRId64 ", count %d)\n", r->req.cmd.lba, len);
- if (r->req.cmd.lba > s->qdev.max_lba) {
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (r->req.cmd.lba > r->req.cmd.lba + len ||
+ r->req.cmd.lba + len - 1 > s->qdev.max_lba) {
goto illegal_lba;
}
r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
r->sector_count = len * (s->qdev.blocksize / 512);
break;
- case VERIFY_10:
- case VERIFY_12:
- case VERIFY_16:
case WRITE_6:
case WRITE_10:
case WRITE_12:
@@ -1523,90 +1801,45 @@ static int32_t scsi_send_command(SCSIRequest *req, uint8_t *buf)
case WRITE_VERIFY_10:
case WRITE_VERIFY_12:
case WRITE_VERIFY_16:
+ if (bdrv_is_read_only(s->qdev.conf.bs)) {
+ scsi_check_condition(r, SENSE_CODE(WRITE_PROTECTED));
+ return 0;
+ }
+ /* fallthrough */
+ case VERIFY_10:
+ case VERIFY_12:
+ case VERIFY_16:
len = r->req.cmd.xfer / s->qdev.blocksize;
DPRINTF("Write %s(sector %" PRId64 ", count %d)\n",
(command & 0xe) == 0xe ? "And Verify " : "",
r->req.cmd.lba, len);
- if (r->req.cmd.lba > s->qdev.max_lba) {
+ if (r->req.cmd.buf[1] & 0xe0) {
+ goto illegal_request;
+ }
+ if (r->req.cmd.lba > r->req.cmd.lba + len ||
+ r->req.cmd.lba + len - 1 > s->qdev.max_lba) {
goto illegal_lba;
}
r->sector = r->req.cmd.lba * (s->qdev.blocksize / 512);
r->sector_count = len * (s->qdev.blocksize / 512);
break;
- case MODE_SELECT:
- DPRINTF("Mode Select(6) (len %lu)\n", (long)r->req.cmd.xfer);
- /* We don't support mode parameter changes.
- Allow the mode parameter header + block descriptors only. */
- if (r->req.cmd.xfer > 12) {
- goto fail;
- }
- break;
- case MODE_SELECT_10:
- DPRINTF("Mode Select(10) (len %lu)\n", (long)r->req.cmd.xfer);
- /* We don't support mode parameter changes.
- Allow the mode parameter header + block descriptors only. */
- if (r->req.cmd.xfer > 16) {
- goto fail;
- }
- break;
- case SEEK_10:
- DPRINTF("Seek(10) (sector %" PRId64 ")\n", r->req.cmd.lba);
- if (r->req.cmd.lba > s->qdev.max_lba) {
- goto illegal_lba;
- }
- break;
- case WRITE_SAME_10:
- len = lduw_be_p(&buf[7]);
- goto write_same;
- case WRITE_SAME_16:
- len = ldl_be_p(&buf[10]) & 0xffffffffULL;
- write_same:
-
- DPRINTF("WRITE SAME() (sector %" PRId64 ", count %d)\n",
- r->req.cmd.lba, len);
-
- if (r->req.cmd.lba > s->qdev.max_lba) {
- goto illegal_lba;
- }
-
- /*
- * We only support WRITE SAME with the unmap bit set for now.
- */
- if (!(buf[1] & 0x8)) {
- goto fail;
- }
-
- rc = bdrv_discard(s->qdev.conf.bs,
- r->req.cmd.lba * (s->qdev.blocksize / 512),
- len * (s->qdev.blocksize / 512));
- if (rc < 0) {
- /* XXX: better error code ?*/
- goto fail;
- }
-
- break;
default:
- DPRINTF("Unknown SCSI command (%2.2x)\n", buf[0]);
- scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
- return 0;
- fail:
+ abort();
+ illegal_request:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
return 0;
illegal_lba:
scsi_check_condition(r, SENSE_CODE(LBA_OUT_OF_RANGE));
return 0;
}
- if (r->sector_count == 0 && r->iov.iov_len == 0) {
+ if (r->sector_count == 0) {
scsi_req_complete(&r->req, GOOD);
}
- len = r->sector_count * 512 + r->iov.iov_len;
+ assert(r->iov.iov_len == 0);
if (r->req.cmd.mode == SCSI_XFER_TO_DEV) {
- return -len;
+ return -r->sector_count * 512;
} else {
- if (!r->sector_count) {
- r->sector_count = -1;
- }
- return len;
+ return r->sector_count * 512;
}
}
@@ -1633,6 +1866,19 @@ static void scsi_destroy(SCSIDevice *dev)
blockdev_mark_auto_del(s->qdev.conf.bs);
}
+static void scsi_disk_resize_cb(void *opaque)
+{
+ SCSIDiskState *s = opaque;
+
+ /* SPC lists this sense code as available only for
+ * direct-access devices.
+ */
+ if (s->qdev.type == TYPE_DISK) {
+ scsi_device_set_ua(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
+ scsi_device_report_change(&s->qdev, SENSE_CODE(CAPACITY_CHANGED));
+ }
+}
+
static void scsi_cd_change_media_cb(void *opaque, bool load)
{
SCSIDiskState *s = opaque;
@@ -1649,7 +1895,7 @@ static void scsi_cd_change_media_cb(void *opaque, bool load)
*/
s->media_changed = load;
s->tray_open = !load;
- s->qdev.unit_attention = SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM);
+ scsi_device_set_ua(&s->qdev, SENSE_CODE(UNIT_ATTENTION_NO_MEDIUM));
s->media_event = true;
s->eject_request = false;
}
@@ -1674,11 +1920,17 @@ static bool scsi_cd_is_medium_locked(void *opaque)
return ((SCSIDiskState *)opaque)->tray_locked;
}
-static const BlockDevOps scsi_cd_block_ops = {
+static const BlockDevOps scsi_disk_removable_block_ops = {
.change_media_cb = scsi_cd_change_media_cb,
.eject_request_cb = scsi_cd_eject_request_cb,
.is_tray_open = scsi_cd_is_tray_open,
.is_medium_locked = scsi_cd_is_medium_locked,
+
+ .resize_cb = scsi_disk_resize_cb,
+};
+
+static const BlockDevOps scsi_disk_block_ops = {
+ .resize_cb = scsi_disk_resize_cb,
};
static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
@@ -1686,14 +1938,13 @@ static void scsi_disk_unit_attention_reported(SCSIDevice *dev)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
if (s->media_changed) {
s->media_changed = false;
- s->qdev.unit_attention = SENSE_CODE(MEDIUM_CHANGED);
+ scsi_device_set_ua(&s->qdev, SENSE_CODE(MEDIUM_CHANGED));
}
}
static int scsi_initfn(SCSIDevice *dev)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
- DriveInfo *dinfo;
if (!s->qdev.conf.bs) {
error_report("drive property not set");
@@ -1706,17 +1957,18 @@ static int scsi_initfn(SCSIDevice *dev)
return -1;
}
- if (!s->serial) {
- /* try to fall back to value set with legacy -drive serial=... */
- dinfo = drive_get_by_blockdev(s->qdev.conf.bs);
- if (*dinfo->serial) {
- s->serial = g_strdup(dinfo->serial);
- }
+ blkconf_serial(&s->qdev.conf, &s->serial);
+ if (dev->type == TYPE_DISK
+ && blkconf_geometry(&dev->conf, NULL, 65535, 255, 255) < 0) {
+ return -1;
}
if (!s->version) {
s->version = g_strdup(qemu_get_version());
}
+ if (!s->vendor) {
+ s->vendor = g_strdup("QEMU");
+ }
if (bdrv_is_sg(s->qdev.conf.bs)) {
error_report("unwanted /dev/sg*");
@@ -1724,7 +1976,9 @@ static int scsi_initfn(SCSIDevice *dev)
}
if (s->features & (1 << SCSI_DISK_F_REMOVABLE)) {
- bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_cd_block_ops, s);
+ bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_removable_block_ops, s);
+ } else {
+ bdrv_set_dev_ops(s->qdev.conf.bs, &scsi_disk_block_ops, s);
}
bdrv_set_buffer_alignment(s->qdev.conf.bs, s->qdev.blocksize);
@@ -1738,6 +1992,9 @@ static int scsi_hd_initfn(SCSIDevice *dev)
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, dev);
s->qdev.blocksize = s->qdev.conf.logical_block_size;
s->qdev.type = TYPE_DISK;
+ if (!s->product) {
+ s->product = g_strdup("QEMU HARDDISK");
+ }
return scsi_initfn(&s->qdev);
}
@@ -1747,6 +2004,9 @@ static int scsi_cd_initfn(SCSIDevice *dev)
s->qdev.blocksize = 2048;
s->qdev.type = TYPE_ROM;
s->features |= 1 << SCSI_DISK_F_REMOVABLE;
+ if (!s->product) {
+ s->product = g_strdup("QEMU CD-ROM");
+ }
return scsi_initfn(&s->qdev);
}
@@ -1766,10 +2026,19 @@ static int scsi_disk_initfn(SCSIDevice *dev)
}
}
-static const SCSIReqOps scsi_disk_reqops = {
+static const SCSIReqOps scsi_disk_emulate_reqops = {
.size = sizeof(SCSIDiskReq),
.free_req = scsi_free_request,
- .send_command = scsi_send_command,
+ .send_command = scsi_disk_emulate_command,
+ .read_data = scsi_disk_emulate_read_data,
+ .write_data = scsi_disk_emulate_write_data,
+ .get_buf = scsi_get_buf,
+};
+
+static const SCSIReqOps scsi_disk_dma_reqops = {
+ .size = sizeof(SCSIDiskReq),
+ .free_req = scsi_free_request,
+ .send_command = scsi_disk_dma_command,
.read_data = scsi_read_data,
.write_data = scsi_write_data,
.cancel_io = scsi_cancel_io,
@@ -1778,13 +2047,71 @@ static const SCSIReqOps scsi_disk_reqops = {
.save_request = scsi_disk_save_request,
};
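+/* Dispatch table indexed by SCSI opcode; opcodes without an entry fall back to scsi_disk_emulate_reqops in scsi_new_request() below. */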
+static const SCSIReqOps *const scsi_disk_reqops_dispatch[256] = {
+ [TEST_UNIT_READY] = &scsi_disk_emulate_reqops,
+ [INQUIRY] = &scsi_disk_emulate_reqops,
+ [MODE_SENSE] = &scsi_disk_emulate_reqops,
+ [MODE_SENSE_10] = &scsi_disk_emulate_reqops,
+ [START_STOP] = &scsi_disk_emulate_reqops,
+ [ALLOW_MEDIUM_REMOVAL] = &scsi_disk_emulate_reqops,
+ [READ_CAPACITY_10] = &scsi_disk_emulate_reqops,
+ [READ_TOC] = &scsi_disk_emulate_reqops,
+ [READ_DVD_STRUCTURE] = &scsi_disk_emulate_reqops,
+ [READ_DISC_INFORMATION] = &scsi_disk_emulate_reqops,
+ [GET_CONFIGURATION] = &scsi_disk_emulate_reqops,
+ [GET_EVENT_STATUS_NOTIFICATION] = &scsi_disk_emulate_reqops,
+ [MECHANISM_STATUS] = &scsi_disk_emulate_reqops,
+ [SERVICE_ACTION_IN_16] = &scsi_disk_emulate_reqops,
+ [REQUEST_SENSE] = &scsi_disk_emulate_reqops,
+ [SYNCHRONIZE_CACHE] = &scsi_disk_emulate_reqops,
+ [SEEK_10] = &scsi_disk_emulate_reqops,
+ [MODE_SELECT] = &scsi_disk_emulate_reqops,
+ [MODE_SELECT_10] = &scsi_disk_emulate_reqops,
+ [WRITE_SAME_10] = &scsi_disk_emulate_reqops,
+ [WRITE_SAME_16] = &scsi_disk_emulate_reqops,
+
+ [READ_6] = &scsi_disk_dma_reqops,
+ [READ_10] = &scsi_disk_dma_reqops,
+ [READ_12] = &scsi_disk_dma_reqops,
+ [READ_16] = &scsi_disk_dma_reqops,
+ [VERIFY_10] = &scsi_disk_dma_reqops,
+ [VERIFY_12] = &scsi_disk_dma_reqops,
+ [VERIFY_16] = &scsi_disk_dma_reqops,
+ [WRITE_6] = &scsi_disk_dma_reqops,
+ [WRITE_10] = &scsi_disk_dma_reqops,
+ [WRITE_12] = &scsi_disk_dma_reqops,
+ [WRITE_16] = &scsi_disk_dma_reqops,
+ [WRITE_VERIFY_10] = &scsi_disk_dma_reqops,
+ [WRITE_VERIFY_12] = &scsi_disk_dma_reqops,
+ [WRITE_VERIFY_16] = &scsi_disk_dma_reqops,
+};
+
static SCSIRequest *scsi_new_request(SCSIDevice *d, uint32_t tag, uint32_t lun,
uint8_t *buf, void *hba_private)
{
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, d);
SCSIRequest *req;
+ const SCSIReqOps *ops;
+ uint8_t command;
+
+ command = buf[0];
+ ops = scsi_disk_reqops_dispatch[command];
+ if (!ops) {
+ ops = &scsi_disk_emulate_reqops;
+ }
+ req = scsi_req_alloc(ops, &s->qdev, tag, lun, hba_private);
+
+#ifdef DEBUG_SCSI
+ DPRINTF("Command: lun=%d tag=0x%x data=0x%02x", lun, tag, buf[0]);
+ {
+ int i;
+ for (i = 1; i < req->cmd.len; i++) {
+ printf(" 0x%02x", buf[i]);
+ }
+ printf("\n");
+ }
+#endif
- req = scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun, hba_private);
return req;
}
@@ -1898,15 +2225,14 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
* unreliable, too. It is even possible that reads deliver random data
* from the host page cache (this is probably a Linux bug).
*
- * We might use scsi_disk_reqops as long as no writing commands are
+ * We might use scsi_disk_dma_reqops as long as no writing commands are
* seen, but performance usually isn't paramount on optical media. So,
* just make scsi-block operate the same as scsi-generic for them.
*/
- if (s->qdev.type == TYPE_ROM) {
- break;
- }
- return scsi_req_alloc(&scsi_disk_reqops, &s->qdev, tag, lun,
- hba_private);
+ if (s->qdev.type != TYPE_ROM) {
+ return scsi_req_alloc(&scsi_disk_dma_reqops, &s->qdev, tag, lun,
+ hba_private);
+ }
}
return scsi_req_alloc(&scsi_generic_req_ops, &s->qdev, tag, lun,
@@ -1914,10 +2240,12 @@ static SCSIRequest *scsi_block_new_request(SCSIDevice *d, uint32_t tag,
}
#endif
-#define DEFINE_SCSI_DISK_PROPERTIES() \
- DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
- DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
- DEFINE_PROP_STRING("serial", SCSIDiskState, serial)
+#define DEFINE_SCSI_DISK_PROPERTIES() \
+ DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \
+ DEFINE_PROP_STRING("ver", SCSIDiskState, version), \
+ DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \
+ DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \
+ DEFINE_PROP_STRING("product", SCSIDiskState, product)
static Property scsi_hd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
@@ -1925,6 +2253,8 @@ static Property scsi_hd_properties[] = {
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
SCSI_DISK_F_DPOFUA, false),
+ DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0),
+ DEFINE_BLOCK_CHS_PROPERTIES(SCSIDiskState, qdev.conf),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1969,6 +2299,7 @@ static TypeInfo scsi_hd_info = {
static Property scsi_cd_properties[] = {
DEFINE_SCSI_DISK_PROPERTIES(),
+ DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1997,7 +2328,7 @@ static TypeInfo scsi_cd_info = {
#ifdef __linux__
static Property scsi_block_properties[] = {
- DEFINE_SCSI_DISK_PROPERTIES(),
+ DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.bs),
DEFINE_PROP_END_OF_LIST(),
};
@@ -2030,6 +2361,7 @@ static Property scsi_disk_properties[] = {
SCSI_DISK_F_REMOVABLE, false),
DEFINE_PROP_BIT("dpofua", SCSIDiskState, features,
SCSI_DISK_F_DPOFUA, false),
+ DEFINE_PROP_HEX64("wwn", SCSIDiskState, wwn, 0),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/scsi-generic.c b/hw/scsi-generic.c
index d856d23b3b..8d5106061e 100644
--- a/hw/scsi-generic.c
+++ b/hw/scsi-generic.c
@@ -400,12 +400,6 @@ static int scsi_generic_initfn(SCSIDevice *s)
return -1;
}
- /* check we are really using a /dev/sg* file */
- if (!bdrv_is_sg(s->conf.bs)) {
- error_report("not /dev/sg*");
- return -1;
- }
-
if (bdrv_get_on_error(s->conf.bs, 0) != BLOCK_ERR_STOP_ENOSPC) {
error_report("Device doesn't support drive option werror");
return -1;
@@ -416,8 +410,11 @@ static int scsi_generic_initfn(SCSIDevice *s)
}
/* check we are using a driver managing SG_IO (version 3 and after) */
- if (bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version) < 0 ||
- sg_version < 30000) {
+ if (bdrv_ioctl(s->conf.bs, SG_GET_VERSION_NUM, &sg_version) < 0) {
+ error_report("scsi generic interface not supported");
+ return -1;
+ }
+ if (sg_version < 30000) {
error_report("scsi generic interface too old");
return -1;
}
diff --git a/hw/scsi.h b/hw/scsi.h
index 76f06d41de..1aeee4659c 100644
--- a/hw/scsi.h
+++ b/hw/scsi.h
@@ -3,6 +3,7 @@
#include "qdev.h"
#include "block.h"
+#include "hw/block-common.h"
#include "sysemu.h"
#define MAX_SCSI_DEVS 255
@@ -130,10 +131,14 @@ struct SCSIBusInfo {
void (*transfer_data)(SCSIRequest *req, uint32_t arg);
void (*complete)(SCSIRequest *req, uint32_t arg, size_t resid);
void (*cancel)(SCSIRequest *req);
+ void (*hotplug)(SCSIBus *bus, SCSIDevice *dev);
+ void (*hot_unplug)(SCSIBus *bus, SCSIDevice *dev);
+ void (*change)(SCSIBus *bus, SCSIDevice *dev, SCSISense sense);
QEMUSGList *(*get_sg_list)(SCSIRequest *req);
void (*save_request)(QEMUFile *f, SCSIRequest *req);
void *(*load_request)(QEMUFile *f, SCSIRequest *req);
+ void (*free_request)(SCSIBus *bus, void *priv);
};
#define TYPE_SCSI_BUS "SCSI"
@@ -178,6 +183,10 @@ extern const struct SCSISense sense_code_INVALID_OPCODE;
extern const struct SCSISense sense_code_LBA_OUT_OF_RANGE;
/* Illegal request, Invalid field in CDB */
extern const struct SCSISense sense_code_INVALID_FIELD;
+/* Illegal request, Invalid field in parameter list */
+extern const struct SCSISense sense_code_INVALID_PARAM;
+/* Illegal request, Parameter list length error */
+extern const struct SCSISense sense_code_INVALID_PARAM_LEN;
/* Illegal request, LUN not supported */
extern const struct SCSISense sense_code_LUN_NOT_SUPPORTED;
/* Illegal request, Saving parameters not supported */
@@ -192,6 +201,8 @@ extern const struct SCSISense sense_code_IO_ERROR;
extern const struct SCSISense sense_code_I_T_NEXUS_LOSS;
/* Command aborted, Logical Unit failure */
extern const struct SCSISense sense_code_LUN_FAILURE;
+/* LUN not ready, Capacity data has changed */
+extern const struct SCSISense sense_code_CAPACITY_CHANGED;
/* LUN not ready, Medium not present */
extern const struct SCSISense sense_code_UNIT_ATTENTION_NO_MEDIUM;
/* Unit attention, Power on, reset or bus device reset occurred */
@@ -202,6 +213,8 @@ extern const struct SCSISense sense_code_MEDIUM_CHANGED;
extern const struct SCSISense sense_code_REPORTED_LUNS_CHANGED;
/* Unit attention, Device internal reset */
extern const struct SCSISense sense_code_DEVICE_INTERNAL_RESET;
+/* Data Protection, Write Protected */
+extern const struct SCSISense sense_code_WRITE_PROTECTED;
#define SENSE_CODE(x) sense_code_ ## x
@@ -229,6 +242,8 @@ void scsi_req_abort(SCSIRequest *req, int status);
void scsi_req_cancel(SCSIRequest *req);
void scsi_req_retry(SCSIRequest *req);
void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense);
+void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense);
+void scsi_device_report_change(SCSIDevice *dev, SCSISense sense);
int scsi_device_get_sense(SCSIDevice *dev, uint8_t *buf, int len, bool fixed);
SCSIDevice *scsi_device_find(SCSIBus *bus, int channel, int target, int lun);
diff --git a/hw/sh_serial.c b/hw/sh_serial.c
index 43b0eb1c1d..1d1883dd20 100644
--- a/hw/sh_serial.c
+++ b/hw/sh_serial.c
@@ -186,7 +186,8 @@ static void sh_serial_write(void *opaque, target_phys_addr_t offs,
}
}
- fprintf(stderr, "sh_serial: unsupported write to 0x%02x\n", offs);
+ fprintf(stderr, "sh_serial: unsupported write to 0x%02"
+ TARGET_PRIxPHYS "\n", offs);
abort();
}
@@ -287,7 +288,8 @@ static uint64_t sh_serial_read(void *opaque, target_phys_addr_t offs,
#endif
if (ret & ~((1 << 16) - 1)) {
- fprintf(stderr, "sh_serial: unsupported read from 0x%02x\n", offs);
+ fprintf(stderr, "sh_serial: unsupported read from 0x%02"
+ TARGET_PRIxPHYS "\n", offs);
abort();
}
diff --git a/hw/smc91c111.c b/hw/smc91c111.c
index 1a5213fa56..d6ef302c6d 100644
--- a/hw/smc91c111.c
+++ b/hw/smc91c111.c
@@ -628,7 +628,7 @@ static uint32_t smc91c111_readl(void *opaque, target_phys_addr_t offset)
return val;
}
-static int smc91c111_can_receive(VLANClientState *nc)
+static int smc91c111_can_receive(NetClientState *nc)
{
smc91c111_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -639,7 +639,7 @@ static int smc91c111_can_receive(VLANClientState *nc)
return 1;
}
-static ssize_t smc91c111_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t smc91c111_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
smc91c111_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
int status;
@@ -728,7 +728,7 @@ static const MemoryRegionOps smc91c111_mem_ops = {
.endianness = DEVICE_NATIVE_ENDIAN,
};
-static void smc91c111_cleanup(VLANClientState *nc)
+static void smc91c111_cleanup(NetClientState *nc)
{
smc91c111_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -736,7 +736,7 @@ static void smc91c111_cleanup(VLANClientState *nc)
}
static NetClientInfo net_smc91c111_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = smc91c111_can_receive,
.receive = smc91c111_receive,
diff --git a/hw/spapr.c b/hw/spapr.c
index 09a23ff092..81c9343ca5 100644
--- a/hw/spapr.c
+++ b/hw/spapr.c
@@ -674,6 +674,9 @@ static void ppc_spapr_init(ram_addr_t ram_size,
spapr->icp = xics_system_init(XICS_IRQS);
spapr->next_irq = 16;
+ /* Set up IOMMU */
+ spapr_iommu_init();
+
/* Set up VIO bus */
spapr->vio_bus = spapr_vio_bus_init();
diff --git a/hw/spapr.h b/hw/spapr.h
index c75172e0c0..9153f29a60 100644
--- a/hw/spapr.h
+++ b/hw/spapr.h
@@ -1,6 +1,7 @@
#if !defined(__HW_SPAPR_H__)
#define __HW_SPAPR_H__
+#include "dma.h"
#include "hw/xics.h"
struct VIOsPAPRBus;
@@ -320,4 +321,21 @@ target_ulong spapr_rtas_call(sPAPREnvironment *spapr,
int spapr_rtas_device_tree_setup(void *fdt, target_phys_addr_t rtas_addr,
target_phys_addr_t rtas_size);
+#define SPAPR_TCE_PAGE_SHIFT 12
+#define SPAPR_TCE_PAGE_SIZE (1ULL << SPAPR_TCE_PAGE_SHIFT)
+#define SPAPR_TCE_PAGE_MASK (SPAPR_TCE_PAGE_SIZE - 1)
+
+typedef struct sPAPRTCE {
+ uint64_t tce;
+} sPAPRTCE;
+
+#define SPAPR_VIO_BASE_LIOBN 0x00000000
+#define SPAPR_PCI_BASE_LIOBN 0x80000000
+
+void spapr_iommu_init(void);
+DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size);
+void spapr_tce_free(DMAContext *dma);
+int spapr_dma_dt(void *fdt, int node_off, const char *propname,
+ DMAContext *dma);
+
#endif /* !defined (__HW_SPAPR_H__) */
diff --git a/hw/spapr_iommu.c b/hw/spapr_iommu.c
new file mode 100644
index 0000000000..388ffa4b22
--- /dev/null
+++ b/hw/spapr_iommu.c
@@ -0,0 +1,246 @@
+/*
+ * QEMU sPAPR IOMMU (TCE) code
+ *
+ * Copyright (c) 2010 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+#include "hw.h"
+#include "kvm.h"
+#include "qdev.h"
+#include "kvm_ppc.h"
+#include "dma.h"
+
+#include "hw/spapr.h"
+
+#include <libfdt.h>
+
+/* #define DEBUG_TCE */
+
+enum sPAPRTCEAccess {
+ SPAPR_TCE_FAULT = 0,
+ SPAPR_TCE_RO = 1,
+ SPAPR_TCE_WO = 2,
+ SPAPR_TCE_RW = 3,
+};
+
+typedef struct sPAPRTCETable sPAPRTCETable;
+
+struct sPAPRTCETable {
+ DMAContext dma;
+ uint32_t liobn;
+ uint32_t window_size;
+ sPAPRTCE *table;
+ int fd;
+ QLIST_ENTRY(sPAPRTCETable) list;
+};
+
+
+QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
+
+static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
+{
+ sPAPRTCETable *tcet;
+
+ QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
+ if (tcet->liobn == liobn) {
+ return tcet;
+ }
+ }
+
+ return NULL;
+}
+
+static int spapr_tce_translate(DMAContext *dma,
+ dma_addr_t addr,
+ target_phys_addr_t *paddr,
+ target_phys_addr_t *len,
+ DMADirection dir)
+{
+ sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
+ enum sPAPRTCEAccess access = (dir == DMA_DIRECTION_FROM_DEVICE)
+ ? SPAPR_TCE_WO : SPAPR_TCE_RO;
+ uint64_t tce;
+
+#ifdef DEBUG_TCE
+ fprintf(stderr, "spapr_tce_translate liobn=0x%" PRIx32 " addr=0x"
+ DMA_ADDR_FMT "\n", tcet->liobn, addr);
+#endif
+
+ /* Check if we are in bounds */
+ if (addr >= tcet->window_size) {
+#ifdef DEBUG_TCE
+ fprintf(stderr, "spapr_tce_translate out of bounds\n");
+#endif
+ return -EFAULT;
+ }
+
+ tce = tcet->table[addr >> SPAPR_TCE_PAGE_SHIFT].tce;
+
+ /* Check the TCE permission bits against the requested access */
+ if (!(tce & access)) {
+ return -EPERM;
+ }
+
+ /* How much until end of page? */
+ *len = ((~addr) & SPAPR_TCE_PAGE_MASK) + 1;
+
+ /* Translate */
+ *paddr = (tce & ~SPAPR_TCE_PAGE_MASK) |
+ (addr & SPAPR_TCE_PAGE_MASK);
+
+#ifdef DEBUG_TCE
+ fprintf(stderr, " -> *paddr=0x" TARGET_FMT_plx ", *len=0x"
+ TARGET_FMT_plx "\n", *paddr, *len);
+#endif
+
+ return 0;
+}
+
+DMAContext *spapr_tce_new_dma_context(uint32_t liobn, size_t window_size)
+{
+ sPAPRTCETable *tcet;
+
+ if (!window_size) {
+ return NULL;
+ }
+
+ tcet = g_malloc0(sizeof(*tcet));
+ dma_context_init(&tcet->dma, spapr_tce_translate, NULL, NULL);
+
+ tcet->liobn = liobn;
+ tcet->window_size = window_size;
+
+ if (kvm_enabled()) {
+ tcet->table = kvmppc_create_spapr_tce(liobn,
+ window_size,
+ &tcet->fd);
+ }
+
+ if (!tcet->table) {
+ size_t table_size = (window_size >> SPAPR_TCE_PAGE_SHIFT)
+ * sizeof(sPAPRTCE);
+ tcet->table = g_malloc0(table_size);
+ }
+
+#ifdef DEBUG_TCE
+ fprintf(stderr, "spapr_iommu: New TCE table, liobn=0x%x, context @ %p, "
+ "table @ %p, fd=%d\n", liobn, &tcet->dma, tcet->table, tcet->fd);
+#endif
+
+ QLIST_INSERT_HEAD(&spapr_tce_tables, tcet, list);
+
+ return &tcet->dma;
+}
+
+void spapr_tce_free(DMAContext *dma)
+{
+
+ if (dma) {
+ sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
+
+ QLIST_REMOVE(tcet, list);
+
+ if (!kvm_enabled() ||
+ (kvmppc_remove_spapr_tce(tcet->table, tcet->fd,
+ tcet->window_size) != 0)) {
+ g_free(tcet->table);
+ }
+
+ g_free(tcet);
+ }
+}
+
+static target_ulong put_tce_emu(sPAPRTCETable *tcet, target_ulong ioba,
+ target_ulong tce)
+{
+ sPAPRTCE *tcep;
+
+ if (ioba >= tcet->window_size) {
+ hcall_dprintf("spapr_vio_put_tce on out-of-boards IOBA 0x"
+ TARGET_FMT_lx "\n", ioba);
+ return H_PARAMETER;
+ }
+
+ tcep = tcet->table + (ioba >> SPAPR_TCE_PAGE_SHIFT);
+ tcep->tce = tce;
+
+ return H_SUCCESS;
+}
+
+static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
+ target_ulong opcode, target_ulong *args)
+{
+ target_ulong liobn = args[0];
+ target_ulong ioba = args[1];
+ target_ulong tce = args[2];
+ sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
+
+ if (liobn & 0xFFFFFFFF00000000ULL) {
+ hcall_dprintf("spapr_vio_put_tce on out-of-boundsw LIOBN "
+ TARGET_FMT_lx "\n", liobn);
+ return H_PARAMETER;
+ }
+
+ ioba &= ~(SPAPR_TCE_PAGE_SIZE - 1);
+
+ if (tcet) {
+ return put_tce_emu(tcet, ioba, tce);
+ }
+#ifdef DEBUG_TCE
+ fprintf(stderr, "%s on liobn=" TARGET_FMT_lx /*%s*/
+ " ioba 0x" TARGET_FMT_lx " TCE 0x" TARGET_FMT_lx "\n",
+ __func__, liobn, /*dev->qdev.id, */ioba, tce);
+#endif
+
+ return H_PARAMETER;
+}
+
+void spapr_iommu_init(void)
+{
+ QLIST_INIT(&spapr_tce_tables);
+
+ /* hcall-tce */
+ spapr_register_hypercall(H_PUT_TCE, h_put_tce);
+}
+
+int spapr_dma_dt(void *fdt, int node_off, const char *propname,
+ DMAContext *dma)
+{
+ if (dma) {
+ sPAPRTCETable *tcet = DO_UPCAST(sPAPRTCETable, dma, dma);
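+ /* Property layout: liobn, then a 2-cell window address (0) and a 2-cell window size, matching the #dma-address-cells/#dma-size-cells set below. */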
+ uint32_t dma_prop[] = {cpu_to_be32(tcet->liobn),
+ 0, 0,
+ 0, cpu_to_be32(tcet->window_size)};
+ int ret;
+
+ ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
+ if (ret < 0) {
+ return ret;
+ }
+
+ ret = fdt_setprop(fdt, node_off, propname, dma_prop,
+ sizeof(dma_prop));
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
diff --git a/hw/spapr_llan.c b/hw/spapr_llan.c
index 8313043652..01e54f3675 100644
--- a/hw/spapr_llan.c
+++ b/hw/spapr_llan.c
@@ -71,7 +71,7 @@ typedef uint64_t vlan_bd_t;
#define VLAN_RXQ_BD_OFF 0
#define VLAN_FILTER_BD_OFF 8
#define VLAN_RX_BDS_OFF 16
-#define VLAN_MAX_BUFS ((SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8)
+#define VLAN_MAX_BUFS ((SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF) / 8)
typedef struct VIOsPAPRVLANDevice {
VIOsPAPRDevice sdev;
@@ -83,19 +83,19 @@ typedef struct VIOsPAPRVLANDevice {
target_ulong rxq_ptr;
} VIOsPAPRVLANDevice;
-static int spapr_vlan_can_receive(VLANClientState *nc)
+static int spapr_vlan_can_receive(NetClientState *nc)
{
VIOsPAPRVLANDevice *dev = DO_UPCAST(NICState, nc, nc)->opaque;
return (dev->isopen && dev->rx_bufs > 0);
}
-static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
+static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
size_t size)
{
VIOsPAPRDevice *sdev = DO_UPCAST(NICState, nc, nc)->opaque;
VIOsPAPRVLANDevice *dev = (VIOsPAPRVLANDevice *)sdev;
- vlan_bd_t rxq_bd = ldq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
+ vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
vlan_bd_t bd;
int buf_ptr = dev->use_buf_ptr;
uint64_t handle;
@@ -114,11 +114,11 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
do {
buf_ptr += 8;
- if (buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) {
+ if (buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
buf_ptr = VLAN_RX_BDS_OFF;
}
- bd = ldq_tce(sdev, dev->buf_list + buf_ptr);
+ bd = vio_ldq(sdev, dev->buf_list + buf_ptr);
dprintf("use_buf_ptr=%d bd=0x%016llx\n",
buf_ptr, (unsigned long long)bd);
} while ((!(bd & VLAN_BD_VALID) || (VLAN_BD_LEN(bd) < (size + 8)))
@@ -132,12 +132,12 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
/* Remove the buffer from the pool */
dev->rx_bufs--;
dev->use_buf_ptr = buf_ptr;
- stq_tce(sdev, dev->buf_list + dev->use_buf_ptr, 0);
+ vio_stq(sdev, dev->buf_list + dev->use_buf_ptr, 0);
dprintf("Found buffer: ptr=%d num=%d\n", dev->use_buf_ptr, dev->rx_bufs);
/* Transfer the packet data */
- if (spapr_tce_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
+ if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
return -1;
}
@@ -149,23 +149,23 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
control ^= VLAN_RXQC_TOGGLE;
}
- handle = ldq_tce(sdev, VLAN_BD_ADDR(bd));
- stq_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
- stw_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
- sth_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
- stb_tce(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
+ handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
+ vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
+ vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
+ vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
+ vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);
dprintf("wrote rxq entry (ptr=0x%llx): 0x%016llx 0x%016llx\n",
(unsigned long long)dev->rxq_ptr,
- (unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) +
+ (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr),
- (unsigned long long)ldq_tce(sdev, VLAN_BD_ADDR(rxq_bd) +
+ (unsigned long long)vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
dev->rxq_ptr + 8));
dev->rxq_ptr += 16;
if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
dev->rxq_ptr = 0;
- stq_tce(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
+ vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
}
if (sdev->signal_state & 1) {
@@ -176,7 +176,7 @@ static ssize_t spapr_vlan_receive(VLANClientState *nc, const uint8_t *buf,
}
static NetClientInfo net_spapr_vlan_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = spapr_vlan_can_receive,
.receive = spapr_vlan_receive,
@@ -254,8 +254,10 @@ static int check_bd(VIOsPAPRVLANDevice *dev, vlan_bd_t bd,
return -1;
}
- if (spapr_vio_check_tces(&dev->sdev, VLAN_BD_ADDR(bd),
- VLAN_BD_LEN(bd), SPAPR_TCE_RW) != 0) {
+ if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
+ VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
+ || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
+ VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
return -1;
}
@@ -285,14 +287,14 @@ static target_ulong h_register_logical_lan(CPUPPCState *env,
return H_RESOURCE;
}
- if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_VIO_TCE_PAGE_SIZE),
- SPAPR_VIO_TCE_PAGE_SIZE) < 0) {
+ if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
+ SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
return H_PARAMETER;
}
- filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_VIO_TCE_PAGE_SIZE);
- if (check_bd(dev, filter_list_bd, SPAPR_VIO_TCE_PAGE_SIZE) < 0) {
+ filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
+ if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
return H_PARAMETER;
}
@@ -309,17 +311,17 @@ static target_ulong h_register_logical_lan(CPUPPCState *env,
rec_queue &= ~VLAN_BD_TOGGLE;
/* Initialize the buffer list */
- stq_tce(sdev, buf_list, rec_queue);
- stq_tce(sdev, buf_list + 8, filter_list_bd);
- spapr_tce_dma_zero(sdev, buf_list + VLAN_RX_BDS_OFF,
- SPAPR_VIO_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
+ vio_stq(sdev, buf_list, rec_queue);
+ vio_stq(sdev, buf_list + 8, filter_list_bd);
+ spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
+ SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
dev->rx_bufs = 0;
dev->rxq_ptr = 0;
/* Initialize the receive queue */
- spapr_tce_dma_zero(sdev, VLAN_BD_ADDR(rec_queue), VLAN_BD_LEN(rec_queue));
+ spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));
dev->isopen = 1;
return H_SUCCESS;
@@ -378,14 +380,14 @@ static target_ulong h_add_logical_lan_buffer(CPUPPCState *env,
do {
dev->add_buf_ptr += 8;
- if (dev->add_buf_ptr >= SPAPR_VIO_TCE_PAGE_SIZE) {
+ if (dev->add_buf_ptr >= SPAPR_TCE_PAGE_SIZE) {
dev->add_buf_ptr = VLAN_RX_BDS_OFF;
}
- bd = ldq_tce(sdev, dev->buf_list + dev->add_buf_ptr);
+ bd = vio_ldq(sdev, dev->buf_list + dev->add_buf_ptr);
} while (bd & VLAN_BD_VALID);
- stq_tce(sdev, dev->buf_list + dev->add_buf_ptr, buf);
+ vio_stq(sdev, dev->buf_list + dev->add_buf_ptr, buf);
dev->rx_bufs++;
@@ -451,7 +453,7 @@ static target_ulong h_send_logical_lan(CPUPPCState *env, sPAPREnvironment *spapr
lbuf = alloca(total_len);
p = lbuf;
for (i = 0; i < nbufs; i++) {
- ret = spapr_tce_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
+ ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
p, VLAN_BD_LEN(bufs[i]));
if (ret < 0) {
return ret;
@@ -479,7 +481,7 @@ static target_ulong h_multicast_ctrl(CPUPPCState *env, sPAPREnvironment *spapr,
}
static Property spapr_vlan_properties[] = {
- DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev, 0x10000000),
+ DEFINE_SPAPR_PROPERTIES(VIOsPAPRVLANDevice, sdev),
DEFINE_NIC_PROPERTIES(VIOsPAPRVLANDevice, nicconf),
DEFINE_PROP_END_OF_LIST(),
};
@@ -497,6 +499,7 @@ static void spapr_vlan_class_init(ObjectClass *klass, void *data)
k->dt_compatible = "IBM,l-lan";
k->signal_mask = 0x1;
dc->props = spapr_vlan_properties;
+ k->rtce_window_size = 0x10000000;
}
static TypeInfo spapr_vlan_info = {
diff --git a/hw/spapr_pci.c b/hw/spapr_pci.c
index 97d417a997..b2e4f785ea 100644
--- a/hw/spapr_pci.c
+++ b/hw/spapr_pci.c
@@ -266,12 +266,21 @@ static const MemoryRegionOps spapr_io_ops = {
/*
* PHB PCI device
*/
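+/* All device functions behind this PHB share the PHB's single TCE-backed DMA context. */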
+static DMAContext *spapr_pci_dma_context_fn(PCIBus *bus, void *opaque,
+ int devfn)
+{
+ sPAPRPHBState *phb = opaque;
+
+ return phb->dma;
+}
+
static int spapr_phb_init(SysBusDevice *s)
{
sPAPRPHBState *phb = FROM_SYSBUS(sPAPRPHBState, s);
char *namebuf;
int i;
PCIBus *bus;
+ uint32_t liobn;
phb->dtbusname = g_strdup_printf("pci@%" PRIx64, phb->buid);
namebuf = alloca(strlen(phb->dtbusname) + 32);
@@ -312,6 +321,10 @@ static int spapr_phb_init(SysBusDevice *s)
PCI_DEVFN(0, 0), PCI_NUM_PINS);
phb->host_state.bus = bus;
+ liobn = SPAPR_PCI_BASE_LIOBN | (pci_find_domain(bus) << 16);
+ phb->dma = spapr_tce_new_dma_context(liobn, 0x40000000);
+ pci_setup_iommu(bus, spapr_pci_dma_context_fn, phb);
+
QLIST_INSERT_HEAD(&spapr->phbs, phb, list);
/* Initialize the LSI table */
@@ -405,7 +418,7 @@ int spapr_populate_pci_devices(sPAPRPHBState *phb,
uint64_t child;
uint64_t parent;
uint64_t size;
- } __attribute__((packed)) ranges[] = {
+ } QEMU_PACKED ranges[] = {
{
cpu_to_be32(b_ss(1)), cpu_to_be64(0),
cpu_to_be64(phb->io_win_addr),
@@ -472,6 +485,8 @@ int spapr_populate_pci_devices(sPAPRPHBState *phb,
_FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
sizeof(interrupt_map)));
+ spapr_dma_dt(fdt, bus_off, "ibm,dma-window", phb->dma);
+
return 0;
}
diff --git a/hw/spapr_pci.h b/hw/spapr_pci.h
index f54c2e8108..d9e46e22e3 100644
--- a/hw/spapr_pci.h
+++ b/hw/spapr_pci.h
@@ -38,6 +38,7 @@ typedef struct sPAPRPHBState {
MemoryRegion memspace, iospace;
target_phys_addr_t mem_win_addr, mem_win_size, io_win_addr, io_win_size;
MemoryRegion memwindow, iowindow;
+ DMAContext *dma;
struct {
uint32_t dt_irq;
diff --git a/hw/spapr_vio.c b/hw/spapr_vio.c
index c8271c626c..05b55032a9 100644
--- a/hw/spapr_vio.c
+++ b/hw/spapr_vio.c
@@ -39,7 +39,6 @@
#endif /* CONFIG_FDT */
/* #define DEBUG_SPAPR */
-/* #define DEBUG_TCE */
#ifdef DEBUG_SPAPR
#define dprintf(fmt, ...) \
@@ -143,26 +142,9 @@ static int vio_make_devnode(VIOsPAPRDevice *dev,
}
}
- if (dev->rtce_window_size) {
- uint32_t dma_prop[] = {cpu_to_be32(dev->reg),
- 0, 0,
- 0, cpu_to_be32(dev->rtce_window_size)};
-
- ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-address-cells", 2);
- if (ret < 0) {
- return ret;
- }
-
- ret = fdt_setprop_cell(fdt, node_off, "ibm,#dma-size-cells", 2);
- if (ret < 0) {
- return ret;
- }
-
- ret = fdt_setprop(fdt, node_off, "ibm,my-dma-window", dma_prop,
- sizeof(dma_prop));
- if (ret < 0) {
- return ret;
- }
+ ret = spapr_dma_dt(fdt, node_off, "ibm,my-dma-window", dev->dma);
+ if (ret < 0) {
+ return ret;
}
if (pc->devnode) {
@@ -177,232 +159,6 @@ static int vio_make_devnode(VIOsPAPRDevice *dev,
#endif /* CONFIG_FDT */
/*
- * RTCE handling
- */
-
-static void rtce_init(VIOsPAPRDevice *dev)
-{
- size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT)
- * sizeof(VIOsPAPR_RTCE);
-
- if (size) {
- dev->rtce_table = kvmppc_create_spapr_tce(dev->reg,
- dev->rtce_window_size,
- &dev->kvmtce_fd);
-
- if (!dev->rtce_table) {
- dev->rtce_table = g_malloc0(size);
- }
- }
-}
-
-static target_ulong h_put_tce(CPUPPCState *env, sPAPREnvironment *spapr,
- target_ulong opcode, target_ulong *args)
-{
- target_ulong liobn = args[0];
- target_ulong ioba = args[1];
- target_ulong tce = args[2];
- VIOsPAPRDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, liobn);
- VIOsPAPR_RTCE *rtce;
-
- if (!dev) {
- hcall_dprintf("LIOBN 0x" TARGET_FMT_lx " does not exist\n", liobn);
- return H_PARAMETER;
- }
-
- ioba &= ~(SPAPR_VIO_TCE_PAGE_SIZE - 1);
-
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_vio_put_tce on %s ioba 0x" TARGET_FMT_lx
- " TCE 0x" TARGET_FMT_lx "\n", dev->qdev.id, ioba, tce);
-#endif
-
- if (ioba >= dev->rtce_window_size) {
- hcall_dprintf("Out-of-bounds IOBA 0x" TARGET_FMT_lx "\n", ioba);
- return H_PARAMETER;
- }
-
- rtce = dev->rtce_table + (ioba >> SPAPR_VIO_TCE_PAGE_SHIFT);
- rtce->tce = tce;
-
- return H_SUCCESS;
-}
-
-int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba,
- target_ulong len, enum VIOsPAPR_TCEAccess access)
-{
- int start, end, i;
-
- start = ioba >> SPAPR_VIO_TCE_PAGE_SHIFT;
- end = (ioba + len - 1) >> SPAPR_VIO_TCE_PAGE_SHIFT;
-
- for (i = start; i <= end; i++) {
- if ((dev->rtce_table[i].tce & access) != access) {
-#ifdef DEBUG_TCE
- fprintf(stderr, "FAIL on %d\n", i);
-#endif
- return -1;
- }
- }
-
- return 0;
-}
-
-int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr, const void *buf,
- uint32_t size)
-{
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n",
- (unsigned long long)taddr, size);
-#endif
-
- /* Check for bypass */
- if (dev->flags & VIO_PAPR_FLAG_DMA_BYPASS) {
- cpu_physical_memory_write(taddr, buf, size);
- return 0;
- }
-
- while (size) {
- uint64_t tce;
- uint32_t lsize;
- uint64_t txaddr;
-
- /* Check if we are in bound */
- if (taddr >= dev->rtce_window_size) {
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_dma_write out of bounds\n");
-#endif
- return H_DEST_PARM;
- }
- tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
-
- /* How much til end of page ? */
- lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
-
- /* Check TCE */
- if (!(tce & 2)) {
- return H_DEST_PARM;
- }
-
- /* Translate */
- txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
- (taddr & SPAPR_VIO_TCE_PAGE_MASK);
-
-#ifdef DEBUG_TCE
- fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n",
- (unsigned long long)txaddr, lsize);
-#endif
-
- /* Do it */
- cpu_physical_memory_write(txaddr, buf, lsize);
- buf += lsize;
- taddr += lsize;
- size -= lsize;
- }
- return 0;
-}
-
-int spapr_tce_dma_zero(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t size)
-{
- /* FIXME: allocating a temp buffer is nasty, but just stepping
- * through writing zeroes is awkward. This will do for now. */
- uint8_t zeroes[size];
-
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_dma_zero taddr=0x%llx size=0x%x\n",
- (unsigned long long)taddr, size);
-#endif
-
- memset(zeroes, 0, size);
- return spapr_tce_dma_write(dev, taddr, zeroes, size);
-}
-
-void stb_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint8_t val)
-{
- spapr_tce_dma_write(dev, taddr, &val, sizeof(val));
-}
-
-void sth_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint16_t val)
-{
- val = tswap16(val);
- spapr_tce_dma_write(dev, taddr, &val, sizeof(val));
-}
-
-
-void stw_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t val)
-{
- val = tswap32(val);
- spapr_tce_dma_write(dev, taddr, &val, sizeof(val));
-}
-
-void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val)
-{
- val = tswap64(val);
- spapr_tce_dma_write(dev, taddr, &val, sizeof(val));
-}
-
-int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, void *buf,
- uint32_t size)
-{
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n",
- (unsigned long long)taddr, size);
-#endif
-
- /* Check for bypass */
- if (dev->flags & VIO_PAPR_FLAG_DMA_BYPASS) {
- cpu_physical_memory_read(taddr, buf, size);
- return 0;
- }
-
- while (size) {
- uint64_t tce;
- uint32_t lsize;
- uint64_t txaddr;
-
- /* Check if we are in bound */
- if (taddr >= dev->rtce_window_size) {
-#ifdef DEBUG_TCE
- fprintf(stderr, "spapr_tce_dma_read out of bounds\n");
-#endif
- return H_DEST_PARM;
- }
- tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
-
- /* How much til end of page ? */
- lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
-
- /* Check TCE */
- if (!(tce & 1)) {
- return H_DEST_PARM;
- }
-
- /* Translate */
- txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
- (taddr & SPAPR_VIO_TCE_PAGE_MASK);
-
-#ifdef DEBUG_TCE
- fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n",
- (unsigned long long)txaddr, lsize);
-#endif
- /* Do it */
- cpu_physical_memory_read(txaddr, buf, lsize);
- buf += lsize;
- taddr += lsize;
- size -= lsize;
- }
- return H_SUCCESS;
-}
-
-uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr)
-{
- uint64_t val;
-
- spapr_tce_dma_read(dev, taddr, &val, sizeof(val));
- return tswap64(val);
-}
-
-/*
* CRQ handling
*/
static target_ulong h_reg_crq(CPUPPCState *env, sPAPREnvironment *spapr,
@@ -526,7 +282,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
}
/* Maybe do a fast path for KVM just writing to the pages */
- rc = spapr_tce_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
+ rc = spapr_vio_dma_read(dev, dev->crq.qladdr + dev->crq.qnext, &byte, 1);
if (rc) {
return rc;
}
@@ -534,7 +290,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
return 1;
}
- rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
+ rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext + 8,
&crq[8], 8);
if (rc) {
return rc;
@@ -542,7 +298,7 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
kvmppc_eieio();
- rc = spapr_tce_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
+ rc = spapr_vio_dma_write(dev, dev->crq.qladdr + dev->crq.qnext, crq, 8);
if (rc) {
return rc;
}
@@ -560,13 +316,13 @@ int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq)
static void spapr_vio_quiesce_one(VIOsPAPRDevice *dev)
{
- dev->flags &= ~VIO_PAPR_FLAG_DMA_BYPASS;
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
- if (dev->rtce_table) {
- size_t size = (dev->rtce_window_size >> SPAPR_VIO_TCE_PAGE_SHIFT)
- * sizeof(VIOsPAPR_RTCE);
- memset(dev->rtce_table, 0, size);
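+ /* Drop and recreate the DMA context so the TCE table comes back empty. */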
+ if (dev->dma) {
+ spapr_tce_free(dev->dma);
}
+ dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size);
dev->crq.qladdr = 0;
dev->crq.qsize = 0;
@@ -593,9 +349,13 @@ static void rtas_set_tce_bypass(sPAPREnvironment *spapr, uint32_t token,
return;
}
if (enable) {
- dev->flags |= VIO_PAPR_FLAG_DMA_BYPASS;
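+ /* Bypass mode: with a NULL DMAContext, VIO DMA goes straight to guest physical memory. */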
+ spapr_tce_free(dev->dma);
+ dev->dma = NULL;
} else {
- dev->flags &= ~VIO_PAPR_FLAG_DMA_BYPASS;
+ VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
+
+ dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size);
}
rtas_st(rets, 0, 0);
@@ -662,6 +422,7 @@ static int spapr_vio_busdev_init(DeviceState *qdev)
{
VIOsPAPRDevice *dev = (VIOsPAPRDevice *)qdev;
VIOsPAPRDeviceClass *pc = VIO_SPAPR_DEVICE_GET_CLASS(dev);
+ uint32_t liobn;
char *id;
if (dev->reg != -1) {
@@ -703,7 +464,8 @@ static int spapr_vio_busdev_init(DeviceState *qdev)
return -1;
}
- rtce_init(dev);
+ liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
+ dev->dma = spapr_tce_new_dma_context(liobn, pc->rtce_window_size);
return pc->init(dev);
}
@@ -751,9 +513,6 @@ VIOsPAPRBus *spapr_vio_bus_init(void)
/* hcall-vio */
spapr_register_hypercall(H_VIO_SIGNAL, h_vio_signal);
- /* hcall-tce */
- spapr_register_hypercall(H_PUT_TCE, h_put_tce);
-
/* hcall-crq */
spapr_register_hypercall(H_REG_CRQ, h_reg_crq);
spapr_register_hypercall(H_FREE_CRQ, h_free_crq);
diff --git a/hw/spapr_vio.h b/hw/spapr_vio.h
index 2adad77d02..6f9a498ccd 100644
--- a/hw/spapr_vio.h
+++ b/hw/spapr_vio.h
@@ -21,16 +21,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
-#define SPAPR_VIO_TCE_PAGE_SHIFT 12
-#define SPAPR_VIO_TCE_PAGE_SIZE (1ULL << SPAPR_VIO_TCE_PAGE_SHIFT)
-#define SPAPR_VIO_TCE_PAGE_MASK (SPAPR_VIO_TCE_PAGE_SIZE - 1)
-
-enum VIOsPAPR_TCEAccess {
- SPAPR_TCE_FAULT = 0,
- SPAPR_TCE_RO = 1,
- SPAPR_TCE_WO = 2,
- SPAPR_TCE_RW = 3,
-};
+#include "dma.h"
#define TYPE_VIO_SPAPR_DEVICE "vio-spapr-device"
#define VIO_SPAPR_DEVICE(obj) \
@@ -45,10 +36,6 @@ enum VIOsPAPR_TCEAccess {
struct VIOsPAPRDevice;
-typedef struct VIOsPAPR_RTCE {
- uint64_t tce;
-} VIOsPAPR_RTCE;
-
typedef struct VIOsPAPR_CRQ {
uint64_t qladdr;
uint32_t qsize;
@@ -64,6 +51,7 @@ typedef struct VIOsPAPRDeviceClass {
const char *dt_name, *dt_type, *dt_compatible;
target_ulong signal_mask;
+ uint32_t rtce_window_size;
int (*init)(VIOsPAPRDevice *dev);
void (*reset)(VIOsPAPRDevice *dev);
int (*devnode)(VIOsPAPRDevice *dev, void *fdt, int node_off);
@@ -73,20 +61,15 @@ struct VIOsPAPRDevice {
DeviceState qdev;
uint32_t reg;
uint32_t flags;
-#define VIO_PAPR_FLAG_DMA_BYPASS 0x1
qemu_irq qirq;
uint32_t vio_irq_num;
target_ulong signal_state;
- uint32_t rtce_window_size;
- VIOsPAPR_RTCE *rtce_table;
- int kvmtce_fd;
VIOsPAPR_CRQ crq;
+ DMAContext *dma;
};
-#define DEFINE_SPAPR_PROPERTIES(type, field, default_dma_window) \
- DEFINE_PROP_UINT32("reg", type, field.reg, -1), \
- DEFINE_PROP_UINT32("dma-window", type, field.rtce_window_size, \
- default_dma_window)
+#define DEFINE_SPAPR_PROPERTIES(type, field) \
+ DEFINE_PROP_UINT32("reg", type, field.reg, -1)
struct VIOsPAPRBus {
BusState bus;
@@ -102,20 +85,38 @@ extern int spapr_populate_chosen_stdout(void *fdt, VIOsPAPRBus *bus);
extern int spapr_vio_signal(VIOsPAPRDevice *dev, target_ulong mode);
-int spapr_vio_check_tces(VIOsPAPRDevice *dev, target_ulong ioba,
- target_ulong len,
- enum VIOsPAPR_TCEAccess access);
-
-int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr,
- void *buf, uint32_t size);
-int spapr_tce_dma_write(VIOsPAPRDevice *dev, uint64_t taddr,
- const void *buf, uint32_t size);
-int spapr_tce_dma_zero(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t size);
-void stb_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint8_t val);
-void sth_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint16_t val);
-void stw_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint32_t val);
-void stq_tce(VIOsPAPRDevice *dev, uint64_t taddr, uint64_t val);
-uint64_t ldq_tce(VIOsPAPRDevice *dev, uint64_t taddr);
+static inline bool spapr_vio_dma_valid(VIOsPAPRDevice *dev, uint64_t taddr,
+ uint32_t size, DMADirection dir)
+{
+ return dma_memory_valid(dev->dma, taddr, size, dir);
+}
+
+static inline int spapr_vio_dma_read(VIOsPAPRDevice *dev, uint64_t taddr,
+ void *buf, uint32_t size)
+{
+ return (dma_memory_read(dev->dma, taddr, buf, size) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+}
+
+static inline int spapr_vio_dma_write(VIOsPAPRDevice *dev, uint64_t taddr,
+ const void *buf, uint32_t size)
+{
+ return (dma_memory_write(dev->dma, taddr, buf, size) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+}
+
+static inline int spapr_vio_dma_set(VIOsPAPRDevice *dev, uint64_t taddr,
+ uint8_t c, uint32_t size)
+{
+ return (dma_memory_set(dev->dma, taddr, c, size) != 0) ?
+ H_DEST_PARM : H_SUCCESS;
+}
+
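+/* Load/store helpers routed through the device's DMAContext; multi-byte accesses are big-endian. */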
+#define vio_stb(_dev, _addr, _val) (stb_dma((_dev)->dma, (_addr), (_val)))
+#define vio_sth(_dev, _addr, _val) (stw_be_dma((_dev)->dma, (_addr), (_val)))
+#define vio_stl(_dev, _addr, _val) (stl_be_dma((_dev)->dma, (_addr), (_val)))
+#define vio_stq(_dev, _addr, _val) (stq_be_dma((_dev)->dma, (_addr), (_val)))
+#define vio_ldq(_dev, _addr) (ldq_be_dma((_dev)->dma, (_addr)))
int spapr_vio_send_crq(VIOsPAPRDevice *dev, uint8_t *crq);
diff --git a/hw/spapr_vscsi.c b/hw/spapr_vscsi.c
index 2f09616dd5..3cf5844e0f 100644
--- a/hw/spapr_vscsi.c
+++ b/hw/spapr_vscsi.c
@@ -165,7 +165,7 @@ static int vscsi_send_iu(VSCSIState *s, vscsi_req *req,
long rc, rc1;
/* First copy the SRP */
- rc = spapr_tce_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
+ rc = spapr_vio_dma_write(&s->vdev, req->crq.s.IU_data_ptr,
&req->iu, length);
if (rc) {
fprintf(stderr, "vscsi_send_iu: DMA write failure !\n");
@@ -281,9 +281,9 @@ static int vscsi_srp_direct_data(VSCSIState *s, vscsi_req *req,
llen = MIN(len, md->len);
if (llen) {
if (req->writing) { /* writing = to device = reading from memory */
- rc = spapr_tce_dma_read(&s->vdev, md->va, buf, llen);
+ rc = spapr_vio_dma_read(&s->vdev, md->va, buf, llen);
} else {
- rc = spapr_tce_dma_write(&s->vdev, md->va, buf, llen);
+ rc = spapr_vio_dma_write(&s->vdev, md->va, buf, llen);
}
}
md->len -= llen;
@@ -329,10 +329,11 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
md = req->cur_desc = &req->ext_desc;
dprintf("VSCSI: Reading desc from 0x%llx\n",
(unsigned long long)td->va);
- rc = spapr_tce_dma_read(&s->vdev, td->va, md,
+ rc = spapr_vio_dma_read(&s->vdev, td->va, md,
sizeof(struct srp_direct_buf));
if (rc) {
- dprintf("VSCSI: tce_dma_read -> %d reading ext_desc\n", rc);
+ dprintf("VSCSI: spapr_vio_dma_read -> %d reading ext_desc\n",
+ rc);
break;
}
vscsi_swap_desc(md);
@@ -345,12 +346,12 @@ static int vscsi_srp_indirect_data(VSCSIState *s, vscsi_req *req,
/* Perform transfer */
llen = MIN(len, md->len);
if (req->writing) { /* writing = to device = reading from memory */
- rc = spapr_tce_dma_read(&s->vdev, md->va, buf, llen);
+ rc = spapr_vio_dma_read(&s->vdev, md->va, buf, llen);
} else {
- rc = spapr_tce_dma_write(&s->vdev, md->va, buf, llen);
+ rc = spapr_vio_dma_write(&s->vdev, md->va, buf, llen);
}
if (rc) {
- dprintf("VSCSI: tce_dma_r/w(%d) -> %d\n", req->writing, rc);
+ dprintf("VSCSI: spapr_vio_dma_r/w(%d) -> %d\n", req->writing, rc);
break;
}
dprintf("VSCSI: data: %02x %02x %02x %02x...\n",
@@ -728,7 +729,7 @@ static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
sinfo = &req->iu.mad.adapter_info;
#if 0 /* What for ? */
- rc = spapr_tce_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
+ rc = spapr_vio_dma_read(&s->vdev, be64_to_cpu(sinfo->buffer),
&info, be16_to_cpu(sinfo->common.length));
if (rc) {
fprintf(stderr, "vscsi_send_adapter_info: DMA read failure !\n");
@@ -742,7 +743,7 @@ static int vscsi_send_adapter_info(VSCSIState *s, vscsi_req *req)
info.os_type = cpu_to_be32(2);
info.port_max_txu[0] = cpu_to_be32(VSCSI_MAX_SECTORS << 9);
- rc = spapr_tce_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
+ rc = spapr_vio_dma_write(&s->vdev, be64_to_cpu(sinfo->buffer),
&info, be16_to_cpu(sinfo->common.length));
if (rc) {
fprintf(stderr, "vscsi_send_adapter_info: DMA write failure !\n");
@@ -805,7 +806,7 @@ static void vscsi_got_payload(VSCSIState *s, vscsi_crq *crq)
}
/* XXX Handle failure differently ? */
- if (spapr_tce_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu,
+ if (spapr_vio_dma_read(&s->vdev, crq->s.IU_data_ptr, &req->iu,
crq->s.IU_length)) {
fprintf(stderr, "vscsi_got_payload: DMA read failure !\n");
vscsi_put_req(req);
@@ -947,7 +948,7 @@ static int spapr_vscsi_devnode(VIOsPAPRDevice *dev, void *fdt, int node_off)
}
static Property spapr_vscsi_properties[] = {
- DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev, 0x10000000),
+ DEFINE_SPAPR_PROPERTIES(VSCSIState, vdev),
DEFINE_PROP_END_OF_LIST(),
};
@@ -964,6 +965,7 @@ static void spapr_vscsi_class_init(ObjectClass *klass, void *data)
k->dt_compatible = "IBM,v-scsi";
k->signal_mask = 0x00000001;
dc->props = spapr_vscsi_properties;
+ k->rtce_window_size = 0x10000000;
}
static TypeInfo spapr_vscsi_info = {
diff --git a/hw/spapr_vty.c b/hw/spapr_vty.c
index f340b83237..99e52cc6b7 100644
--- a/hw/spapr_vty.c
+++ b/hw/spapr_vty.c
@@ -133,7 +133,7 @@ void spapr_vty_create(VIOsPAPRBus *bus, CharDriverState *chardev)
}
static Property spapr_vty_properties[] = {
- DEFINE_SPAPR_PROPERTIES(VIOsPAPRVTYDevice, sdev, 0),
+ DEFINE_SPAPR_PROPERTIES(VIOsPAPRVTYDevice, sdev),
DEFINE_PROP_CHR("chardev", VIOsPAPRVTYDevice, chardev),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/stellaris_enet.c b/hw/stellaris_enet.c
index fbe99cb4a9..bc97280cca 100644
--- a/hw/stellaris_enet.c
+++ b/hw/stellaris_enet.c
@@ -78,7 +78,7 @@ static void stellaris_enet_update(stellaris_enet_state *s)
}
/* TODO: Implement MAC address filtering. */
-static ssize_t stellaris_enet_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t stellaris_enet_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
stellaris_enet_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
int n;
@@ -120,7 +120,7 @@ static ssize_t stellaris_enet_receive(VLANClientState *nc, const uint8_t *buf, s
return size;
}
-static int stellaris_enet_can_receive(VLANClientState *nc)
+static int stellaris_enet_can_receive(NetClientState *nc)
{
stellaris_enet_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -381,7 +381,7 @@ static int stellaris_enet_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void stellaris_enet_cleanup(VLANClientState *nc)
+static void stellaris_enet_cleanup(NetClientState *nc)
{
stellaris_enet_state *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -393,7 +393,7 @@ static void stellaris_enet_cleanup(VLANClientState *nc)
}
static NetClientInfo net_stellaris_enet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = stellaris_enet_can_receive,
.receive = stellaris_enet_receive,
diff --git a/hw/sun4m.c b/hw/sun4m.c
index a959261209..0f909b5f86 100644
--- a/hw/sun4m.c
+++ b/hw/sun4m.c
@@ -832,6 +832,10 @@ static void cpu_devinit(const char *cpu_model, unsigned int id,
env->prom_addr = prom_addr;
}
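+/* No-op floppy terminal-count handler, used when a board has no FDC. */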
+static void dummy_fdc_tc(void *opaque, int irq, int level)
+{
+}
+
static void sun4m_hw_init(const struct sun4m_hwdef *hwdef, ram_addr_t RAM_size,
const char *boot_device,
const char *kernel_filename,
@@ -942,9 +946,6 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef, ram_addr_t RAM_size,
serial_hds[0], serial_hds[1], ESCC_CLOCK, 1);
cpu_halt = qemu_allocate_irqs(cpu_halt_signal, NULL, 1);
- slavio_misc_init(hwdef->slavio_base, hwdef->aux1_base, hwdef->aux2_base,
- slavio_irq[30], fdc_tc);
-
if (hwdef->apc_base) {
apc_init(hwdef->apc_base, cpu_halt[0]);
}
@@ -955,8 +956,13 @@ static void sun4m_hw_init(const struct sun4m_hwdef *hwdef, ram_addr_t RAM_size,
fd[0] = drive_get(IF_FLOPPY, 0, 0);
sun4m_fdctrl_init(slavio_irq[22], hwdef->fd_base, fd,
&fdc_tc);
+ } else {
+ fdc_tc = *qemu_allocate_irqs(dummy_fdc_tc, NULL, 1);
}
+ slavio_misc_init(hwdef->slavio_base, hwdef->aux1_base, hwdef->aux2_base,
+ slavio_irq[30], fdc_tc);
+
if (drive_get_max_bus(IF_SCSI) > 0) {
fprintf(stderr, "qemu: too many SCSI bus\n");
exit(1);
@@ -1772,16 +1778,18 @@ static void sun4c_hw_init(const struct sun4c_hwdef *hwdef, ram_addr_t RAM_size,
slavio_irq[1], serial_hds[0], serial_hds[1],
ESCC_CLOCK, 1);
- slavio_misc_init(0, hwdef->aux1_base, 0, slavio_irq[1], fdc_tc);
-
if (hwdef->fd_base != (target_phys_addr_t)-1) {
/* there is zero or one floppy drive */
memset(fd, 0, sizeof(fd));
fd[0] = drive_get(IF_FLOPPY, 0, 0);
sun4m_fdctrl_init(slavio_irq[1], hwdef->fd_base, fd,
&fdc_tc);
+ } else {
+ fdc_tc = *qemu_allocate_irqs(dummy_fdc_tc, NULL, 1);
}
+ slavio_misc_init(0, hwdef->aux1_base, 0, slavio_irq[1], fdc_tc);
+
if (drive_get_max_bus(IF_SCSI) > 0) {
fprintf(stderr, "qemu: too many SCSI bus\n");
exit(1);
diff --git a/hw/usb.h b/hw/usb.h
index 2a56fe554f..432ccae57f 100644
--- a/hw/usb.h
+++ b/hw/usb.h
@@ -25,7 +25,6 @@
* THE SOFTWARE.
*/
-#include "block.h"
#include "qdev.h"
#include "qemu-queue.h"
@@ -145,6 +144,8 @@
#define USB_ENDPOINT_XFER_INT 3
#define USB_ENDPOINT_XFER_INVALID 255
+#define USB_INTERFACE_INVALID 255
+
typedef struct USBBus USBBus;
typedef struct USBBusOps USBBusOps;
typedef struct USBPort USBPort;
@@ -345,7 +346,7 @@ void usb_packet_check_state(USBPacket *p, USBPacketState expected);
void usb_packet_setup(USBPacket *p, int pid, USBEndpoint *ep);
void usb_packet_addbuf(USBPacket *p, void *ptr, size_t len);
int usb_packet_map(USBPacket *p, QEMUSGList *sgl);
-void usb_packet_unmap(USBPacket *p);
+void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl);
void usb_packet_copy(USBPacket *p, void *ptr, size_t bytes);
void usb_packet_skip(USBPacket *p, size_t bytes);
void usb_packet_cleanup(USBPacket *p);
@@ -363,6 +364,7 @@ void usb_packet_complete(USBDevice *dev, USBPacket *p);
void usb_cancel_packet(USBPacket * p);
void usb_ep_init(USBDevice *dev);
+void usb_ep_reset(USBDevice *dev);
void usb_ep_dump(USBDevice *dev);
struct USBEndpoint *usb_ep_get(USBDevice *dev, int pid, int ep);
uint8_t usb_ep_get_type(USBDevice *dev, int pid, int ep);
diff --git a/hw/usb/Makefile.objs b/hw/usb/Makefile.objs
index 9c7ddf5cb2..4225136d0f 100644
--- a/hw/usb/Makefile.objs
+++ b/hw/usb/Makefile.objs
@@ -11,3 +11,4 @@ common-obj-y += core.o bus.o desc.o dev-hub.o
common-obj-y += host-$(HOST_USB).o dev-bluetooth.o
common-obj-y += dev-hid.o dev-storage.o dev-wacom.o
common-obj-y += dev-serial.o dev-network.o dev-audio.o
+common-obj-y += dev-uas.o
diff --git a/hw/usb/bus.c b/hw/usb/bus.c
index f87cc5f443..b649360dd3 100644
--- a/hw/usb/bus.c
+++ b/hw/usb/bus.c
@@ -37,10 +37,23 @@ static const TypeInfo usb_bus_info = {
static int next_usb_bus = 0;
static QTAILQ_HEAD(, USBBus) busses = QTAILQ_HEAD_INITIALIZER(busses);
+static int usb_device_post_load(void *opaque, int version_id)
+{
+ USBDevice *dev = opaque;
+
+ if (dev->state == USB_STATE_NOTATTACHED) {
+ dev->attached = 0;
+ } else {
+ dev->attached = 1;
+ }
+ return 0;
+}
+
const VMStateDescription vmstate_usb_device = {
.name = "USBDevice",
.version_id = 1,
.minimum_version_id = 1,
+ .post_load = usb_device_post_load,
.fields = (VMStateField []) {
VMSTATE_UINT8(addr, USBDevice),
VMSTATE_INT32(state, USBDevice),
diff --git a/hw/usb/core.c b/hw/usb/core.c
index 0e02da7601..01a7622837 100644
--- a/hw/usb/core.c
+++ b/hw/usb/core.c
@@ -522,10 +522,10 @@ void usb_packet_copy(USBPacket *p, void *ptr, size_t bytes)
switch (p->pid) {
case USB_TOKEN_SETUP:
case USB_TOKEN_OUT:
- iov_to_buf(p->iov.iov, p->iov.niov, ptr, p->result, bytes);
+ iov_to_buf(p->iov.iov, p->iov.niov, p->result, ptr, bytes);
break;
case USB_TOKEN_IN:
- iov_from_buf(p->iov.iov, p->iov.niov, ptr, p->result, bytes);
+ iov_from_buf(p->iov.iov, p->iov.niov, p->result, ptr, bytes);
break;
default:
fprintf(stderr, "%s: invalid pid: %x\n", __func__, p->pid);
@@ -539,7 +539,7 @@ void usb_packet_skip(USBPacket *p, size_t bytes)
assert(p->result >= 0);
assert(p->result + bytes <= p->iov.size);
if (p->pid == USB_TOKEN_IN) {
- iov_clear(p->iov.iov, p->iov.niov, p->result, bytes);
+ iov_memset(p->iov.iov, p->iov.niov, p->result, 0, bytes);
}
p->result += bytes;
}
@@ -550,7 +550,7 @@ void usb_packet_cleanup(USBPacket *p)
qemu_iovec_destroy(&p->iov);
}
-void usb_ep_init(USBDevice *dev)
+void usb_ep_reset(USBDevice *dev)
{
int ep;
@@ -559,7 +559,6 @@ void usb_ep_init(USBDevice *dev)
dev->ep_ctl.ifnum = 0;
dev->ep_ctl.dev = dev;
dev->ep_ctl.pipeline = false;
- QTAILQ_INIT(&dev->ep_ctl.queue);
for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) {
dev->ep_in[ep].nr = ep + 1;
dev->ep_out[ep].nr = ep + 1;
@@ -567,12 +566,22 @@ void usb_ep_init(USBDevice *dev)
dev->ep_out[ep].pid = USB_TOKEN_OUT;
dev->ep_in[ep].type = USB_ENDPOINT_XFER_INVALID;
dev->ep_out[ep].type = USB_ENDPOINT_XFER_INVALID;
- dev->ep_in[ep].ifnum = 0;
- dev->ep_out[ep].ifnum = 0;
+ dev->ep_in[ep].ifnum = USB_INTERFACE_INVALID;
+ dev->ep_out[ep].ifnum = USB_INTERFACE_INVALID;
dev->ep_in[ep].dev = dev;
dev->ep_out[ep].dev = dev;
dev->ep_in[ep].pipeline = false;
dev->ep_out[ep].pipeline = false;
+ }
+}
+
+void usb_ep_init(USBDevice *dev)
+{
+ int ep;
+
+ usb_ep_reset(dev);
+ QTAILQ_INIT(&dev->ep_ctl.queue);
+ for (ep = 0; ep < USB_MAX_ENDPOINTS; ep++) {
QTAILQ_INIT(&dev->ep_in[ep].queue);
QTAILQ_INIT(&dev->ep_out[ep].queue);
}
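The core.c hunks switch to iov helpers that take the offset within the vector before the flat buffer. A self-contained sketch of that offset-based gather copy, using a hypothetical local helper rather than QEMU's iov_to_buf():

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* copy 'bytes' out of the scattered iovec, starting 'offset' bytes in */
static size_t gather_copy(const struct iovec *iov, int niov,
                          size_t offset, void *buf, size_t bytes)
{
    size_t done = 0;
    for (int i = 0; i < niov && done < bytes; i++) {
        if (offset >= iov[i].iov_len) {
            offset -= iov[i].iov_len;          /* skip whole element */
            continue;
        }
        size_t n = iov[i].iov_len - offset;
        if (n > bytes - done) {
            n = bytes - done;
        }
        memcpy((char *)buf + done, (char *)iov[i].iov_base + offset, n);
        done += n;
        offset = 0;
    }
    return done;
}

int main(void)
{
    char a[] = "hello ", b[] = "world";
    struct iovec iov[2] = {
        { .iov_base = a, .iov_len = 6 },
        { .iov_base = b, .iov_len = 5 },
    };
    char out[16] = { 0 };

    gather_copy(iov, 2, 4, out, 5);   /* copies "o wor" */
    printf("%s\n", out);
    return 0;
}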
diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c
index 5d2f0982c9..c84892c98d 100644
--- a/hw/usb/dev-network.c
+++ b/hw/usb/dev-network.c
@@ -1247,7 +1247,7 @@ static int usb_net_handle_data(USBDevice *dev, USBPacket *p)
return ret;
}
-static ssize_t usbnet_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t usbnet_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
struct rndis_packet_msg_type *msg;
@@ -1285,7 +1285,7 @@ static ssize_t usbnet_receive(VLANClientState *nc, const uint8_t *buf, size_t si
return size;
}
-static int usbnet_can_receive(VLANClientState *nc)
+static int usbnet_can_receive(NetClientState *nc)
{
USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -1296,7 +1296,7 @@ static int usbnet_can_receive(VLANClientState *nc)
return !s->in_len;
}
-static void usbnet_cleanup(VLANClientState *nc)
+static void usbnet_cleanup(NetClientState *nc)
{
USBNetState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -1309,11 +1309,11 @@ static void usb_net_handle_destroy(USBDevice *dev)
/* TODO: remove the nd_table[] entry */
rndis_clear_responsequeue(s);
- qemu_del_vlan_client(&s->nic->nc);
+ qemu_del_net_client(&s->nic->nc);
}
static NetClientInfo net_usbnet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = usbnet_can_receive,
.receive = usbnet_receive,
diff --git a/hw/usb/dev-storage.c b/hw/usb/dev-storage.c
index 251e7de1cd..ff48d91049 100644
--- a/hw/usb/dev-storage.c
+++ b/hw/usb/dev-storage.c
@@ -247,6 +247,9 @@ static void usb_msd_command_complete(SCSIRequest *req, uint32_t status, size_t r
the status read packet. */
usb_msd_send_status(s, p);
s->mode = USB_MSDM_CBW;
+ } else if (s->mode == USB_MSDM_CSW) {
+ usb_msd_send_status(s, p);
+ s->mode = USB_MSDM_CBW;
} else {
if (s->data_len) {
int len = (p->iov.size - p->result);
@@ -383,6 +386,9 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p)
assert(le32_to_cpu(s->csw.residue) == 0);
s->scsi_len = 0;
s->req = scsi_req_new(s->scsi_dev, tag, 0, cbw.cmd, NULL);
+#ifdef DEBUG_MSD
+ scsi_req_print(s->req);
+#endif
scsi_req_enqueue(s->req);
if (s->req && s->req->cmd.xfer != SCSI_XFER_NONE) {
scsi_req_continue(s->req);
@@ -410,7 +416,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p)
}
}
if (p->result < p->iov.size) {
- DPRINTF("Deferring packet %p\n", p);
+ DPRINTF("Deferring packet %p [wait data-out]\n", p);
s->packet = p;
ret = USB_RET_ASYNC;
} else {
@@ -445,6 +451,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p)
if (s->req) {
/* still in flight */
+ DPRINTF("Deferring packet %p [wait status]\n", p);
s->packet = p;
ret = USB_RET_ASYNC;
} else {
@@ -471,7 +478,7 @@ static int usb_msd_handle_data(USBDevice *dev, USBPacket *p)
}
}
if (p->result < p->iov.size) {
- DPRINTF("Deferring packet %p\n", p);
+ DPRINTF("Deferring packet %p [wait data-in]\n", p);
s->packet = p;
ret = USB_RET_ASYNC;
} else {
@@ -532,13 +539,14 @@ static int usb_msd_initfn(USBDevice *dev)
{
MSDState *s = DO_UPCAST(MSDState, dev, dev);
BlockDriverState *bs = s->conf.bs;
- DriveInfo *dinfo;
if (!bs) {
error_report("drive property not set");
return -1;
}
+ blkconf_serial(&s->conf, &s->serial);
+
/*
* Hack alert: this pretends to be a block device, but it's really
* a SCSI bus that can serve only a single device, which it
@@ -551,13 +559,6 @@ static int usb_msd_initfn(USBDevice *dev)
bdrv_detach_dev(bs, &s->dev.qdev);
s->conf.bs = NULL;
- if (!s->serial) {
- /* try to fall back to value set with legacy -drive serial=... */
- dinfo = drive_get_by_blockdev(bs);
- if (*dinfo->serial) {
- s->serial = strdup(dinfo->serial);
- }
- }
if (s->serial) {
usb_desc_set_string(dev, STR_SERIALNUMBER, s->serial);
} else {
diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c
new file mode 100644
index 0000000000..9b02ff48fa
--- /dev/null
+++ b/hw/usb/dev-uas.c
@@ -0,0 +1,779 @@
+/*
+ * UAS (USB Attached SCSI) emulation
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu-common.h"
+#include "qemu-option.h"
+#include "qemu-config.h"
+#include "trace.h"
+
+#include "hw/usb.h"
+#include "hw/usb/desc.h"
+#include "hw/scsi.h"
+#include "hw/scsi-defs.h"
+
+/* --------------------------------------------------------------------- */
+
+#define UAS_UI_COMMAND 0x01
+#define UAS_UI_SENSE 0x03
+#define UAS_UI_RESPONSE 0x04
+#define UAS_UI_TASK_MGMT 0x05
+#define UAS_UI_READ_READY 0x06
+#define UAS_UI_WRITE_READY 0x07
+
+#define UAS_RC_TMF_COMPLETE 0x00
+#define UAS_RC_INVALID_INFO_UNIT 0x02
+#define UAS_RC_TMF_NOT_SUPPORTED 0x04
+#define UAS_RC_TMF_FAILED 0x05
+#define UAS_RC_TMF_SUCCEEDED 0x08
+#define UAS_RC_INCORRECT_LUN 0x09
+#define UAS_RC_OVERLAPPED_TAG 0x0a
+
+#define UAS_TMF_ABORT_TASK 0x01
+#define UAS_TMF_ABORT_TASK_SET 0x02
+#define UAS_TMF_CLEAR_TASK_SET 0x04
+#define UAS_TMF_LOGICAL_UNIT_RESET 0x08
+#define UAS_TMF_I_T_NEXUS_RESET 0x10
+#define UAS_TMF_CLEAR_ACA 0x40
+#define UAS_TMF_QUERY_TASK 0x80
+#define UAS_TMF_QUERY_TASK_SET 0x81
+#define UAS_TMF_QUERY_ASYNC_EVENT 0x82
+
+#define UAS_PIPE_ID_COMMAND 0x01
+#define UAS_PIPE_ID_STATUS 0x02
+#define UAS_PIPE_ID_DATA_IN 0x03
+#define UAS_PIPE_ID_DATA_OUT 0x04
+
+typedef struct {
+ uint8_t id;
+ uint8_t reserved;
+ uint16_t tag;
+} QEMU_PACKED uas_ui_header;
+
+typedef struct {
+ uint8_t prio_taskattr; /* 6:3 priority, 2:0 task attribute */
+ uint8_t reserved_1;
+ uint8_t add_cdb_length; /* 7:2 additional cdb length (dwords) */
+ uint8_t reserved_2;
+ uint64_t lun;
+ uint8_t cdb[16];
+ uint8_t add_cdb[];
+} QEMU_PACKED uas_ui_command;
+
+typedef struct {
+ uint16_t status_qualifier;
+ uint8_t status;
+ uint8_t reserved[7];
+ uint16_t sense_length;
+ uint8_t sense_data[18];
+} QEMU_PACKED uas_ui_sense;
+
+typedef struct {
+ uint16_t add_response_info;
+ uint8_t response_code;
+} QEMU_PACKED uas_ui_response;
+
+typedef struct {
+ uint8_t function;
+ uint8_t reserved;
+ uint16_t task_tag;
+ uint64_t lun;
+} QEMU_PACKED uas_ui_task_mgmt;
+
+typedef struct {
+ uas_ui_header hdr;
+ union {
+ uas_ui_command command;
+ uas_ui_sense sense;
+ uas_ui_task_mgmt task;
+ uas_ui_response response;
+ };
+} QEMU_PACKED uas_ui;
+
+/* --------------------------------------------------------------------- */
+
+typedef struct UASDevice UASDevice;
+typedef struct UASRequest UASRequest;
+typedef struct UASStatus UASStatus;
+
+struct UASDevice {
+ USBDevice dev;
+ SCSIBus bus;
+ UASRequest *datain;
+ UASRequest *dataout;
+ USBPacket *status;
+ QEMUBH *status_bh;
+ QTAILQ_HEAD(, UASStatus) results;
+ QTAILQ_HEAD(, UASRequest) requests;
+};
+
+struct UASRequest {
+ uint16_t tag;
+ uint64_t lun;
+ UASDevice *uas;
+ SCSIDevice *dev;
+ SCSIRequest *req;
+ USBPacket *data;
+ bool data_async;
+ bool active;
+ bool complete;
+ uint32_t buf_off;
+ uint32_t buf_size;
+ uint32_t data_off;
+ uint32_t data_size;
+ QTAILQ_ENTRY(UASRequest) next;
+};
+
+struct UASStatus {
+ uas_ui status;
+ uint32_t length;
+ QTAILQ_ENTRY(UASStatus) next;
+};
+
+/* --------------------------------------------------------------------- */
+
+enum {
+ STR_MANUFACTURER = 1,
+ STR_PRODUCT,
+ STR_SERIALNUMBER,
+ STR_CONFIG_HIGH,
+};
+
+static const USBDescStrings desc_strings = {
+ [STR_MANUFACTURER] = "QEMU",
+ [STR_PRODUCT] = "USB Attached SCSI HBA",
+ [STR_SERIALNUMBER] = "27842",
+ [STR_CONFIG_HIGH] = "High speed config (usb 2.0)",
+};
+
+static const USBDescIface desc_iface_high = {
+ .bInterfaceNumber = 0,
+ .bNumEndpoints = 4,
+ .bInterfaceClass = USB_CLASS_MASS_STORAGE,
+ .bInterfaceSubClass = 0x06, /* SCSI */
+ .bInterfaceProtocol = 0x62, /* UAS */
+ .eps = (USBDescEndpoint[]) {
+ {
+ .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_COMMAND,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ .extra = (uint8_t[]) {
+ 0x04, /* u8 bLength */
+ 0x24, /* u8 bDescriptorType */
+ UAS_PIPE_ID_COMMAND,
+ 0x00, /* u8 bReserved */
+ },
+ },{
+ .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_STATUS,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ .extra = (uint8_t[]) {
+ 0x04, /* u8 bLength */
+ 0x24, /* u8 bDescriptorType */
+ UAS_PIPE_ID_STATUS,
+ 0x00, /* u8 bReserved */
+ },
+ },{
+ .bEndpointAddress = USB_DIR_IN | UAS_PIPE_ID_DATA_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ .extra = (uint8_t[]) {
+ 0x04, /* u8 bLength */
+ 0x24, /* u8 bDescriptorType */
+ UAS_PIPE_ID_DATA_IN,
+ 0x00, /* u8 bReserved */
+ },
+ },{
+ .bEndpointAddress = USB_DIR_OUT | UAS_PIPE_ID_DATA_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = 512,
+ .extra = (uint8_t[]) {
+ 0x04, /* u8 bLength */
+ 0x24, /* u8 bDescriptorType */
+ UAS_PIPE_ID_DATA_OUT,
+ 0x00, /* u8 bReserved */
+ },
+ },
+ }
+};
+
+static const USBDescDevice desc_device_high = {
+ .bcdUSB = 0x0200,
+ .bMaxPacketSize0 = 64,
+ .bNumConfigurations = 1,
+ .confs = (USBDescConfig[]) {
+ {
+ .bNumInterfaces = 1,
+ .bConfigurationValue = 1,
+ .iConfiguration = STR_CONFIG_HIGH,
+ .bmAttributes = 0xc0,
+ .nif = 1,
+ .ifs = &desc_iface_high,
+ },
+ },
+};
+
+static const USBDesc desc = {
+ .id = {
+ .idVendor = 0x46f4, /* CRC16() of "QEMU" */
+ .idProduct = 0x0002,
+ .bcdDevice = 0,
+ .iManufacturer = STR_MANUFACTURER,
+ .iProduct = STR_PRODUCT,
+ .iSerialNumber = STR_SERIALNUMBER,
+ },
+ .high = &desc_device_high,
+ .str = desc_strings,
+};
+
+/* --------------------------------------------------------------------- */
+
+static UASStatus *usb_uas_alloc_status(uint8_t id, uint16_t tag)
+{
+ UASStatus *st = g_new0(UASStatus, 1);
+
+ st->status.hdr.id = id;
+ st->status.hdr.tag = cpu_to_be16(tag);
+ st->length = sizeof(uas_ui_header);
+ return st;
+}
+
+static void usb_uas_send_status_bh(void *opaque)
+{
+ UASDevice *uas = opaque;
+ UASStatus *st = QTAILQ_FIRST(&uas->results);
+ USBPacket *p = uas->status;
+
+ assert(p != NULL);
+ assert(st != NULL);
+
+ uas->status = NULL;
+ usb_packet_copy(p, &st->status, st->length);
+ p->result = st->length;
+ QTAILQ_REMOVE(&uas->results, st, next);
+ g_free(st);
+
+ usb_packet_complete(&uas->dev, p);
+}
+
+static void usb_uas_queue_status(UASDevice *uas, UASStatus *st, int length)
+{
+ st->length += length;
+ QTAILQ_INSERT_TAIL(&uas->results, st, next);
+ if (uas->status) {
+ /*
+ * Just schedule the bh to make sure any in-flight data transaction
+ * is finished before completing (sending) the status packet.
+ */
+ qemu_bh_schedule(uas->status_bh);
+ } else {
+ USBEndpoint *ep = usb_ep_get(&uas->dev, USB_TOKEN_IN,
+ UAS_PIPE_ID_STATUS);
+ usb_wakeup(ep);
+ }
+}
+
+static void usb_uas_queue_response(UASDevice *uas, uint16_t tag,
+ uint8_t code, uint16_t add_info)
+{
+ UASStatus *st = usb_uas_alloc_status(UAS_UI_RESPONSE, tag);
+
+ trace_usb_uas_response(uas->dev.addr, tag, code);
+ st->status.response.response_code = code;
+ st->status.response.add_response_info = cpu_to_be16(add_info);
+ usb_uas_queue_status(uas, st, sizeof(uas_ui_response));
+}
+
+static void usb_uas_queue_sense(UASRequest *req, uint8_t status)
+{
+ UASStatus *st = usb_uas_alloc_status(UAS_UI_SENSE, req->tag);
+ int len, slen = 0;
+
+ trace_usb_uas_sense(req->uas->dev.addr, req->tag, status);
+ st->status.sense.status = status;
+ st->status.sense.status_qualifier = cpu_to_be16(0);
+ if (status != GOOD) {
+ slen = scsi_req_get_sense(req->req, st->status.sense.sense_data,
+ sizeof(st->status.sense.sense_data));
+ st->status.sense.sense_length = cpu_to_be16(slen);
+ }
+ len = sizeof(uas_ui_sense) - sizeof(st->status.sense.sense_data) + slen;
+ usb_uas_queue_status(req->uas, st, len);
+}
+
+static void usb_uas_queue_read_ready(UASRequest *req)
+{
+ UASStatus *st = usb_uas_alloc_status(UAS_UI_READ_READY, req->tag);
+
+ trace_usb_uas_read_ready(req->uas->dev.addr, req->tag);
+ usb_uas_queue_status(req->uas, st, 0);
+}
+
+static void usb_uas_queue_write_ready(UASRequest *req)
+{
+ UASStatus *st = usb_uas_alloc_status(UAS_UI_WRITE_READY, req->tag);
+
+ trace_usb_uas_write_ready(req->uas->dev.addr, req->tag);
+ usb_uas_queue_status(req->uas, st, 0);
+}
+
+/* --------------------------------------------------------------------- */
+
+static int usb_uas_get_lun(uint64_t lun64)
+{
+ return (lun64 >> 48) & 0xff;
+}
+
+static SCSIDevice *usb_uas_get_dev(UASDevice *uas, uint64_t lun64)
+{
+ if ((lun64 >> 56) != 0x00) {
+ return NULL;
+ }
+ return scsi_device_find(&uas->bus, 0, 0, usb_uas_get_lun(lun64));
+}
+
+static void usb_uas_complete_data_packet(UASRequest *req)
+{
+ USBPacket *p;
+
+ if (!req->data_async) {
+ return;
+ }
+ p = req->data;
+ req->data = NULL;
+ req->data_async = false;
+ usb_packet_complete(&req->uas->dev, p);
+}
+
+static void usb_uas_copy_data(UASRequest *req)
+{
+ uint32_t length;
+
+ length = MIN(req->buf_size - req->buf_off,
+ req->data->iov.size - req->data->result);
+ trace_usb_uas_xfer_data(req->uas->dev.addr, req->tag, length,
+ req->data->result, req->data->iov.size,
+ req->buf_off, req->buf_size);
+ usb_packet_copy(req->data, scsi_req_get_buf(req->req) + req->buf_off,
+ length);
+ req->buf_off += length;
+ req->data_off += length;
+
+ if (req->data->result == req->data->iov.size) {
+ usb_uas_complete_data_packet(req);
+ }
+ if (req->buf_size && req->buf_off == req->buf_size) {
+ req->buf_off = 0;
+ req->buf_size = 0;
+ scsi_req_continue(req->req);
+ }
+}
+
+static void usb_uas_start_next_transfer(UASDevice *uas)
+{
+ UASRequest *req;
+
+ QTAILQ_FOREACH(req, &uas->requests, next) {
+ if (req->active || req->complete) {
+ continue;
+ }
+ if (req->req->cmd.mode == SCSI_XFER_FROM_DEV && uas->datain == NULL) {
+ uas->datain = req;
+ usb_uas_queue_read_ready(req);
+ req->active = true;
+ return;
+ }
+ if (req->req->cmd.mode == SCSI_XFER_TO_DEV && uas->dataout == NULL) {
+ uas->dataout = req;
+ usb_uas_queue_write_ready(req);
+ req->active = true;
+ return;
+ }
+ }
+}
+
+static UASRequest *usb_uas_alloc_request(UASDevice *uas, uas_ui *ui)
+{
+ UASRequest *req;
+
+ req = g_new0(UASRequest, 1);
+ req->uas = uas;
+ req->tag = be16_to_cpu(ui->hdr.tag);
+ req->lun = be64_to_cpu(ui->command.lun);
+ req->dev = usb_uas_get_dev(req->uas, req->lun);
+ return req;
+}
+
+static void usb_uas_scsi_free_request(SCSIBus *bus, void *priv)
+{
+ UASRequest *req = priv;
+ UASDevice *uas = req->uas;
+
+ if (req == uas->datain) {
+ uas->datain = NULL;
+ }
+ if (req == uas->dataout) {
+ uas->dataout = NULL;
+ }
+ QTAILQ_REMOVE(&uas->requests, req, next);
+ g_free(req);
+}
+
+static UASRequest *usb_uas_find_request(UASDevice *uas, uint16_t tag)
+{
+ UASRequest *req;
+
+ QTAILQ_FOREACH(req, &uas->requests, next) {
+ if (req->tag == tag) {
+ return req;
+ }
+ }
+ return NULL;
+}
+
+static void usb_uas_scsi_transfer_data(SCSIRequest *r, uint32_t len)
+{
+ UASRequest *req = r->hba_private;
+
+ trace_usb_uas_scsi_data(req->uas->dev.addr, req->tag, len);
+ req->buf_off = 0;
+ req->buf_size = len;
+ if (req->data) {
+ usb_uas_copy_data(req);
+ } else {
+ usb_uas_start_next_transfer(req->uas);
+ }
+}
+
+static void usb_uas_scsi_command_complete(SCSIRequest *r,
+ uint32_t status, size_t resid)
+{
+ UASRequest *req = r->hba_private;
+ UASDevice *uas = req->uas;
+
+ trace_usb_uas_scsi_complete(req->uas->dev.addr, req->tag, status, resid);
+ req->complete = true;
+ if (req->data) {
+ usb_uas_complete_data_packet(req);
+ }
+ usb_uas_queue_sense(req, status);
+ scsi_req_unref(req->req);
+ usb_uas_start_next_transfer(uas);
+}
+
+static void usb_uas_scsi_request_cancelled(SCSIRequest *r)
+{
+ UASRequest *req = r->hba_private;
+
+ /* FIXME: queue notification to status pipe? */
+ scsi_req_unref(req->req);
+}
+
+static const struct SCSIBusInfo usb_uas_scsi_info = {
+ .tcq = true,
+ .max_target = 0,
+ .max_lun = 255,
+
+ .transfer_data = usb_uas_scsi_transfer_data,
+ .complete = usb_uas_scsi_command_complete,
+ .cancel = usb_uas_scsi_request_cancelled,
+ .free_request = usb_uas_scsi_free_request,
+};
+
+/* --------------------------------------------------------------------- */
+
+static void usb_uas_handle_reset(USBDevice *dev)
+{
+ UASDevice *uas = DO_UPCAST(UASDevice, dev, dev);
+ UASRequest *req, *nreq;
+ UASStatus *st, *nst;
+
+ trace_usb_uas_reset(dev->addr);
+ QTAILQ_FOREACH_SAFE(req, &uas->requests, next, nreq) {
+ scsi_req_cancel(req->req);
+ }
+ QTAILQ_FOREACH_SAFE(st, &uas->results, next, nst) {
+ QTAILQ_REMOVE(&uas->results, st, next);
+ g_free(st);
+ }
+}
+
+static int usb_uas_handle_control(USBDevice *dev, USBPacket *p,
+ int request, int value, int index, int length, uint8_t *data)
+{
+ int ret;
+
+ ret = usb_desc_handle_control(dev, p, request, value, index, length, data);
+ if (ret >= 0) {
+ return ret;
+ }
+ fprintf(stderr, "%s: unhandled control request\n", __func__);
+ return USB_RET_STALL;
+}
+
+static void usb_uas_cancel_io(USBDevice *dev, USBPacket *p)
+{
+ UASDevice *uas = DO_UPCAST(UASDevice, dev, dev);
+ UASRequest *req, *nreq;
+
+ if (uas->status == p) {
+ uas->status = NULL;
+ qemu_bh_cancel(uas->status_bh);
+ return;
+ }
+ QTAILQ_FOREACH_SAFE(req, &uas->requests, next, nreq) {
+ if (req->data == p) {
+ req->data = NULL;
+ return;
+ }
+ }
+ assert(!"canceled usb packet not found");
+}
+
+static void usb_uas_command(UASDevice *uas, uas_ui *ui)
+{
+ UASRequest *req;
+ uint32_t len;
+
+ req = usb_uas_find_request(uas, be16_to_cpu(ui->hdr.tag));
+ if (req) {
+ goto overlapped_tag;
+ }
+ req = usb_uas_alloc_request(uas, ui);
+ if (req->dev == NULL) {
+ goto bad_target;
+ }
+
+ trace_usb_uas_command(uas->dev.addr, req->tag,
+ usb_uas_get_lun(req->lun),
+ req->lun >> 32, req->lun & 0xffffffff);
+ QTAILQ_INSERT_TAIL(&uas->requests, req, next);
+ req->req = scsi_req_new(req->dev, req->tag,
+ usb_uas_get_lun(req->lun),
+ ui->command.cdb, req);
+ len = scsi_req_enqueue(req->req);
+ if (len) {
+ req->data_size = len;
+ scsi_req_continue(req->req);
+ }
+ return;
+
+overlapped_tag:
+ usb_uas_queue_response(uas, req->tag, UAS_RC_OVERLAPPED_TAG, 0);
+ return;
+
+bad_target:
+ /*
+ * FIXME: Seems to upset Linux, is this wrong?
+ * NOTE: Happens only with no scsi devices on the bus, not sure
+ * this is a valid UAS setup in the first place.
+ */
+ usb_uas_queue_response(uas, req->tag, UAS_RC_INVALID_INFO_UNIT, 0);
+ g_free(req);
+ return;
+}
+
+static void usb_uas_task(UASDevice *uas, uas_ui *ui)
+{
+ uint16_t tag = be16_to_cpu(ui->hdr.tag);
+ uint64_t lun64 = be64_to_cpu(ui->task.lun);
+ SCSIDevice *dev = usb_uas_get_dev(uas, lun64);
+ int lun = usb_uas_get_lun(lun64);
+ UASRequest *req;
+ uint16_t task_tag;
+
+ req = usb_uas_find_request(uas, be16_to_cpu(ui->hdr.tag));
+ if (req) {
+ goto overlapped_tag;
+ }
+
+ switch (ui->task.function) {
+ case UAS_TMF_ABORT_TASK:
+ task_tag = be16_to_cpu(ui->task.task_tag);
+ trace_usb_uas_tmf_abort_task(uas->dev.addr, tag, task_tag);
+ if (dev == NULL) {
+ goto bad_target;
+ }
+ if (dev->lun != lun) {
+ goto incorrect_lun;
+ }
+ req = usb_uas_find_request(uas, task_tag);
+ if (req && req->dev == dev) {
+ scsi_req_cancel(req->req);
+ }
+ usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE, 0);
+ break;
+
+ case UAS_TMF_LOGICAL_UNIT_RESET:
+ trace_usb_uas_tmf_logical_unit_reset(uas->dev.addr, tag, lun);
+ if (dev == NULL) {
+ goto bad_target;
+ }
+ if (dev->lun != lun) {
+ goto incorrect_lun;
+ }
+ qdev_reset_all(&dev->qdev);
+ usb_uas_queue_response(uas, tag, UAS_RC_TMF_COMPLETE, 0);
+ break;
+
+ default:
+ trace_usb_uas_tmf_unsupported(uas->dev.addr, tag, ui->task.function);
+ usb_uas_queue_response(uas, tag, UAS_RC_TMF_NOT_SUPPORTED, 0);
+ break;
+ }
+ return;
+
+overlapped_tag:
+ usb_uas_queue_response(uas, req->tag, UAS_RC_OVERLAPPED_TAG, 0);
+ return;
+
+bad_target:
+ /* FIXME: correct? [see long comment in usb_uas_command()] */
+ usb_uas_queue_response(uas, tag, UAS_RC_INVALID_INFO_UNIT, 0);
+ return;
+
+incorrect_lun:
+ usb_uas_queue_response(uas, tag, UAS_RC_INCORRECT_LUN, 0);
+ return;
+}
+
+static int usb_uas_handle_data(USBDevice *dev, USBPacket *p)
+{
+ UASDevice *uas = DO_UPCAST(UASDevice, dev, dev);
+ uas_ui ui;
+ UASStatus *st;
+ UASRequest *req;
+ int length, ret = 0;
+
+ switch (p->ep->nr) {
+ case UAS_PIPE_ID_COMMAND:
+ length = MIN(sizeof(ui), p->iov.size);
+ usb_packet_copy(p, &ui, length);
+ switch (ui.hdr.id) {
+ case UAS_UI_COMMAND:
+ usb_uas_command(uas, &ui);
+ ret = length;
+ break;
+ case UAS_UI_TASK_MGMT:
+ usb_uas_task(uas, &ui);
+ ret = length;
+ break;
+ default:
+ fprintf(stderr, "%s: unknown command ui: id 0x%x\n",
+ __func__, ui.hdr.id);
+ ret = USB_RET_STALL;
+ break;
+ }
+ break;
+ case UAS_PIPE_ID_STATUS:
+ st = QTAILQ_FIRST(&uas->results);
+ if (st == NULL) {
+ assert(uas->status == NULL);
+ uas->status = p;
+ ret = USB_RET_ASYNC;
+ break;
+ }
+ usb_packet_copy(p, &st->status, st->length);
+ ret = st->length;
+ QTAILQ_REMOVE(&uas->results, st, next);
+ g_free(st);
+ break;
+ case UAS_PIPE_ID_DATA_IN:
+ case UAS_PIPE_ID_DATA_OUT:
+ req = (p->ep->nr == UAS_PIPE_ID_DATA_IN) ? uas->datain : uas->dataout;
+ if (req == NULL) {
+ fprintf(stderr, "%s: no inflight request\n", __func__);
+ ret = USB_RET_STALL;
+ break;
+ }
+ scsi_req_ref(req->req);
+ req->data = p;
+ usb_uas_copy_data(req);
+ if (p->result == p->iov.size || req->complete) {
+ req->data = NULL;
+ ret = p->result;
+ } else {
+ req->data_async = true;
+ ret = USB_RET_ASYNC;
+ }
+ scsi_req_unref(req->req);
+ usb_uas_start_next_transfer(uas);
+ break;
+ default:
+ fprintf(stderr, "%s: invalid endpoint %d\n", __func__, p->ep->nr);
+ ret = USB_RET_STALL;
+ break;
+ }
+ return ret;
+}
+
+static void usb_uas_handle_destroy(USBDevice *dev)
+{
+ UASDevice *uas = DO_UPCAST(UASDevice, dev, dev);
+
+ qemu_bh_delete(uas->status_bh);
+}
+
+static int usb_uas_init(USBDevice *dev)
+{
+ UASDevice *uas = DO_UPCAST(UASDevice, dev, dev);
+
+ usb_desc_create_serial(dev);
+ usb_desc_init(dev);
+
+ QTAILQ_INIT(&uas->results);
+ QTAILQ_INIT(&uas->requests);
+ uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas);
+
+ scsi_bus_new(&uas->bus, &uas->dev.qdev, &usb_uas_scsi_info);
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_usb_uas = {
+ .name = "usb-uas",
+ .unmigratable = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_USB_DEVICE(dev, UASDevice),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static void usb_uas_class_initfn(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ USBDeviceClass *uc = USB_DEVICE_CLASS(klass);
+
+ uc->init = usb_uas_init;
+ uc->product_desc = desc_strings[STR_PRODUCT];
+ uc->usb_desc = &desc;
+ uc->cancel_packet = usb_uas_cancel_io;
+ uc->handle_attach = usb_desc_attach;
+ uc->handle_reset = usb_uas_handle_reset;
+ uc->handle_control = usb_uas_handle_control;
+ uc->handle_data = usb_uas_handle_data;
+ uc->handle_destroy = usb_uas_handle_destroy;
+ dc->fw_name = "storage";
+ dc->vmsd = &vmstate_usb_uas;
+}
+
+static TypeInfo uas_info = {
+ .name = "usb-uas",
+ .parent = TYPE_USB_DEVICE,
+ .instance_size = sizeof(UASDevice),
+ .class_init = usb_uas_class_initfn,
+};
+
+static void usb_uas_register_types(void)
+{
+ type_register_static(&uas_info);
+}
+
+type_init(usb_uas_register_types)
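The UAS information units above are packed structures whose tag field travels big-endian on the wire (cpu_to_be16 in the device code). A hedged standalone sketch of building such a header, with a hand-rolled byte swap standing in for QEMU's cpu_to_be16:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors the shape of a UAS IU header: id, reserved byte, 16-bit tag */
struct iu_header {
    uint8_t  id;
    uint8_t  reserved;
    uint16_t tag;
} __attribute__((packed));

static uint16_t to_be16(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));   /* assumes a little-endian host */
}

int main(void)
{
    struct iu_header hdr = { .id = 0x03 /* SENSE */, .tag = to_be16(0x1234) };
    uint8_t wire[4];

    memcpy(wire, &hdr, sizeof(hdr));
    printf("%02x %02x %02x %02x\n", wire[0], wire[1], wire[2], wire[3]);
    /* expected on a little-endian host: 03 00 12 34 */
    return 0;
}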
diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c
index 5298204d9d..b043e7c23e 100644
--- a/hw/usb/hcd-ehci.c
+++ b/hw/usb/hcd-ehci.c
@@ -365,6 +365,7 @@ struct EHCIQueue {
uint32_t seen;
uint64_t ts;
int async;
+ int revalidate;
/* cached data from guest - needs to be flushed
* when guest removes an entry (doorbell, handshake sequence)
@@ -414,16 +415,18 @@ struct EHCIState {
*/
QEMUTimer *frame_timer;
QEMUBH *async_bh;
- int astate; // Current state in asynchronous schedule
- int pstate; // Current state in periodic schedule
+ uint32_t astate; /* Current state in asynchronous schedule */
+ uint32_t pstate; /* Current state in periodic schedule */
USBPort ports[NB_PORTS];
USBPort *companion_ports[NB_PORTS];
uint32_t usbsts_pending;
+ uint32_t usbsts_frindex;
EHCIQueueHead aqueues;
EHCIQueueHead pqueues;
- uint32_t a_fetch_addr; // which address to look at next
- uint32_t p_fetch_addr; // which address to look at next
+ /* which address to look at next */
+ uint32_t a_fetch_addr;
+ uint32_t p_fetch_addr;
USBPacket ipacket;
QEMUSGList isgl;
@@ -556,33 +559,45 @@ static inline void ehci_clear_usbsts(EHCIState *s, int mask)
s->usbsts &= ~mask;
}
-static inline void ehci_set_interrupt(EHCIState *s, int intr)
+/* update irq line */
+static inline void ehci_update_irq(EHCIState *s)
{
int level = 0;
- // TODO honour interrupt threshold requests
-
- ehci_set_usbsts(s, intr);
-
if ((s->usbsts & USBINTR_MASK) & s->usbintr) {
level = 1;
}
+ trace_usb_ehci_irq(level, s->frindex, s->usbsts, s->usbintr);
qemu_set_irq(s->irq, level);
}
-static inline void ehci_record_interrupt(EHCIState *s, int intr)
+/* flag interrupt condition */
+static inline void ehci_raise_irq(EHCIState *s, int intr)
{
s->usbsts_pending |= intr;
}
-static inline void ehci_commit_interrupt(EHCIState *s)
+/*
+ * Commit pending interrupts (added via ehci_raise_irq),
+ * at the rate allowed by "Interrupt Threshold Control".
+ */
+static inline void ehci_commit_irq(EHCIState *s)
{
+ uint32_t itc;
+
if (!s->usbsts_pending) {
return;
}
- ehci_set_interrupt(s, s->usbsts_pending);
+ if (s->usbsts_frindex > s->frindex) {
+ return;
+ }
+
+ itc = (s->usbcmd >> 16) & 0xff;
+ s->usbsts |= s->usbsts_pending;
s->usbsts_pending = 0;
+ s->usbsts_frindex = s->frindex + itc;
+ ehci_update_irq(s);
}
static void ehci_update_halt(EHCIState *s)
@@ -773,7 +788,18 @@ static EHCIQueue *ehci_find_queue_by_qh(EHCIState *ehci, uint32_t addr,
return NULL;
}
-static void ehci_queues_rip_unused(EHCIState *ehci, int async, int flush)
+static void ehci_queues_tag_unused_async(EHCIState *ehci)
+{
+ EHCIQueue *q;
+
+ QTAILQ_FOREACH(q, &ehci->aqueues, next) {
+ if (!q->seen) {
+ q->revalidate = 1;
+ }
+ }
+}
+
+static void ehci_queues_rip_unused(EHCIState *ehci, int async)
{
EHCIQueueHead *head = async ? &ehci->aqueues : &ehci->pqueues;
uint64_t maxage = FRAME_TIMER_NS * ehci->maxframes * 4;
@@ -785,7 +811,7 @@ static void ehci_queues_rip_unused(EHCIState *ehci, int async, int flush)
q->ts = ehci->last_run_ns;
continue;
}
- if (!flush && ehci->last_run_ns < q->ts + maxage) {
+ if (ehci->last_run_ns < q->ts + maxage) {
continue;
}
ehci_free_queue(q);
@@ -821,8 +847,9 @@ static void ehci_attach(USBPort *port)
{
EHCIState *s = port->opaque;
uint32_t *portsc = &s->portsc[port->index];
+ const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci";
- trace_usb_ehci_port_attach(port->index, port->dev->product_desc);
+ trace_usb_ehci_port_attach(port->index, owner, port->dev->product_desc);
if (*portsc & PORTSC_POWNER) {
USBPort *companion = s->companion_ports[port->index];
@@ -834,15 +861,17 @@ static void ehci_attach(USBPort *port)
*portsc |= PORTSC_CONNECT;
*portsc |= PORTSC_CSC;
- ehci_set_interrupt(s, USBSTS_PCD);
+ ehci_raise_irq(s, USBSTS_PCD);
+ ehci_commit_irq(s);
}
static void ehci_detach(USBPort *port)
{
EHCIState *s = port->opaque;
uint32_t *portsc = &s->portsc[port->index];
+ const char *owner = (*portsc & PORTSC_POWNER) ? "comp" : "ehci";
- trace_usb_ehci_port_detach(port->index);
+ trace_usb_ehci_port_detach(port->index, owner);
if (*portsc & PORTSC_POWNER) {
USBPort *companion = s->companion_ports[port->index];
@@ -862,7 +891,8 @@ static void ehci_detach(USBPort *port)
*portsc &= ~(PORTSC_CONNECT|PORTSC_PED);
*portsc |= PORTSC_CSC;
- ehci_set_interrupt(s, USBSTS_PCD);
+ ehci_raise_irq(s, USBSTS_PCD);
+ ehci_commit_irq(s);
}
static void ehci_child_detach(USBPort *port, USBDevice *child)
@@ -889,10 +919,11 @@ static void ehci_wakeup(USBPort *port)
USBPort *companion = s->companion_ports[port->index];
if (companion->ops->wakeup) {
companion->ops->wakeup(companion);
- } else {
- qemu_bh_schedule(s->async_bh);
}
+ return;
}
+
+ qemu_bh_schedule(s->async_bh);
}
static int ehci_register_companion(USBBus *bus, USBPort *ports[],
@@ -980,6 +1011,8 @@ static void ehci_reset(void *opaque)
s->usbcmd = NB_MAXINTRATE << USBCMD_ITC_SH;
s->usbsts = USBSTS_HALT;
+ s->usbsts_pending = 0;
+ s->usbsts_frindex = 0;
s->astate = EST_INACTIVE;
s->pstate = EST_INACTIVE;
@@ -1171,7 +1204,7 @@ static void ehci_mem_writel(void *ptr, target_phys_addr_t addr, uint32_t val)
val &= USBSTS_RO_MASK; // bits 6 through 31 are RO
ehci_clear_usbsts(s, val); // bits 0 through 5 are R/WC
val = s->usbsts;
- ehci_set_interrupt(s, 0);
+ ehci_update_irq(s);
break;
case USBINTR:
@@ -1242,6 +1275,23 @@ static inline int put_dwords(EHCIState *ehci, uint32_t addr,
return 1;
}
+/*
+ * Write the qh back to guest physical memory. This step isn't
+ * in the EHCI spec but we need to do it since we don't share
+ * physical memory with our guest VM.
+ *
+ * The first three dwords are read-only for the EHCI, so skip them
+ * when writing back the qh.
+ */
+static void ehci_flush_qh(EHCIQueue *q)
+{
+ uint32_t *qh = (uint32_t *) &q->qh;
+ uint32_t dwords = sizeof(EHCIqh) >> 2;
+ uint32_t addr = NLPTR_GET(q->qhaddr);
+
+ put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qh + 3, dwords - 3);
+}
+
// 4.10.2
static int ehci_qh_do_overlay(EHCIQueue *q)
@@ -1289,8 +1339,7 @@ static int ehci_qh_do_overlay(EHCIQueue *q)
q->qh.bufptr[1] &= ~BUFPTR_CPROGMASK_MASK;
q->qh.bufptr[2] &= ~BUFPTR_FRAMETAG_MASK;
- put_dwords(q->ehci, NLPTR_GET(q->qhaddr), (uint32_t *) &q->qh,
- sizeof(EHCIqh) >> 2);
+ ehci_flush_qh(q);
return 0;
}
@@ -1386,18 +1435,18 @@ static void ehci_execute_complete(EHCIQueue *q)
case USB_RET_NODEV:
q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_XACTERR);
set_field(&q->qh.token, 0, QTD_TOKEN_CERR);
- ehci_record_interrupt(q->ehci, USBSTS_ERRINT);
+ ehci_raise_irq(q->ehci, USBSTS_ERRINT);
break;
case USB_RET_STALL:
q->qh.token |= QTD_TOKEN_HALT;
- ehci_record_interrupt(q->ehci, USBSTS_ERRINT);
+ ehci_raise_irq(q->ehci, USBSTS_ERRINT);
break;
case USB_RET_NAK:
set_field(&q->qh.altnext_qtd, 0, QH_ALTNEXT_NAKCNT);
return; /* We're not done yet with this transaction */
case USB_RET_BABBLE:
q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_BABBLE);
- ehci_record_interrupt(q->ehci, USBSTS_ERRINT);
+ ehci_raise_irq(q->ehci, USBSTS_ERRINT);
break;
default:
/* should not be triggerable */
@@ -1408,7 +1457,7 @@ static void ehci_execute_complete(EHCIQueue *q)
} else if ((p->usb_status > p->tbytes) && (p->pid == USB_TOKEN_IN)) {
p->usb_status = USB_RET_BABBLE;
q->qh.token |= (QTD_TOKEN_HALT | QTD_TOKEN_BABBLE);
- ehci_record_interrupt(q->ehci, USBSTS_ERRINT);
+ ehci_raise_irq(q->ehci, USBSTS_ERRINT);
} else {
// TODO check 4.12 for splits
@@ -1422,14 +1471,14 @@ static void ehci_execute_complete(EHCIQueue *q)
set_field(&q->qh.token, p->tbytes, QTD_TOKEN_TBYTES);
}
ehci_finish_transfer(q, p->usb_status);
+ usb_packet_unmap(&p->packet, &p->sgl);
qemu_sglist_destroy(&p->sgl);
- usb_packet_unmap(&p->packet);
q->qh.token ^= QTD_TOKEN_DTOGGLE;
q->qh.token &= ~QTD_TOKEN_ACTIVE;
if (q->qh.token & QTD_TOKEN_IOC) {
- ehci_record_interrupt(q->ehci, USBSTS_INT);
+ ehci_raise_irq(q->ehci, USBSTS_INT);
}
}
@@ -1547,7 +1596,7 @@ static int ehci_process_itd(EHCIState *ehci,
usb_packet_map(&ehci->ipacket, &ehci->isgl);
ret = usb_handle_packet(dev, &ehci->ipacket);
assert(ret != USB_RET_ASYNC);
- usb_packet_unmap(&ehci->ipacket);
+ usb_packet_unmap(&ehci->ipacket, &ehci->isgl);
} else {
DPRINTF("ISOCH: attempt to addess non-iso endpoint\n");
ret = USB_RET_NAK;
@@ -1564,12 +1613,12 @@ static int ehci_process_itd(EHCIState *ehci,
/* 3.3.2: XACTERR is only allowed on IN transactions */
if (dir) {
itd->transact[i] |= ITD_XACT_XACTERR;
- ehci_record_interrupt(ehci, USBSTS_ERRINT);
+ ehci_raise_irq(ehci, USBSTS_ERRINT);
}
break;
case USB_RET_BABBLE:
itd->transact[i] |= ITD_XACT_BABBLE;
- ehci_record_interrupt(ehci, USBSTS_ERRINT);
+ ehci_raise_irq(ehci, USBSTS_ERRINT);
break;
case USB_RET_NAK:
/* no data for us, so do a zero-length transfer */
@@ -1587,7 +1636,7 @@ static int ehci_process_itd(EHCIState *ehci,
}
}
if (itd->transact[i] & ITD_XACT_IOC) {
- ehci_record_interrupt(ehci, USBSTS_INT);
+ ehci_raise_irq(ehci, USBSTS_INT);
}
itd->transact[i] &= ~ITD_XACT_ACTIVE;
}
@@ -1596,23 +1645,6 @@ static int ehci_process_itd(EHCIState *ehci,
}
-/*
- * Write the qh back to guest physical memory. This step isn't
- * in the EHCI spec but we need to do it since we don't share
- * physical memory with our guest VM.
- *
- * The first three dwords are read-only for the EHCI, so skip them
- * when writing back the qh.
- */
-static void ehci_flush_qh(EHCIQueue *q)
-{
- uint32_t *qh = (uint32_t *) &q->qh;
- uint32_t dwords = sizeof(EHCIqh) >> 2;
- uint32_t addr = NLPTR_GET(q->qhaddr);
-
- put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qh + 3, dwords - 3);
-}
-
/* This state is the entry point for asynchronous schedule
* processing. Entry here consitutes a EHCI start event state (4.8.5)
*/
@@ -1628,7 +1660,7 @@ static int ehci_state_waitlisthead(EHCIState *ehci, int async)
ehci_set_usbsts(ehci, USBSTS_REC);
}
- ehci_queues_rip_unused(ehci, async, 0);
+ ehci_queues_rip_unused(ehci, async);
/* Find the head of the list (4.9.1.1) */
for(i = 0; i < MAX_QH; i++) {
@@ -1713,6 +1745,7 @@ static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async)
EHCIPacket *p;
uint32_t entry, devaddr;
EHCIQueue *q;
+ EHCIqh qh;
entry = ehci_get_fetch_addr(ehci, async);
q = ehci_find_queue_by_qh(ehci, entry, async);
@@ -1730,7 +1763,17 @@ static EHCIQueue *ehci_state_fetchqh(EHCIState *ehci, int async)
}
get_dwords(ehci, NLPTR_GET(q->qhaddr),
- (uint32_t *) &q->qh, sizeof(EHCIqh) >> 2);
+ (uint32_t *) &qh, sizeof(EHCIqh) >> 2);
+ if (q->revalidate && (q->qh.epchar != qh.epchar ||
+ q->qh.epcap != qh.epcap ||
+ q->qh.current_qtd != qh.current_qtd)) {
+ ehci_free_queue(q);
+ q = ehci_alloc_queue(ehci, entry, async);
+ q->seen++;
+ p = NULL;
+ }
+ q->qh = qh;
+ q->revalidate = 0;
ehci_trace_qh(q, NLPTR_GET(q->qhaddr), &q->qh);
devaddr = get_field(q->qh.epchar, QH_EPCHAR_DEVADDR);
@@ -2067,6 +2110,7 @@ out:
static int ehci_state_writeback(EHCIQueue *q)
{
EHCIPacket *p = QTAILQ_FIRST(&q->packets);
+ uint32_t *qtd, addr;
int again = 0;
/* Write back the QTD from the QH area */
@@ -2074,8 +2118,9 @@ static int ehci_state_writeback(EHCIQueue *q)
assert(p->qtdaddr == q->qtdaddr);
ehci_trace_qtd(q, NLPTR_GET(p->qtdaddr), (EHCIqtd *) &q->qh.next_qtd);
- put_dwords(q->ehci, NLPTR_GET(p->qtdaddr), (uint32_t *) &q->qh.next_qtd,
- sizeof(EHCIqtd) >> 2);
+ qtd = (uint32_t *) &q->qh.next_qtd;
+ addr = NLPTR_GET(p->qtdaddr);
+ put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 2);
ehci_free_packet(p);
/*
@@ -2179,8 +2224,6 @@ static void ehci_advance_state(EHCIState *ehci, int async)
}
}
while (again);
-
- ehci_commit_interrupt(ehci);
}
static void ehci_advance_async_state(EHCIState *ehci)
@@ -2223,10 +2266,10 @@ static void ehci_advance_async_state(EHCIState *ehci)
*/
if (ehci->usbcmd & USBCMD_IAAD) {
/* Remove all unseen qhs from the async qhs queue */
- ehci_queues_rip_unused(ehci, async, 1);
+ ehci_queues_tag_unused_async(ehci);
DPRINTF("ASYNC: doorbell request acknowledged\n");
ehci->usbcmd &= ~USBCMD_IAAD;
- ehci_set_interrupt(ehci, USBSTS_IAA);
+ ehci_raise_irq(ehci, USBSTS_IAA);
}
break;
@@ -2276,7 +2319,7 @@ static void ehci_advance_periodic_state(EHCIState *ehci)
ehci_set_fetch_addr(ehci, async,entry);
ehci_set_state(ehci, async, EST_FETCHENTRY);
ehci_advance_state(ehci, async);
- ehci_queues_rip_unused(ehci, async, 0);
+ ehci_queues_rip_unused(ehci, async);
break;
default:
@@ -2299,12 +2342,17 @@ static void ehci_update_frindex(EHCIState *ehci, int frames)
ehci->frindex += 8;
if (ehci->frindex == 0x00002000) {
- ehci_set_interrupt(ehci, USBSTS_FLR);
+ ehci_raise_irq(ehci, USBSTS_FLR);
}
if (ehci->frindex == 0x00004000) {
- ehci_set_interrupt(ehci, USBSTS_FLR);
+ ehci_raise_irq(ehci, USBSTS_FLR);
ehci->frindex = 0;
+ if (ehci->usbsts_frindex > 0x00004000) {
+ ehci->usbsts_frindex -= 0x00004000;
+ } else {
+ ehci->usbsts_frindex = 0;
+ }
}
}
}
@@ -2312,7 +2360,7 @@ static void ehci_update_frindex(EHCIState *ehci, int frames)
static void ehci_frame_timer(void *opaque)
{
EHCIState *ehci = opaque;
- int schedules = 0;
+ int need_timer = 0;
int64_t expire_time, t_now;
uint64_t ns_elapsed;
int frames, skipped_frames;
@@ -2323,8 +2371,8 @@ static void ehci_frame_timer(void *opaque)
frames = ns_elapsed / FRAME_TIMER_NS;
if (ehci_periodic_enabled(ehci) || ehci->pstate != EST_INACTIVE) {
- schedules++;
- expire_time = t_now + (get_ticks_per_sec() / FRAME_TIMER_FREQ);
+ need_timer++;
+ ehci->async_stepdown = 0;
if (frames > ehci->maxframes) {
skipped_frames = frames - ehci->maxframes;
@@ -2343,8 +2391,6 @@ static void ehci_frame_timer(void *opaque)
if (ehci->async_stepdown < ehci->maxframes / 2) {
ehci->async_stepdown++;
}
- expire_time = t_now + (get_ticks_per_sec()
- * ehci->async_stepdown / FRAME_TIMER_FREQ);
ehci_update_frindex(ehci, frames);
ehci->last_run_ns += FRAME_TIMER_NS * frames;
}
@@ -2353,11 +2399,19 @@ static void ehci_frame_timer(void *opaque)
* called
*/
if (ehci_async_enabled(ehci) || ehci->astate != EST_INACTIVE) {
- schedules++;
- qemu_bh_schedule(ehci->async_bh);
+ need_timer++;
+ ehci_advance_async_state(ehci);
}
- if (schedules) {
+ ehci_commit_irq(ehci);
+ if (ehci->usbsts_pending) {
+ need_timer++;
+ ehci->async_stepdown = 0;
+ }
+
+ if (need_timer) {
+ expire_time = t_now + (get_ticks_per_sec()
+ * (ehci->async_stepdown+1) / FRAME_TIMER_FREQ);
qemu_mod_timer(ehci->frame_timer, expire_time);
}
}
@@ -2390,9 +2444,58 @@ static USBBusOps ehci_bus_ops = {
.register_companion = ehci_register_companion,
};
+static int usb_ehci_post_load(void *opaque, int version_id)
+{
+ EHCIState *s = opaque;
+ int i;
+
+ for (i = 0; i < NB_PORTS; i++) {
+ USBPort *companion = s->companion_ports[i];
+ if (companion == NULL) {
+ continue;
+ }
+ if (s->portsc[i] & PORTSC_POWNER) {
+ companion->dev = s->ports[i].dev;
+ } else {
+ companion->dev = NULL;
+ }
+ }
+
+ return 0;
+}
+
static const VMStateDescription vmstate_ehci = {
- .name = "ehci",
- .unmigratable = 1,
+ .name = "ehci",
+ .version_id = 1,
+ .post_load = usb_ehci_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_PCI_DEVICE(dev, EHCIState),
+ /* mmio registers */
+ VMSTATE_UINT32(usbcmd, EHCIState),
+ VMSTATE_UINT32(usbsts, EHCIState),
+ VMSTATE_UINT32(usbintr, EHCIState),
+ VMSTATE_UINT32(frindex, EHCIState),
+ VMSTATE_UINT32(ctrldssegment, EHCIState),
+ VMSTATE_UINT32(periodiclistbase, EHCIState),
+ VMSTATE_UINT32(asynclistaddr, EHCIState),
+ VMSTATE_UINT32(configflag, EHCIState),
+ VMSTATE_UINT32(portsc[0], EHCIState),
+ VMSTATE_UINT32(portsc[1], EHCIState),
+ VMSTATE_UINT32(portsc[2], EHCIState),
+ VMSTATE_UINT32(portsc[3], EHCIState),
+ VMSTATE_UINT32(portsc[4], EHCIState),
+ VMSTATE_UINT32(portsc[5], EHCIState),
+ /* frame timer */
+ VMSTATE_TIMER(frame_timer, EHCIState),
+ VMSTATE_UINT64(last_run_ns, EHCIState),
+ VMSTATE_UINT32(async_stepdown, EHCIState),
+ /* schedule state */
+ VMSTATE_UINT32(astate, EHCIState),
+ VMSTATE_UINT32(pstate, EHCIState),
+ VMSTATE_UINT32(a_fetch_addr, EHCIState),
+ VMSTATE_UINT32(p_fetch_addr, EHCIState),
+ VMSTATE_END_OF_LIST()
+ }
};
static Property ehci_properties[] = {
@@ -2504,6 +2607,7 @@ static int usb_ehci_initfn(PCIDevice *dev)
s->async_bh = qemu_bh_new(ehci_async_bh, s);
QTAILQ_INIT(&s->aqueues);
QTAILQ_INIT(&s->pqueues);
+ usb_packet_init(&s->ipacket);
qemu_register_reset(ehci_reset, s);
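The EHCI rework above splits raising and committing interrupts so the "Interrupt Threshold Control" field can rate-limit the IRQ line against the frame index. A simplified, self-contained sketch of that gating logic, with no QEMU types, just the arithmetic:

#include <stdint.h>
#include <stdio.h>

struct hc {
    uint32_t frindex;        /* current frame counter */
    uint32_t sts_frindex;    /* earliest frame at which we may interrupt */
    uint32_t sts_pending;    /* status bits raised but not yet delivered */
    uint32_t itc;            /* interrupt threshold, in frames */
};

static void raise_irq(struct hc *s, uint32_t bits)
{
    s->sts_pending |= bits;  /* just record the condition */
}

static void commit_irq(struct hc *s)
{
    if (!s->sts_pending || s->sts_frindex > s->frindex) {
        return;              /* nothing pending, or still inside the window */
    }
    printf("frame %u: deliver 0x%x\n",
           (unsigned)s->frindex, (unsigned)s->sts_pending);
    s->sts_pending = 0;
    s->sts_frindex = s->frindex + s->itc;   /* open the next window */
}

int main(void)
{
    struct hc s = { .itc = 8 };

    for (uint32_t f = 0; f < 24; f++) {
        s.frindex = f;
        raise_irq(&s, 1u << (f % 3));   /* pretend something happens each frame */
        commit_irq(&s);                 /* fires at most once per 8 frames */
    }
    return 0;
}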
diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c
index 1a1cc88b1f..844e7ed166 100644
--- a/hw/usb/hcd-ohci.c
+++ b/hw/usb/hcd-ohci.c
@@ -31,7 +31,7 @@
#include "hw/usb.h"
#include "hw/pci.h"
#include "hw/sysbus.h"
-#include "hw/qdev-addr.h"
+#include "hw/qdev-dma.h"
//#define DEBUG_OHCI
/* Dump packet contents. */
@@ -62,6 +62,7 @@ typedef struct {
USBBus bus;
qemu_irq irq;
MemoryRegion mem;
+ DMAContext *dma;
int num_ports;
const char *name;
@@ -104,7 +105,7 @@ typedef struct {
uint32_t htest;
/* SM501 local memory offset */
- target_phys_addr_t localmem_base;
+ dma_addr_t localmem_base;
/* Active packets. */
uint32_t old_ctl;
@@ -482,14 +483,14 @@ static void ohci_reset(void *opaque)
/* Get an array of dwords from main memory */
static inline int get_dwords(OHCIState *ohci,
- uint32_t addr, uint32_t *buf, int num)
+ dma_addr_t addr, uint32_t *buf, int num)
{
int i;
addr += ohci->localmem_base;
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
- cpu_physical_memory_read(addr, buf, sizeof(*buf));
+ dma_memory_read(ohci->dma, addr, buf, sizeof(*buf));
*buf = le32_to_cpu(*buf);
}
@@ -498,7 +499,7 @@ static inline int get_dwords(OHCIState *ohci,
/* Put an array of dwords in to main memory */
static inline int put_dwords(OHCIState *ohci,
- uint32_t addr, uint32_t *buf, int num)
+ dma_addr_t addr, uint32_t *buf, int num)
{
int i;
@@ -506,7 +507,7 @@ static inline int put_dwords(OHCIState *ohci,
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
uint32_t tmp = cpu_to_le32(*buf);
- cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
+ dma_memory_write(ohci->dma, addr, &tmp, sizeof(tmp));
}
return 1;
@@ -514,14 +515,14 @@ static inline int put_dwords(OHCIState *ohci,
/* Get an array of words from main memory */
static inline int get_words(OHCIState *ohci,
- uint32_t addr, uint16_t *buf, int num)
+ dma_addr_t addr, uint16_t *buf, int num)
{
int i;
addr += ohci->localmem_base;
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
- cpu_physical_memory_read(addr, buf, sizeof(*buf));
+ dma_memory_read(ohci->dma, addr, buf, sizeof(*buf));
*buf = le16_to_cpu(*buf);
}
@@ -530,7 +531,7 @@ static inline int get_words(OHCIState *ohci,
/* Put an array of words in to main memory */
static inline int put_words(OHCIState *ohci,
- uint32_t addr, uint16_t *buf, int num)
+ dma_addr_t addr, uint16_t *buf, int num)
{
int i;
@@ -538,40 +539,40 @@ static inline int put_words(OHCIState *ohci,
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
uint16_t tmp = cpu_to_le16(*buf);
- cpu_physical_memory_write(addr, &tmp, sizeof(tmp));
+ dma_memory_write(ohci->dma, addr, &tmp, sizeof(tmp));
}
return 1;
}
static inline int ohci_read_ed(OHCIState *ohci,
- uint32_t addr, struct ohci_ed *ed)
+ dma_addr_t addr, struct ohci_ed *ed)
{
return get_dwords(ohci, addr, (uint32_t *)ed, sizeof(*ed) >> 2);
}
static inline int ohci_read_td(OHCIState *ohci,
- uint32_t addr, struct ohci_td *td)
+ dma_addr_t addr, struct ohci_td *td)
{
return get_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2);
}
static inline int ohci_read_iso_td(OHCIState *ohci,
- uint32_t addr, struct ohci_iso_td *td)
+ dma_addr_t addr, struct ohci_iso_td *td)
{
return (get_dwords(ohci, addr, (uint32_t *)td, 4) &&
get_words(ohci, addr + 16, td->offset, 8));
}
static inline int ohci_read_hcca(OHCIState *ohci,
- uint32_t addr, struct ohci_hcca *hcca)
+ dma_addr_t addr, struct ohci_hcca *hcca)
{
- cpu_physical_memory_read(addr + ohci->localmem_base, hcca, sizeof(*hcca));
+ dma_memory_read(ohci->dma, addr + ohci->localmem_base, hcca, sizeof(*hcca));
return 1;
}
static inline int ohci_put_ed(OHCIState *ohci,
- uint32_t addr, struct ohci_ed *ed)
+ dma_addr_t addr, struct ohci_ed *ed)
{
/* ed->tail is under control of the HCD.
* Since just ed->head is changed by HC, just write back this
@@ -583,64 +584,63 @@ static inline int ohci_put_ed(OHCIState *ohci,
}
static inline int ohci_put_td(OHCIState *ohci,
- uint32_t addr, struct ohci_td *td)
+ dma_addr_t addr, struct ohci_td *td)
{
return put_dwords(ohci, addr, (uint32_t *)td, sizeof(*td) >> 2);
}
static inline int ohci_put_iso_td(OHCIState *ohci,
- uint32_t addr, struct ohci_iso_td *td)
+ dma_addr_t addr, struct ohci_iso_td *td)
{
return (put_dwords(ohci, addr, (uint32_t *)td, 4) &&
put_words(ohci, addr + 16, td->offset, 8));
}
static inline int ohci_put_hcca(OHCIState *ohci,
- uint32_t addr, struct ohci_hcca *hcca)
+ dma_addr_t addr, struct ohci_hcca *hcca)
{
- cpu_physical_memory_write(addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET,
- (char *)hcca + HCCA_WRITEBACK_OFFSET,
- HCCA_WRITEBACK_SIZE);
+ dma_memory_write(ohci->dma,
+ addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET,
+ (char *)hcca + HCCA_WRITEBACK_OFFSET,
+ HCCA_WRITEBACK_SIZE);
return 1;
}
/* Read/Write the contents of a TD from/to main memory. */
static void ohci_copy_td(OHCIState *ohci, struct ohci_td *td,
- uint8_t *buf, int len, int write)
+ uint8_t *buf, int len, DMADirection dir)
{
- uint32_t ptr;
- uint32_t n;
+ dma_addr_t ptr, n;
ptr = td->cbp;
n = 0x1000 - (ptr & 0xfff);
if (n > len)
n = len;
- cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, n, write);
+ dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, n, dir);
if (n == len)
return;
ptr = td->be & ~0xfffu;
buf += n;
- cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, len - n, write);
+ dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, len - n, dir);
}
/* Read/Write the contents of an ISO TD from/to main memory. */
static void ohci_copy_iso_td(OHCIState *ohci,
uint32_t start_addr, uint32_t end_addr,
- uint8_t *buf, int len, int write)
+ uint8_t *buf, int len, DMADirection dir)
{
- uint32_t ptr;
- uint32_t n;
+ dma_addr_t ptr, n;
ptr = start_addr;
n = 0x1000 - (ptr & 0xfff);
if (n > len)
n = len;
- cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, n, write);
+ dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, n, dir);
if (n == len)
return;
ptr = end_addr & ~0xfffu;
buf += n;
- cpu_physical_memory_rw(ptr + ohci->localmem_base, buf, len - n, write);
+ dma_memory_rw(ohci->dma, ptr + ohci->localmem_base, buf, len - n, dir);
}
static void ohci_process_lists(OHCIState *ohci, int completion);
@@ -803,7 +803,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
}
if (len && dir != OHCI_TD_DIR_IN) {
- ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len, 0);
+ ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, len,
+ DMA_DIRECTION_TO_DEVICE);
}
if (completion) {
@@ -827,7 +828,8 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
/* Writeback */
if (dir == OHCI_TD_DIR_IN && ret >= 0 && ret <= len) {
/* IN transfer succeeded */
- ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret, 1);
+ ohci_copy_iso_td(ohci, start_addr, end_addr, ohci->usb_buf, ret,
+ DMA_DIRECTION_FROM_DEVICE);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_CC,
OHCI_CC_NOERROR);
OHCI_SET_BM(iso_td.offset[relative_frame_number], TD_PSW_SIZE, ret);
@@ -971,7 +973,8 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
pktlen = len;
}
if (!completion) {
- ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen, 0);
+ ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen,
+ DMA_DIRECTION_TO_DEVICE);
}
}
}
@@ -1021,7 +1024,8 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
}
if (ret >= 0) {
if (dir == OHCI_TD_DIR_IN) {
- ohci_copy_td(ohci, &td, ohci->usb_buf, ret, 1);
+ ohci_copy_td(ohci, &td, ohci->usb_buf, ret,
+ DMA_DIRECTION_FROM_DEVICE);
#ifdef DEBUG_PACKET
DPRINTF(" data:");
for (i = 0; i < ret; i++)
@@ -1748,11 +1752,14 @@ static USBBusOps ohci_bus_ops = {
};
static int usb_ohci_init(OHCIState *ohci, DeviceState *dev,
- int num_ports, uint32_t localmem_base,
- char *masterbus, uint32_t firstport)
+ int num_ports, dma_addr_t localmem_base,
+ char *masterbus, uint32_t firstport,
+ DMAContext *dma)
{
int i;
+ ohci->dma = dma;
+
if (usb_frame_time == 0) {
#ifdef OHCI_TIME_WARP
usb_frame_time = get_ticks_per_sec();
@@ -1817,7 +1824,8 @@ static int usb_ohci_initfn_pci(struct PCIDevice *dev)
ohci->pci_dev.config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */
if (usb_ohci_init(&ohci->state, &dev->qdev, ohci->num_ports, 0,
- ohci->masterbus, ohci->firstport) != 0) {
+ ohci->masterbus, ohci->firstport,
+ pci_dma_context(dev)) != 0) {
return -1;
}
ohci->state.irq = ohci->pci_dev.irq[0];
@@ -1831,7 +1839,7 @@ typedef struct {
SysBusDevice busdev;
OHCIState ohci;
uint32_t num_ports;
- target_phys_addr_t dma_offset;
+ dma_addr_t dma_offset;
} OHCISysBusState;
static int ohci_init_pxa(SysBusDevice *dev)
@@ -1839,7 +1847,8 @@ static int ohci_init_pxa(SysBusDevice *dev)
OHCISysBusState *s = FROM_SYSBUS(OHCISysBusState, dev);
/* Cannot fail as we pass NULL for masterbus */
- usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset, NULL, 0);
+ usb_ohci_init(&s->ohci, &dev->qdev, s->num_ports, s->dma_offset, NULL, 0,
+ NULL);
sysbus_init_irq(dev, &s->ohci.irq);
sysbus_init_mmio(dev, &s->ohci.mem);
@@ -1875,7 +1884,7 @@ static TypeInfo ohci_pci_info = {
static Property ohci_sysbus_properties[] = {
DEFINE_PROP_UINT32("num-ports", OHCISysBusState, num_ports, 3),
- DEFINE_PROP_TADDR("dma-offset", OHCISysBusState, dma_offset, 3),
+ DEFINE_PROP_DMAADDR("dma-offset", OHCISysBusState, dma_offset, 3),
DEFINE_PROP_END_OF_LIST(),
};
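The OHCI conversion routes all descriptor accesses through a DMA context while keeping the explicit little-endian conversion per dword. A standalone sketch of that read loop against a stand-in for guest memory; dma_read() below is a fake helper, not a QEMU call:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t guest_mem[64];   /* stand-in for guest physical memory */

static void dma_read(uint64_t addr, void *buf, size_t len)
{
    memcpy(buf, guest_mem + addr, len);   /* fake DMA: plain copy */
}

static uint32_t le32_to_host(uint32_t v)
{
    const uint8_t *b = (const uint8_t *)&v;
    return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
           ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}

/* read 'num' little-endian dwords starting at 'addr' into 'buf' */
static void get_dwords(uint64_t addr, uint32_t *buf, int num)
{
    for (int i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
        dma_read(addr, buf, sizeof(*buf));
        *buf = le32_to_host(*buf);
    }
}

int main(void)
{
    uint32_t dwords[2];

    /* 0x11223344 and 0xaabbccdd stored little-endian in "guest" memory */
    memcpy(guest_mem, "\x44\x33\x22\x11\xdd\xcc\xbb\xaa", 8);
    get_dwords(0, dwords, 2);
    printf("%08x %08x\n", (unsigned)dwords[0], (unsigned)dwords[1]);
    return 0;
}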
diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c
index 9871e24f50..1ace2a41da 100644
--- a/hw/usb/hcd-uhci.c
+++ b/hw/usb/hcd-uhci.c
@@ -292,10 +292,10 @@ static void uhci_async_cancel_device(UHCIState *s, USBDevice *dev)
static void uhci_async_cancel_all(UHCIState *s)
{
- UHCIQueue *queue;
+ UHCIQueue *queue, *nq;
UHCIAsync *curr, *n;
- QTAILQ_FOREACH(queue, &s->queues, next) {
+ QTAILQ_FOREACH_SAFE(queue, &s->queues, next, nq) {
QTAILQ_FOREACH_SAFE(curr, &queue->asyncs, next, n) {
uhci_async_unlink(curr);
uhci_async_cancel(curr);
@@ -388,11 +388,23 @@ static const VMStateDescription vmstate_uhci_port = {
}
};
+static int uhci_post_load(void *opaque, int version_id)
+{
+ UHCIState *s = opaque;
+
+ if (version_id < 2) {
+ s->expire_time = qemu_get_clock_ns(vm_clock) +
+ (get_ticks_per_sec() / FRAME_TIMER_FREQ);
+ }
+ return 0;
+}
+
static const VMStateDescription vmstate_uhci = {
.name = "uhci",
.version_id = 2,
.minimum_version_id = 1,
.minimum_version_id_old = 1,
+ .post_load = uhci_post_load,
.fields = (VMStateField []) {
VMSTATE_PCI_DEVICE(dev, UHCIState),
VMSTATE_UINT8_EQUAL(num_ports_vmstate, UHCIState),
@@ -871,7 +883,7 @@ static int uhci_handle_td(UHCIState *s, uint32_t addr, UHCI_TD *td,
done:
len = uhci_complete_td(s, td, async, int_mask);
- usb_packet_unmap(&async->packet);
+ usb_packet_unmap(&async->packet, &async->sgl);
uhci_async_free(async);
return len;
}
@@ -1257,12 +1269,11 @@ static int usb_uhci_vt82c686b_initfn(PCIDevice *dev)
return usb_uhci_common_initfn(dev);
}
-static int usb_uhci_exit(PCIDevice *dev)
+static void usb_uhci_exit(PCIDevice *dev)
{
UHCIState *s = DO_UPCAST(UHCIState, dev, dev);
memory_region_destroy(&s->io_bar);
- return 0;
}
static Property uhci_properties[] = {
diff --git a/hw/usb/host-linux.c b/hw/usb/host-linux.c
index a95b0eda55..d55be878ad 100644
--- a/hw/usb/host-linux.c
+++ b/hw/usb/host-linux.c
@@ -111,6 +111,7 @@ typedef struct USBHostDevice {
uint32_t iso_urb_count;
uint32_t options;
Notifier exit;
+ QEMUBH *bh;
struct endp_data ep_in[USB_MAX_ENDPOINTS];
struct endp_data ep_out[USB_MAX_ENDPOINTS];
@@ -212,7 +213,7 @@ static int is_iso_started(USBHostDevice *s, int pid, int ep)
static void clear_iso_started(USBHostDevice *s, int pid, int ep)
{
- trace_usb_host_ep_stop_iso(s->bus_num, s->addr, ep);
+ trace_usb_host_iso_stop(s->bus_num, s->addr, ep);
get_endp(s, pid, ep)->iso_started = 0;
}
@@ -220,7 +221,7 @@ static void set_iso_started(USBHostDevice *s, int pid, int ep)
{
struct endp_data *e = get_endp(s, pid, ep);
- trace_usb_host_ep_start_iso(s->bus_num, s->addr, ep);
+ trace_usb_host_iso_start(s->bus_num, s->addr, ep);
if (!e->iso_started) {
e->iso_started = 1;
e->inflight = 0;
@@ -318,7 +319,8 @@ static void async_complete(void *opaque)
if (r < 0) {
if (errno == EAGAIN) {
if (urbs > 2) {
- fprintf(stderr, "husb: %d iso urbs finished at once\n", urbs);
+ /* indicates possible latency issues */
+ trace_usb_host_iso_many_urbs(s->bus_num, s->addr, urbs);
}
return;
}
@@ -351,7 +353,8 @@ static void async_complete(void *opaque)
urbs++;
inflight = change_iso_inflight(s, pid, ep, -1);
if (inflight == 0 && is_iso_started(s, pid, ep)) {
- fprintf(stderr, "husb: out of buffers for iso stream\n");
+ /* could be a latency issue, or simply the end of the stream */
+ trace_usb_host_iso_out_of_bufs(s->bus_num, s->addr, ep);
}
continue;
}
@@ -1135,7 +1138,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s)
USBDescriptor *d;
bool active = false;
- usb_ep_init(&s->dev);
+ usb_ep_reset(&s->dev);
for (i = 0;; i += d->bLength) {
if (i+2 >= s->descr_len) {
@@ -1238,7 +1241,7 @@ static int usb_linux_update_endp_table(USBHostDevice *s)
return 0;
error:
- usb_ep_init(&s->dev);
+ usb_ep_reset(&s->dev);
return 1;
}
@@ -1325,6 +1328,7 @@ static int usb_host_open(USBHostDevice *dev, int bus_num,
goto fail;
}
+ usb_ep_init(&dev->dev);
ret = usb_linux_update_endp_table(dev);
if (ret) {
goto fail;
@@ -1421,6 +1425,43 @@ static void usb_host_exit_notifier(struct Notifier *n, void *data)
}
}
+/*
+ * This is *NOT* about restoring state. We have absolutely no idea
+ * what state the host device is in at the moment and whether it is
+ * still present in the first place. Attempting to continue where we
+ * left off is impossible.
+ *
+ * What we are going to do here is emulate a surprise removal of
+ * the usb device passed through, then kick host scan so the device
+ * will get re-attached (and re-initialized by the guest) in case it
+ * is still present.
+ *
+ * As the device removal will change the state of other devices (usb
+ * host controller, most likely the interrupt controller too) we have to
+ * defer it until *all* vmstate is loaded. Thus post_load just
+ * kicks a bottom half which then does the actual work.
+ */
+static void usb_host_post_load_bh(void *opaque)
+{
+ USBHostDevice *dev = opaque;
+
+ if (dev->fd != -1) {
+ usb_host_close(dev);
+ }
+ if (dev->dev.attached) {
+ usb_device_detach(&dev->dev);
+ }
+ usb_host_auto_check(NULL);
+}
+
+static int usb_host_post_load(void *opaque, int version_id)
+{
+ USBHostDevice *dev = opaque;
+
+ qemu_bh_schedule(dev->bh);
+ return 0;
+}
+
static int usb_host_initfn(USBDevice *dev)
{
USBHostDevice *s = DO_UPCAST(USBHostDevice, dev, dev);
@@ -1432,6 +1473,7 @@ static int usb_host_initfn(USBDevice *dev)
QTAILQ_INSERT_TAIL(&hostdevs, s, next);
s->exit.notify = usb_host_exit_notifier;
qemu_add_exit_notifier(&s->exit);
+ s->bh = qemu_bh_new(usb_host_post_load_bh, s);
usb_host_auto_check(NULL);
if (s->match.bus_num != 0 && s->match.port != NULL) {
@@ -1443,7 +1485,13 @@ static int usb_host_initfn(USBDevice *dev)
static const VMStateDescription vmstate_usb_host = {
.name = "usb-host",
- .unmigratable = 1,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .post_load = usb_host_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_USB_DEVICE(dev, USBHostDevice),
+ VMSTATE_END_OF_LIST()
+ }
};
static Property usb_host_dev_properties[] = {
@@ -1737,25 +1785,27 @@ static void usb_host_auto_check(void *unused)
struct USBHostDevice *s;
int unconnected = 0;
- usb_host_scan(NULL, usb_host_auto_scan);
+ if (runstate_is_running()) {
+ usb_host_scan(NULL, usb_host_auto_scan);
- QTAILQ_FOREACH(s, &hostdevs, next) {
- if (s->fd == -1) {
- unconnected++;
- }
- if (s->seen == 0) {
- s->errcount = 0;
+ QTAILQ_FOREACH(s, &hostdevs, next) {
+ if (s->fd == -1) {
+ unconnected++;
+ }
+ if (s->seen == 0) {
+ s->errcount = 0;
+ }
+ s->seen = 0;
}
- s->seen = 0;
- }
- if (unconnected == 0) {
- /* nothing to watch */
- if (usb_auto_timer) {
- qemu_del_timer(usb_auto_timer);
- trace_usb_host_auto_scan_disabled();
+ if (unconnected == 0) {
+ /* nothing to watch */
+ if (usb_auto_timer) {
+ qemu_del_timer(usb_auto_timer);
+ trace_usb_host_auto_scan_disabled();
+ }
+ return;
}
- return;
}
if (!usb_auto_timer) {
diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c
index 2462351389..c0de30ea88 100644
--- a/hw/usb/libhw.c
+++ b/hw/usb/libhw.c
@@ -26,15 +26,15 @@
int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
{
- int is_write = (p->pid == USB_TOKEN_IN);
- target_phys_addr_t len;
+ DMADirection dir = (p->pid == USB_TOKEN_IN) ?
+ DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE;
+ dma_addr_t len;
void *mem;
int i;
for (i = 0; i < sgl->nsg; i++) {
len = sgl->sg[i].len;
- mem = cpu_physical_memory_map(sgl->sg[i].base, &len,
- is_write);
+ mem = dma_memory_map(sgl->dma, sgl->sg[i].base, &len, dir);
if (!mem) {
goto err;
}
@@ -46,18 +46,19 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl)
return 0;
err:
- usb_packet_unmap(p);
+ usb_packet_unmap(p, sgl);
return -1;
}
-void usb_packet_unmap(USBPacket *p)
+void usb_packet_unmap(USBPacket *p, QEMUSGList *sgl)
{
- int is_write = (p->pid == USB_TOKEN_IN);
+ DMADirection dir = (p->pid == USB_TOKEN_IN) ?
+ DMA_DIRECTION_FROM_DEVICE : DMA_DIRECTION_TO_DEVICE;
int i;
for (i = 0; i < p->iov.niov; i++) {
- cpu_physical_memory_unmap(p->iov.iov[i].iov_base,
- p->iov.iov[i].iov_len, is_write,
- p->iov.iov[i].iov_len);
+ dma_memory_unmap(sgl->dma, p->iov.iov[i].iov_base,
+ p->iov.iov[i].iov_len, dir,
+ p->iov.iov[i].iov_len);
}
}
diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c
index d949f040d5..10b4fbb3a7 100644
--- a/hw/usb/redirect.c
+++ b/hw/usb/redirect.c
@@ -1033,6 +1033,8 @@ static int usbredir_handle_status(USBRedirDevice *dev,
case usb_redir_inval:
WARNING("got invalid param error from usb-host?\n");
return USB_RET_NAK;
+ case usb_redir_babble:
+ return USB_RET_BABBLE;
case usb_redir_ioerror:
case usb_redir_timeout:
default:
diff --git a/hw/vexpress.c b/hw/vexpress.c
index 8072c5ada9..b6158447d7 100644
--- a/hw/vexpress.c
+++ b/hw/vexpress.c
@@ -284,9 +284,16 @@ static void a15_daughterboard_init(const VEDBoardInfo *daughterboard,
cpu_irq[n] = irqp[ARM_PIC_CPU_IRQ];
}
- if (ram_size > 0x80000000) {
- fprintf(stderr, "vexpress-a15: cannot model more than 2GB RAM\n");
- exit(1);
+ {
+ /* We have to use a separate 64 bit variable here to avoid the gcc
+ * "comparison is always false due to limited range of data type"
+ * warning if we are on a host where ram_addr_t is 32 bits.
+ */
+ uint64_t rsz = ram_size;
+ if (rsz > (30ULL * 1024 * 1024 * 1024)) {
+ fprintf(stderr, "vexpress-a15: cannot model more than 30GB RAM\n");
+ exit(1);
+ }
}
memory_region_init_ram(ram, "vexpress.highmem", ram_size);
@@ -420,7 +427,7 @@ static void vexpress_common_init(const VEDBoardInfo *daughterboard,
memory_region_add_subregion(sysmem, map[VE_VIDEORAM], vram);
/* 0x4e000000 LAN9118 Ethernet */
- if (nd_table[0].vlan) {
+ if (nd_table[0].used) {
lan9118_init(&nd_table[0], map[VE_ETHERNET], pic[15]);
}
diff --git a/hw/vga-isa-mm.c b/hw/vga-isa-mm.c
index f8984c62cb..44ae7d92c8 100644
--- a/hw/vga-isa-mm.c
+++ b/hw/vga-isa-mm.c
@@ -28,6 +28,8 @@
#include "pixel_ops.h"
#include "qemu-timer.h"
+#define VGA_RAM_SIZE (8192 * 1024)
+
typedef struct ISAVGAMMState {
VGACommonState vga;
int it_shift;
@@ -128,7 +130,8 @@ int isa_vga_mm_init(target_phys_addr_t vram_base,
s = g_malloc0(sizeof(*s));
- vga_common_init(&s->vga, VGA_RAM_SIZE);
+ s->vga.vram_size_mb = VGA_RAM_SIZE >> 20;
+ vga_common_init(&s->vga);
vga_mm_init(s, vram_base, ctrl_base, it_shift, address_space);
s->vga.ds = graphic_console_init(s->vga.update, s->vga.invalidate,
diff --git a/hw/vga-isa.c b/hw/vga-isa.c
index 4bcc4db62f..d2904737bc 100644
--- a/hw/vga-isa.c
+++ b/hw/vga-isa.c
@@ -49,7 +49,7 @@ static int vga_initfn(ISADevice *dev)
MemoryRegion *vga_io_memory;
const MemoryRegionPortio *vga_ports, *vbe_ports;
- vga_common_init(s, VGA_RAM_SIZE);
+ vga_common_init(s);
s->legacy_address_space = isa_address_space(dev);
vga_io_memory = vga_init_io(s, &vga_ports, &vbe_ports);
isa_register_portio_list(dev, 0x3b0, vga_ports, s, "vga");
@@ -69,6 +69,11 @@ static int vga_initfn(ISADevice *dev)
return 0;
}
+static Property vga_isa_properties[] = {
+ DEFINE_PROP_UINT32("vgamem_mb", ISAVGAState, state.vram_size_mb, 8),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void vga_class_initfn(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -76,6 +81,7 @@ static void vga_class_initfn(ObjectClass *klass, void *data)
ic->init = vga_initfn;
dc->reset = vga_reset_isa;
dc->vmsd = &vmstate_vga_common;
+ dc->props = vga_isa_properties;
}
static TypeInfo vga_info = {
diff --git a/hw/vga-pci.c b/hw/vga-pci.c
index 465b643d21..37dc019a61 100644
--- a/hw/vga-pci.c
+++ b/hw/vga-pci.c
@@ -53,7 +53,7 @@ static int pci_vga_initfn(PCIDevice *dev)
VGACommonState *s = &d->vga;
// vga + console init
- vga_common_init(s, VGA_RAM_SIZE);
+ vga_common_init(s);
vga_init(s, pci_address_space(dev), pci_address_space_io(dev), true);
s->ds = graphic_console_init(s->update, s->invalidate,
@@ -75,6 +75,11 @@ DeviceState *pci_vga_init(PCIBus *bus)
return &pci_create_simple(bus, -1, "VGA")->qdev;
}
+static Property vga_pci_properties[] = {
+ DEFINE_PROP_UINT32("vgamem_mb", PCIVGAState, vga.vram_size_mb, 16),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void vga_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -87,6 +92,7 @@ static void vga_class_init(ObjectClass *klass, void *data)
k->device_id = PCI_DEVICE_ID_QEMU_VGA;
k->class_id = PCI_CLASS_DISPLAY_VGA;
dc->vmsd = &vmstate_vga_pci;
+ dc->props = vga_pci_properties;
}
static TypeInfo vga_info = {
diff --git a/hw/vga.c b/hw/vga.c
index d784df7df4..f82ced8e66 100644
--- a/hw/vga.c
+++ b/hw/vga.c
@@ -38,6 +38,9 @@
//#define DEBUG_BOCHS_VBE
+/* 16 state changes per vertical frame @60 Hz */
+#define VGA_TEXT_CURSOR_PERIOD_MS (1000 * 2 * 16 / 60)
+
/*
* Video Graphics Array (VGA)
*
@@ -1300,6 +1303,7 @@ static void vga_draw_text(VGACommonState *s, int full_update)
uint32_t *ch_attr_ptr;
vga_draw_glyph8_func *vga_draw_glyph8;
vga_draw_glyph9_func *vga_draw_glyph9;
+ int64_t now = qemu_get_clock_ms(vm_clock);
/* compute font data address (in plane 2) */
v = s->sr[VGA_SEQ_CHARACTER_MAP];
@@ -1370,6 +1374,10 @@ static void vga_draw_text(VGACommonState *s, int full_update)
s->cursor_end = s->cr[VGA_CRTC_CURSOR_END];
}
cursor_ptr = s->vram_ptr + (s->start_addr + cursor_offset) * 4;
+ if (now >= s->cursor_blink_time) {
+ s->cursor_blink_time = now + VGA_TEXT_CURSOR_PERIOD_MS / 2;
+ s->cursor_visible_phase = !s->cursor_visible_phase;
+ }
depth_index = get_depth_index(s->ds);
if (cw == 16)
@@ -1390,7 +1398,7 @@ static void vga_draw_text(VGACommonState *s, int full_update)
cx_max = -1;
for(cx = 0; cx < width; cx++) {
ch_attr = *(uint16_t *)src;
- if (full_update || ch_attr != *ch_attr_ptr) {
+ if (full_update || ch_attr != *ch_attr_ptr || src == cursor_ptr) {
if (cx < cx_min)
cx_min = cx;
if (cx > cx_max)
@@ -1420,7 +1428,8 @@ static void vga_draw_text(VGACommonState *s, int full_update)
font_ptr, cheight, fgcol, bgcol, dup9);
}
if (src == cursor_ptr &&
- !(s->cr[VGA_CRTC_CURSOR_START] & 0x20)) {
+ !(s->cr[VGA_CRTC_CURSOR_START] & 0x20) &&
+ s->cursor_visible_phase) {
int line_start, line_last, h;
/* draw the cursor */
line_start = s->cr[VGA_CRTC_CURSOR_START] & 0x1f;
@@ -1884,6 +1893,7 @@ static void vga_update_display(void *opaque)
}
if (graphic_mode != s->graphic_mode) {
s->graphic_mode = graphic_mode;
+ s->cursor_blink_time = qemu_get_clock_ms(vm_clock);
full_update = 1;
}
switch(graphic_mode) {
@@ -2225,7 +2235,7 @@ const VMStateDescription vmstate_vga_common = {
}
};
-void vga_common_init(VGACommonState *s, int vga_ram_size)
+void vga_common_init(VGACommonState *s)
{
int i, j, v, b;
@@ -2252,16 +2262,23 @@ void vga_common_init(VGACommonState *s, int vga_ram_size)
expand4to8[i] = v;
}
+ /* valid range: 1 MB -> 256 MB */
+ s->vram_size = 1024 * 1024;
+ while (s->vram_size < (s->vram_size_mb << 20) &&
+ s->vram_size < (256 << 20)) {
+ s->vram_size <<= 1;
+ }
+ s->vram_size_mb = s->vram_size >> 20;
+
#ifdef CONFIG_BOCHS_VBE
s->is_vbe_vmstate = 1;
#else
s->is_vbe_vmstate = 0;
#endif
- memory_region_init_ram(&s->vram, "vga.vram", vga_ram_size);
+ memory_region_init_ram(&s->vram, "vga.vram", s->vram_size);
vmstate_register_ram_global(&s->vram);
xen_register_framebuffer(&s->vram);
s->vram_ptr = memory_region_get_ram_ptr(&s->vram);
- s->vram_size = vga_ram_size;
s->get_bpp = vga_get_bpp;
s->get_offsets = vga_get_offsets;
s->get_resolution = vga_get_resolution;
diff --git a/hw/vga_int.h b/hw/vga_int.h
index d244d8ff99..8938093682 100644
--- a/hw/vga_int.h
+++ b/hw/vga_int.h
@@ -31,8 +31,8 @@
/* bochs VBE support */
#define CONFIG_BOCHS_VBE
-#define VBE_DISPI_MAX_XRES 1600
-#define VBE_DISPI_MAX_YRES 1200
+#define VBE_DISPI_MAX_XRES 16000
+#define VBE_DISPI_MAX_YRES 12000
#define VBE_DISPI_MAX_BPP 32
#define VBE_DISPI_INDEX_ID 0x0
@@ -107,6 +107,7 @@ typedef struct VGACommonState {
MemoryRegion vram;
MemoryRegion vram_vbe;
uint32_t vram_size;
+ uint32_t vram_size_mb; /* property */
uint32_t latch;
MemoryRegion *chain4_alias;
uint8_t sr_index;
@@ -155,6 +156,8 @@ typedef struct VGACommonState {
uint32_t last_scr_width, last_scr_height; /* in pixels */
uint32_t last_depth; /* in bits */
uint8_t cursor_start, cursor_end;
+ bool cursor_visible_phase;
+ int64_t cursor_blink_time;
uint32_t cursor_offset;
unsigned int (*rgb_to_pixel)(unsigned int r,
unsigned int g, unsigned b);
@@ -184,7 +187,7 @@ static inline int c6_to_8(int v)
return (v << 2) | (b << 1) | b;
}
-void vga_common_init(VGACommonState *s, int vga_ram_size);
+void vga_common_init(VGACommonState *s);
void vga_init(VGACommonState *s, MemoryRegion *address_space,
MemoryRegion *address_space_io, bool init_vga_ports);
MemoryRegion *vga_init_io(VGACommonState *s,
@@ -209,7 +212,6 @@ void vga_init_vbe(VGACommonState *s, MemoryRegion *address_space);
extern const uint8_t sr_mask[8];
extern const uint8_t gr_mask[16];
-#define VGA_RAM_SIZE (8192 * 1024)
#define VGABIOS_FILENAME "vgabios.bin"
#define VGABIOS_CIRRUS_FILENAME "vgabios-cirrus.bin"
diff --git a/hw/vhost.c b/hw/vhost.c
index 43664e7f4d..0fd8da84e2 100644
--- a/hw/vhost.c
+++ b/hw/vhost.c
@@ -737,13 +737,13 @@ static void vhost_virtqueue_cleanup(struct vhost_dev *dev,
static void vhost_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
static void vhost_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data, EventNotifier *e)
{
}
diff --git a/hw/vhost_net.c b/hw/vhost_net.c
index f672e9dafd..ecaa22dfb4 100644
--- a/hw/vhost_net.c
+++ b/hw/vhost_net.c
@@ -42,7 +42,7 @@ struct vhost_net {
struct vhost_dev dev;
struct vhost_virtqueue vqs[2];
int backend;
- VLANClientState *vc;
+ NetClientState *nc;
};
unsigned vhost_net_get_features(struct vhost_net *net, unsigned features)
@@ -80,10 +80,10 @@ void vhost_net_ack_features(struct vhost_net *net, unsigned features)
}
}
-static int vhost_net_get_fd(VLANClientState *backend)
+static int vhost_net_get_fd(NetClientState *backend)
{
switch (backend->info->type) {
- case NET_CLIENT_TYPE_TAP:
+ case NET_CLIENT_OPTIONS_KIND_TAP:
return tap_get_fd(backend);
default:
fprintf(stderr, "vhost-net requires tap backend\n");
@@ -91,7 +91,7 @@ static int vhost_net_get_fd(VLANClientState *backend)
}
}
-struct vhost_net *vhost_net_init(VLANClientState *backend, int devfd,
+struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
bool force)
{
int r;
@@ -104,7 +104,7 @@ struct vhost_net *vhost_net_init(VLANClientState *backend, int devfd,
if (r < 0) {
goto fail;
}
- net->vc = backend;
+ net->nc = backend;
net->dev.backend_features = tap_has_vnet_hdr(backend) ? 0 :
(1 << VHOST_NET_F_VIRTIO_NET_HDR);
net->backend = r;
@@ -151,7 +151,7 @@ int vhost_net_start(struct vhost_net *net,
goto fail_notifiers;
}
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- tap_set_vnet_hdr_len(net->vc,
+ tap_set_vnet_hdr_len(net->nc,
sizeof(struct virtio_net_hdr_mrg_rxbuf));
}
@@ -160,7 +160,7 @@ int vhost_net_start(struct vhost_net *net,
goto fail_start;
}
- net->vc->info->poll(net->vc, false);
+ net->nc->info->poll(net->nc, false);
qemu_set_fd_handler(net->backend, NULL, NULL, NULL);
file.fd = net->backend;
for (file.index = 0; file.index < net->dev.nvqs; ++file.index) {
@@ -177,10 +177,10 @@ fail:
int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
assert(r >= 0);
}
- net->vc->info->poll(net->vc, true);
+ net->nc->info->poll(net->nc, true);
vhost_dev_stop(&net->dev, dev);
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- tap_set_vnet_hdr_len(net->vc, sizeof(struct virtio_net_hdr));
+ tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
}
fail_start:
vhost_dev_disable_notifiers(&net->dev, dev);
@@ -197,10 +197,10 @@ void vhost_net_stop(struct vhost_net *net,
int r = ioctl(net->dev.control, VHOST_NET_SET_BACKEND, &file);
assert(r >= 0);
}
- net->vc->info->poll(net->vc, true);
+ net->nc->info->poll(net->nc, true);
vhost_dev_stop(&net->dev, dev);
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- tap_set_vnet_hdr_len(net->vc, sizeof(struct virtio_net_hdr));
+ tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
}
vhost_dev_disable_notifiers(&net->dev, dev);
}
@@ -209,12 +209,12 @@ void vhost_net_cleanup(struct vhost_net *net)
{
vhost_dev_cleanup(&net->dev);
if (net->dev.acked_features & (1 << VIRTIO_NET_F_MRG_RXBUF)) {
- tap_set_vnet_hdr_len(net->vc, sizeof(struct virtio_net_hdr));
+ tap_set_vnet_hdr_len(net->nc, sizeof(struct virtio_net_hdr));
}
g_free(net);
}
#else
-struct vhost_net *vhost_net_init(VLANClientState *backend, int devfd,
+struct vhost_net *vhost_net_init(NetClientState *backend, int devfd,
bool force)
{
error_report("vhost-net support is not compiled in");
diff --git a/hw/vhost_net.h b/hw/vhost_net.h
index 91e40b195e..a9db23423c 100644
--- a/hw/vhost_net.h
+++ b/hw/vhost_net.h
@@ -6,7 +6,7 @@
struct vhost_net;
typedef struct vhost_net VHostNetState;
-VHostNetState *vhost_net_init(VLANClientState *backend, int devfd, bool force);
+VHostNetState *vhost_net_init(NetClientState *backend, int devfd, bool force);
bool vhost_net_query(VHostNetState *net, VirtIODevice *dev);
int vhost_net_start(VHostNetState *net, VirtIODevice *dev);
diff --git a/hw/virtio-balloon.c b/hw/virtio-balloon.c
index d048cef50f..dd1a6506cf 100644
--- a/hw/virtio-balloon.c
+++ b/hw/virtio-balloon.c
@@ -77,7 +77,7 @@ static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
size_t offset = 0;
uint32_t pfn;
- while (iov_to_buf(elem.out_sg, elem.out_num, &pfn, offset, 4) == 4) {
+ while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
ram_addr_t pa;
ram_addr_t addr;
@@ -118,7 +118,7 @@ static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
*/
reset_stats(s);
- while (iov_to_buf(elem->out_sg, elem->out_num, &stat, offset, sizeof(stat))
+ while (iov_to_buf(elem->out_sg, elem->out_num, offset, &stat, sizeof(stat))
== sizeof(stat)) {
uint16_t tag = tswap16(stat.tag);
uint64_t val = tswap64(stat.val);
diff --git a/hw/virtio-blk.c b/hw/virtio-blk.c
index fe0774617b..f21757ed55 100644
--- a/hw/virtio-blk.c
+++ b/hw/virtio-blk.c
@@ -14,6 +14,7 @@
#include "qemu-common.h"
#include "qemu-error.h"
#include "trace.h"
+#include "hw/block-common.h"
#include "blockdev.h"
#include "virtio-blk.h"
#include "scsi-defs.h"
@@ -478,19 +479,17 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
VirtIOBlock *s = to_virtio_blk(vdev);
struct virtio_blk_config blkcfg;
uint64_t capacity;
- int cylinders, heads, secs;
int blk_size = s->conf->logical_block_size;
bdrv_get_geometry(s->bs, &capacity);
- bdrv_get_geometry_hint(s->bs, &cylinders, &heads, &secs);
memset(&blkcfg, 0, sizeof(blkcfg));
stq_raw(&blkcfg.capacity, capacity);
stl_raw(&blkcfg.seg_max, 128 - 2);
- stw_raw(&blkcfg.cylinders, cylinders);
+ stw_raw(&blkcfg.cylinders, s->conf->cyls);
stl_raw(&blkcfg.blk_size, blk_size);
stw_raw(&blkcfg.min_io_size, s->conf->min_io_size / blk_size);
stw_raw(&blkcfg.opt_io_size, s->conf->opt_io_size / blk_size);
- blkcfg.heads = heads;
+ blkcfg.heads = s->conf->heads;
/*
* We must ensure that the block device capacity is a multiple of
* the logical block size. If that is not the case, lets use
@@ -502,10 +501,10 @@ static void virtio_blk_update_config(VirtIODevice *vdev, uint8_t *config)
* divided by 512 - instead it is the amount of blk_size blocks
* per track (cylinder).
*/
- if (bdrv_getlength(s->bs) / heads / secs % blk_size) {
- blkcfg.sectors = secs & ~s->sector_mask;
+ if (bdrv_getlength(s->bs) / s->conf->heads / s->conf->secs % blk_size) {
+ blkcfg.sectors = s->conf->secs & ~s->sector_mask;
} else {
- blkcfg.sectors = secs;
+ blkcfg.sectors = s->conf->secs;
}
blkcfg.size_max = 0;
blkcfg.physical_block_exp = get_physical_block_exp(s->conf);
@@ -589,9 +588,7 @@ static const BlockDevOps virtio_block_ops = {
VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk)
{
VirtIOBlock *s;
- int cylinders, heads, secs;
static int virtio_blk_id;
- DriveInfo *dinfo;
if (!blk->conf.bs) {
error_report("drive property not set");
@@ -602,12 +599,9 @@ VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk)
return NULL;
}
- if (!blk->serial) {
- /* try to fall back to value set with legacy -drive serial=... */
- dinfo = drive_get_by_blockdev(blk->conf.bs);
- if (*dinfo->serial) {
- blk->serial = strdup(dinfo->serial);
- }
+ blkconf_serial(&blk->conf, &blk->serial);
+ if (blkconf_geometry(&blk->conf, NULL, 65535, 255, 255) < 0) {
+ return NULL;
}
s = (VirtIOBlock *)virtio_common_init("virtio-blk", VIRTIO_ID_BLOCK,
@@ -622,7 +616,6 @@ VirtIODevice *virtio_blk_init(DeviceState *dev, VirtIOBlkConf *blk)
s->blk = blk;
s->rq = NULL;
s->sector_mask = (s->conf->logical_block_size / BDRV_SECTOR_SIZE) - 1;
- bdrv_guess_geometry(s->bs, &cylinders, &heads, &secs);
s->vq = virtio_add_queue(&s->vdev, 128, virtio_blk_handle_output);
diff --git a/hw/virtio-blk.h b/hw/virtio-blk.h
index d7850012bd..79ebccc95b 100644
--- a/hw/virtio-blk.h
+++ b/hw/virtio-blk.h
@@ -15,7 +15,7 @@
#define _QEMU_VIRTIO_BLK_H
#include "virtio.h"
-#include "block.h"
+#include "hw/block-common.h"
/* from Linux's linux/virtio_blk.h */
diff --git a/hw/virtio-net.c b/hw/virtio-net.c
index 3f190d417e..b1998b27d3 100644
--- a/hw/virtio-net.c
+++ b/hw/virtio-net.c
@@ -108,7 +108,7 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status)
if (!n->nic->nc.peer) {
return;
}
- if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+ if (n->nic->nc.peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return;
}
@@ -163,7 +163,7 @@ static void virtio_net_set_status(struct VirtIODevice *vdev, uint8_t status)
}
}
-static void virtio_net_set_link_status(VLANClientState *nc)
+static void virtio_net_set_link_status(NetClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
uint16_t old_status = n->status;
@@ -205,7 +205,7 @@ static int peer_has_vnet_hdr(VirtIONet *n)
if (!n->nic->nc.peer)
return 0;
- if (n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP)
+ if (n->nic->nc.peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP)
return 0;
n->has_vnet_hdr = tap_has_vnet_hdr(n->nic->nc.peer);
@@ -249,7 +249,7 @@ static uint32_t virtio_net_get_features(VirtIODevice *vdev, uint32_t features)
}
if (!n->nic->nc.peer ||
- n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+ n->nic->nc.peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return features;
}
if (!tap_get_vhost_net(n->nic->nc.peer)) {
@@ -288,7 +288,7 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint32_t features)
(features >> VIRTIO_NET_F_GUEST_UFO) & 1);
}
if (!n->nic->nc.peer ||
- n->nic->nc.peer->info->type != NET_CLIENT_TYPE_TAP) {
+ n->nic->nc.peer->info->type != NET_CLIENT_OPTIONS_KIND_TAP) {
return;
}
if (!tap_get_vhost_net(n->nic->nc.peer)) {
@@ -453,7 +453,7 @@ static void virtio_net_handle_rx(VirtIODevice *vdev, VirtQueue *vq)
qemu_notify_event();
}
-static int virtio_net_can_receive(VLANClientState *nc)
+static int virtio_net_can_receive(NetClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
if (!n->vdev.vm_running) {
@@ -593,7 +593,7 @@ static int receive_filter(VirtIONet *n, const uint8_t *buf, int size)
return 0;
}
-static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t virtio_net_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
struct virtio_net_hdr_mrg_rxbuf *mhdr = NULL;
@@ -656,8 +656,8 @@ static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_
}
/* copy in packet. ugh */
- len = iov_from_buf(sg, elem.in_num,
- buf + offset, 0, size - offset);
+ len = iov_from_buf(sg, elem.in_num, 0,
+ buf + offset, size - offset);
total += len;
offset += len;
/* If buffers can't be merged, at this point we
@@ -690,7 +690,7 @@ static ssize_t virtio_net_receive(VLANClientState *nc, const uint8_t *buf, size_
static int32_t virtio_net_flush_tx(VirtIONet *n, VirtQueue *vq);
-static void virtio_net_tx_complete(VLANClientState *nc, ssize_t len)
+static void virtio_net_tx_complete(NetClientState *nc, ssize_t len)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -980,7 +980,7 @@ static int virtio_net_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
-static void virtio_net_cleanup(VLANClientState *nc)
+static void virtio_net_cleanup(NetClientState *nc)
{
VirtIONet *n = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -988,7 +988,7 @@ static void virtio_net_cleanup(VLANClientState *nc)
}
static NetClientInfo net_virtio_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = virtio_net_can_receive,
.receive = virtio_net_receive,
@@ -1077,6 +1077,6 @@ void virtio_net_exit(VirtIODevice *vdev)
qemu_bh_delete(n->tx_bh);
}
- qemu_del_vlan_client(&n->nic->nc);
+ qemu_del_net_client(&n->nic->nc);
virtio_cleanup(&n->vdev);
}
diff --git a/hw/virtio-pci.c b/hw/virtio-pci.c
index 9342eed070..125eded9ca 100644
--- a/hw/virtio-pci.c
+++ b/hw/virtio-pci.c
@@ -160,7 +160,7 @@ static int virtio_pci_load_queue(void * opaque, int n, QEMUFile *f)
}
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
- int n, bool assign)
+ int n, bool assign, bool set_handler)
{
VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
@@ -173,46 +173,18 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
__func__, r);
return r;
}
+ virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
- true, n, event_notifier_get_fd(notifier));
+ true, n, notifier);
} else {
memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
- true, n, event_notifier_get_fd(notifier));
- /* Handle the race condition where the guest kicked and we deassigned
- * before we got around to handling the kick.
- */
- if (event_notifier_test_and_clear(notifier)) {
- virtio_queue_notify_vq(vq);
- }
-
+ true, n, notifier);
+ virtio_queue_set_host_notifier_fd_handler(vq, false, false);
event_notifier_cleanup(notifier);
}
return r;
}
-static void virtio_pci_host_notifier_read(void *opaque)
-{
- VirtQueue *vq = opaque;
- EventNotifier *n = virtio_queue_get_host_notifier(vq);
- if (event_notifier_test_and_clear(n)) {
- virtio_queue_notify_vq(vq);
- }
-}
-
-static void virtio_pci_set_host_notifier_fd_handler(VirtIOPCIProxy *proxy,
- int n, bool assign)
-{
- VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
- EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
- if (assign) {
- qemu_set_fd_handler(event_notifier_get_fd(notifier),
- virtio_pci_host_notifier_read, NULL, vq);
- } else {
- qemu_set_fd_handler(event_notifier_get_fd(notifier),
- NULL, NULL, NULL);
- }
-}
-
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
int n, r;
@@ -228,12 +200,10 @@ static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
continue;
}
- r = virtio_pci_set_host_notifier_internal(proxy, n, true);
+ r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
if (r < 0) {
goto assign_error;
}
-
- virtio_pci_set_host_notifier_fd_handler(proxy, n, true);
}
proxy->ioeventfd_started = true;
return;
@@ -244,8 +214,7 @@ assign_error:
continue;
}
- virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
- r = virtio_pci_set_host_notifier_internal(proxy, n, false);
+ r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
assert(r >= 0);
}
proxy->ioeventfd_started = false;
@@ -266,8 +235,7 @@ static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
continue;
}
- virtio_pci_set_host_notifier_fd_handler(proxy, n, false);
- r = virtio_pci_set_host_notifier_internal(proxy, n, false);
+ r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
assert(r >= 0);
}
proxy->ioeventfd_started = false;
@@ -528,25 +496,15 @@ static unsigned virtio_pci_get_features(void *opaque)
return proxy->host_features;
}
-static void virtio_pci_guest_notifier_read(void *opaque)
-{
- VirtQueue *vq = opaque;
- EventNotifier *n = virtio_queue_get_guest_notifier(vq);
- if (event_notifier_test_and_clear(n)) {
- virtio_irq(vq);
- }
-}
-
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
unsigned int queue_no,
unsigned int vector,
MSIMessage msg)
{
VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
- int fd, ret;
-
- fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));
+ int ret;
if (irqfd->users == 0) {
ret = kvm_irqchip_add_msi_route(kvm_state, msg);
@@ -557,7 +515,7 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
}
irqfd->users++;
- ret = kvm_irqchip_add_irqfd(kvm_state, fd, irqfd->virq);
+ ret = kvm_irqchip_add_irq_notifier(kvm_state, n, irqfd->virq);
if (ret < 0) {
if (--irqfd->users == 0) {
kvm_irqchip_release_virq(kvm_state, irqfd->virq);
@@ -565,8 +523,7 @@ static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
return ret;
}
- qemu_set_fd_handler(fd, NULL, NULL, NULL);
-
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, true);
return 0;
}
@@ -575,19 +532,18 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
unsigned int vector)
{
VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
+ EventNotifier *n = virtio_queue_get_guest_notifier(vq);
VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
- int fd, ret;
-
- fd = event_notifier_get_fd(virtio_queue_get_guest_notifier(vq));
+ int ret;
- ret = kvm_irqchip_remove_irqfd(kvm_state, fd, irqfd->virq);
+ ret = kvm_irqchip_remove_irq_notifier(kvm_state, n, irqfd->virq);
assert(ret == 0);
if (--irqfd->users == 0) {
kvm_irqchip_release_virq(kvm_state, irqfd->virq);
}
- qemu_set_fd_handler(fd, virtio_pci_guest_notifier_read, NULL, vq);
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, false);
}
static int kvm_virtio_pci_vector_use(PCIDevice *dev, unsigned vector,
@@ -649,14 +605,9 @@ static int virtio_pci_set_guest_notifier(void *opaque, int n, bool assign)
if (r < 0) {
return r;
}
- qemu_set_fd_handler(event_notifier_get_fd(notifier),
- virtio_pci_guest_notifier_read, NULL, vq);
+ virtio_queue_set_guest_notifier_fd_handler(vq, true, false);
} else {
- qemu_set_fd_handler(event_notifier_get_fd(notifier),
- NULL, NULL, NULL);
- /* Test and clear notifier before closing it,
- * in case poll callback didn't have time to run. */
- virtio_pci_guest_notifier_read(vq);
+ virtio_queue_set_guest_notifier_fd_handler(vq, false, false);
event_notifier_cleanup(notifier);
}
@@ -732,7 +683,7 @@ static int virtio_pci_set_host_notifier(void *opaque, int n, bool assign)
* currently only stops on status change away from ok,
* reset, vmstop and such. If we do add code to start here,
* need to check vmstate, device state etc. */
- return virtio_pci_set_host_notifier_internal(proxy, n, assign);
+ return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
}
static void virtio_pci_vmstate_change(void *opaque, bool running)
@@ -782,13 +733,10 @@ void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
config[PCI_INTERRUPT_PIN] = 1;
- memory_region_init(&proxy->msix_bar, "virtio-msix", 4096);
- if (vdev->nvectors && !msix_init(&proxy->pci_dev, vdev->nvectors,
- &proxy->msix_bar, 1, 0)) {
- pci_register_bar(&proxy->pci_dev, 1, PCI_BASE_ADDRESS_SPACE_MEMORY,
- &proxy->msix_bar);
- } else
+ if (vdev->nvectors &&
+ msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
vdev->nvectors = 0;
+ }
proxy->pci_dev.config_write = virtio_write_config;
@@ -831,24 +779,21 @@ static int virtio_blk_init_pci(PCIDevice *pci_dev)
return 0;
}
-static int virtio_exit_pci(PCIDevice *pci_dev)
+static void virtio_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
- int r;
memory_region_destroy(&proxy->bar);
- r = msix_uninit(pci_dev, &proxy->msix_bar);
- memory_region_destroy(&proxy->msix_bar);
- return r;
+ msix_uninit_exclusive_bar(pci_dev);
}
-static int virtio_blk_exit_pci(PCIDevice *pci_dev)
+static void virtio_blk_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_pci_stop_ioeventfd(proxy);
virtio_blk_exit(proxy->vdev);
- return virtio_exit_pci(pci_dev);
+ virtio_exit_pci(pci_dev);
}
static int virtio_serial_init_pci(PCIDevice *pci_dev)
@@ -873,13 +818,13 @@ static int virtio_serial_init_pci(PCIDevice *pci_dev)
return 0;
}
-static int virtio_serial_exit_pci(PCIDevice *pci_dev)
+static void virtio_serial_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_pci_stop_ioeventfd(proxy);
virtio_serial_exit(proxy->vdev);
- return virtio_exit_pci(pci_dev);
+ virtio_exit_pci(pci_dev);
}
static int virtio_net_init_pci(PCIDevice *pci_dev)
@@ -897,13 +842,13 @@ static int virtio_net_init_pci(PCIDevice *pci_dev)
return 0;
}
-static int virtio_net_exit_pci(PCIDevice *pci_dev)
+static void virtio_net_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_pci_stop_ioeventfd(proxy);
virtio_net_exit(proxy->vdev);
- return virtio_exit_pci(pci_dev);
+ virtio_exit_pci(pci_dev);
}
static int virtio_balloon_init_pci(PCIDevice *pci_dev)
@@ -924,18 +869,19 @@ static int virtio_balloon_init_pci(PCIDevice *pci_dev)
return 0;
}
-static int virtio_balloon_exit_pci(PCIDevice *pci_dev)
+static void virtio_balloon_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_pci_stop_ioeventfd(proxy);
virtio_balloon_exit(proxy->vdev);
- return virtio_exit_pci(pci_dev);
+ virtio_exit_pci(pci_dev);
}
static Property virtio_blk_properties[] = {
DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
DEFINE_BLOCK_PROPERTIES(VirtIOPCIProxy, blk.conf),
+ DEFINE_BLOCK_CHS_PROPERTIES(VirtIOPCIProxy, blk.conf),
DEFINE_PROP_STRING("serial", VirtIOPCIProxy, blk.serial),
#ifdef __linux__
DEFINE_PROP_BIT("scsi", VirtIOPCIProxy, blk.scsi, 0, true),
@@ -1071,7 +1017,9 @@ static int virtio_scsi_init_pci(PCIDevice *pci_dev)
return -EINVAL;
}
- vdev->nvectors = proxy->nvectors;
+ vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
+ ? proxy->scsi.num_queues + 3
+ : proxy->nvectors;
virtio_init_pci(proxy, vdev);
/* make the actual value visible */
@@ -1079,16 +1027,17 @@ static int virtio_scsi_init_pci(PCIDevice *pci_dev)
return 0;
}
-static int virtio_scsi_exit_pci(PCIDevice *pci_dev)
+static void virtio_scsi_exit_pci(PCIDevice *pci_dev)
{
VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
virtio_scsi_exit(proxy->vdev);
- return virtio_exit_pci(pci_dev);
+ virtio_exit_pci(pci_dev);
}
static Property virtio_scsi_properties[] = {
- DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED),
DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOPCIProxy, host_features, scsi),
DEFINE_PROP_END_OF_LIST(),
};
diff --git a/hw/virtio-pci.h b/hw/virtio-pci.h
index 91b791ba9d..ac9d522f37 100644
--- a/hw/virtio-pci.h
+++ b/hw/virtio-pci.h
@@ -34,7 +34,6 @@ typedef struct {
PCIDevice pci_dev;
VirtIODevice *vdev;
MemoryRegion bar;
- MemoryRegion msix_bar;
uint32_t flags;
uint32_t class_code;
uint32_t nvectors;
diff --git a/hw/virtio-scsi.c b/hw/virtio-scsi.c
index e1a767ea78..c4a5b22f94 100644
--- a/hw/virtio-scsi.c
+++ b/hw/virtio-scsi.c
@@ -24,6 +24,11 @@
#define VIRTIO_SCSI_MAX_TARGET 255
#define VIRTIO_SCSI_MAX_LUN 16383
+/* Feature Bits */
+#define VIRTIO_SCSI_F_INOUT 0
+#define VIRTIO_SCSI_F_HOTPLUG 1
+#define VIRTIO_SCSI_F_CHANGE 2
+
/* Response codes */
#define VIRTIO_SCSI_S_OK 0
#define VIRTIO_SCSI_S_OVERRUN 1
@@ -59,6 +64,12 @@
#define VIRTIO_SCSI_T_NO_EVENT 0
#define VIRTIO_SCSI_T_TRANSPORT_RESET 1
#define VIRTIO_SCSI_T_ASYNC_NOTIFY 2
+#define VIRTIO_SCSI_T_PARAM_CHANGE 3
+
+/* Reasons for transport reset event */
+#define VIRTIO_SCSI_EVT_RESET_HARD 0
+#define VIRTIO_SCSI_EVT_RESET_RESCAN 1
+#define VIRTIO_SCSI_EVT_RESET_REMOVED 2
/* SCSI command request, followed by data-out */
typedef struct {
@@ -132,6 +143,7 @@ typedef struct {
uint32_t sense_size;
uint32_t cdb_size;
int resetting;
+ bool events_dropped;
VirtQueue *ctrl_vq;
VirtQueue *event_vq;
VirtQueue *cmd_vqs[0];
@@ -206,11 +218,13 @@ static void qemu_sgl_init_external(QEMUSGList *qsgl, struct iovec *sg,
static void virtio_scsi_parse_req(VirtIOSCSI *s, VirtQueue *vq,
VirtIOSCSIReq *req)
{
- assert(req->elem.out_num && req->elem.in_num);
+ assert(req->elem.in_num);
req->vq = vq;
req->dev = s;
req->sreq = NULL;
- req->req.buf = req->elem.out_sg[0].iov_base;
+ if (req->elem.out_num) {
+ req->req.buf = req->elem.out_sg[0].iov_base;
+ }
req->resp.buf = req->elem.in_sg[0].iov_base;
if (req->elem.out_num > 1) {
@@ -541,6 +555,8 @@ static void virtio_scsi_set_config(VirtIODevice *vdev,
static uint32_t virtio_scsi_get_features(VirtIODevice *vdev,
uint32_t requested_features)
{
+ requested_features |= (1UL << VIRTIO_SCSI_F_HOTPLUG);
+ requested_features |= (1UL << VIRTIO_SCSI_F_CHANGE);
return requested_features;
}
@@ -550,6 +566,7 @@ static void virtio_scsi_reset(VirtIODevice *vdev)
s->sense_size = VIRTIO_SCSI_SENSE_SIZE;
s->cdb_size = VIRTIO_SCSI_CDB_SIZE;
+ s->events_dropped = false;
}
/* The device does not have anything to save beyond the virtio data.
@@ -573,6 +590,93 @@ static int virtio_scsi_load(QEMUFile *f, void *opaque, int version_id)
return 0;
}
+static void virtio_scsi_push_event(VirtIOSCSI *s, SCSIDevice *dev,
+ uint32_t event, uint32_t reason)
+{
+ VirtIOSCSIReq *req = virtio_scsi_pop_req(s, s->event_vq);
+ VirtIOSCSIEvent *evt;
+ int in_size;
+
+ if (!req) {
+ s->events_dropped = true;
+ return;
+ }
+
+ if (req->elem.out_num || req->elem.in_num != 1) {
+ virtio_scsi_bad_req();
+ }
+
+ if (s->events_dropped) {
+ event |= VIRTIO_SCSI_T_EVENTS_MISSED;
+ s->events_dropped = false;
+ }
+
+ in_size = req->elem.in_sg[0].iov_len;
+ if (in_size < sizeof(VirtIOSCSIEvent)) {
+ virtio_scsi_bad_req();
+ }
+
+ evt = req->resp.event;
+ memset(evt, 0, sizeof(VirtIOSCSIEvent));
+ evt->event = event;
+ evt->reason = reason;
+ if (!dev) {
+ assert(event == VIRTIO_SCSI_T_NO_EVENT);
+ } else {
+ evt->lun[0] = 1;
+ evt->lun[1] = dev->id;
+
+ /* Linux wants us to keep the same encoding we use for REPORT LUNS. */
+ if (dev->lun >= 256) {
+ evt->lun[2] = (dev->lun >> 8) | 0x40;
+ }
+ evt->lun[3] = dev->lun & 0xFF;
+ }
+ virtio_scsi_complete_req(req);
+}
+
+static void virtio_scsi_handle_event(VirtIODevice *vdev, VirtQueue *vq)
+{
+ VirtIOSCSI *s = (VirtIOSCSI *)vdev;
+
+ if (s->events_dropped) {
+ virtio_scsi_push_event(s, NULL, VIRTIO_SCSI_T_NO_EVENT, 0);
+ }
+}
+
+static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense)
+{
+ VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+
+ if (((s->vdev.guest_features >> VIRTIO_SCSI_F_CHANGE) & 1) &&
+ (s->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK) &&
+ dev->type != TYPE_ROM) {
+ virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_PARAM_CHANGE,
+ sense.asc | (sense.ascq << 8));
+ }
+}
+
+static void virtio_scsi_hotplug(SCSIBus *bus, SCSIDevice *dev)
+{
+ VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+
+ if (((s->vdev.guest_features >> VIRTIO_SCSI_F_HOTPLUG) & 1) &&
+ (s->vdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_TRANSPORT_RESET,
+ VIRTIO_SCSI_EVT_RESET_RESCAN);
+ }
+}
+
+static void virtio_scsi_hot_unplug(SCSIBus *bus, SCSIDevice *dev)
+{
+ VirtIOSCSI *s = container_of(bus, VirtIOSCSI, bus);
+
+ if ((s->vdev.guest_features >> VIRTIO_SCSI_F_HOTPLUG) & 1) {
+ virtio_scsi_push_event(s, dev, VIRTIO_SCSI_T_TRANSPORT_RESET,
+ VIRTIO_SCSI_EVT_RESET_REMOVED);
+ }
+}
+
static struct SCSIBusInfo virtio_scsi_scsi_info = {
.tcq = true,
.max_channel = VIRTIO_SCSI_MAX_CHANNEL,
@@ -581,6 +685,9 @@ static struct SCSIBusInfo virtio_scsi_scsi_info = {
.complete = virtio_scsi_command_complete,
.cancel = virtio_scsi_request_cancelled,
+ .change = virtio_scsi_change,
+ .hotplug = virtio_scsi_hotplug,
+ .hot_unplug = virtio_scsi_hot_unplug,
.get_sg_list = virtio_scsi_get_sg_list,
.save_request = virtio_scsi_save_request,
.load_request = virtio_scsi_load_request,
@@ -609,7 +716,7 @@ VirtIODevice *virtio_scsi_init(DeviceState *dev, VirtIOSCSIConf *proxyconf)
s->ctrl_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
virtio_scsi_handle_ctrl);
s->event_vq = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
- NULL);
+ virtio_scsi_handle_event);
for (i = 0; i < s->conf->num_queues; i++) {
s->cmd_vqs[i] = virtio_add_queue(&s->vdev, VIRTIO_SCSI_VQ_SIZE,
virtio_scsi_handle_cmd);
diff --git a/hw/virtio-serial-bus.c b/hw/virtio-serial-bus.c
index 96382a4ea1..82073f5dc2 100644
--- a/hw/virtio-serial-bus.c
+++ b/hw/virtio-serial-bus.c
@@ -106,8 +106,8 @@ static size_t write_to_port(VirtIOSerialPort *port,
break;
}
- len = iov_from_buf(elem.in_sg, elem.in_num,
- buf + offset, 0, size - offset);
+ len = iov_from_buf(elem.in_sg, elem.in_num, 0,
+ buf + offset, size - offset);
offset += len;
virtqueue_push(vq, &elem, len);
@@ -454,7 +454,7 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
len = 0;
buf = NULL;
while (virtqueue_pop(vq, &elem)) {
- size_t cur_len, copied;
+ size_t cur_len;
cur_len = iov_size(elem.out_sg, elem.out_num);
/*
@@ -467,9 +467,9 @@ static void control_out(VirtIODevice *vdev, VirtQueue *vq)
buf = g_malloc(cur_len);
len = cur_len;
}
- copied = iov_to_buf(elem.out_sg, elem.out_num, buf, 0, len);
+ iov_to_buf(elem.out_sg, elem.out_num, 0, buf, cur_len);
- handle_control_message(vser, buf, copied);
+ handle_control_message(vser, buf, cur_len);
virtqueue_push(vq, &elem, 0);
}
g_free(buf);
diff --git a/hw/virtio.c b/hw/virtio.c
index 168abe4864..209c763751 100644
--- a/hw/virtio.c
+++ b/hw/virtio.c
@@ -984,10 +984,59 @@ VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n)
return vdev->vq + n;
}
+static void virtio_queue_guest_notifier_read(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, guest_notifier);
+ if (event_notifier_test_and_clear(n)) {
+ virtio_irq(vq);
+ }
+}
+
+void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool with_irqfd)
+{
+ if (assign && !with_irqfd) {
+ event_notifier_set_handler(&vq->guest_notifier,
+ virtio_queue_guest_notifier_read);
+ } else {
+ event_notifier_set_handler(&vq->guest_notifier, NULL);
+ }
+ if (!assign) {
+ /* Test and clear notifier before closing it,
+ * in case poll callback didn't have time to run. */
+ virtio_queue_guest_notifier_read(&vq->guest_notifier);
+ }
+}
+
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq)
{
return &vq->guest_notifier;
}
+
+static void virtio_queue_host_notifier_read(EventNotifier *n)
+{
+ VirtQueue *vq = container_of(n, VirtQueue, host_notifier);
+ if (event_notifier_test_and_clear(n)) {
+ virtio_queue_notify_vq(vq);
+ }
+}
+
+void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool set_handler)
+{
+ if (assign && set_handler) {
+ event_notifier_set_handler(&vq->host_notifier,
+ virtio_queue_host_notifier_read);
+ } else {
+ event_notifier_set_handler(&vq->host_notifier, NULL);
+ }
+ if (!assign) {
+ /* Test and clear notifier after disabling event,
+ * in case poll callback didn't have time to run. */
+ virtio_queue_host_notifier_read(&vq->host_notifier);
+ }
+}
+
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq)
{
return &vq->host_notifier;
diff --git a/hw/virtio.h b/hw/virtio.h
index 85aabe53d8..7a4f564529 100644
--- a/hw/virtio.h
+++ b/hw/virtio.h
@@ -18,7 +18,6 @@
#include "net.h"
#include "qdev.h"
#include "sysemu.h"
-#include "block.h"
#include "event_notifier.h"
#ifdef CONFIG_LINUX
#include "9p.h"
@@ -231,7 +230,11 @@ void virtio_queue_set_last_avail_idx(VirtIODevice *vdev, int n, uint16_t idx);
VirtQueue *virtio_get_queue(VirtIODevice *vdev, int n);
int virtio_queue_get_id(VirtQueue *vq);
EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq);
+void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool with_irqfd);
EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq);
+void virtio_queue_set_host_notifier_fd_handler(VirtQueue *vq, bool assign,
+ bool set_handler);
void virtio_queue_notify_vq(VirtQueue *vq);
void virtio_irq(VirtQueue *vq);
#endif
diff --git a/hw/vmware_vga.c b/hw/vmware_vga.c
index 142d9f4ea0..f5e4f440d5 100644
--- a/hw/vmware_vga.c
+++ b/hw/vmware_vga.c
@@ -1078,7 +1078,7 @@ static const VMStateDescription vmstate_vmware_vga = {
}
};
-static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size,
+static void vmsvga_init(struct vmsvga_state_s *s,
MemoryRegion *address_space, MemoryRegion *io)
{
s->scratch_size = SVGA_SCRATCH_SIZE;
@@ -1095,7 +1095,7 @@ static void vmsvga_init(struct vmsvga_state_s *s, int vga_ram_size,
vmstate_register_ram_global(&s->fifo_ram);
s->fifo_ptr = memory_region_get_ram_ptr(&s->fifo_ram);
- vga_common_init(&s->vga, vga_ram_size);
+ vga_common_init(&s->vga);
vga_init(&s->vga, address_space, io, true);
vmstate_register(NULL, 0, &vmstate_vga_common, &s->vga);
@@ -1150,11 +1150,14 @@ static void vmsvga_io_write(void *opaque, target_phys_addr_t addr,
switch (addr) {
case SVGA_IO_MUL * SVGA_INDEX_PORT:
- return vmsvga_index_write(s, addr, data);
+ vmsvga_index_write(s, addr, data);
+ break;
case SVGA_IO_MUL * SVGA_VALUE_PORT:
- return vmsvga_value_write(s, addr, data);
+ vmsvga_value_write(s, addr, data);
+ break;
case SVGA_IO_MUL * SVGA_BIOS_PORT:
- return vmsvga_bios_write(s, addr, data);
+ vmsvga_bios_write(s, addr, data);
+ break;
}
}
@@ -1184,7 +1187,7 @@ static int pci_vmsvga_initfn(PCIDevice *dev)
"vmsvga-io", 0x10);
pci_register_bar(&s->card, 0, PCI_BASE_ADDRESS_SPACE_IO, &s->io_bar);
- vmsvga_init(&s->chip, VGA_RAM_SIZE, pci_address_space(dev),
+ vmsvga_init(&s->chip, pci_address_space(dev),
pci_address_space_io(dev));
pci_register_bar(&s->card, 1, PCI_BASE_ADDRESS_MEM_PREFETCH, iomem);
@@ -1199,6 +1202,12 @@ static int pci_vmsvga_initfn(PCIDevice *dev)
return 0;
}
+static Property vga_vmware_properties[] = {
+ DEFINE_PROP_UINT32("vgamem_mb", struct pci_vmsvga_state_s,
+ chip.vga.vram_size_mb, 16),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
static void vmsvga_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
@@ -1214,6 +1223,7 @@ static void vmsvga_class_init(ObjectClass *klass, void *data)
k->subsystem_id = SVGA_PCI_DEVICE_ID;
dc->reset = vmsvga_reset;
dc->vmsd = &vmstate_vmware_vga;
+ dc->props = vga_vmware_properties;
}
static TypeInfo vmsvga_info = {
diff --git a/hw/watchdog.c b/hw/watchdog.c
index a42124d520..b52acedd98 100644
--- a/hw/watchdog.c
+++ b/hw/watchdog.c
@@ -55,7 +55,7 @@ int select_watchdog(const char *p)
QemuOpts *opts;
/* -watchdog ? lists available devices and exits cleanly. */
- if (strcmp(p, "?") == 0) {
+ if (is_help_option(p)) {
QLIST_FOREACH(model, &watchdog_list, entry) {
fprintf(stderr, "\t%s\t%s\n",
model->wdt_name, model->wdt_description);
diff --git a/hw/wdt_i6300esb.c b/hw/wdt_i6300esb.c
index 15c69db932..4a83474906 100644
--- a/hw/wdt_i6300esb.c
+++ b/hw/wdt_i6300esb.c
@@ -411,13 +411,11 @@ static int i6300esb_init(PCIDevice *dev)
return 0;
}
-static int i6300esb_exit(PCIDevice *dev)
+static void i6300esb_exit(PCIDevice *dev)
{
I6300State *d = DO_UPCAST(I6300State, dev, dev);
memory_region_destroy(&d->io_mem);
-
- return 0;
}
static WatchdogTimerModel model = {
diff --git a/hw/xen-host-pci-device.c b/hw/xen-host-pci-device.c
new file mode 100644
index 0000000000..e7ff680ef2
--- /dev/null
+++ b/hw/xen-host-pci-device.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2011 Citrix Ltd.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu-common.h"
+#include "xen-host-pci-device.h"
+
+#define XEN_HOST_PCI_MAX_EXT_CAP \
+ ((PCIE_CONFIG_SPACE_SIZE - PCI_CONFIG_SPACE_SIZE) / (PCI_CAP_SIZEOF + 4))
+
+#ifdef XEN_HOST_PCI_DEVICE_DEBUG
+# define XEN_HOST_PCI_LOG(f, a...) fprintf(stderr, "%s: " f, __func__, ##a)
+#else
+# define XEN_HOST_PCI_LOG(f, a...) (void)0
+#endif
+
+/*
+ * from linux/ioport.h
+ * IO resources have these defined flags.
+ */
+#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
+
+#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+
+#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
+#define IORESOURCE_MEM_64 0x00100000
+
+static int xen_host_pci_sysfs_path(const XenHostPCIDevice *d,
+ const char *name, char *buf, ssize_t size)
+{
+ int rc;
+
+ rc = snprintf(buf, size, "/sys/bus/pci/devices/%04x:%02x:%02x.%d/%s",
+ d->domain, d->bus, d->dev, d->func, name);
+
+ if (rc >= size || rc < 0) {
+ /* The output is truncated or another error was encountered */
+ return -ENODEV;
+ }
+ return 0;
+}
+
+
+/* This size should be enough to read the first 7 lines of a resource file */
+#define XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE 400
+static int xen_host_pci_get_resource(XenHostPCIDevice *d)
+{
+ int i, rc, fd;
+ char path[PATH_MAX];
+ char buf[XEN_HOST_PCI_RESSOURCE_BUFFER_SIZE];
+ unsigned long long start, end, flags, size;
+ char *endptr, *s;
+ uint8_t type;
+
+ rc = xen_host_pci_sysfs_path(d, "resource", path, sizeof (path));
+ if (rc) {
+ return rc;
+ }
+ fd = open(path, O_RDONLY);
+ if (fd == -1) {
+ XEN_HOST_PCI_LOG("Error: Can't open %s: %s\n", path, strerror(errno));
+ return -errno;
+ }
+
+ do {
+ rc = read(fd, &buf, sizeof (buf) - 1);
+ if (rc < 0 && errno != EINTR) {
+ rc = -errno;
+ goto out;
+ }
+ } while (rc < 0);
+ buf[rc] = 0;
+ rc = 0;
+
+ s = buf;
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ type = 0;
+
+ start = strtoll(s, &endptr, 16);
+ if (*endptr != ' ' || s == endptr) {
+ break;
+ }
+ s = endptr + 1;
+ end = strtoll(s, &endptr, 16);
+ if (*endptr != ' ' || s == endptr) {
+ break;
+ }
+ s = endptr + 1;
+ flags = strtoll(s, &endptr, 16);
+ if (*endptr != '\n' || s == endptr) {
+ break;
+ }
+ s = endptr + 1;
+
+ if (start) {
+ size = end - start + 1;
+ } else {
+ size = 0;
+ }
+
+ if (flags & IORESOURCE_IO) {
+ type |= XEN_HOST_PCI_REGION_TYPE_IO;
+ }
+ if (flags & IORESOURCE_MEM) {
+ type |= XEN_HOST_PCI_REGION_TYPE_MEM;
+ }
+ if (flags & IORESOURCE_PREFETCH) {
+ type |= XEN_HOST_PCI_REGION_TYPE_PREFETCH;
+ }
+ if (flags & IORESOURCE_MEM_64) {
+ type |= XEN_HOST_PCI_REGION_TYPE_MEM_64;
+ }
+
+ if (i < PCI_ROM_SLOT) {
+ d->io_regions[i].base_addr = start;
+ d->io_regions[i].size = size;
+ d->io_regions[i].type = type;
+ d->io_regions[i].bus_flags = flags & IORESOURCE_BITS;
+ } else {
+ d->rom.base_addr = start;
+ d->rom.size = size;
+ d->rom.type = type;
+ d->rom.bus_flags = flags & IORESOURCE_BITS;
+ }
+ }
+ if (i != PCI_NUM_REGIONS) {
+ /* Invalid format or input too short */
+ rc = -ENODEV;
+ }
+
+out:
+ close(fd);
+ return rc;
+}
+
+/* This size should be enough to read a long from a file */
+#define XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE 22
+static int xen_host_pci_get_value(XenHostPCIDevice *d, const char *name,
+ unsigned int *pvalue, int base)
+{
+ char path[PATH_MAX];
+ char buf[XEN_HOST_PCI_GET_VALUE_BUFFER_SIZE];
+ int fd, rc;
+ unsigned long value;
+ char *endptr;
+
+ rc = xen_host_pci_sysfs_path(d, name, path, sizeof (path));
+ if (rc) {
+ return rc;
+ }
+ fd = open(path, O_RDONLY);
+ if (fd == -1) {
+ XEN_HOST_PCI_LOG("Error: Can't open %s: %s\n", path, strerror(errno));
+ return -errno;
+ }
+ do {
+ rc = read(fd, &buf, sizeof (buf) - 1);
+ if (rc < 0 && errno != EINTR) {
+ rc = -errno;
+ goto out;
+ }
+ } while (rc < 0);
+ buf[rc] = 0;
+ value = strtol(buf, &endptr, base);
+ if (endptr == buf || *endptr != '\n') {
+ rc = -1;
+ } else if ((value == LONG_MIN || value == LONG_MAX) && errno == ERANGE) {
+ rc = -errno;
+ } else {
+ rc = 0;
+ *pvalue = value;
+ }
+out:
+ close(fd);
+ return rc;
+}
+
+static inline int xen_host_pci_get_hex_value(XenHostPCIDevice *d,
+ const char *name,
+ unsigned int *pvalue)
+{
+ return xen_host_pci_get_value(d, name, pvalue, 16);
+}
+
+static inline int xen_host_pci_get_dec_value(XenHostPCIDevice *d,
+ const char *name,
+ unsigned int *pvalue)
+{
+ return xen_host_pci_get_value(d, name, pvalue, 10);
+}
+
+static bool xen_host_pci_dev_is_virtfn(XenHostPCIDevice *d)
+{
+ char path[PATH_MAX];
+ struct stat buf;
+
+ if (xen_host_pci_sysfs_path(d, "physfn", path, sizeof (path))) {
+ return false;
+ }
+ return !stat(path, &buf);
+}
+
+static int xen_host_pci_config_open(XenHostPCIDevice *d)
+{
+ char path[PATH_MAX];
+ int rc;
+
+ rc = xen_host_pci_sysfs_path(d, "config", path, sizeof (path));
+ if (rc) {
+ return rc;
+ }
+ d->config_fd = open(path, O_RDWR);
+ if (d->config_fd < 0) {
+ return -errno;
+ }
+ return 0;
+}
+
+static int xen_host_pci_config_read(XenHostPCIDevice *d,
+ int pos, void *buf, int len)
+{
+ int rc;
+
+ do {
+ rc = pread(d->config_fd, buf, len, pos);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+ if (rc != len) {
+ return -errno;
+ }
+ return 0;
+}
+
+static int xen_host_pci_config_write(XenHostPCIDevice *d,
+ int pos, const void *buf, int len)
+{
+ int rc;
+
+ do {
+ rc = pwrite(d->config_fd, buf, len, pos);
+ } while (rc < 0 && (errno == EINTR || errno == EAGAIN));
+ if (rc != len) {
+ return -errno;
+ }
+ return 0;
+}
+
+
+int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p)
+{
+ uint8_t buf;
+ int rc = xen_host_pci_config_read(d, pos, &buf, 1);
+ if (!rc) {
+ *p = buf;
+ }
+ return rc;
+}
+
+int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p)
+{
+ uint16_t buf;
+ int rc = xen_host_pci_config_read(d, pos, &buf, 2);
+ if (!rc) {
+ *p = le16_to_cpu(buf);
+ }
+ return rc;
+}
+
+int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p)
+{
+ uint32_t buf;
+ int rc = xen_host_pci_config_read(d, pos, &buf, 4);
+ if (!rc) {
+ *p = le32_to_cpu(buf);
+ }
+ return rc;
+}
+
+int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len)
+{
+ return xen_host_pci_config_read(d, pos, buf, len);
+}
+
+int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data)
+{
+ return xen_host_pci_config_write(d, pos, &data, 1);
+}
+
+int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data)
+{
+ data = cpu_to_le16(data);
+ return xen_host_pci_config_write(d, pos, &data, 2);
+}
+
+int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data)
+{
+ data = cpu_to_le32(data);
+ return xen_host_pci_config_write(d, pos, &data, 4);
+}
+
+int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf, int len)
+{
+ return xen_host_pci_config_write(d, pos, buf, len);
+}
+
+int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *d, uint32_t cap)
+{
+ uint32_t header = 0;
+ int max_cap = XEN_HOST_PCI_MAX_EXT_CAP;
+ int pos = PCI_CONFIG_SPACE_SIZE;
+
+ do {
+ if (xen_host_pci_get_long(d, pos, &header)) {
+ break;
+ }
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0) {
+ break;
+ }
+
+ if (PCI_EXT_CAP_ID(header) == cap) {
+ return pos;
+ }
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CONFIG_SPACE_SIZE) {
+ break;
+ }
+
+ max_cap--;
+ } while (max_cap > 0);
+
+ return -1;
+}
+
+int xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain,
+ uint8_t bus, uint8_t dev, uint8_t func)
+{
+ unsigned int v;
+ int rc = 0;
+
+ d->config_fd = -1;
+ d->domain = domain;
+ d->bus = bus;
+ d->dev = dev;
+ d->func = func;
+
+ rc = xen_host_pci_config_open(d);
+ if (rc) {
+ goto error;
+ }
+ rc = xen_host_pci_get_resource(d);
+ if (rc) {
+ goto error;
+ }
+ rc = xen_host_pci_get_hex_value(d, "vendor", &v);
+ if (rc) {
+ goto error;
+ }
+ d->vendor_id = v;
+ rc = xen_host_pci_get_hex_value(d, "device", &v);
+ if (rc) {
+ goto error;
+ }
+ d->device_id = v;
+ rc = xen_host_pci_get_dec_value(d, "irq", &v);
+ if (rc) {
+ goto error;
+ }
+ d->irq = v;
+ d->is_virtfn = xen_host_pci_dev_is_virtfn(d);
+
+ return 0;
+error:
+ if (d->config_fd >= 0) {
+ close(d->config_fd);
+ d->config_fd = -1;
+ }
+ return rc;
+}
+
+void xen_host_pci_device_put(XenHostPCIDevice *d)
+{
+ if (d->config_fd >= 0) {
+ close(d->config_fd);
+ d->config_fd = -1;
+ }
+}
diff --git a/hw/xen-host-pci-device.h b/hw/xen-host-pci-device.h
new file mode 100644
index 0000000000..0079daca51
--- /dev/null
+++ b/hw/xen-host-pci-device.h
@@ -0,0 +1,55 @@
+#ifndef XEN_HOST_PCI_DEVICE_H
+#define XEN_HOST_PCI_DEVICE_H
+
+#include "pci.h"
+
+enum {
+ XEN_HOST_PCI_REGION_TYPE_IO = 1 << 1,
+ XEN_HOST_PCI_REGION_TYPE_MEM = 1 << 2,
+ XEN_HOST_PCI_REGION_TYPE_PREFETCH = 1 << 3,
+ XEN_HOST_PCI_REGION_TYPE_MEM_64 = 1 << 4,
+};
+
+typedef struct XenHostPCIIORegion {
+ pcibus_t base_addr;
+ pcibus_t size;
+ uint8_t type;
+ uint8_t bus_flags; /* Bus-specific bits */
+} XenHostPCIIORegion;
+
+typedef struct XenHostPCIDevice {
+ uint16_t domain;
+ uint8_t bus;
+ uint8_t dev;
+ uint8_t func;
+
+ uint16_t vendor_id;
+ uint16_t device_id;
+ int irq;
+
+ XenHostPCIIORegion io_regions[PCI_NUM_REGIONS - 1];
+ XenHostPCIIORegion rom;
+
+ bool is_virtfn;
+
+ int config_fd;
+} XenHostPCIDevice;
+
+int xen_host_pci_device_get(XenHostPCIDevice *d, uint16_t domain,
+ uint8_t bus, uint8_t dev, uint8_t func);
+void xen_host_pci_device_put(XenHostPCIDevice *pci_dev);
+
+int xen_host_pci_get_byte(XenHostPCIDevice *d, int pos, uint8_t *p);
+int xen_host_pci_get_word(XenHostPCIDevice *d, int pos, uint16_t *p);
+int xen_host_pci_get_long(XenHostPCIDevice *d, int pos, uint32_t *p);
+int xen_host_pci_get_block(XenHostPCIDevice *d, int pos, uint8_t *buf,
+ int len);
+int xen_host_pci_set_byte(XenHostPCIDevice *d, int pos, uint8_t data);
+int xen_host_pci_set_word(XenHostPCIDevice *d, int pos, uint16_t data);
+int xen_host_pci_set_long(XenHostPCIDevice *d, int pos, uint32_t data);
+int xen_host_pci_set_block(XenHostPCIDevice *d, int pos, uint8_t *buf,
+ int len);
+
+int xen_host_pci_find_ext_cap_offset(XenHostPCIDevice *s, uint32_t cap);
+
+#endif /* !XEN_HOST_PCI_DEVICE_H */
diff --git a/hw/xen_backend.c b/hw/xen_backend.c
index 66cb144397..f83a1e1d09 100644
--- a/hw/xen_backend.c
+++ b/hw/xen_backend.c
@@ -34,15 +34,13 @@
#include <sys/mman.h>
#include <sys/signal.h>
-#include <xs.h>
-#include <xenctrl.h>
-#include <xen/grant_table.h>
-
#include "hw.h"
#include "qemu-char.h"
#include "qemu-log.h"
#include "xen_backend.h"
+#include <xen/grant_table.h>
+
/* ------------------------------------------------------------- */
/* public */
diff --git a/hw/xen_backend.h b/hw/xen_backend.h
index 3305630903..fea86dd78b 100644
--- a/hw/xen_backend.h
+++ b/hw/xen_backend.h
@@ -4,6 +4,7 @@
#include "xen_common.h"
#include "sysemu.h"
#include "net.h"
+#include "net/hub.h"
/* ------------------------------------------------------------- */
diff --git a/hw/xen_common.h b/hw/xen_common.h
index fe7f227f92..727757afb4 100644
--- a/hw/xen_common.h
+++ b/hw/xen_common.h
@@ -7,7 +7,11 @@
#include <inttypes.h>
#include <xenctrl.h>
-#include <xs.h>
+#if CONFIG_XEN_CTRL_INTERFACE_VERSION < 420
+# include <xs.h>
+#else
+# include <xenstore.h>
+#endif
#include <xen/io/xenbus.h>
#include "hw.h"
@@ -150,4 +154,7 @@ static inline int xen_xc_hvm_inject_msi(XenXC xen_xc, domid_t dom,
void destroy_hvm_domain(bool reboot);
+/* shutdown/destroy current domain because of an error */
+void xen_shutdown_fatal_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
+
#endif /* QEMU_HW_XEN_COMMON_H */
diff --git a/hw/xen_console.c b/hw/xen_console.c
index 3794b1972d..9426d7374f 100644
--- a/hw/xen_console.c
+++ b/hw/xen_console.c
@@ -28,14 +28,13 @@
#include <termios.h>
#include <stdarg.h>
#include <sys/mman.h>
-#include <xs.h>
-#include <xen/io/console.h>
-#include <xenctrl.h>
#include "hw.h"
#include "qemu-char.h"
#include "xen_backend.h"
+#include <xen/io/console.h>
+
struct buffer {
uint8_t *data;
size_t consumed;
diff --git a/hw/xen_devconfig.c b/hw/xen_devconfig.c
index 0928613b55..d83e8d0f64 100644
--- a/hw/xen_devconfig.c
+++ b/hw/xen_devconfig.c
@@ -123,19 +123,21 @@ int xen_config_dev_nic(NICInfo *nic)
{
char fe[256], be[256];
char mac[20];
+ int vlan_id = -1;
+ net_hub_id_for_client(nic->netdev, &vlan_id);
snprintf(mac, sizeof(mac), "%02x:%02x:%02x:%02x:%02x:%02x",
nic->macaddr.a[0], nic->macaddr.a[1], nic->macaddr.a[2],
nic->macaddr.a[3], nic->macaddr.a[4], nic->macaddr.a[5]);
- xen_be_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", nic->vlan->id, mac);
- xen_config_dev_dirs("vif", "qnic", nic->vlan->id, fe, be, sizeof(fe));
+ xen_be_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac);
+ xen_config_dev_dirs("vif", "qnic", vlan_id, fe, be, sizeof(fe));
/* frontend */
- xenstore_write_int(fe, "handle", nic->vlan->id);
+ xenstore_write_int(fe, "handle", vlan_id);
xenstore_write_str(fe, "mac", mac);
/* backend */
- xenstore_write_int(be, "handle", nic->vlan->id);
+ xenstore_write_int(be, "handle", vlan_id);
xenstore_write_str(be, "mac", mac);
/* common stuff */
diff --git a/hw/xen_disk.c b/hw/xen_disk.c
index fb68ed9bbf..e6bb2f20b9 100644
--- a/hw/xen_disk.c
+++ b/hw/xen_disk.c
@@ -35,14 +35,10 @@
#include <sys/mman.h>
#include <sys/uio.h>
-#include <xs.h>
-#include <xenctrl.h>
-#include <xen/io/xenbus.h>
-
#include "hw.h"
#include "qemu-char.h"
-#include "xen_blkif.h"
#include "xen_backend.h"
+#include "xen_blkif.h"
#include "blockdev.h"
/* ------------------------------------------------------------- */
diff --git a/hw/xen_nic.c b/hw/xen_nic.c
index 9a59bdad6e..8b79bfb73e 100644
--- a/hw/xen_nic.c
+++ b/hw/xen_nic.c
@@ -35,11 +35,6 @@
#include <sys/mman.h>
#include <sys/wait.h>
-#include <xs.h>
-#include <xenctrl.h>
-#include <xen/io/xenbus.h>
-#include <xen/io/netif.h>
-
#include "hw.h"
#include "net.h"
#include "net/checksum.h"
@@ -47,6 +42,8 @@
#include "qemu-char.h"
#include "xen_backend.h"
+#include <xen/io/netif.h>
+
/* ------------------------------------------------------------- */
struct XenNetDev {
@@ -236,7 +233,7 @@ static void net_rx_response(struct XenNetDev *netdev,
#define NET_IP_ALIGN 2
-static int net_rx_ok(VLANClientState *nc)
+static int net_rx_ok(NetClientState *nc)
{
struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
RING_IDX rc, rp;
@@ -257,7 +254,7 @@ static int net_rx_ok(VLANClientState *nc)
return 1;
}
-static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t net_rx_packet(NetClientState *nc, const uint8_t *buf, size_t size)
{
struct XenNetDev *netdev = DO_UPCAST(NICState, nc, nc)->opaque;
netif_rx_request_t rxreq;
@@ -304,7 +301,7 @@ static ssize_t net_rx_packet(VLANClientState *nc, const uint8_t *buf, size_t siz
/* ------------------------------------------------------------- */
static NetClientInfo net_xen_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = net_rx_ok,
.receive = net_rx_packet,
@@ -328,7 +325,6 @@ static int net_init(struct XenDevice *xendev)
return -1;
}
- netdev->conf.vlan = qemu_find_vlan(netdev->xendev.dev, 1);
netdev->conf.peer = NULL;
netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf,
@@ -410,7 +406,7 @@ static void net_disconnect(struct XenDevice *xendev)
netdev->rxs = NULL;
}
if (netdev->nic) {
- qemu_del_vlan_client(&netdev->nic->nc);
+ qemu_del_net_client(&netdev->nic->nc);
netdev->nic = NULL;
}
}
diff --git a/hw/xen_platform.c b/hw/xen_platform.c
index 0214f370b2..c1fe984f07 100644
--- a/hw/xen_platform.c
+++ b/hw/xen_platform.c
@@ -83,7 +83,7 @@ static void log_writeb(PCIXenPlatformState *s, char val)
#define UNPLUG_ALL_NICS 2
#define UNPLUG_AUX_IDE_DISKS 4
-static void unplug_nic(PCIBus *b, PCIDevice *d)
+static void unplug_nic(PCIBus *b, PCIDevice *d, void *o)
{
if (pci_get_word(d->config + PCI_CLASS_DEVICE) ==
PCI_CLASS_NETWORK_ETHERNET) {
@@ -96,10 +96,10 @@ static void unplug_nic(PCIBus *b, PCIDevice *d)
static void pci_unplug_nics(PCIBus *bus)
{
- pci_for_each_device(bus, 0, unplug_nic);
+ pci_for_each_device(bus, 0, unplug_nic, NULL);
}
-static void unplug_disks(PCIBus *b, PCIDevice *d)
+static void unplug_disks(PCIBus *b, PCIDevice *d, void *o)
{
if (pci_get_word(d->config + PCI_CLASS_DEVICE) ==
PCI_CLASS_STORAGE_IDE) {
@@ -109,7 +109,7 @@ static void unplug_disks(PCIBus *b, PCIDevice *d)
static void pci_unplug_disks(PCIBus *bus)
{
- pci_for_each_device(bus, 0, unplug_disks);
+ pci_for_each_device(bus, 0, unplug_disks, NULL);
}
static void platform_fixed_ioport_writew(void *opaque, uint32_t addr, uint32_t val)
diff --git a/hw/xen_pt.c b/hw/xen_pt.c
new file mode 100644
index 0000000000..307119a12f
--- /dev/null
+++ b/hw/xen_pt.c
@@ -0,0 +1,849 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Alex Novik <alex@neocleus.com>
+ * Allen Kay <allen.m.kay@intel.com>
+ * Guy Zana <guy@neocleus.com>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+/*
+ * Interrupt Disable policy:
+ *
+ * INTx interrupt:
+ * Initialize(register_real_device)
+ * Map INTx(xc_physdev_map_pirq):
+ * <fail>
+ * - Set real Interrupt Disable bit to '1'.
+ * - Set machine_irq and assigned_device->machine_irq to '0'.
+ * * Don't bind INTx.
+ *
+ * Bind INTx(xc_domain_bind_pt_pci_irq):
+ * <fail>
+ * - Set real Interrupt Disable bit to '1'.
+ * - Unmap INTx.
+ * - Decrement xen_pt_mapped_machine_irq[machine_irq]
+ * - Set assigned_device->machine_irq to '0'.
+ *
+ * Write to Interrupt Disable bit by guest software(xen_pt_cmd_reg_write)
+ * Write '0'
+ * - Set real bit to '0' if assigned_device->machine_irq isn't '0'.
+ *
+ * Write '1'
+ * - Set real bit to '1'.
+ *
+ * MSI interrupt:
+ * Initialize MSI register(xen_pt_msi_setup, xen_pt_msi_update)
+ * Bind MSI(xc_domain_update_msi_irq)
+ * <fail>
+ * - Unmap MSI.
+ * - Set dev->msi->pirq to '-1'.
+ *
+ * MSI-X interrupt:
+ * Initialize MSI-X register(xen_pt_msix_update_one)
+ * Bind MSI-X(xc_domain_update_msi_irq)
+ * <fail>
+ * - Unmap MSI-X.
+ * - Set entry->pirq to '-1'.
+ */
+
+#include <sys/ioctl.h>
+
+#include "pci.h"
+#include "xen.h"
+#include "xen_backend.h"
+#include "xen_pt.h"
+#include "range.h"
+
+#define XEN_PT_NR_IRQS (256)
+static uint8_t xen_pt_mapped_machine_irq[XEN_PT_NR_IRQS] = {0};
+
+void xen_pt_log(const PCIDevice *d, const char *f, ...)
+{
+ va_list ap;
+
+ va_start(ap, f);
+ if (d) {
+ fprintf(stderr, "[%02x:%02x.%d] ", pci_bus_num(d->bus),
+ PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
+ }
+ vfprintf(stderr, f, ap);
+ va_end(ap);
+}
+
+/* Config Space */
+
+static int xen_pt_pci_config_access_check(PCIDevice *d, uint32_t addr, int len)
+{
+ /* check offset range */
+ if (addr >= 0xFF) {
+ XEN_PT_ERR(d, "Failed to access register with offset exceeding 0xFF. "
+ "(addr: 0x%02x, len: %d)\n", addr, len);
+ return -1;
+ }
+
+ /* check read size */
+ if ((len != 1) && (len != 2) && (len != 4)) {
+ XEN_PT_ERR(d, "Failed to access register with invalid access length. "
+ "(addr: 0x%02x, len: %d)\n", addr, len);
+ return -1;
+ }
+
+ /* check offset alignment */
+ if (addr & (len - 1)) {
+ XEN_PT_ERR(d, "Failed to access register with invalid access size "
+ "alignment. (addr: 0x%02x, len: %d)\n", addr, len);
+ return -1;
+ }
+
+ return 0;
+}
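Concretely, the three checks above reject, for example, a 2-byte access at offset 0x03 (0x03 & (2 - 1) != 0, i.e. not naturally aligned) and a 3-byte access of any kind, and they refuse offsets of 0xFF and above, so the emulation below only ever deals with the standard 256-byte configuration header.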
+
+int xen_pt_bar_offset_to_index(uint32_t offset)
+{
+ int index = 0;
+
+ /* check Exp ROM BAR */
+ if (offset == PCI_ROM_ADDRESS) {
+ return PCI_ROM_SLOT;
+ }
+
+ /* calculate BAR index */
+ index = (offset - PCI_BASE_ADDRESS_0) >> 2;
+ if (index >= PCI_NUM_REGIONS) {
+ return -1;
+ }
+
+ return index;
+}
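A couple of concrete values, for orientation: PCI_BASE_ADDRESS_0 is 0x10, so an access at offset 0x18 maps to (0x18 - 0x10) >> 2 = BAR 2 and offset 0x24 maps to BAR 5, while PCI_ROM_ADDRESS (0x30) is special-cased to PCI_ROM_SLOT.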
+
+static uint32_t xen_pt_pci_read_config(PCIDevice *d, uint32_t addr, int len)
+{
+ XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
+ uint32_t val = 0;
+ XenPTRegGroup *reg_grp_entry = NULL;
+ XenPTReg *reg_entry = NULL;
+ int rc = 0;
+ int emul_len = 0;
+ uint32_t find_addr = addr;
+
+ if (xen_pt_pci_config_access_check(d, addr, len)) {
+ goto exit;
+ }
+
+ /* find register group entry */
+ reg_grp_entry = xen_pt_find_reg_grp(s, addr);
+ if (reg_grp_entry) {
+ /* check 0-Hardwired register group */
+ if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
+ /* no need to emulate, just return 0 */
+ val = 0;
+ goto exit;
+ }
+ }
+
+ /* read I/O device register value */
+ rc = xen_host_pci_get_block(&s->real_device, addr, (uint8_t *)&val, len);
+ if (rc < 0) {
+ XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
+ memset(&val, 0xff, len);
+ }
+
+ /* just return the I/O device register value for
+ * passthrough type register group */
+ if (reg_grp_entry == NULL) {
+ goto exit;
+ }
+
+ /* adjust the read value to appropriate CFC-CFF window */
+ val <<= (addr & 3) << 3;
+ emul_len = len;
+
+ /* loop around the guest requested size */
+ while (emul_len > 0) {
+ /* find register entry to be emulated */
+ reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
+ if (reg_entry) {
+ XenPTRegInfo *reg = reg_entry->reg;
+ uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
+ uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
+ uint8_t *ptr_val = NULL;
+
+ valid_mask <<= (find_addr - real_offset) << 3;
+ ptr_val = (uint8_t *)&val + (real_offset & 3);
+
+ /* do emulation based on register size */
+ switch (reg->size) {
+ case 1:
+ if (reg->u.b.read) {
+ rc = reg->u.b.read(s, reg_entry, ptr_val, valid_mask);
+ }
+ break;
+ case 2:
+ if (reg->u.w.read) {
+ rc = reg->u.w.read(s, reg_entry,
+ (uint16_t *)ptr_val, valid_mask);
+ }
+ break;
+ case 4:
+ if (reg->u.dw.read) {
+ rc = reg->u.dw.read(s, reg_entry,
+ (uint32_t *)ptr_val, valid_mask);
+ }
+ break;
+ }
+
+ if (rc < 0) {
+ xen_shutdown_fatal_error("Internal error: Invalid read "
+ "emulation. (%s, rc: %d)\n",
+ __func__, rc);
+ return 0;
+ }
+
+ /* calculate next address to find */
+ emul_len -= reg->size;
+ if (emul_len > 0) {
+ find_addr = real_offset + reg->size;
+ }
+ } else {
+ /* nothing to do with passthrough type register,
+ * continue to find next byte */
+ emul_len--;
+ find_addr++;
+ }
+ }
+
+ /* shift back before returning the value to the PCI bus emulator */
+ val >>= ((addr & 3) << 3);
+
+exit:
+ XEN_PT_LOG_CONFIG(d, addr, val, len);
+ return val;
+}
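To make the window adjustment above concrete: for a 2-byte guest read at offset 0x06 (PCI_STATUS), val is shifted left by (0x06 & 3) << 3 = 16 bits so that each byte sits at its natural position within the aligned dword, valid_mask starts out as 0xFFFF, and the word-sized read handler receives ptr_val = (uint8_t *)&val + 2, i.e. the slot where the Status word was just placed. The final val >>= 16 undoes the adjustment before the value is handed back to the PCI bus emulation.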
+
+static void xen_pt_pci_write_config(PCIDevice *d, uint32_t addr,
+ uint32_t val, int len)
+{
+ XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
+ int index = 0;
+ XenPTRegGroup *reg_grp_entry = NULL;
+ int rc = 0;
+ uint32_t read_val = 0;
+ int emul_len = 0;
+ XenPTReg *reg_entry = NULL;
+ uint32_t find_addr = addr;
+ XenPTRegInfo *reg = NULL;
+
+ if (xen_pt_pci_config_access_check(d, addr, len)) {
+ return;
+ }
+
+ XEN_PT_LOG_CONFIG(d, addr, val, len);
+
+ /* check unused BAR register */
+ index = xen_pt_bar_offset_to_index(addr);
+ if ((index >= 0) && (val > 0 && val < XEN_PT_BAR_ALLF) &&
+ (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED)) {
+ XEN_PT_WARN(d, "Guest attempt to set address to unused Base Address "
+ "Register. (addr: 0x%02x, len: %d)\n", addr, len);
+ }
+
+ /* find register group entry */
+ reg_grp_entry = xen_pt_find_reg_grp(s, addr);
+ if (reg_grp_entry) {
+ /* check 0-Hardwired register group */
+ if (reg_grp_entry->reg_grp->grp_type == XEN_PT_GRP_TYPE_HARDWIRED) {
+ /* ignore silently */
+ XEN_PT_WARN(d, "Access to 0-Hardwired register. "
+ "(addr: 0x%02x, len: %d)\n", addr, len);
+ return;
+ }
+ }
+
+ rc = xen_host_pci_get_block(&s->real_device, addr,
+ (uint8_t *)&read_val, len);
+ if (rc < 0) {
+ XEN_PT_ERR(d, "pci_read_block failed. return value: %d.\n", rc);
+ memset(&read_val, 0xff, len);
+ }
+
+ /* pass directly to the real device for passthrough type register group */
+ if (reg_grp_entry == NULL) {
+ goto out;
+ }
+
+ memory_region_transaction_begin();
+ pci_default_write_config(d, addr, val, len);
+
+ /* adjust the read and write value to appropriate CFC-CFF window */
+ read_val <<= (addr & 3) << 3;
+ val <<= (addr & 3) << 3;
+ emul_len = len;
+
+ /* loop around the guest requested size */
+ while (emul_len > 0) {
+ /* find register entry to be emulated */
+ reg_entry = xen_pt_find_reg(reg_grp_entry, find_addr);
+ if (reg_entry) {
+ reg = reg_entry->reg;
+ uint32_t real_offset = reg_grp_entry->base_offset + reg->offset;
+ uint32_t valid_mask = 0xFFFFFFFF >> ((4 - emul_len) << 3);
+ uint8_t *ptr_val = NULL;
+
+ valid_mask <<= (find_addr - real_offset) << 3;
+ ptr_val = (uint8_t *)&val + (real_offset & 3);
+
+ /* do emulation based on register size */
+ switch (reg->size) {
+ case 1:
+ if (reg->u.b.write) {
+ rc = reg->u.b.write(s, reg_entry, ptr_val,
+ read_val >> ((real_offset & 3) << 3),
+ valid_mask);
+ }
+ break;
+ case 2:
+ if (reg->u.w.write) {
+ rc = reg->u.w.write(s, reg_entry, (uint16_t *)ptr_val,
+ (read_val >> ((real_offset & 3) << 3)),
+ valid_mask);
+ }
+ break;
+ case 4:
+ if (reg->u.dw.write) {
+ rc = reg->u.dw.write(s, reg_entry, (uint32_t *)ptr_val,
+ (read_val >> ((real_offset & 3) << 3)),
+ valid_mask);
+ }
+ break;
+ }
+
+ if (rc < 0) {
+ xen_shutdown_fatal_error("Internal error: Invalid write"
+ " emulation. (%s, rc: %d)\n",
+ __func__, rc);
+ return;
+ }
+
+ /* calculate next address to find */
+ emul_len -= reg->size;
+ if (emul_len > 0) {
+ find_addr = real_offset + reg->size;
+ }
+ } else {
+ /* nothing to do with passthrough type register,
+ * continue to find next byte */
+ emul_len--;
+ find_addr++;
+ }
+ }
+
+ /* shift back before passing the value to xen_host_pci_device */
+ val >>= (addr & 3) << 3;
+
+ memory_region_transaction_commit();
+
+out:
+ if (!(reg && reg->no_wb)) {
+ /* unknown regs are passed through */
+ rc = xen_host_pci_set_block(&s->real_device, addr,
+ (uint8_t *)&val, len);
+
+ if (rc < 0) {
+ XEN_PT_ERR(d, "pci_write_block failed. return value: %d.\n", rc);
+ }
+ }
+}
+
+/* register regions */
+
+static uint64_t xen_pt_bar_read(void *o, target_phys_addr_t addr,
+ unsigned size)
+{
+ PCIDevice *d = o;
+ /* if this function is called, that probably means that there is a
+ * misconfiguration of the IOMMU. */
+ XEN_PT_ERR(d, "Should not read BAR through QEMU. @0x"TARGET_FMT_plx"\n",
+ addr);
+ return 0;
+}
+static void xen_pt_bar_write(void *o, target_phys_addr_t addr, uint64_t val,
+ unsigned size)
+{
+ PCIDevice *d = o;
+ /* Same comment as xen_pt_bar_read function */
+ XEN_PT_ERR(d, "Should not write BAR through QEMU. @0x"TARGET_FMT_plx"\n",
+ addr);
+}
+
+static const MemoryRegionOps ops = {
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .read = xen_pt_bar_read,
+ .write = xen_pt_bar_write,
+};
+
+static int xen_pt_register_regions(XenPCIPassthroughState *s)
+{
+ int i = 0;
+ XenHostPCIDevice *d = &s->real_device;
+
+ /* Register PIO/MMIO BARs */
+ for (i = 0; i < PCI_ROM_SLOT; i++) {
+ XenHostPCIIORegion *r = &d->io_regions[i];
+ uint8_t type;
+
+ if (r->base_addr == 0 || r->size == 0) {
+ continue;
+ }
+
+ s->bases[i].access.u = r->base_addr;
+
+ if (r->type & XEN_HOST_PCI_REGION_TYPE_IO) {
+ type = PCI_BASE_ADDRESS_SPACE_IO;
+ } else {
+ type = PCI_BASE_ADDRESS_SPACE_MEMORY;
+ if (r->type & XEN_HOST_PCI_REGION_TYPE_PREFETCH) {
+ type |= PCI_BASE_ADDRESS_MEM_PREFETCH;
+ }
+ }
+
+ memory_region_init_io(&s->bar[i], &ops, &s->dev,
+ "xen-pci-pt-bar", r->size);
+ pci_register_bar(&s->dev, i, type, &s->bar[i]);
+
+ XEN_PT_LOG(&s->dev, "IO region %i registered (size=0x%08"PRIx64
+ " base_addr=0x%08"PRIx64" type: %#x)\n",
+ i, r->size, r->base_addr, type);
+ }
+
+ /* Register expansion ROM address */
+ if (d->rom.base_addr && d->rom.size) {
+ uint32_t bar_data = 0;
+
+ /* Re-set BAR reported by OS, otherwise ROM can't be read. */
+ if (xen_host_pci_get_long(d, PCI_ROM_ADDRESS, &bar_data)) {
+ return 0;
+ }
+ if ((bar_data & PCI_ROM_ADDRESS_MASK) == 0) {
+ bar_data |= d->rom.base_addr & PCI_ROM_ADDRESS_MASK;
+ xen_host_pci_set_long(d, PCI_ROM_ADDRESS, bar_data);
+ }
+
+ s->bases[PCI_ROM_SLOT].access.maddr = d->rom.base_addr;
+
+ memory_region_init_rom_device(&s->rom, NULL, NULL,
+ "xen-pci-pt-rom", d->rom.size);
+ pci_register_bar(&s->dev, PCI_ROM_SLOT, PCI_BASE_ADDRESS_MEM_PREFETCH,
+ &s->rom);
+
+ XEN_PT_LOG(&s->dev, "Expansion ROM registered (size=0x%08"PRIx64
+ " base_addr=0x%08"PRIx64")\n",
+ d->rom.size, d->rom.base_addr);
+ }
+
+ return 0;
+}
+
+static void xen_pt_unregister_regions(XenPCIPassthroughState *s)
+{
+ XenHostPCIDevice *d = &s->real_device;
+ int i;
+
+ for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
+ XenHostPCIIORegion *r = &d->io_regions[i];
+
+ if (r->base_addr == 0 || r->size == 0) {
+ continue;
+ }
+
+ memory_region_destroy(&s->bar[i]);
+ }
+ if (d->rom.base_addr && d->rom.size) {
+ memory_region_destroy(&s->rom);
+ }
+}
+
+/* region mapping */
+
+static int xen_pt_bar_from_region(XenPCIPassthroughState *s, MemoryRegion *mr)
+{
+ int i = 0;
+
+ for (i = 0; i < PCI_NUM_REGIONS - 1; i++) {
+ if (mr == &s->bar[i]) {
+ return i;
+ }
+ }
+ if (mr == &s->rom) {
+ return PCI_ROM_SLOT;
+ }
+ return -1;
+}
+
+/*
+ * xen_pt_check_bar_overlap() checks whether the io_region described by
+ * (addr, size, type) in CheckBarArgs overlaps an io_region of another
+ * device. It is invoked (via pci_for_each_device()) once per device on
+ * the bus, and sets arg->rc when an overlap is found.
+ */
+struct CheckBarArgs {
+ XenPCIPassthroughState *s;
+ pcibus_t addr;
+ pcibus_t size;
+ uint8_t type;
+ bool rc;
+};
+static void xen_pt_check_bar_overlap(PCIBus *bus, PCIDevice *d, void *opaque)
+{
+ struct CheckBarArgs *arg = opaque;
+ XenPCIPassthroughState *s = arg->s;
+ uint8_t type = arg->type;
+ int i;
+
+ if (d->devfn == s->dev.devfn) {
+ return;
+ }
+
+ /* xxx: This ignores bridges. */
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ const PCIIORegion *r = &d->io_regions[i];
+
+ if (!r->size) {
+ continue;
+ }
+ if ((type & PCI_BASE_ADDRESS_SPACE_IO)
+ != (r->type & PCI_BASE_ADDRESS_SPACE_IO)) {
+ continue;
+ }
+
+ if (ranges_overlap(arg->addr, arg->size, r->addr, r->size)) {
+ XEN_PT_WARN(&s->dev,
+ "Overlapped to device [%02x:%02x.%d] Region: %i"
+ " (addr: %#"FMT_PCIBUS", len: %#"FMT_PCIBUS")\n",
+ pci_bus_num(bus), PCI_SLOT(d->devfn),
+ PCI_FUNC(d->devfn), i, r->addr, r->size);
+ arg->rc = true;
+ }
+ }
+}
+
+static void xen_pt_region_update(XenPCIPassthroughState *s,
+ MemoryRegionSection *sec, bool adding)
+{
+ PCIDevice *d = &s->dev;
+ MemoryRegion *mr = sec->mr;
+ int bar = -1;
+ int rc;
+ int op = adding ? DPCI_ADD_MAPPING : DPCI_REMOVE_MAPPING;
+ struct CheckBarArgs args = {
+ .s = s,
+ .addr = sec->offset_within_address_space,
+ .size = sec->size,
+ .rc = false,
+ };
+
+ bar = xen_pt_bar_from_region(s, mr);
+ if (bar == -1 && (!s->msix || &s->msix->mmio != mr)) {
+ return;
+ }
+
+ if (s->msix && &s->msix->mmio == mr) {
+ if (adding) {
+ s->msix->mmio_base_addr = sec->offset_within_address_space;
+ rc = xen_pt_msix_update_remap(s, s->msix->bar_index);
+ }
+ return;
+ }
+
+ args.type = d->io_regions[bar].type;
+ pci_for_each_device(d->bus, pci_bus_num(d->bus),
+ xen_pt_check_bar_overlap, &args);
+ if (args.rc) {
+ XEN_PT_WARN(d, "Region: %d (addr: %#"FMT_PCIBUS
+ ", len: %#"FMT_PCIBUS") is overlapped.\n",
+ bar, sec->offset_within_address_space, sec->size);
+ }
+
+ if (d->io_regions[bar].type & PCI_BASE_ADDRESS_SPACE_IO) {
+ uint32_t guest_port = sec->offset_within_address_space;
+ uint32_t machine_port = s->bases[bar].access.pio_base;
+ uint32_t size = sec->size;
+ rc = xc_domain_ioport_mapping(xen_xc, xen_domid,
+ guest_port, machine_port, size,
+ op);
+ if (rc) {
+ XEN_PT_ERR(d, "%s ioport mapping failed! (rc: %i)\n",
+ adding ? "create new" : "remove old", rc);
+ }
+ } else {
+ pcibus_t guest_addr = sec->offset_within_address_space;
+ pcibus_t machine_addr = s->bases[bar].access.maddr
+ + sec->offset_within_region;
+ pcibus_t size = sec->size;
+ rc = xc_domain_memory_mapping(xen_xc, xen_domid,
+ XEN_PFN(guest_addr + XC_PAGE_SIZE - 1),
+ XEN_PFN(machine_addr + XC_PAGE_SIZE - 1),
+ XEN_PFN(size + XC_PAGE_SIZE - 1),
+ op);
+ if (rc) {
+ XEN_PT_ERR(d, "%s mem mapping failed! (rc: %i)\n",
+ adding ? "create new" : "remove old", rc);
+ }
+ }
+}
+
+static void xen_pt_begin(MemoryListener *l)
+{
+}
+
+static void xen_pt_commit(MemoryListener *l)
+{
+}
+
+static void xen_pt_region_add(MemoryListener *l, MemoryRegionSection *sec)
+{
+ XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
+ memory_listener);
+
+ xen_pt_region_update(s, sec, true);
+}
+
+static void xen_pt_region_del(MemoryListener *l, MemoryRegionSection *sec)
+{
+ XenPCIPassthroughState *s = container_of(l, XenPCIPassthroughState,
+ memory_listener);
+
+ xen_pt_region_update(s, sec, false);
+}
+
+static void xen_pt_region_nop(MemoryListener *l, MemoryRegionSection *s)
+{
+}
+
+static void xen_pt_log_fns(MemoryListener *l, MemoryRegionSection *s)
+{
+}
+
+static void xen_pt_log_global_fns(MemoryListener *l)
+{
+}
+
+static void xen_pt_eventfd_fns(MemoryListener *l, MemoryRegionSection *s,
+ bool match_data, uint64_t data, EventNotifier *n)
+{
+}
+
+static const MemoryListener xen_pt_memory_listener = {
+ .begin = xen_pt_begin,
+ .commit = xen_pt_commit,
+ .region_add = xen_pt_region_add,
+ .region_nop = xen_pt_region_nop,
+ .region_del = xen_pt_region_del,
+ .log_start = xen_pt_log_fns,
+ .log_stop = xen_pt_log_fns,
+ .log_sync = xen_pt_log_fns,
+ .log_global_start = xen_pt_log_global_fns,
+ .log_global_stop = xen_pt_log_global_fns,
+ .eventfd_add = xen_pt_eventfd_fns,
+ .eventfd_del = xen_pt_eventfd_fns,
+ .priority = 10,
+};
+
+/* init */
+
+static int xen_pt_initfn(PCIDevice *d)
+{
+ XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
+ int rc = 0;
+ uint8_t machine_irq = 0;
+ int pirq = XEN_PT_UNASSIGNED_PIRQ;
+
+ /* register real device */
+ XEN_PT_LOG(d, "Assigning real physical device %02x:%02x.%d"
+ " to devfn %#x\n",
+ s->hostaddr.bus, s->hostaddr.slot, s->hostaddr.function,
+ s->dev.devfn);
+
+ rc = xen_host_pci_device_get(&s->real_device,
+ s->hostaddr.domain, s->hostaddr.bus,
+ s->hostaddr.slot, s->hostaddr.function);
+ if (rc) {
+ XEN_PT_ERR(d, "Failed to \"open\" the real pci device. rc: %i\n", rc);
+ return -1;
+ }
+
+ s->is_virtfn = s->real_device.is_virtfn;
+ if (s->is_virtfn) {
+ XEN_PT_LOG(d, "%04x:%02x:%02x.%d is a SR-IOV Virtual Function\n",
+ s->real_device.domain, bus, slot, func);
+ }
+
+ /* Initialize virtualized PCI configuration (Extended 256 Bytes) */
+ if (xen_host_pci_get_block(&s->real_device, 0, d->config,
+ PCI_CONFIG_SPACE_SIZE) == -1) {
+ xen_host_pci_device_put(&s->real_device);
+ return -1;
+ }
+
+ s->memory_listener = xen_pt_memory_listener;
+
+ /* Handle real device's MMIO/PIO BARs */
+ xen_pt_register_regions(s);
+
+ /* reinitialize each config register to be emulated */
+ if (xen_pt_config_init(s)) {
+ XEN_PT_ERR(d, "PCI Config space initialisation failed.\n");
+ xen_host_pci_device_put(&s->real_device);
+ return -1;
+ }
+
+ /* Bind interrupt */
+ if (!s->dev.config[PCI_INTERRUPT_PIN]) {
+ XEN_PT_LOG(d, "no pin interrupt\n");
+ goto out;
+ }
+
+ machine_irq = s->real_device.irq;
+ rc = xc_physdev_map_pirq(xen_xc, xen_domid, machine_irq, &pirq);
+
+ if (rc < 0) {
+ XEN_PT_ERR(d, "Mapping machine irq %u to pirq %i failed, (rc: %d)\n",
+ machine_irq, pirq, rc);
+
+ /* Disable PCI INTx assertion (set the INTx Disable bit,
+ * bit 10 of the PCI Command register) */
+ xen_host_pci_set_word(&s->real_device,
+ PCI_COMMAND,
+ pci_get_word(s->dev.config + PCI_COMMAND)
+ | PCI_COMMAND_INTX_DISABLE);
+ machine_irq = 0;
+ s->machine_irq = 0;
+ } else {
+ machine_irq = pirq;
+ s->machine_irq = pirq;
+ xen_pt_mapped_machine_irq[machine_irq]++;
+ }
+
+ /* bind machine_irq to device */
+ if (machine_irq != 0) {
+ uint8_t e_intx = xen_pt_pci_intx(s);
+
+ rc = xc_domain_bind_pt_pci_irq(xen_xc, xen_domid, machine_irq,
+ pci_bus_num(d->bus),
+ PCI_SLOT(d->devfn),
+ e_intx);
+ if (rc < 0) {
+ XEN_PT_ERR(d, "Binding of interrupt %i failed! (rc: %d)\n",
+ e_intx, rc);
+
+ /* Disable PCI INTx assertion (set the INTx Disable bit,
+ * bit 10 of the PCI Command register) */
+ xen_host_pci_set_word(&s->real_device, PCI_COMMAND,
+ *(uint16_t *)(&s->dev.config[PCI_COMMAND])
+ | PCI_COMMAND_INTX_DISABLE);
+ xen_pt_mapped_machine_irq[machine_irq]--;
+
+ if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
+ if (xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq)) {
+ XEN_PT_ERR(d, "Unmapping of machine interrupt %i failed!"
+ " (rc: %d)\n", machine_irq, rc);
+ }
+ }
+ s->machine_irq = 0;
+ }
+ }
+
+out:
+ memory_listener_register(&s->memory_listener, NULL);
+ XEN_PT_LOG(d, "Real physical device %02x:%02x.%d registered successfuly!\n",
+ bus, slot, func);
+
+ return 0;
+}
+
+static void xen_pt_unregister_device(PCIDevice *d)
+{
+ XenPCIPassthroughState *s = DO_UPCAST(XenPCIPassthroughState, dev, d);
+ uint8_t machine_irq = s->machine_irq;
+ uint8_t intx = xen_pt_pci_intx(s);
+ int rc;
+
+ if (machine_irq) {
+ rc = xc_domain_unbind_pt_irq(xen_xc, xen_domid, machine_irq,
+ PT_IRQ_TYPE_PCI,
+ pci_bus_num(d->bus),
+ PCI_SLOT(s->dev.devfn),
+ intx,
+ 0 /* isa_irq */);
+ if (rc < 0) {
+ XEN_PT_ERR(d, "unbinding of interrupt INT%c failed."
+ " (machine irq: %i, rc: %d)"
+ " But bravely continuing on..\n",
+ 'a' + intx, machine_irq, rc);
+ }
+ }
+
+ if (s->msi) {
+ xen_pt_msi_disable(s);
+ }
+ if (s->msix) {
+ xen_pt_msix_disable(s);
+ }
+
+ if (machine_irq) {
+ xen_pt_mapped_machine_irq[machine_irq]--;
+
+ if (xen_pt_mapped_machine_irq[machine_irq] == 0) {
+ rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, machine_irq);
+
+ if (rc < 0) {
+ XEN_PT_ERR(d, "unmapping of interrupt %i failed. (rc: %d)"
+ " But bravely continuing on..\n",
+ machine_irq, rc);
+ }
+ }
+ }
+
+ /* delete all emulated config registers */
+ xen_pt_config_delete(s);
+
+ xen_pt_unregister_regions(s);
+ memory_listener_unregister(&s->memory_listener);
+
+ xen_host_pci_device_put(&s->real_device);
+}
+
+static Property xen_pci_passthrough_properties[] = {
+ DEFINE_PROP_PCI_HOST_DEVADDR("hostaddr", XenPCIPassthroughState, hostaddr),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xen_pci_passthrough_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ k->init = xen_pt_initfn;
+ k->exit = xen_pt_unregister_device;
+ k->config_read = xen_pt_pci_read_config;
+ k->config_write = xen_pt_pci_write_config;
+ dc->desc = "Assign an host PCI device with Xen";
+ dc->props = xen_pci_passthrough_properties;
+}
+
+static TypeInfo xen_pci_passthrough_info = {
+ .name = "xen-pci-passthrough",
+ .parent = TYPE_PCI_DEVICE,
+ .instance_size = sizeof(XenPCIPassthroughState),
+ .class_init = xen_pci_passthrough_class_init,
+};
+
+static void xen_pci_passthrough_register_types(void)
+{
+ type_register_static(&xen_pci_passthrough_info);
+}
+
+type_init(xen_pci_passthrough_register_types)
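With the type registered, the passthrough device is created like any other qdev. A hypothetical invocation (assuming the standard -device syntax and a bus:slot.function host address as parsed by DEFINE_PROP_PCI_HOST_DEVADDR; the exact syntax is not spelled out in this patch):

    -device xen-pci-passthrough,hostaddr=04:10.0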
diff --git a/hw/xen_pt.h b/hw/xen_pt.h
new file mode 100644
index 0000000000..41904ece93
--- /dev/null
+++ b/hw/xen_pt.h
@@ -0,0 +1,301 @@
+#ifndef XEN_PT_H
+#define XEN_PT_H
+
+#include "qemu-common.h"
+#include "xen_common.h"
+#include "pci.h"
+#include "xen-host-pci-device.h"
+
+void xen_pt_log(const PCIDevice *d, const char *f, ...) GCC_FMT_ATTR(2, 3);
+
+#define XEN_PT_ERR(d, _f, _a...) xen_pt_log(d, "%s: Error: "_f, __func__, ##_a)
+
+#ifdef XEN_PT_LOGGING_ENABLED
+# define XEN_PT_LOG(d, _f, _a...) xen_pt_log(d, "%s: " _f, __func__, ##_a)
+# define XEN_PT_WARN(d, _f, _a...) \
+ xen_pt_log(d, "%s: Warning: "_f, __func__, ##_a)
+#else
+# define XEN_PT_LOG(d, _f, _a...)
+# define XEN_PT_WARN(d, _f, _a...)
+#endif
+
+#ifdef XEN_PT_DEBUG_PCI_CONFIG_ACCESS
+# define XEN_PT_LOG_CONFIG(d, addr, val, len) \
+ xen_pt_log(d, "%s: address=0x%04x val=0x%08x len=%d\n", \
+ __func__, addr, val, len)
+#else
+# define XEN_PT_LOG_CONFIG(d, addr, val, len)
+#endif
+
+
+/* Helper */
+#define XEN_PFN(x) ((x) >> XC_PAGE_SHIFT)
+
+typedef struct XenPTRegInfo XenPTRegInfo;
+typedef struct XenPTReg XenPTReg;
+
+typedef struct XenPCIPassthroughState XenPCIPassthroughState;
+
+/* function type for config reg */
+typedef int (*xen_pt_conf_reg_init)
+ (XenPCIPassthroughState *, XenPTRegInfo *, uint32_t real_offset,
+ uint32_t *data);
+typedef int (*xen_pt_conf_dword_write)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint32_t *val, uint32_t dev_value, uint32_t valid_mask);
+typedef int (*xen_pt_conf_word_write)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint16_t *val, uint16_t dev_value, uint16_t valid_mask);
+typedef int (*xen_pt_conf_byte_write)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint8_t *val, uint8_t dev_value, uint8_t valid_mask);
+typedef int (*xen_pt_conf_dword_read)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint32_t *val, uint32_t valid_mask);
+typedef int (*xen_pt_conf_word_read)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint16_t *val, uint16_t valid_mask);
+typedef int (*xen_pt_conf_byte_read)
+ (XenPCIPassthroughState *, XenPTReg *cfg_entry,
+ uint8_t *val, uint8_t valid_mask);
+
+#define XEN_PT_BAR_ALLF 0xFFFFFFFF
+#define XEN_PT_BAR_UNMAPPED (-1)
+
+#define PCI_CAP_MAX 48
+
+
+typedef enum {
+ XEN_PT_GRP_TYPE_HARDWIRED = 0, /* 0 Hardwired reg group */
+ XEN_PT_GRP_TYPE_EMU, /* emul reg group */
+} XenPTRegisterGroupType;
+
+typedef enum {
+ XEN_PT_BAR_FLAG_MEM = 0, /* Memory type BAR */
+ XEN_PT_BAR_FLAG_IO, /* I/O type BAR */
+ XEN_PT_BAR_FLAG_UPPER, /* upper 64bit BAR */
+ XEN_PT_BAR_FLAG_UNUSED, /* unused BAR */
+} XenPTBarFlag;
+
+
+typedef struct XenPTRegion {
+ /* BAR flag */
+ XenPTBarFlag bar_flag;
+ /* Translation of the emulated address */
+ union {
+ uint64_t maddr;
+ uint64_t pio_base;
+ uint64_t u;
+ } access;
+} XenPTRegion;
+
+/* XenPTRegInfo declaration
+ * - only for emulated registers (either partially or fully emulated).
+ * - for passthrough registers that need special behavior (like interacting
+ * with other components), set emu_mask to all 0 and specify the r/w
+ * functions properly.
+ * - do NOT use all-Fs for init_val, otherwise the table will not be
+ * registered.
+ */
+
+/* emulated register information */
+struct XenPTRegInfo {
+ uint32_t offset;
+ uint32_t size;
+ uint32_t init_val;
+ /* reg read only field mask (ON:RO/ROS, OFF:other) */
+ uint32_t ro_mask;
+ /* reg emulate field mask (ON:emu, OFF:passthrough) */
+ uint32_t emu_mask;
+ /* no write back allowed */
+ uint32_t no_wb;
+ xen_pt_conf_reg_init init;
+ /* read/write function pointer
+ * for double_word/word/byte size */
+ union {
+ struct {
+ xen_pt_conf_dword_write write;
+ xen_pt_conf_dword_read read;
+ } dw;
+ struct {
+ xen_pt_conf_word_write write;
+ xen_pt_conf_word_read read;
+ } w;
+ struct {
+ xen_pt_conf_byte_write write;
+ xen_pt_conf_byte_read read;
+ } b;
+ } u;
+};
+
+/* emulated register management */
+struct XenPTReg {
+ QLIST_ENTRY(XenPTReg) entries;
+ XenPTRegInfo *reg;
+ uint32_t data; /* emulated value */
+};
+
+typedef struct XenPTRegGroupInfo XenPTRegGroupInfo;
+
+/* emul reg group size initialize method */
+typedef int (*xen_pt_reg_size_init_fn)
+ (XenPCIPassthroughState *, const XenPTRegGroupInfo *,
+ uint32_t base_offset, uint8_t *size);
+
+/* emulated register group information */
+struct XenPTRegGroupInfo {
+ uint8_t grp_id;
+ XenPTRegisterGroupType grp_type;
+ uint8_t grp_size;
+ xen_pt_reg_size_init_fn size_init;
+ XenPTRegInfo *emu_regs;
+};
+
+/* emul register group management table */
+typedef struct XenPTRegGroup {
+ QLIST_ENTRY(XenPTRegGroup) entries;
+ const XenPTRegGroupInfo *reg_grp;
+ uint32_t base_offset;
+ uint8_t size;
+ QLIST_HEAD(, XenPTReg) reg_tbl_list;
+} XenPTRegGroup;
+
+
+#define XEN_PT_UNASSIGNED_PIRQ (-1)
+typedef struct XenPTMSI {
+ uint16_t flags;
+ uint32_t addr_lo; /* guest message address */
+ uint32_t addr_hi; /* guest message upper address */
+ uint16_t data; /* guest message data */
+ uint32_t ctrl_offset; /* saved control offset */
+ int pirq; /* corresponding guest pirq */
+ bool initialized; /* whether guest MSI is initialized */
+ bool mapped; /* whether pirq is mapped */
+} XenPTMSI;
+
+typedef struct XenPTMSIXEntry {
+ int pirq;
+ uint64_t addr;
+ uint32_t data;
+ uint32_t vector_ctrl;
+ bool updated; /* indicate whether MSI ADDR or DATA is updated */
+} XenPTMSIXEntry;
+typedef struct XenPTMSIX {
+ uint32_t ctrl_offset;
+ bool enabled;
+ int total_entries;
+ int bar_index;
+ uint64_t table_base;
+ uint32_t table_offset_adjust; /* page align mmap */
+ uint64_t mmio_base_addr;
+ MemoryRegion mmio;
+ void *phys_iomem_base;
+ XenPTMSIXEntry msix_entry[0];
+} XenPTMSIX;
+
+struct XenPCIPassthroughState {
+ PCIDevice dev;
+
+ PCIHostDeviceAddress hostaddr;
+ bool is_virtfn;
+ XenHostPCIDevice real_device;
+ XenPTRegion bases[PCI_NUM_REGIONS]; /* Access regions */
+ QLIST_HEAD(, XenPTRegGroup) reg_grps;
+
+ uint32_t machine_irq;
+
+ XenPTMSI *msi;
+ XenPTMSIX *msix;
+
+ MemoryRegion bar[PCI_NUM_REGIONS - 1];
+ MemoryRegion rom;
+
+ MemoryListener memory_listener;
+};
+
+int xen_pt_config_init(XenPCIPassthroughState *s);
+void xen_pt_config_delete(XenPCIPassthroughState *s);
+XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address);
+XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address);
+int xen_pt_bar_offset_to_index(uint32_t offset);
+
+static inline pcibus_t xen_pt_get_emul_size(XenPTBarFlag flag, pcibus_t r_size)
+{
+ /* align resource size (memory type only) */
+ if (flag == XEN_PT_BAR_FLAG_MEM) {
+ return (r_size + XC_PAGE_SIZE - 1) & XC_PAGE_MASK;
+ } else {
+ return r_size;
+ }
+}
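For a memory-type BAR this rounds the emulated size up to Xen's page granularity: assuming the usual 4 KiB XC_PAGE_SIZE, an 0x800-byte region is emulated as 0x1000 bytes while a 0x3000-byte region is left at 0x3000; I/O BARs are returned unchanged.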
+
+/* INTx */
+/* The PCI Local Bus Specification, Rev. 3.0,
+ * Section 6.2.4 Miscellaneous Registers, p. 223,
+ * outlines 5 valid values for the interrupt pin (intx).
+ * 0: For devices (or device functions) that don't use an interrupt pin
+ * 1: INTA#
+ * 2: INTB#
+ * 3: INTC#
+ * 4: INTD#
+ *
+ * Xen uses the following 4 values for intx
+ * 0: INTA#
+ * 1: INTB#
+ * 2: INTC#
+ * 3: INTD#
+ *
+ * Observing that these lists of values are not the same, xen_pt_pci_intx()
+ * applies the following mapping from hardware to Xen values.
+ * This seems to reflect the current usage within Xen.
+ *
+ * PCI hardware | Xen | Notes
+ * ----------------+-----+----------------------------------------------------
+ * 0 | 0 | No interrupt
+ * 1 | 0 | INTA#
+ * 2 | 1 | INTB#
+ * 3 | 2 | INTC#
+ * 4 | 3 | INTD#
+ * any other value | 0 | This should never happen, log error message
+ */
+
+static inline uint8_t xen_pt_pci_read_intx(XenPCIPassthroughState *s)
+{
+ uint8_t v = 0;
+ xen_host_pci_get_byte(&s->real_device, PCI_INTERRUPT_PIN, &v);
+ return v;
+}
+
+static inline uint8_t xen_pt_pci_intx(XenPCIPassthroughState *s)
+{
+ uint8_t r_val = xen_pt_pci_read_intx(s);
+
+ XEN_PT_LOG(&s->dev, "intx=%i\n", r_val);
+ if (r_val < 1 || r_val > 4) {
+ XEN_PT_LOG(&s->dev, "Interrupt pin read from hardware is out of range:"
+ " value=%i, acceptable range is 1 - 4\n", r_val);
+ r_val = 0;
+ } else {
+ r_val -= 1;
+ }
+
+ return r_val;
+}
+
+/* MSI/MSI-X */
+int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool en);
+int xen_pt_msi_setup(XenPCIPassthroughState *s);
+int xen_pt_msi_update(XenPCIPassthroughState *d);
+void xen_pt_msi_disable(XenPCIPassthroughState *s);
+
+int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base);
+void xen_pt_msix_delete(XenPCIPassthroughState *s);
+int xen_pt_msix_update(XenPCIPassthroughState *s);
+int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index);
+void xen_pt_msix_disable(XenPCIPassthroughState *s);
+
+static inline bool xen_pt_has_msix_mapping(XenPCIPassthroughState *s, int bar)
+{
+ return s->msix && s->msix->bar_index == bar;
+}
+
+
+#endif /* !XEN_PT_H */
diff --git a/hw/xen_pt_config_init.c b/hw/xen_pt_config_init.c
new file mode 100644
index 0000000000..00eb3d997d
--- /dev/null
+++ b/hw/xen_pt_config_init.c
@@ -0,0 +1,1869 @@
+/*
+ * Copyright (c) 2007, Neocleus Corporation.
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Alex Novik <alex@neocleus.com>
+ * Allen Kay <allen.m.kay@intel.com>
+ * Guy Zana <guy@neocleus.com>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+#include "qemu-timer.h"
+#include "xen_backend.h"
+#include "xen_pt.h"
+
+#define XEN_PT_MERGE_VALUE(value, data, val_mask) \
+ (((value) & (val_mask)) | ((data) & ~(val_mask)))
+
+#define XEN_PT_INVALID_REG 0xFFFFFFFF /* invalid register value */
+
+/* prototype */
+
+static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
+ uint32_t real_offset, uint32_t *data);
+
+
+/* helper */
+
+/* A return value of 1 means the capability should NOT be exposed to guest. */
+static int xen_pt_hide_dev_cap(const XenHostPCIDevice *d, uint8_t grp_id)
+{
+ switch (grp_id) {
+ case PCI_CAP_ID_EXP:
+ /* The PCI Express Capability Structure of the VF of the Intel 82599
+ * 10GbE Controller looks trivial: per 'Table 9.7. VF PCIe Configuration
+ * Space' of the datasheet
+ * (http://download.intel.com/design/network/datashts/82599_datasheet.pdf),
+ * the PCI Express Capabilities Register is 0, so the Capability Version
+ * is 0 and xen_pt_pcie_size_init() would fail. Don't try to expose this
+ * capability to the guest.
+ */
+ if (d->vendor_id == PCI_VENDOR_ID_INTEL &&
+ d->device_id == PCI_DEVICE_ID_INTEL_82599_SFP_VF) {
+ return 1;
+ }
+ break;
+ }
+ return 0;
+}
+
+/* find emulate register group entry */
+XenPTRegGroup *xen_pt_find_reg_grp(XenPCIPassthroughState *s, uint32_t address)
+{
+ XenPTRegGroup *entry = NULL;
+
+ /* find register group entry */
+ QLIST_FOREACH(entry, &s->reg_grps, entries) {
+ /* check address */
+ if ((entry->base_offset <= address)
+ && ((entry->base_offset + entry->size) > address)) {
+ return entry;
+ }
+ }
+
+ /* group entry not found */
+ return NULL;
+}
+
+/* find emulate register entry */
+XenPTReg *xen_pt_find_reg(XenPTRegGroup *reg_grp, uint32_t address)
+{
+ XenPTReg *reg_entry = NULL;
+ XenPTRegInfo *reg = NULL;
+ uint32_t real_offset = 0;
+
+ /* find register entry */
+ QLIST_FOREACH(reg_entry, &reg_grp->reg_tbl_list, entries) {
+ reg = reg_entry->reg;
+ real_offset = reg_grp->base_offset + reg->offset;
+ /* check address */
+ if ((real_offset <= address)
+ && ((real_offset + reg->size) > address)) {
+ return reg_entry;
+ }
+ }
+
+ return NULL;
+}
+
+
+/****************
+ * general register functions
+ */
+
+/* register initialization function */
+
+static int xen_pt_common_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ *data = reg->init_val;
+ return 0;
+}
+
+/* Read register functions */
+
+static int xen_pt_byte_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint8_t *value, uint8_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint8_t valid_emu_mask = 0;
+
+ /* emulate byte register */
+ valid_emu_mask = reg->emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+static int xen_pt_word_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint16_t *value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t valid_emu_mask = 0;
+
+ /* emulate word register */
+ valid_emu_mask = reg->emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+static int xen_pt_long_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint32_t *value, uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint32_t valid_emu_mask = 0;
+
+ /* emulate long register */
+ valid_emu_mask = reg->emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+
+/* Write register functions */
+
+static int xen_pt_byte_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint8_t *val, uint8_t dev_value,
+ uint8_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint8_t writable_mask = 0;
+ uint8_t throughable_mask = 0;
+
+ /* modify emulate register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
+static int xen_pt_word_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint16_t *val, uint16_t dev_value,
+ uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+
+ /* modify emulate register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
+static int xen_pt_long_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint32_t *val, uint32_t dev_value,
+ uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = 0;
+
+ /* modify emulate register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
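A worked example of the merge arithmetic, with made-up values rather than any particular register: suppose a word register has emu_mask = 0x00F0 and ro_mask = 0x0030, the guest writes *val = 0x00AB with valid_mask = 0x00FF, and the device currently holds dev_value = 0x0012. Then:

    writable_mask    = emu_mask & ~ro_mask & valid_mask = 0x00C0
    cfg_entry->data  = (0x00AB & 0x00C0) | (old_data & ~0x00C0)   /* guest updates bits 6-7 only */
    throughable_mask = ~emu_mask & valid_mask = 0x000F
    *val             = (0x00AB & 0x000F) | (0x0012 & ~0x000F) = 0x001B

so the emulated copy absorbs only the writable emulated bits, while the value forwarded to the real device combines the guest's non-emulated bits with the hardware's current emulated ones.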
+
+
+/* XenPTRegInfo declaration
+ * - only for emulated registers (either partially or fully emulated).
+ * - for passthrough registers that need special behavior (like interacting
+ * with other components), set emu_mask to all 0 and specify the r/w
+ * functions properly.
+ * - do NOT use all-Fs for init_val, otherwise the table will not be
+ * registered.
+ */
+
+/********************
+ * Header Type0
+ */
+
+static int xen_pt_vendor_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ *data = s->real_device.vendor_id;
+ return 0;
+}
+static int xen_pt_device_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ *data = s->real_device.device_id;
+ return 0;
+}
+static int xen_pt_status_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ XenPTRegGroup *reg_grp_entry = NULL;
+ XenPTReg *reg_entry = NULL;
+ uint32_t reg_field = 0;
+
+ /* find Header register group */
+ reg_grp_entry = xen_pt_find_reg_grp(s, PCI_CAPABILITY_LIST);
+ if (reg_grp_entry) {
+ /* find Capabilities Pointer register */
+ reg_entry = xen_pt_find_reg(reg_grp_entry, PCI_CAPABILITY_LIST);
+ if (reg_entry) {
+ /* check Capabilities Pointer register */
+ if (reg_entry->data) {
+ reg_field |= PCI_STATUS_CAP_LIST;
+ } else {
+ reg_field &= ~PCI_STATUS_CAP_LIST;
+ }
+ } else {
+ xen_shutdown_fatal_error("Internal error: Couldn't find XenPTReg*"
+ " for Capabilities Pointer register."
+ " (%s)\n", __func__);
+ return -1;
+ }
+ } else {
+ xen_shutdown_fatal_error("Internal error: Couldn't find XenPTRegGroup"
+ " for Header. (%s)\n", __func__);
+ return -1;
+ }
+
+ *data = reg_field;
+ return 0;
+}
+static int xen_pt_header_type_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ /* use init_val with the Multi-Function bit (0x80) of PCI_HEADER_TYPE forced on */
+ *data = reg->init_val | 0x80;
+ return 0;
+}
+
+/* initialize Interrupt Pin register */
+static int xen_pt_irqpin_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ *data = xen_pt_pci_read_intx(s);
+ return 0;
+}
+
+/* Command register */
+static int xen_pt_cmd_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint16_t *value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t valid_emu_mask = 0;
+ uint16_t emu_mask = reg->emu_mask;
+
+ if (s->is_virtfn) {
+ emu_mask |= PCI_COMMAND_MEMORY;
+ }
+
+ /* emulate word register */
+ valid_emu_mask = emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+static int xen_pt_cmd_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint16_t *val, uint16_t dev_value,
+ uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+ uint16_t emu_mask = reg->emu_mask;
+
+ if (s->is_virtfn) {
+ emu_mask |= PCI_COMMAND_MEMORY;
+ }
+
+ /* modify emulate register */
+ writable_mask = ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~emu_mask & valid_mask;
+
+ if (*val & PCI_COMMAND_INTX_DISABLE) {
+ throughable_mask |= PCI_COMMAND_INTX_DISABLE;
+ } else {
+ if (s->machine_irq) {
+ throughable_mask |= PCI_COMMAND_INTX_DISABLE;
+ }
+ }
+
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
+
+/* BAR */
+#define XEN_PT_BAR_MEM_RO_MASK 0x0000000F /* BAR ReadOnly mask(Memory) */
+#define XEN_PT_BAR_MEM_EMU_MASK 0xFFFFFFF0 /* BAR emul mask(Memory) */
+#define XEN_PT_BAR_IO_RO_MASK 0x00000003 /* BAR ReadOnly mask(I/O) */
+#define XEN_PT_BAR_IO_EMU_MASK 0xFFFFFFFC /* BAR emul mask(I/O) */
+
+static XenPTBarFlag xen_pt_bar_reg_parse(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg)
+{
+ PCIDevice *d = &s->dev;
+ XenPTRegion *region = NULL;
+ PCIIORegion *r;
+ int index = 0;
+
+ /* check 64bit BAR */
+ index = xen_pt_bar_offset_to_index(reg->offset);
+ if ((0 < index) && (index < PCI_ROM_SLOT)) {
+ int type = s->real_device.io_regions[index - 1].type;
+
+ if ((type & XEN_HOST_PCI_REGION_TYPE_MEM)
+ && (type & XEN_HOST_PCI_REGION_TYPE_MEM_64)) {
+ region = &s->bases[index - 1];
+ if (region->bar_flag != XEN_PT_BAR_FLAG_UPPER) {
+ return XEN_PT_BAR_FLAG_UPPER;
+ }
+ }
+ }
+
+ /* check unused BAR */
+ r = &d->io_regions[index];
+ if (r->size == 0) {
+ return XEN_PT_BAR_FLAG_UNUSED;
+ }
+
+ /* for ExpROM BAR */
+ if (index == PCI_ROM_SLOT) {
+ return XEN_PT_BAR_FLAG_MEM;
+ }
+
+ /* check BAR I/O indicator */
+ if (s->real_device.io_regions[index].type & XEN_HOST_PCI_REGION_TYPE_IO) {
+ return XEN_PT_BAR_FLAG_IO;
+ } else {
+ return XEN_PT_BAR_FLAG_MEM;
+ }
+}
+
+static inline uint32_t base_address_with_flags(XenHostPCIIORegion *hr)
+{
+ if (hr->type & XEN_HOST_PCI_REGION_TYPE_IO) {
+ return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_IO_MASK);
+ } else {
+ return hr->base_addr | (hr->bus_flags & ~PCI_BASE_ADDRESS_MEM_MASK);
+ }
+}
+
+static int xen_pt_bar_reg_init(XenPCIPassthroughState *s, XenPTRegInfo *reg,
+ uint32_t real_offset, uint32_t *data)
+{
+ uint32_t reg_field = 0;
+ int index;
+
+ index = xen_pt_bar_offset_to_index(reg->offset);
+ if (index < 0 || index >= PCI_NUM_REGIONS) {
+ XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
+ return -1;
+ }
+
+ /* set BAR flag */
+ s->bases[index].bar_flag = xen_pt_bar_reg_parse(s, reg);
+ if (s->bases[index].bar_flag == XEN_PT_BAR_FLAG_UNUSED) {
+ reg_field = XEN_PT_INVALID_REG;
+ }
+
+ *data = reg_field;
+ return 0;
+}
+static int xen_pt_bar_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint32_t *value, uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint32_t valid_emu_mask = 0;
+ uint32_t bar_emu_mask = 0;
+ int index;
+
+ /* get BAR index */
+ index = xen_pt_bar_offset_to_index(reg->offset);
+ if (index < 0 || index >= PCI_NUM_REGIONS) {
+ XEN_PT_ERR(&s->dev, "Internal error: Invalid BAR index [%d].\n", index);
+ return -1;
+ }
+
+ /* use fixed-up value from kernel sysfs */
+ *value = base_address_with_flags(&s->real_device.io_regions[index]);
+
+ /* set emulate mask depend on BAR flag */
+ switch (s->bases[index].bar_flag) {
+ case XEN_PT_BAR_FLAG_MEM:
+ bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
+ break;
+ case XEN_PT_BAR_FLAG_IO:
+ bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
+ break;
+ case XEN_PT_BAR_FLAG_UPPER:
+ bar_emu_mask = XEN_PT_BAR_ALLF;
+ break;
+ default:
+ break;
+ }
+
+ /* emulate BAR */
+ valid_emu_mask = bar_emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+static int xen_pt_bar_reg_write(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint32_t *val, uint32_t dev_value,
+ uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ XenPTRegion *base = NULL;
+ PCIDevice *d = &s->dev;
+ const PCIIORegion *r;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = 0;
+ uint32_t bar_emu_mask = 0;
+ uint32_t bar_ro_mask = 0;
+ uint32_t r_size = 0;
+ int index = 0;
+
+ index = xen_pt_bar_offset_to_index(reg->offset);
+ if (index < 0 || index >= PCI_NUM_REGIONS) {
+ XEN_PT_ERR(d, "Internal error: Invalid BAR index [%d].\n", index);
+ return -1;
+ }
+
+ r = &d->io_regions[index];
+ base = &s->bases[index];
+ r_size = xen_pt_get_emul_size(base->bar_flag, r->size);
+
+ /* set the emulate mask and read-only mask values depending on the BAR flag */
+ switch (s->bases[index].bar_flag) {
+ case XEN_PT_BAR_FLAG_MEM:
+ bar_emu_mask = XEN_PT_BAR_MEM_EMU_MASK;
+ bar_ro_mask = XEN_PT_BAR_MEM_RO_MASK | (r_size - 1);
+ break;
+ case XEN_PT_BAR_FLAG_IO:
+ bar_emu_mask = XEN_PT_BAR_IO_EMU_MASK;
+ bar_ro_mask = XEN_PT_BAR_IO_RO_MASK | (r_size - 1);
+ break;
+ case XEN_PT_BAR_FLAG_UPPER:
+ bar_emu_mask = XEN_PT_BAR_ALLF;
+ bar_ro_mask = 0; /* all upper 32bit are R/W */
+ break;
+ default:
+ break;
+ }
+
+ /* modify emulate register */
+ writable_mask = bar_emu_mask & ~bar_ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* check whether we need to update the virtual region address or not */
+ switch (s->bases[index].bar_flag) {
+ case XEN_PT_BAR_FLAG_MEM:
+ /* nothing to do */
+ break;
+ case XEN_PT_BAR_FLAG_IO:
+ /* nothing to do */
+ break;
+ case XEN_PT_BAR_FLAG_UPPER:
+ if (cfg_entry->data) {
+ if (cfg_entry->data != (XEN_PT_BAR_ALLF & ~bar_ro_mask)) {
+ XEN_PT_WARN(d, "Guest attempt to set high MMIO Base Address. "
+ "Ignore mapping. "
+ "(offset: 0x%02x, high address: 0x%08x)\n",
+ reg->offset, cfg_entry->data);
+ }
+ }
+ break;
+ default:
+ break;
+ }
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~bar_emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
+
+/* write Exp ROM BAR */
+static int xen_pt_exp_rom_bar_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ XenPTRegion *base = NULL;
+ PCIDevice *d = (PCIDevice *)&s->dev;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = 0;
+ pcibus_t r_size = 0;
+ uint32_t bar_emu_mask = 0;
+ uint32_t bar_ro_mask = 0;
+
+ r_size = d->io_regions[PCI_ROM_SLOT].size;
+ base = &s->bases[PCI_ROM_SLOT];
+ /* align memory type resource size */
+ r_size = xen_pt_get_emul_size(base->bar_flag, r_size);
+
+ /* set emulate mask and read-only mask */
+ bar_emu_mask = reg->emu_mask;
+ bar_ro_mask = (reg->ro_mask | (r_size - 1)) & ~PCI_ROM_ADDRESS_ENABLE;
+
+ /* modify emulate register */
+ writable_mask = ~bar_ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~bar_emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
+
+/* Header Type0 reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_header0[] = {
+ /* Vendor ID reg */
+ {
+ .offset = PCI_VENDOR_ID,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFFFF,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_vendor_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Device ID reg */
+ {
+ .offset = PCI_DEVICE_ID,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFFFF,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_device_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Command reg */
+ {
+ .offset = PCI_COMMAND,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xF880,
+ .emu_mask = 0x0740,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_cmd_reg_read,
+ .u.w.write = xen_pt_cmd_reg_write,
+ },
+ /* Capabilities Pointer reg */
+ {
+ .offset = PCI_CAPABILITY_LIST,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Status reg */
+ /* uses the emulated Cap Ptr value to initialize,
+ * so it needs to be declared after the Cap Ptr reg
+ */
+ {
+ .offset = PCI_STATUS,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x06FF,
+ .emu_mask = 0x0010,
+ .init = xen_pt_status_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Cache Line Size reg */
+ {
+ .offset = PCI_CACHE_LINE_SIZE,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0x00,
+ .emu_mask = 0xFF,
+ .init = xen_pt_common_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Latency Timer reg */
+ {
+ .offset = PCI_LATENCY_TIMER,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0x00,
+ .emu_mask = 0xFF,
+ .init = xen_pt_common_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Header Type reg */
+ {
+ .offset = PCI_HEADER_TYPE,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0x00,
+ .init = xen_pt_header_type_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Interrupt Line reg */
+ {
+ .offset = PCI_INTERRUPT_LINE,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0x00,
+ .emu_mask = 0xFF,
+ .init = xen_pt_common_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Interrupt Pin reg */
+ {
+ .offset = PCI_INTERRUPT_PIN,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_irqpin_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* BAR 0 reg */
+ /* the BAR's masks are decided later, depending on its I/O vs. memory type */
+ {
+ .offset = PCI_BASE_ADDRESS_0,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* BAR 1 reg */
+ {
+ .offset = PCI_BASE_ADDRESS_1,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* BAR 2 reg */
+ {
+ .offset = PCI_BASE_ADDRESS_2,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* BAR 3 reg */
+ {
+ .offset = PCI_BASE_ADDRESS_3,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* BAR 4 reg */
+ {
+ .offset = PCI_BASE_ADDRESS_4,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* BAR 5 reg */
+ {
+ .offset = PCI_BASE_ADDRESS_5,
+ .size = 4,
+ .init_val = 0x00000000,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_bar_reg_read,
+ .u.dw.write = xen_pt_bar_reg_write,
+ },
+ /* Expansion ROM BAR reg */
+ {
+ .offset = PCI_ROM_ADDRESS,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x000007FE,
+ .emu_mask = 0xFFFFF800,
+ .init = xen_pt_bar_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_exp_rom_bar_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/*********************************
+ * Vital Product Data Capability
+ */
+
+/* Vital Product Data Capability Structure reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_vpd[] = {
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/**************************************
+ * Vendor Specific Capability
+ */
+
+/* Vendor Specific Capability Structure reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_vendor[] = {
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/*****************************
+ * PCI Express Capability
+ */
+
+static inline uint8_t get_capability_version(XenPCIPassthroughState *s,
+ uint32_t offset)
+{
+ uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
+ return flags & PCI_EXP_FLAGS_VERS;
+}
+
+static inline uint8_t get_device_type(XenPCIPassthroughState *s,
+ uint32_t offset)
+{
+ uint8_t flags = pci_get_byte(s->dev.config + offset + PCI_EXP_FLAGS);
+ return (flags & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+
+/* initialize Link Control register */
+static int xen_pt_linkctrl_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
+ uint8_t dev_type = get_device_type(s, real_offset - reg->offset);
+
+ /* no need to initialize in case of Root Complex Integrated Endpoint
+ * with cap_ver 1.x
+ */
+ if ((dev_type == PCI_EXP_TYPE_RC_END) && (cap_ver == 1)) {
+ *data = XEN_PT_INVALID_REG;
+ return 0;
+ }
+
+ *data = reg->init_val;
+ return 0;
+}
+/* initialize Device Control 2 register */
+static int xen_pt_devctrl2_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
+
+ /* no need to initialize in case of cap_ver 1.x */
+ if (cap_ver == 1) {
+ *data = XEN_PT_INVALID_REG;
+ return 0;
+ }
+
+ *data = reg->init_val;
+ return 0;
+}
+/* initialize Link Control 2 register */
+static int xen_pt_linkctrl2_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint8_t cap_ver = get_capability_version(s, real_offset - reg->offset);
+ uint32_t reg_field = 0;
+
+ /* no need to initialize in case of cap_ver 1.x */
+ if (cap_ver == 1) {
+ reg_field = XEN_PT_INVALID_REG;
+ } else {
+ /* set Supported Link Speed */
+ uint8_t lnkcap = pci_get_byte(s->dev.config + real_offset - reg->offset
+ + PCI_EXP_LNKCAP);
+ reg_field |= PCI_EXP_LNKCAP_SLS & lnkcap;
+ }
+
+ *data = reg_field;
+ return 0;
+}
+
+/* PCI Express Capability Structure reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_pcie[] = {
+ /* Next Pointer reg */
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Device Capabilities reg */
+ {
+ .offset = PCI_EXP_DEVCAP,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x1FFCFFFF,
+ .emu_mask = 0x10000000,
+ .init = xen_pt_common_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_long_reg_write,
+ },
+ /* Device Control reg */
+ {
+ .offset = PCI_EXP_DEVCTL,
+ .size = 2,
+ .init_val = 0x2810,
+ .ro_mask = 0x8400,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Link Control reg */
+ {
+ .offset = PCI_EXP_LNKCTL,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFC34,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_linkctrl_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Device Control 2 reg */
+ {
+ .offset = 0x28,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFFE0,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_devctrl2_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* Link Control 2 reg */
+ {
+ .offset = 0x30,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xE040,
+ .emu_mask = 0xFFFF,
+ .init = xen_pt_linkctrl2_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/*********************************
+ * Power Management Capability
+ */
+
+/* read Power Management Control/Status register */
+static int xen_pt_pmcsr_reg_read(XenPCIPassthroughState *s, XenPTReg *cfg_entry,
+ uint16_t *value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t valid_emu_mask = reg->emu_mask;
+
+ valid_emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
+
+ valid_emu_mask = valid_emu_mask & valid_mask;
+ *value = XEN_PT_MERGE_VALUE(*value, cfg_entry->data, ~valid_emu_mask);
+
+ return 0;
+}
+/* write Power Management Control/Status register */
+static int xen_pt_pmcsr_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t emu_mask = reg->emu_mask;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+
+ emu_mask |= PCI_PM_CTRL_STATE_MASK | PCI_PM_CTRL_NO_SOFT_RESET;
+
+ /* modify emulated register */
+ writable_mask = emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ return 0;
+}
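+
+/* Illustrative sketch of the mask scheme used by the reg read/write handlers
+ * above. The masks below are hypothetical, not taken from a real device, and
+ * XEN_PT_MERGE_VALUE(v, d, m) is assumed to keep the bits of v selected by m
+ * and the bits of d elsewhere, as its uses above imply:
+ *
+ *   emu_mask = 0x00F0; ro_mask = 0x0030; valid_mask = 0xFFFF;
+ *   guest writes *val = 0x00FF; emulated data = 0x0000; dev_value = 0x1200;
+ *
+ *   writable_mask    = emu_mask & ~ro_mask & valid_mask;               // 0x00C0
+ *   cfg_entry->data  = XEN_PT_MERGE_VALUE(*val, data, writable_mask);  // 0x00C0
+ *   throughable_mask = ~emu_mask & valid_mask;                         // 0xFF0F
+ *   *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);      // 0x000F
+ *
+ * i.e. emulated, writable bits are captured in cfg_entry->data, while only
+ * the non-emulated bits of the guest write reach the physical register.
+ */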
+
+/* Power Management Capability reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_pm[] = {
+ /* Next Pointer reg */
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Power Management Capabilities reg */
+ {
+ .offset = PCI_CAP_FLAGS,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFFFF,
+ .emu_mask = 0xF9C8,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_word_reg_write,
+ },
+ /* PCI Power Management Control/Status reg */
+ {
+ .offset = PCI_PM_CTRL,
+ .size = 2,
+ .init_val = 0x0008,
+ .ro_mask = 0xE1FC,
+ .emu_mask = 0x8100,
+ .init = xen_pt_common_reg_init,
+ .u.w.read = xen_pt_pmcsr_reg_read,
+ .u.w.write = xen_pt_pmcsr_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/********************************
+ * MSI Capability
+ */
+
+/* Helper */
+static bool xen_pt_msgdata_check_type(uint32_t offset, uint16_t flags)
+{
+ /* check whether the offset matches the MSI data type (32/64-bit) */
+ bool is_32 = (offset == PCI_MSI_DATA_32) && !(flags & PCI_MSI_FLAGS_64BIT);
+ bool is_64 = (offset == PCI_MSI_DATA_64) && (flags & PCI_MSI_FLAGS_64BIT);
+ return is_32 || is_64;
+}
+
+/* Message Control register */
+static int xen_pt_msgctrl_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ PCIDevice *d = &s->dev;
+ XenPTMSI *msi = s->msi;
+ uint16_t reg_field = 0;
+
+ /* use I/O device register's value as initial value */
+ reg_field = pci_get_word(d->config + real_offset);
+
+ if (reg_field & PCI_MSI_FLAGS_ENABLE) {
+ XEN_PT_LOG(&s->dev, "MSI already enabled, disabling it first\n");
+ xen_host_pci_set_word(&s->real_device, real_offset,
+ reg_field & ~PCI_MSI_FLAGS_ENABLE);
+ }
+ msi->flags |= reg_field;
+ msi->ctrl_offset = real_offset;
+ msi->initialized = false;
+ msi->mapped = false;
+
+ *data = reg->init_val;
+ return 0;
+}
+static int xen_pt_msgctrl_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ XenPTMSI *msi = s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+ uint16_t raw_val;
+
+ /* Currently no support for multi-vector */
+ if (*val & PCI_MSI_FLAGS_QSIZE) {
+ XEN_PT_WARN(&s->dev, "Tries to set more than 1 vector ctrl %x\n", *val);
+ }
+
+ /* modify emulated register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+ msi->flags |= cfg_entry->data & ~PCI_MSI_FLAGS_ENABLE;
+
+ /* create value for writing to I/O device register */
+ raw_val = *val;
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if (raw_val & PCI_MSI_FLAGS_ENABLE) {
+ /* setup MSI pirq for the first time */
+ if (!msi->initialized) {
+ /* Init physical one */
+ XEN_PT_LOG(&s->dev, "setup MSI\n");
+ if (xen_pt_msi_setup(s)) {
+ /* We do not broadcast the error to the framework code, so
+ * that MSI errors are contained in MSI emulation code and
+ * QEMU can go on running.
+ * The guest's MSI simply will not work.
+ */
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+ XEN_PT_WARN(&s->dev, "Can not map MSI.\n");
+ return 0;
+ }
+ if (xen_pt_msi_update(s)) {
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+ XEN_PT_WARN(&s->dev, "Can not bind MSI\n");
+ return 0;
+ }
+ msi->initialized = true;
+ msi->mapped = true;
+ }
+ msi->flags |= PCI_MSI_FLAGS_ENABLE;
+ } else {
+ msi->flags &= ~PCI_MSI_FLAGS_ENABLE;
+ }
+
+ /* pass through MSI_ENABLE bit */
+ *val &= ~PCI_MSI_FLAGS_ENABLE;
+ *val |= raw_val & PCI_MSI_FLAGS_ENABLE;
+
+ return 0;
+}
+
+/* initialize Message Upper Address register */
+static int xen_pt_msgaddr64_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ /* no need to initialize in case of 32 bit type */
+ if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
+ *data = XEN_PT_INVALID_REG;
+ } else {
+ *data = reg->init_val;
+ }
+
+ return 0;
+}
+/* this function will be called twice (for 32 bit and 64 bit type) */
+/* initialize Message Data register */
+static int xen_pt_msgdata_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ uint32_t flags = s->msi->flags;
+ uint32_t offset = reg->offset;
+
+ /* check whether the offset matches the MSI data type (32/64-bit) */
+ if (xen_pt_msgdata_check_type(offset, flags)) {
+ *data = reg->init_val;
+ } else {
+ *data = XEN_PT_INVALID_REG;
+ }
+ return 0;
+}
+
+/* write Message Address register */
+static int xen_pt_msgaddr32_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = 0;
+ uint32_t old_addr = cfg_entry->data;
+
+ /* modify emulated register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+ s->msi->addr_lo = cfg_entry->data;
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if (cfg_entry->data != old_addr) {
+ if (s->msi->mapped) {
+ xen_pt_msi_update(s);
+ }
+ }
+
+ return 0;
+}
+/* write Message Upper Address register */
+static int xen_pt_msgaddr64_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint32_t *val,
+ uint32_t dev_value, uint32_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint32_t writable_mask = 0;
+ uint32_t throughable_mask = 0;
+ uint32_t old_addr = cfg_entry->data;
+
+ /* check whether the type is 64 bit or not */
+ if (!(s->msi->flags & PCI_MSI_FLAGS_64BIT)) {
+ XEN_PT_ERR(&s->dev,
+ "Can't write to the upper address without 64 bit support\n");
+ return -1;
+ }
+
+ /* modify emulated register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+ /* update the msi_info too */
+ s->msi->addr_hi = cfg_entry->data;
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if (cfg_entry->data != old_addr) {
+ if (s->msi->mapped) {
+ xen_pt_msi_update(s);
+ }
+ }
+
+ return 0;
+}
+
+
+/* this function will be called twice (for 32 bit and 64 bit type) */
+/* write Message Data register */
+static int xen_pt_msgdata_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ XenPTMSI *msi = s->msi;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+ uint16_t old_data = cfg_entry->data;
+ uint32_t offset = reg->offset;
+
+ /* check whether the offset matches the MSI data type (32/64-bit) */
+ if (!xen_pt_msgdata_check_type(offset, msi->flags)) {
+ /* exit I/O emulator */
+ XEN_PT_ERR(&s->dev, "the offset does not match the 32/64 bit type!\n");
+ return -1;
+ }
+
+ /* modify emulated register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+ /* update the msi_info too */
+ msi->data = cfg_entry->data;
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI */
+ if (cfg_entry->data != old_data) {
+ if (msi->mapped) {
+ xen_pt_msi_update(s);
+ }
+ }
+
+ return 0;
+}
+
+/* MSI Capability Structure reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_msi[] = {
+ /* Next Pointer reg */
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Message Control reg */
+ {
+ .offset = PCI_MSI_FLAGS,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0xFF8E,
+ .emu_mask = 0x007F,
+ .init = xen_pt_msgctrl_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_msgctrl_reg_write,
+ },
+ /* Message Address reg */
+ {
+ .offset = PCI_MSI_ADDRESS_LO,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000003,
+ .emu_mask = 0xFFFFFFFF,
+ .no_wb = 1,
+ .init = xen_pt_common_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_msgaddr32_reg_write,
+ },
+ /* Message Upper Address reg (if PCI_MSI_FLAGS_64BIT set) */
+ {
+ .offset = PCI_MSI_ADDRESS_HI,
+ .size = 4,
+ .init_val = 0x00000000,
+ .ro_mask = 0x00000000,
+ .emu_mask = 0xFFFFFFFF,
+ .no_wb = 1,
+ .init = xen_pt_msgaddr64_reg_init,
+ .u.dw.read = xen_pt_long_reg_read,
+ .u.dw.write = xen_pt_msgaddr64_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 32-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_32,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .no_wb = 1,
+ .init = xen_pt_msgdata_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_msgdata_reg_write,
+ },
+ /* Message Data reg (16 bits of data for 64-bit devices) */
+ {
+ .offset = PCI_MSI_DATA_64,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x0000,
+ .emu_mask = 0xFFFF,
+ .no_wb = 1,
+ .init = xen_pt_msgdata_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_msgdata_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/**************************************
+ * MSI-X Capability
+ */
+
+/* Message Control register for MSI-X */
+static int xen_pt_msixctrl_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ PCIDevice *d = &s->dev;
+ uint16_t reg_field = 0;
+
+ /* use I/O device register's value as initial value */
+ reg_field = pci_get_word(d->config + real_offset);
+
+ if (reg_field & PCI_MSIX_FLAGS_ENABLE) {
+ XEN_PT_LOG(d, "MSIX already enabled, disabling it first\n");
+ xen_host_pci_set_word(&s->real_device, real_offset,
+ reg_field & ~PCI_MSIX_FLAGS_ENABLE);
+ }
+
+ s->msix->ctrl_offset = real_offset;
+
+ *data = reg->init_val;
+ return 0;
+}
+static int xen_pt_msixctrl_reg_write(XenPCIPassthroughState *s,
+ XenPTReg *cfg_entry, uint16_t *val,
+ uint16_t dev_value, uint16_t valid_mask)
+{
+ XenPTRegInfo *reg = cfg_entry->reg;
+ uint16_t writable_mask = 0;
+ uint16_t throughable_mask = 0;
+ int debug_msix_enabled_old;
+
+ /* modify emulated register */
+ writable_mask = reg->emu_mask & ~reg->ro_mask & valid_mask;
+ cfg_entry->data = XEN_PT_MERGE_VALUE(*val, cfg_entry->data, writable_mask);
+
+ /* create value for writing to I/O device register */
+ throughable_mask = ~reg->emu_mask & valid_mask;
+ *val = XEN_PT_MERGE_VALUE(*val, dev_value, throughable_mask);
+
+ /* update MSI-X */
+ if ((*val & PCI_MSIX_FLAGS_ENABLE)
+ && !(*val & PCI_MSIX_FLAGS_MASKALL)) {
+ xen_pt_msix_update(s);
+ }
+
+ debug_msix_enabled_old = s->msix->enabled;
+ s->msix->enabled = !!(*val & PCI_MSIX_FLAGS_ENABLE);
+ if (s->msix->enabled != debug_msix_enabled_old) {
+ XEN_PT_LOG(&s->dev, "%s MSI-X\n",
+ s->msix->enabled ? "enable" : "disable");
+ }
+
+ return 0;
+}
+
+/* MSI-X Capability Structure reg static information table */
+static XenPTRegInfo xen_pt_emu_reg_msix[] = {
+ /* Next Pointer reg */
+ {
+ .offset = PCI_CAP_LIST_NEXT,
+ .size = 1,
+ .init_val = 0x00,
+ .ro_mask = 0xFF,
+ .emu_mask = 0xFF,
+ .init = xen_pt_ptr_reg_init,
+ .u.b.read = xen_pt_byte_reg_read,
+ .u.b.write = xen_pt_byte_reg_write,
+ },
+ /* Message Control reg */
+ {
+ .offset = PCI_MSI_FLAGS,
+ .size = 2,
+ .init_val = 0x0000,
+ .ro_mask = 0x3FFF,
+ .emu_mask = 0x0000,
+ .init = xen_pt_msixctrl_reg_init,
+ .u.w.read = xen_pt_word_reg_read,
+ .u.w.write = xen_pt_msixctrl_reg_write,
+ },
+ {
+ .size = 0,
+ },
+};
+
+
+/****************************
+ * Capabilities
+ */
+
+/* capability structure register group size functions */
+
+static int xen_pt_reg_grp_size_init(XenPCIPassthroughState *s,
+ const XenPTRegGroupInfo *grp_reg,
+ uint32_t base_offset, uint8_t *size)
+{
+ *size = grp_reg->grp_size;
+ return 0;
+}
+/* get Vendor Specific Capability Structure register group size */
+static int xen_pt_vendor_size_init(XenPCIPassthroughState *s,
+ const XenPTRegGroupInfo *grp_reg,
+ uint32_t base_offset, uint8_t *size)
+{
+ *size = pci_get_byte(s->dev.config + base_offset + 0x02);
+ return 0;
+}
+/* get PCI Express Capability Structure register group size */
+static int xen_pt_pcie_size_init(XenPCIPassthroughState *s,
+ const XenPTRegGroupInfo *grp_reg,
+ uint32_t base_offset, uint8_t *size)
+{
+ PCIDevice *d = &s->dev;
+ uint8_t version = get_capability_version(s, base_offset);
+ uint8_t type = get_device_type(s, base_offset);
+ uint8_t pcie_size = 0;
+
+ /* calculate the size depending on the capability version and device/port type */
+ /* in case of PCI Express Base Specification Rev 1.x */
+ if (version == 1) {
+ /* The PCI Express Capabilities, Device Capabilities, and Device
+ * Status/Control registers are required for all PCI Express devices.
+ * The Link Capabilities and Link Status/Control are required for all
+ * Endpoints that are not Root Complex Integrated Endpoints. Endpoints
+ * are not required to implement registers other than those listed
+ * above and terminate the capability structure.
+ */
+ switch (type) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_LEG_END:
+ pcie_size = 0x14;
+ break;
+ case PCI_EXP_TYPE_RC_END:
+ /* has no link */
+ pcie_size = 0x0C;
+ break;
+ /* only EndPoint passthrough is supported */
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ default:
+ XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
+ return -1;
+ }
+ }
+ /* in case of PCI Express Base Specification Rev 2.0 */
+ else if (version == 2) {
+ switch (type) {
+ case PCI_EXP_TYPE_ENDPOINT:
+ case PCI_EXP_TYPE_LEG_END:
+ case PCI_EXP_TYPE_RC_END:
+ /* For Functions that do not implement the registers,
+ * these spaces must be hardwired to 0b.
+ */
+ pcie_size = 0x3C;
+ break;
+ /* only EndPoint passthrough is supported */
+ case PCI_EXP_TYPE_ROOT_PORT:
+ case PCI_EXP_TYPE_UPSTREAM:
+ case PCI_EXP_TYPE_DOWNSTREAM:
+ case PCI_EXP_TYPE_PCI_BRIDGE:
+ case PCI_EXP_TYPE_PCIE_BRIDGE:
+ case PCI_EXP_TYPE_RC_EC:
+ default:
+ XEN_PT_ERR(d, "Unsupported device/port type %#x.\n", type);
+ return -1;
+ }
+ } else {
+ XEN_PT_ERR(d, "Unsupported capability version %#x.\n", version);
+ return -1;
+ }
+
+ *size = pcie_size;
+ return 0;
+}
+/* get MSI Capability Structure register group size */
+static int xen_pt_msi_size_init(XenPCIPassthroughState *s,
+ const XenPTRegGroupInfo *grp_reg,
+ uint32_t base_offset, uint8_t *size)
+{
+ PCIDevice *d = &s->dev;
+ uint16_t msg_ctrl = 0;
+ uint8_t msi_size = 0xa;
+
+ msg_ctrl = pci_get_word(d->config + (base_offset + PCI_MSI_FLAGS));
+
+ /* check whether the device is 64-bit capable and supports per-vector masking */
+ if (msg_ctrl & PCI_MSI_FLAGS_64BIT) {
+ msi_size += 4;
+ }
+ if (msg_ctrl & PCI_MSI_FLAGS_MASKBIT) {
+ msi_size += 10;
+ }
+
+ s->msi = g_new0(XenPTMSI, 1);
+ s->msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
+
+ *size = msi_size;
+ return 0;
+}
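+
+/* Size arithmetic sketch for the function above, following the MSI
+ * capability layout in the PCI spec: the base capability is 0xA bytes,
+ * 64-bit addressing adds 4 bytes for the upper address word, and
+ * per-vector masking adds 10 more bytes for the mask/pending words:
+ *
+ *   32-bit, no masking   : 0xA            = 0x0A
+ *   64-bit, no masking   : 0xA + 4        = 0x0E
+ *   32-bit with masking  : 0xA + 10       = 0x14
+ *   64-bit with masking  : 0xA + 4 + 10   = 0x18
+ */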
+/* get MSI-X Capability Structure register group size */
+static int xen_pt_msix_size_init(XenPCIPassthroughState *s,
+ const XenPTRegGroupInfo *grp_reg,
+ uint32_t base_offset, uint8_t *size)
+{
+ int rc = 0;
+
+ rc = xen_pt_msix_init(s, base_offset);
+
+ if (rc < 0) {
+ XEN_PT_ERR(&s->dev, "Internal error: Invalid xen_pt_msix_init.\n");
+ return rc;
+ }
+
+ *size = grp_reg->grp_size;
+ return 0;
+}
+
+
+static const XenPTRegGroupInfo xen_pt_emu_reg_grps[] = {
+ /* Header Type0 reg group */
+ {
+ .grp_id = 0xFF,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0x40,
+ .size_init = xen_pt_reg_grp_size_init,
+ .emu_regs = xen_pt_emu_reg_header0,
+ },
+ /* PCI PowerManagement Capability reg group */
+ {
+ .grp_id = PCI_CAP_ID_PM,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = PCI_PM_SIZEOF,
+ .size_init = xen_pt_reg_grp_size_init,
+ .emu_regs = xen_pt_emu_reg_pm,
+ },
+ /* AGP Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_AGP,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x30,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* Vital Product Data Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_VPD,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0x08,
+ .size_init = xen_pt_reg_grp_size_init,
+ .emu_regs = xen_pt_emu_reg_vpd,
+ },
+ /* Slot Identification reg group */
+ {
+ .grp_id = PCI_CAP_ID_SLOTID,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x04,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* MSI Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_MSI,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0xFF,
+ .size_init = xen_pt_msi_size_init,
+ .emu_regs = xen_pt_emu_reg_msi,
+ },
+ /* PCI-X Capabilities List Item reg group */
+ {
+ .grp_id = PCI_CAP_ID_PCIX,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x18,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* Vendor Specific Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_VNDR,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0xFF,
+ .size_init = xen_pt_vendor_size_init,
+ .emu_regs = xen_pt_emu_reg_vendor,
+ },
+ /* SHPC Capability List Item reg group */
+ {
+ .grp_id = PCI_CAP_ID_SHPC,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x08,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* Subsystem ID and Subsystem Vendor ID Capability List Item reg group */
+ {
+ .grp_id = PCI_CAP_ID_SSVID,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x08,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* AGP 8x Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_AGP3,
+ .grp_type = XEN_PT_GRP_TYPE_HARDWIRED,
+ .grp_size = 0x30,
+ .size_init = xen_pt_reg_grp_size_init,
+ },
+ /* PCI Express Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_EXP,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0xFF,
+ .size_init = xen_pt_pcie_size_init,
+ .emu_regs = xen_pt_emu_reg_pcie,
+ },
+ /* MSI-X Capability Structure reg group */
+ {
+ .grp_id = PCI_CAP_ID_MSIX,
+ .grp_type = XEN_PT_GRP_TYPE_EMU,
+ .grp_size = 0x0C,
+ .size_init = xen_pt_msix_size_init,
+ .emu_regs = xen_pt_emu_reg_msix,
+ },
+ {
+ .grp_size = 0,
+ },
+};
+
+/* initialize Capabilities Pointer or Next Pointer register */
+static int xen_pt_ptr_reg_init(XenPCIPassthroughState *s,
+ XenPTRegInfo *reg, uint32_t real_offset,
+ uint32_t *data)
+{
+ int i;
+ uint8_t *config = s->dev.config;
+ uint32_t reg_field = pci_get_byte(config + real_offset);
+ uint8_t cap_id = 0;
+
+ /* find capability offset */
+ while (reg_field) {
+ for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
+ if (xen_pt_hide_dev_cap(&s->real_device,
+ xen_pt_emu_reg_grps[i].grp_id)) {
+ continue;
+ }
+
+ cap_id = pci_get_byte(config + reg_field + PCI_CAP_LIST_ID);
+ if (xen_pt_emu_reg_grps[i].grp_id == cap_id) {
+ if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
+ goto out;
+ }
+ /* this capability is hardwired to 0 (hidden from the guest); find the next one */
+ break;
+ }
+ }
+
+ /* next capability */
+ reg_field = pci_get_byte(config + reg_field + PCI_CAP_LIST_NEXT);
+ }
+
+out:
+ *data = reg_field;
+ return 0;
+}
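+
+/* Worked trace of the pointer virtualization above, using a hypothetical
+ * physical chain: PM at 0x50 -> PCI-X at 0x60 -> MSI at 0x70.  When the
+ * emulated Next Pointer register of the PM capability is initialized,
+ * reg_field starts at 0x60; PCI-X belongs to a XEN_PT_GRP_TYPE_HARDWIRED
+ * group, so the walk follows its next pointer to 0x70; MSI is an EMU group,
+ * so *data becomes 0x70.  Capabilities hidden by xen_pt_hide_dev_cap() are
+ * skipped the same way, so the guest never sees them in its chain.
+ */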
+
+
+/*************
+ * Main
+ */
+
+static uint8_t find_cap_offset(XenPCIPassthroughState *s, uint8_t cap)
+{
+ uint8_t id;
+ unsigned max_cap = PCI_CAP_MAX;
+ uint8_t pos = PCI_CAPABILITY_LIST;
+ uint8_t status = 0;
+
+ if (xen_host_pci_get_byte(&s->real_device, PCI_STATUS, &status)) {
+ return 0;
+ }
+ if ((status & PCI_STATUS_CAP_LIST) == 0) {
+ return 0;
+ }
+
+ while (max_cap--) {
+ if (xen_host_pci_get_byte(&s->real_device, pos, &pos)) {
+ break;
+ }
+ if (pos < PCI_CONFIG_HEADER_SIZE) {
+ break;
+ }
+
+ pos &= ~3;
+ if (xen_host_pci_get_byte(&s->real_device,
+ pos + PCI_CAP_LIST_ID, &id)) {
+ break;
+ }
+
+ if (id == 0xff) {
+ break;
+ }
+ if (id == cap) {
+ return pos;
+ }
+
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+static int xen_pt_config_reg_init(XenPCIPassthroughState *s,
+ XenPTRegGroup *reg_grp, XenPTRegInfo *reg)
+{
+ XenPTReg *reg_entry;
+ uint32_t data = 0;
+ int rc = 0;
+
+ reg_entry = g_new0(XenPTReg, 1);
+ reg_entry->reg = reg;
+
+ if (reg->init) {
+ /* initialize emulated register */
+ rc = reg->init(s, reg_entry->reg,
+ reg_grp->base_offset + reg->offset, &data);
+ if (rc < 0) {
+ g_free(reg_entry);
+ return rc;
+ }
+ if (data == XEN_PT_INVALID_REG) {
+ /* free the unused register entry */
+ g_free(reg_entry);
+ return 0;
+ }
+ /* set register value */
+ reg_entry->data = data;
+ }
+ /* list add register entry */
+ QLIST_INSERT_HEAD(&reg_grp->reg_tbl_list, reg_entry, entries);
+
+ return 0;
+}
+
+int xen_pt_config_init(XenPCIPassthroughState *s)
+{
+ int i, rc;
+
+ QLIST_INIT(&s->reg_grps);
+
+ for (i = 0; xen_pt_emu_reg_grps[i].grp_size != 0; i++) {
+ uint32_t reg_grp_offset = 0;
+ XenPTRegGroup *reg_grp_entry = NULL;
+
+ if (xen_pt_emu_reg_grps[i].grp_id != 0xFF) {
+ if (xen_pt_hide_dev_cap(&s->real_device,
+ xen_pt_emu_reg_grps[i].grp_id)) {
+ continue;
+ }
+
+ reg_grp_offset = find_cap_offset(s, xen_pt_emu_reg_grps[i].grp_id);
+
+ if (!reg_grp_offset) {
+ continue;
+ }
+ }
+
+ reg_grp_entry = g_new0(XenPTRegGroup, 1);
+ QLIST_INIT(&reg_grp_entry->reg_tbl_list);
+ QLIST_INSERT_HEAD(&s->reg_grps, reg_grp_entry, entries);
+
+ reg_grp_entry->base_offset = reg_grp_offset;
+ reg_grp_entry->reg_grp = xen_pt_emu_reg_grps + i;
+ if (xen_pt_emu_reg_grps[i].size_init) {
+ /* get register group size */
+ rc = xen_pt_emu_reg_grps[i].size_init(s, reg_grp_entry->reg_grp,
+ reg_grp_offset,
+ &reg_grp_entry->size);
+ if (rc < 0) {
+ xen_pt_config_delete(s);
+ return rc;
+ }
+ }
+
+ if (xen_pt_emu_reg_grps[i].grp_type == XEN_PT_GRP_TYPE_EMU) {
+ if (xen_pt_emu_reg_grps[i].emu_regs) {
+ int j = 0;
+ XenPTRegInfo *regs = xen_pt_emu_reg_grps[i].emu_regs;
+ /* initialize each capability register */
+ for (j = 0; regs->size != 0; j++, regs++) {
+ rc = xen_pt_config_reg_init(s, reg_grp_entry, regs);
+ if (rc < 0) {
+ xen_pt_config_delete(s);
+ return rc;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
+
+/* delete all emulated registers */
+void xen_pt_config_delete(XenPCIPassthroughState *s)
+{
+ struct XenPTRegGroup *reg_group, *next_grp;
+ struct XenPTReg *reg, *next_reg;
+
+ /* free MSI/MSI-X info table */
+ if (s->msix) {
+ xen_pt_msix_delete(s);
+ }
+ if (s->msi) {
+ g_free(s->msi);
+ }
+
+ /* free all register group entries */
+ QLIST_FOREACH_SAFE(reg_group, &s->reg_grps, entries, next_grp) {
+ /* free all register entries */
+ QLIST_FOREACH_SAFE(reg, &reg_group->reg_tbl_list, entries, next_reg) {
+ QLIST_REMOVE(reg, entries);
+ g_free(reg);
+ }
+
+ QLIST_REMOVE(reg_group, entries);
+ g_free(reg_group);
+ }
+}
diff --git a/hw/xen_pt_msi.c b/hw/xen_pt_msi.c
new file mode 100644
index 0000000000..2299cc7772
--- /dev/null
+++ b/hw/xen_pt_msi.c
@@ -0,0 +1,620 @@
+/*
+ * Copyright (c) 2007, Intel Corporation.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Jiang Yunhong <yunhong.jiang@intel.com>
+ *
+ * This file implements direct PCI assignment to a HVM guest
+ */
+
+#include <sys/mman.h>
+
+#include "xen_backend.h"
+#include "xen_pt.h"
+#include "apic-msidef.h"
+
+
+#define XEN_PT_AUTO_ASSIGN -1
+
+/* shift count for gflags */
+#define XEN_PT_GFLAGS_SHIFT_DEST_ID 0
+#define XEN_PT_GFLAGS_SHIFT_RH 8
+#define XEN_PT_GFLAGS_SHIFT_DM 9
+#define XEN_PT_GFLAGSSHIFT_DELIV_MODE 12
+#define XEN_PT_GFLAGSSHIFT_TRG_MODE 15
+
+
+/*
+ * Helpers
+ */
+
+static inline uint8_t msi_vector(uint32_t data)
+{
+ return (data & MSI_DATA_VECTOR_MASK) >> MSI_DATA_VECTOR_SHIFT;
+}
+
+static inline uint8_t msi_dest_id(uint32_t addr)
+{
+ return (addr & MSI_ADDR_DEST_ID_MASK) >> MSI_ADDR_DEST_ID_SHIFT;
+}
+
+static inline uint32_t msi_ext_dest_id(uint32_t addr_hi)
+{
+ return addr_hi & 0xffffff00;
+}
+
+static uint32_t msi_gflags(uint32_t data, uint64_t addr)
+{
+ uint32_t result = 0;
+ int rh, dm, dest_id, deliv_mode, trig_mode;
+
+ rh = (addr >> MSI_ADDR_REDIRECTION_SHIFT) & 0x1;
+ dm = (addr >> MSI_ADDR_DEST_MODE_SHIFT) & 0x1;
+ dest_id = msi_dest_id(addr);
+ deliv_mode = (data >> MSI_DATA_DELIVERY_MODE_SHIFT) & 0x7;
+ trig_mode = (data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
+
+ result = dest_id | (rh << XEN_PT_GFLAGS_SHIFT_RH)
+ | (dm << XEN_PT_GFLAGS_SHIFT_DM)
+ | (deliv_mode << XEN_PT_GFLAGSSHIFT_DELIV_MODE)
+ | (trig_mode << XEN_PT_GFLAGSSHIFT_TRG_MODE);
+
+ return result;
+}
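+
+/* Illustrative encoding example for msi_gflags() (the MSI address/data
+ * values are made up): for addr = 0xFEE01000 and data = 0x0041,
+ *
+ *   dest_id    = 0x01  (address bits 12-19)
+ *   rh = dm    = 0     (address bits 3 and 2)
+ *   deliv_mode = 0     (data bits 8-10)
+ *   trig_mode  = 0     (data bit 15)
+ *
+ * so gflags = 0x01, with the fields packed at the XEN_PT_GFLAGS* shifts
+ * defined above (dest id in bits 0-7, RH/DM in bits 8-9, delivery mode in
+ * bits 12-14, trigger mode in bit 15).  The vector itself is not part of
+ * gflags; it is passed separately as gvec.
+ */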
+
+static inline uint64_t msi_addr64(XenPTMSI *msi)
+{
+ return (uint64_t)msi->addr_hi << 32 | msi->addr_lo;
+}
+
+static int msi_msix_enable(XenPCIPassthroughState *s,
+ uint32_t address,
+ uint16_t flag,
+ bool enable)
+{
+ uint16_t val = 0;
+
+ if (!address) {
+ return -1;
+ }
+
+ xen_host_pci_get_word(&s->real_device, address, &val);
+ if (enable) {
+ val |= flag;
+ } else {
+ val &= ~flag;
+ }
+ xen_host_pci_set_word(&s->real_device, address, val);
+ return 0;
+}
+
+static int msi_msix_setup(XenPCIPassthroughState *s,
+ uint64_t addr,
+ uint32_t data,
+ int *ppirq,
+ bool is_msix,
+ int msix_entry,
+ bool is_not_mapped)
+{
+ uint8_t gvec = msi_vector(data);
+ int rc = 0;
+
+ assert((!is_msix && msix_entry == 0) || is_msix);
+
+ if (gvec == 0) {
+ /* if gvec is 0, the guest is asking for a particular pirq that
+ * is passed as dest_id */
+ *ppirq = msi_ext_dest_id(addr >> 32) | msi_dest_id(addr);
+ if (!*ppirq) {
+ /* this probably indicates a misconfiguration of the guest,
+ * try the emulated path */
+ *ppirq = XEN_PT_UNASSIGNED_PIRQ;
+ } else {
+ XEN_PT_LOG(&s->dev, "requested pirq %d for MSI%s"
+ " (vec: %#x, entry: %#x)\n",
+ *ppirq, is_msix ? "-X" : "", gvec, msix_entry);
+ }
+ }
+
+ if (is_not_mapped) {
+ uint64_t table_base = 0;
+
+ if (is_msix) {
+ table_base = s->msix->table_base;
+ }
+
+ rc = xc_physdev_map_pirq_msi(xen_xc, xen_domid, XEN_PT_AUTO_ASSIGN,
+ ppirq, PCI_DEVFN(s->real_device.dev,
+ s->real_device.func),
+ s->real_device.bus,
+ msix_entry, table_base);
+ if (rc) {
+ XEN_PT_ERR(&s->dev,
+ "Mapping of MSI%s (rc: %i, vec: %#x, entry %#x)\n",
+ is_msix ? "-X" : "", rc, gvec, msix_entry);
+ return rc;
+ }
+ }
+
+ return 0;
+}
+static int msi_msix_update(XenPCIPassthroughState *s,
+ uint64_t addr,
+ uint32_t data,
+ int pirq,
+ bool is_msix,
+ int msix_entry,
+ int *old_pirq)
+{
+ PCIDevice *d = &s->dev;
+ uint8_t gvec = msi_vector(data);
+ uint32_t gflags = msi_gflags(data, addr);
+ int rc = 0;
+ uint64_t table_addr = 0;
+
+ XEN_PT_LOG(d, "Updating MSI%s with pirq %d gvec %#x gflags %#x"
+ " (entry: %#x)\n",
+ is_msix ? "-X" : "", pirq, gvec, gflags, msix_entry);
+
+ if (is_msix) {
+ table_addr = s->msix->mmio_base_addr;
+ }
+
+ rc = xc_domain_update_msi_irq(xen_xc, xen_domid, gvec,
+ pirq, gflags, table_addr);
+
+ if (rc) {
+ XEN_PT_ERR(d, "Updating of MSI%s failed. (rc: %d)\n",
+ is_msix ? "-X" : "", rc);
+
+ if (xc_physdev_unmap_pirq(xen_xc, xen_domid, *old_pirq)) {
+ XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed.\n",
+ is_msix ? "-X" : "", *old_pirq);
+ }
+ *old_pirq = XEN_PT_UNASSIGNED_PIRQ;
+ }
+ return rc;
+}
+
+static int msi_msix_disable(XenPCIPassthroughState *s,
+ uint64_t addr,
+ uint32_t data,
+ int pirq,
+ bool is_msix,
+ bool is_binded)
+{
+ PCIDevice *d = &s->dev;
+ uint8_t gvec = msi_vector(data);
+ uint32_t gflags = msi_gflags(data, addr);
+ int rc = 0;
+
+ if (pirq == XEN_PT_UNASSIGNED_PIRQ) {
+ return 0;
+ }
+
+ if (is_binded) {
+ XEN_PT_LOG(d, "Unbind MSI%s with pirq %d, gvec %#x\n",
+ is_msix ? "-X" : "", pirq, gvec);
+ rc = xc_domain_unbind_msi_irq(xen_xc, xen_domid, gvec, pirq, gflags);
+ if (rc) {
+ XEN_PT_ERR(d, "Unbinding of MSI%s failed. (pirq: %d, gvec: %#x)\n",
+ is_msix ? "-X" : "", pirq, gvec);
+ return rc;
+ }
+ }
+
+ XEN_PT_LOG(d, "Unmap MSI%s pirq %d\n", is_msix ? "-X" : "", pirq);
+ rc = xc_physdev_unmap_pirq(xen_xc, xen_domid, pirq);
+ if (rc) {
+ XEN_PT_ERR(d, "Unmapping of MSI%s pirq %d failed. (rc: %i)\n",
+ is_msix ? "-X" : "", pirq, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/*
+ * MSI virtualization functions
+ */
+
+int xen_pt_msi_set_enable(XenPCIPassthroughState *s, bool enable)
+{
+ XEN_PT_LOG(&s->dev, "%s MSI.\n", enable ? "enabling" : "disabling");
+
+ if (!s->msi) {
+ return -1;
+ }
+
+ return msi_msix_enable(s, s->msi->ctrl_offset, PCI_MSI_FLAGS_ENABLE,
+ enable);
+}
+
+/* setup physical msi, but don't enable it */
+int xen_pt_msi_setup(XenPCIPassthroughState *s)
+{
+ int pirq = XEN_PT_UNASSIGNED_PIRQ;
+ int rc = 0;
+ XenPTMSI *msi = s->msi;
+
+ if (msi->initialized) {
+ XEN_PT_ERR(&s->dev,
+ "Setup physical MSI when it has been properly initialized.\n");
+ return -1;
+ }
+
+ rc = msi_msix_setup(s, msi_addr64(msi), msi->data, &pirq, false, 0, true);
+ if (rc) {
+ return rc;
+ }
+
+ if (pirq < 0) {
+ XEN_PT_ERR(&s->dev, "Invalid pirq number: %d.\n", pirq);
+ return -1;
+ }
+
+ msi->pirq = pirq;
+ XEN_PT_LOG(&s->dev, "MSI mapped with pirq %d.\n", pirq);
+
+ return 0;
+}
+
+int xen_pt_msi_update(XenPCIPassthroughState *s)
+{
+ XenPTMSI *msi = s->msi;
+ return msi_msix_update(s, msi_addr64(msi), msi->data, msi->pirq,
+ false, 0, &msi->pirq);
+}
+
+void xen_pt_msi_disable(XenPCIPassthroughState *s)
+{
+ XenPTMSI *msi = s->msi;
+
+ if (!msi) {
+ return;
+ }
+
+ xen_pt_msi_set_enable(s, false);
+
+ msi_msix_disable(s, msi_addr64(msi), msi->data, msi->pirq, false,
+ msi->initialized);
+
+ /* clear msi info */
+ msi->flags = 0;
+ msi->mapped = false;
+ msi->pirq = XEN_PT_UNASSIGNED_PIRQ;
+}
+
+/*
+ * MSI-X virtualization functions
+ */
+
+static int msix_set_enable(XenPCIPassthroughState *s, bool enabled)
+{
+ XEN_PT_LOG(&s->dev, "%s MSI-X.\n", enabled ? "enabling" : "disabling");
+
+ if (!s->msix) {
+ return -1;
+ }
+
+ return msi_msix_enable(s, s->msix->ctrl_offset, PCI_MSIX_FLAGS_ENABLE,
+ enabled);
+}
+
+static int xen_pt_msix_update_one(XenPCIPassthroughState *s, int entry_nr)
+{
+ XenPTMSIXEntry *entry = NULL;
+ int pirq;
+ int rc;
+
+ if (entry_nr < 0 || entry_nr >= s->msix->total_entries) {
+ return -EINVAL;
+ }
+
+ entry = &s->msix->msix_entry[entry_nr];
+
+ if (!entry->updated) {
+ return 0;
+ }
+
+ pirq = entry->pirq;
+
+ rc = msi_msix_setup(s, entry->addr, entry->data, &pirq, true, entry_nr,
+ entry->pirq == XEN_PT_UNASSIGNED_PIRQ);
+ if (rc) {
+ return rc;
+ }
+ if (entry->pirq == XEN_PT_UNASSIGNED_PIRQ) {
+ entry->pirq = pirq;
+ }
+
+ rc = msi_msix_update(s, entry->addr, entry->data, pirq, true,
+ entry_nr, &entry->pirq);
+
+ if (!rc) {
+ entry->updated = false;
+ }
+
+ return rc;
+}
+
+int xen_pt_msix_update(XenPCIPassthroughState *s)
+{
+ XenPTMSIX *msix = s->msix;
+ int i;
+
+ for (i = 0; i < msix->total_entries; i++) {
+ xen_pt_msix_update_one(s, i);
+ }
+
+ return 0;
+}
+
+void xen_pt_msix_disable(XenPCIPassthroughState *s)
+{
+ int i = 0;
+
+ msix_set_enable(s, false);
+
+ for (i = 0; i < s->msix->total_entries; i++) {
+ XenPTMSIXEntry *entry = &s->msix->msix_entry[i];
+
+ msi_msix_disable(s, entry->addr, entry->data, entry->pirq, true, true);
+
+ /* clear MSI-X info */
+ entry->pirq = XEN_PT_UNASSIGNED_PIRQ;
+ entry->updated = false;
+ }
+}
+
+int xen_pt_msix_update_remap(XenPCIPassthroughState *s, int bar_index)
+{
+ XenPTMSIXEntry *entry;
+ int i, ret;
+
+ if (!(s->msix && s->msix->bar_index == bar_index)) {
+ return 0;
+ }
+
+ for (i = 0; i < s->msix->total_entries; i++) {
+ entry = &s->msix->msix_entry[i];
+ if (entry->pirq != XEN_PT_UNASSIGNED_PIRQ) {
+ ret = xc_domain_unbind_pt_irq(xen_xc, xen_domid, entry->pirq,
+ PT_IRQ_TYPE_MSI, 0, 0, 0, 0);
+ if (ret) {
+ XEN_PT_ERR(&s->dev, "unbind MSI-X entry %d failed\n",
+ entry->pirq);
+ }
+ entry->updated = true;
+ }
+ }
+ return xen_pt_msix_update(s);
+}
+
+static uint32_t get_entry_value(XenPTMSIXEntry *e, int offset)
+{
+ switch (offset) {
+ case PCI_MSIX_ENTRY_LOWER_ADDR:
+ return e->addr & UINT32_MAX;
+ case PCI_MSIX_ENTRY_UPPER_ADDR:
+ return e->addr >> 32;
+ case PCI_MSIX_ENTRY_DATA:
+ return e->data;
+ case PCI_MSIX_ENTRY_VECTOR_CTRL:
+ return e->vector_ctrl;
+ default:
+ return 0;
+ }
+}
+
+static void set_entry_value(XenPTMSIXEntry *e, int offset, uint32_t val)
+{
+ switch (offset) {
+ case PCI_MSIX_ENTRY_LOWER_ADDR:
+ e->addr = (e->addr & ((uint64_t)UINT32_MAX << 32)) | val;
+ break;
+ case PCI_MSIX_ENTRY_UPPER_ADDR:
+ e->addr = (uint64_t)val << 32 | (e->addr & UINT32_MAX);
+ break;
+ case PCI_MSIX_ENTRY_DATA:
+ e->data = val;
+ break;
+ case PCI_MSIX_ENTRY_VECTOR_CTRL:
+ e->vector_ctrl = val;
+ break;
+ }
+}
+
+static void pci_msix_write(void *opaque, target_phys_addr_t addr,
+ uint64_t val, unsigned size)
+{
+ XenPCIPassthroughState *s = opaque;
+ XenPTMSIX *msix = s->msix;
+ XenPTMSIXEntry *entry;
+ int entry_nr, offset;
+
+ entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
+ if (entry_nr < 0 || entry_nr >= msix->total_entries) {
+ XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
+ return;
+ }
+ entry = &msix->msix_entry[entry_nr];
+ offset = addr % PCI_MSIX_ENTRY_SIZE;
+
+ if (offset != PCI_MSIX_ENTRY_VECTOR_CTRL) {
+ const volatile uint32_t *vec_ctrl;
+
+ if (get_entry_value(entry, offset) == val) {
+ return;
+ }
+
+ /*
+ * If Xen intercepts the mask bit access, entry->vec_ctrl may not be
+ * up-to-date. Read from hardware directly.
+ */
+ vec_ctrl = s->msix->phys_iomem_base + entry_nr * PCI_MSIX_ENTRY_SIZE
+ + PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ if (msix->enabled && !(*vec_ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
+ XEN_PT_ERR(&s->dev, "Can't update msix entry %d since MSI-X is"
+ " already enabled.\n", entry_nr);
+ return;
+ }
+
+ entry->updated = true;
+ }
+
+ set_entry_value(entry, offset, val);
+
+ if (offset == PCI_MSIX_ENTRY_VECTOR_CTRL) {
+ if (msix->enabled && !(val & PCI_MSIX_ENTRY_CTRL_MASKBIT)) {
+ xen_pt_msix_update_one(s, entry_nr);
+ }
+ }
+}
+
+static uint64_t pci_msix_read(void *opaque, target_phys_addr_t addr,
+ unsigned size)
+{
+ XenPCIPassthroughState *s = opaque;
+ XenPTMSIX *msix = s->msix;
+ int entry_nr, offset;
+
+ entry_nr = addr / PCI_MSIX_ENTRY_SIZE;
+ if (entry_nr < 0) {
+ XEN_PT_ERR(&s->dev, "asked MSI-X entry '%i' invalid!\n", entry_nr);
+ return 0;
+ }
+
+ offset = addr % PCI_MSIX_ENTRY_SIZE;
+
+ if (addr < msix->total_entries * PCI_MSIX_ENTRY_SIZE) {
+ return get_entry_value(&msix->msix_entry[entry_nr], offset);
+ } else {
+ /* Pending Bit Array (PBA) */
+ return *(uint32_t *)(msix->phys_iomem_base + addr);
+ }
+}
+
+static const MemoryRegionOps pci_msix_ops = {
+ .read = pci_msix_read,
+ .write = pci_msix_write,
+ .endianness = DEVICE_NATIVE_ENDIAN,
+ .valid = {
+ .min_access_size = 4,
+ .max_access_size = 4,
+ .unaligned = false,
+ },
+};
+
+int xen_pt_msix_init(XenPCIPassthroughState *s, uint32_t base)
+{
+ uint8_t id = 0;
+ uint16_t control = 0;
+ uint32_t table_off = 0;
+ int i, total_entries, bar_index;
+ XenHostPCIDevice *hd = &s->real_device;
+ PCIDevice *d = &s->dev;
+ int fd = -1;
+ XenPTMSIX *msix = NULL;
+ int rc = 0;
+
+ rc = xen_host_pci_get_byte(hd, base + PCI_CAP_LIST_ID, &id);
+ if (rc) {
+ return rc;
+ }
+
+ if (id != PCI_CAP_ID_MSIX) {
+ XEN_PT_ERR(d, "Invalid id %#x base %#x\n", id, base);
+ return -1;
+ }
+
+ xen_host_pci_get_word(hd, base + PCI_MSIX_FLAGS, &control);
+ total_entries = control & PCI_MSIX_FLAGS_QSIZE;
+ total_entries += 1;
+
+ s->msix = g_malloc0(sizeof (XenPTMSIX)
+ + total_entries * sizeof (XenPTMSIXEntry));
+ msix = s->msix;
+
+ msix->total_entries = total_entries;
+ for (i = 0; i < total_entries; i++) {
+ msix->msix_entry[i].pirq = XEN_PT_UNASSIGNED_PIRQ;
+ }
+
+ memory_region_init_io(&msix->mmio, &pci_msix_ops, s, "xen-pci-pt-msix",
+ (total_entries * PCI_MSIX_ENTRY_SIZE
+ + XC_PAGE_SIZE - 1)
+ & XC_PAGE_MASK);
+
+ xen_host_pci_get_long(hd, base + PCI_MSIX_TABLE, &table_off);
+ bar_index = msix->bar_index = table_off & PCI_MSIX_FLAGS_BIRMASK;
+ table_off = table_off & ~PCI_MSIX_FLAGS_BIRMASK;
+ msix->table_base = s->real_device.io_regions[bar_index].base_addr;
+ XEN_PT_LOG(d, "get MSI-X table BAR base 0x%"PRIx64"\n", msix->table_base);
+
+ fd = open("/dev/mem", O_RDWR);
+ if (fd == -1) {
+ rc = -errno;
+ XEN_PT_ERR(d, "Can't open /dev/mem: %s\n", strerror(errno));
+ goto error_out;
+ }
+ XEN_PT_LOG(d, "table_off = %#x, total_entries = %d\n",
+ table_off, total_entries);
+ msix->table_offset_adjust = table_off & 0x0fff;
+ msix->phys_iomem_base =
+ mmap(NULL,
+ total_entries * PCI_MSIX_ENTRY_SIZE + msix->table_offset_adjust,
+ PROT_READ,
+ MAP_SHARED | MAP_LOCKED,
+ fd,
+ msix->table_base + table_off - msix->table_offset_adjust);
+ close(fd);
+ if (msix->phys_iomem_base == MAP_FAILED) {
+ rc = -errno;
+ XEN_PT_ERR(d, "Can't map physical MSI-X table: %s\n", strerror(errno));
+ goto error_out;
+ }
+ msix->phys_iomem_base = (char *)msix->phys_iomem_base
+ + msix->table_offset_adjust;
+
+ XEN_PT_LOG(d, "mapping physical MSI-X table to %p\n",
+ msix->phys_iomem_base);
+
+ memory_region_add_subregion_overlap(&s->bar[bar_index], table_off,
+ &msix->mmio,
+ 2); /* Priority: pci default + 1 */
+
+ return 0;
+
+error_out:
+ memory_region_destroy(&msix->mmio);
+ g_free(s->msix);
+ s->msix = NULL;
+ return rc;
+}
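+
+/* Decoding sketch for the MSI-X table location read above (the register
+ * value is made up): if PCI_MSIX_TABLE reads back as 0x00002003, then
+ *
+ *   bar_index = 0x00002003 & PCI_MSIX_FLAGS_BIRMASK  = 3
+ *   table_off = 0x00002003 & ~PCI_MSIX_FLAGS_BIRMASK = 0x2000
+ *
+ * so the table lives at offset 0x2000 of BAR 3: xen_pt_msix_init() mmap()s
+ * the corresponding physical range from /dev/mem and overlays the
+ * pci_msix_ops MMIO region at that offset of the emulated BAR.
+ */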
+
+void xen_pt_msix_delete(XenPCIPassthroughState *s)
+{
+ XenPTMSIX *msix = s->msix;
+
+ if (!msix) {
+ return;
+ }
+
+ /* unmap the MSI-X memory mapped register area */
+ if (msix->phys_iomem_base) {
+ XEN_PT_LOG(&s->dev, "unmapping physical MSI-X table from %p\n",
+ msix->phys_iomem_base);
+ munmap(msix->phys_iomem_base, msix->total_entries * PCI_MSIX_ENTRY_SIZE
+ + msix->table_offset_adjust);
+ }
+
+ memory_region_del_subregion(&s->bar[msix->bar_index], &msix->mmio);
+ memory_region_destroy(&msix->mmio);
+
+ g_free(s->msix);
+ s->msix = NULL;
+}
diff --git a/hw/xenfb.c b/hw/xenfb.c
index 1bcf171b01..338800a4d9 100644
--- a/hw/xenfb.c
+++ b/hw/xenfb.c
@@ -35,19 +35,16 @@
#include <string.h>
#include <time.h>
-#include <xs.h>
-#include <xenctrl.h>
-#include <xen/event_channel.h>
-#include <xen/io/xenbus.h>
-#include <xen/io/fbif.h>
-#include <xen/io/kbdif.h>
-#include <xen/io/protocols.h>
-
#include "hw.h"
#include "console.h"
#include "qemu-char.h"
#include "xen_backend.h"
+#include <xen/event_channel.h>
+#include <xen/io/fbif.h>
+#include <xen/io/kbdif.h>
+#include <xen/io/protocols.h>
+
#ifndef BTN_LEFT
#define BTN_LEFT 0x110 /* from <linux/input.h> */
#endif
diff --git a/hw/xgmac.c b/hw/xgmac.c
index dd4bdc46f5..a91ef608f1 100644
--- a/hw/xgmac.c
+++ b/hw/xgmac.c
@@ -308,7 +308,7 @@ static const MemoryRegionOps enet_mem_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static int eth_can_rx(VLANClientState *nc)
+static int eth_can_rx(NetClientState *nc)
{
struct XgmacState *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -316,7 +316,7 @@ static int eth_can_rx(VLANClientState *nc)
return s->regs[DMA_CONTROL] & DMA_CONTROL_SR;
}
-static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
struct XgmacState *s = DO_UPCAST(NICState, nc, nc)->opaque;
static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
@@ -364,14 +364,14 @@ out:
return ret;
}
-static void eth_cleanup(VLANClientState *nc)
+static void eth_cleanup(NetClientState *nc)
{
struct XgmacState *s = DO_UPCAST(NICState, nc, nc)->opaque;
s->nic = NULL;
}
static NetClientInfo net_xgmac_enet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_rx,
.receive = eth_rx,
diff --git a/hw/xilinx_axienet.c b/hw/xilinx_axienet.c
index 2e8d8a59ba..9b08c62912 100644
--- a/hw/xilinx_axienet.c
+++ b/hw/xilinx_axienet.c
@@ -612,7 +612,7 @@ static const MemoryRegionOps enet_ops = {
.endianness = DEVICE_LITTLE_ENDIAN,
};
-static int eth_can_rx(VLANClientState *nc)
+static int eth_can_rx(NetClientState *nc)
{
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -635,7 +635,7 @@ static int enet_match_addr(const uint8_t *buf, uint32_t f0, uint32_t f1)
return match;
}
-static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
static const unsigned char sa_bcast[6] = {0xff, 0xff, 0xff,
@@ -648,7 +648,6 @@ static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
uint16_t csum16;
int i;
- s = s;
DENET(qemu_log("%s: %zd bytes\n", __func__, size));
unicast = ~buf[0] & 0x1;
@@ -780,7 +779,7 @@ static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
return size;
}
-static void eth_cleanup(VLANClientState *nc)
+static void eth_cleanup(NetClientState *nc)
{
/* FIXME. */
struct XilinxAXIEnet *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -832,7 +831,7 @@ axienet_stream_push(void *opaque, uint8_t *buf, size_t size, uint32_t *hdr)
}
static NetClientInfo net_xilinx_enet_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_rx,
.receive = eth_rx,
diff --git a/hw/xilinx_ethlite.c b/hw/xilinx_ethlite.c
index affbb8bfff..56ca620dd7 100644
--- a/hw/xilinx_ethlite.c
+++ b/hw/xilinx_ethlite.c
@@ -160,7 +160,7 @@ static const MemoryRegionOps eth_ops = {
}
};
-static int eth_can_rx(VLANClientState *nc)
+static int eth_can_rx(NetClientState *nc)
{
struct xlx_ethlite *s = DO_UPCAST(NICState, nc, nc)->opaque;
int r;
@@ -168,7 +168,7 @@ static int eth_can_rx(VLANClientState *nc)
return r;
}
-static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t eth_rx(NetClientState *nc, const uint8_t *buf, size_t size)
{
struct xlx_ethlite *s = DO_UPCAST(NICState, nc, nc)->opaque;
unsigned int rxbase = s->rxbuf * (0x800 / 4);
@@ -194,7 +194,7 @@ static ssize_t eth_rx(VLANClientState *nc, const uint8_t *buf, size_t size)
return size;
}
-static void eth_cleanup(VLANClientState *nc)
+static void eth_cleanup(NetClientState *nc)
{
struct xlx_ethlite *s = DO_UPCAST(NICState, nc, nc)->opaque;
@@ -202,7 +202,7 @@ static void eth_cleanup(VLANClientState *nc)
}
static NetClientInfo net_xilinx_ethlite_info = {
- .type = NET_CLIENT_TYPE_NIC,
+ .type = NET_CLIENT_OPTIONS_KIND_NIC,
.size = sizeof(NICState),
.can_receive = eth_can_rx,
.receive = eth_rx,
diff --git a/hw/xio3130_downstream.c b/hw/xio3130_downstream.c
index 56d1b353d0..0d8a5e7020 100644
--- a/hw/xio3130_downstream.c
+++ b/hw/xio3130_downstream.c
@@ -60,7 +60,6 @@ static int xio3130_downstream_initfn(PCIDevice *d)
PCIEPort *p = DO_UPCAST(PCIEPort, br, br);
PCIESlot *s = DO_UPCAST(PCIESlot, port, p);
int rc;
- int tmp;
rc = pci_bridge_initfn(d);
if (rc < 0) {
@@ -108,12 +107,11 @@ err_pcie_cap:
err_msi:
msi_uninit(d);
err_bridge:
- tmp = pci_bridge_exitfn(d);
- assert(!tmp);
+ pci_bridge_exitfn(d);
return rc;
}
-static int xio3130_downstream_exitfn(PCIDevice *d)
+static void xio3130_downstream_exitfn(PCIDevice *d)
{
PCIBridge* br = DO_UPCAST(PCIBridge, dev, d);
PCIEPort *p = DO_UPCAST(PCIEPort, br, br);
@@ -123,7 +121,7 @@ static int xio3130_downstream_exitfn(PCIDevice *d)
pcie_chassis_del_slot(s);
pcie_cap_exit(d);
msi_uninit(d);
- return pci_bridge_exitfn(d);
+ pci_bridge_exitfn(d);
}
PCIESlot *xio3130_downstream_init(PCIBus *bus, int devfn, bool multifunction,
diff --git a/hw/xio3130_upstream.c b/hw/xio3130_upstream.c
index 79725813a2..d46b86c74d 100644
--- a/hw/xio3130_upstream.c
+++ b/hw/xio3130_upstream.c
@@ -56,7 +56,6 @@ static int xio3130_upstream_initfn(PCIDevice *d)
PCIBridge* br = DO_UPCAST(PCIBridge, dev, d);
PCIEPort *p = DO_UPCAST(PCIEPort, br, br);
int rc;
- int tmp;
rc = pci_bridge_initfn(d);
if (rc < 0) {
@@ -95,17 +94,16 @@ err:
err_msi:
msi_uninit(d);
err_bridge:
- tmp = pci_bridge_exitfn(d);
- assert(!tmp);
+ pci_bridge_exitfn(d);
return rc;
}
-static int xio3130_upstream_exitfn(PCIDevice *d)
+static void xio3130_upstream_exitfn(PCIDevice *d)
{
pcie_aer_exit(d);
pcie_cap_exit(d);
msi_uninit(d);
- return pci_bridge_exitfn(d);
+ pci_bridge_exitfn(d);
}
PCIEPort *xio3130_upstream_init(PCIBus *bus, int devfn, bool multifunction,
diff --git a/hw/xtensa_lx60.c b/hw/xtensa_lx60.c
index 152eed95d8..3653f65b1e 100644
--- a/hw/xtensa_lx60.c
+++ b/hw/xtensa_lx60.c
@@ -173,7 +173,7 @@ static void lx_init(const LxBoardDesc *board,
int n;
if (!cpu_model) {
- cpu_model = "dc232b";
+ cpu_model = XTENSA_DEFAULT_CPU_MODEL;
}
for (n = 0; n < smp_cpus; n++) {
@@ -201,7 +201,7 @@ static void lx_init(const LxBoardDesc *board,
memory_region_init(system_io, "lx60.io", 224 * 1024 * 1024);
memory_region_add_subregion(system_memory, 0xf0000000, system_io);
lx60_fpga_init(system_io, 0x0d020000);
- if (nd_table[0].vlan) {
+ if (nd_table[0].used) {
lx60_net_init(system_io, 0x0d030000, 0x0d030400, 0x0d800000,
xtensa_get_extint(env, 1), nd_table);
}
@@ -300,14 +300,14 @@ static void xtensa_lx200_init(ram_addr_t ram_size,
static QEMUMachine xtensa_lx60_machine = {
.name = "lx60",
- .desc = "lx60 EVB (dc232b)",
+ .desc = "lx60 EVB (" XTENSA_DEFAULT_CPU_MODEL ")",
.init = xtensa_lx60_init,
.max_cpus = 4,
};
static QEMUMachine xtensa_lx200_machine = {
.name = "lx200",
- .desc = "lx200 EVB (dc232b)",
+ .desc = "lx200 EVB (" XTENSA_DEFAULT_CPU_MODEL ")",
.init = xtensa_lx200_init,
.max_cpus = 4,
};
diff --git a/hw/xtensa_sim.c b/hw/xtensa_sim.c
index 1ce07fb899..831460b7c4 100644
--- a/hw/xtensa_sim.c
+++ b/hw/xtensa_sim.c
@@ -102,7 +102,7 @@ static void xtensa_sim_init(ram_addr_t ram_size,
const char *initrd_filename, const char *cpu_model)
{
if (!cpu_model) {
- cpu_model = "dc232b";
+ cpu_model = XTENSA_DEFAULT_CPU_MODEL;
}
sim_init(ram_size, boot_device, kernel_filename, kernel_cmdline,
initrd_filename, cpu_model);
@@ -110,7 +110,8 @@ static void xtensa_sim_init(ram_addr_t ram_size,
static QEMUMachine xtensa_sim_machine = {
.name = "sim",
- .desc = "sim machine (dc232b)",
+ .desc = "sim machine (" XTENSA_DEFAULT_CPU_MODEL ")",
+ .is_default = true,
.init = xtensa_sim_init,
.max_cpus = 4,
};
diff --git a/include/qemu/cpu.h b/include/qemu/cpu.h
index 78b65b35fc..ad706a6dbd 100644
--- a/include/qemu/cpu.h
+++ b/include/qemu/cpu.h
@@ -21,6 +21,7 @@
#define QEMU_CPU_H
#include "qemu/object.h"
+#include "qemu-thread.h"
/**
* SECTION:cpu
@@ -61,6 +62,12 @@ struct CPUState {
Object parent_obj;
/*< public >*/
+ struct QemuThread *thread;
+#ifdef _WIN32
+ HANDLE hThread;
+#endif
+ bool thread_kicked;
+
/* TODO Move common fields from CPUArchState here. */
};
diff --git a/iov.c b/iov.c
index 0f964939d0..b3330610bb 100644
--- a/iov.c
+++ b/iov.c
@@ -7,6 +7,7 @@
* Author(s):
* Anthony Liguori <aliguori@us.ibm.com>
* Amit Shah <amit.shah@redhat.com>
+ * Michael Tokarev <mjt@tls.msk.ru>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
@@ -17,75 +18,69 @@
#include "iov.h"
+#ifdef _WIN32
+# include <windows.h>
+# include <winsock2.h>
+#else
+# include <sys/types.h>
+# include <sys/socket.h>
+#endif
+
size_t iov_from_buf(struct iovec *iov, unsigned int iov_cnt,
- const void *buf, size_t iov_off, size_t size)
+ size_t offset, const void *buf, size_t bytes)
{
- size_t iovec_off, buf_off;
+ size_t done;
unsigned int i;
-
- iovec_off = 0;
- buf_off = 0;
- for (i = 0; i < iov_cnt && size; i++) {
- if (iov_off < (iovec_off + iov[i].iov_len)) {
- size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off, size);
-
- memcpy(iov[i].iov_base + (iov_off - iovec_off), buf + buf_off, len);
-
- buf_off += len;
- iov_off += len;
- size -= len;
+ for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
+ if (offset < iov[i].iov_len) {
+ size_t len = MIN(iov[i].iov_len - offset, bytes - done);
+ memcpy(iov[i].iov_base + offset, buf + done, len);
+ done += len;
+ offset = 0;
+ } else {
+ offset -= iov[i].iov_len;
}
- iovec_off += iov[i].iov_len;
}
- return buf_off;
+ assert(offset == 0);
+ return done;
}
size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
- void *buf, size_t iov_off, size_t size)
+ size_t offset, void *buf, size_t bytes)
{
- uint8_t *ptr;
- size_t iovec_off, buf_off;
+ size_t done;
unsigned int i;
-
- ptr = buf;
- iovec_off = 0;
- buf_off = 0;
- for (i = 0; i < iov_cnt && size; i++) {
- if (iov_off < (iovec_off + iov[i].iov_len)) {
- size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off , size);
-
- memcpy(ptr + buf_off, iov[i].iov_base + (iov_off - iovec_off), len);
-
- buf_off += len;
- iov_off += len;
- size -= len;
+ for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
+ if (offset < iov[i].iov_len) {
+ size_t len = MIN(iov[i].iov_len - offset, bytes - done);
+ memcpy(buf + done, iov[i].iov_base + offset, len);
+ done += len;
+ offset = 0;
+ } else {
+ offset -= iov[i].iov_len;
}
- iovec_off += iov[i].iov_len;
}
- return buf_off;
+ assert(offset == 0);
+ return done;
}
-size_t iov_clear(const struct iovec *iov, const unsigned int iov_cnt,
- size_t iov_off, size_t size)
+size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
+ size_t offset, int fillc, size_t bytes)
{
- size_t iovec_off, buf_off;
+ size_t done;
unsigned int i;
-
- iovec_off = 0;
- buf_off = 0;
- for (i = 0; i < iov_cnt && size; i++) {
- if (iov_off < (iovec_off + iov[i].iov_len)) {
- size_t len = MIN((iovec_off + iov[i].iov_len) - iov_off , size);
-
- memset(iov[i].iov_base + (iov_off - iovec_off), 0, len);
-
- buf_off += len;
- iov_off += len;
- size -= len;
+ for (i = 0, done = 0; (offset || done < bytes) && i < iov_cnt; i++) {
+ if (offset < iov[i].iov_len) {
+ size_t len = MIN(iov[i].iov_len - offset, bytes - done);
+ memset(iov[i].iov_base + offset, fillc, len);
+ done += len;
+ offset = 0;
+ } else {
+ offset -= iov[i].iov_len;
}
- iovec_off += iov[i].iov_len;
}
- return buf_off;
+ assert(offset == 0);
+ return done;
}
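+
+/* Usage sketch for the offset-based helpers above (buffer contents and
+ * sizes are made up): with an iovec of two elements of 4 and 8 bytes,
+ *
+ *   char a[4], b[8], buf[6] = "hello";
+ *   struct iovec iov[2] = {
+ *       { .iov_base = a, .iov_len = sizeof(a) },
+ *       { .iov_base = b, .iov_len = sizeof(b) },
+ *   };
+ *   size_t n = iov_from_buf(iov, 2, 2, buf, 6);
+ *
+ * copies 6 bytes starting at byte 2 of the iovec, i.e. "he" into a[2..3]
+ * and "llo\0" into b[0..3], and returns 6.  iov_to_buf() and iov_memset()
+ * follow the same offset/bytes convention.
+ */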
size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
@@ -100,6 +95,102 @@ size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt)
return len;
}
+/* helper function for iov_send_recv() */
+static ssize_t
+do_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt, bool do_send)
+{
+#if defined CONFIG_IOVEC && defined CONFIG_POSIX
+ ssize_t ret;
+ struct msghdr msg;
+ memset(&msg, 0, sizeof(msg));
+ msg.msg_iov = iov;
+ msg.msg_iovlen = iov_cnt;
+ do {
+ ret = do_send
+ ? sendmsg(sockfd, &msg, 0)
+ : recvmsg(sockfd, &msg, 0);
+ } while (ret < 0 && errno == EINTR);
+ return ret;
+#else
+ /* else send piece-by-piece */
+ /* XXX Note: Windows has WSASend() and WSARecv() */
+ unsigned i = 0;
+ ssize_t ret = 0;
+ while (i < iov_cnt) {
+ ssize_t r = do_send
+ ? send(sockfd, iov[i].iov_base, iov[i].iov_len, 0)
+ : recv(sockfd, iov[i].iov_base, iov[i].iov_len, 0);
+ if (r > 0) {
+ ret += r;
+ } else if (!r) {
+ break;
+ } else if (errno == EINTR) {
+ continue;
+ } else {
+ /* else it is some "other" error,
+ * only return if there was no data processed. */
+ if (ret == 0) {
+ ret = -1;
+ }
+ break;
+ }
+ i++;
+ }
+ return ret;
+#endif
+}
+
+ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes,
+ bool do_send)
+{
+ ssize_t ret;
+ unsigned si, ei; /* start and end indexes */
+
+ /* Find the start position, skipping `offset' bytes:
+ * first, skip all full-sized vector elements, */
+ for (si = 0; si < iov_cnt && offset >= iov[si].iov_len; ++si) {
+ offset -= iov[si].iov_len;
+ }
+ if (offset) {
+ assert(si < iov_cnt);
+ /* second, skip `offset' bytes from the (now) first element,
+ * undo it on exit */
+ iov[si].iov_base += offset;
+ iov[si].iov_len -= offset;
+ }
+ /* Find the end position skipping `bytes' bytes: */
+ /* first, skip all full-sized elements */
+ for (ei = si; ei < iov_cnt && iov[ei].iov_len <= bytes; ++ei) {
+ bytes -= iov[ei].iov_len;
+ }
+ if (bytes) {
+ /* second, fixup the last element, and remember
+ * the length we've cut from the end of it in `bytes' */
+ size_t tail;
+ assert(ei < iov_cnt);
+ assert(iov[ei].iov_len > bytes);
+ tail = iov[ei].iov_len - bytes;
+ iov[ei].iov_len = bytes;
+ bytes = tail; /* bytes is now equal to the tail size */
+ ++ei;
+ }
+
+ ret = do_send_recv(sockfd, iov + si, ei - si, do_send);
+
+ /* Undo the changes above */
+ if (offset) {
+ iov[si].iov_base -= offset;
+ iov[si].iov_len += offset;
+ }
+ if (bytes) {
+ iov[ei-1].iov_len += bytes;
+ }
+
+ return ret;
+}
+
+
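The narrowing logic in iov_send_recv() above (skip `offset' bytes off the front, trim the tail beyond `bytes', call do_send_recv(), then undo both adjustments) is the core of the new partial-iovec support. The following standalone sketch restates that idea outside the diff; the struct and function names (iov_window, iov_narrow) are invented for illustration and are not part of QEMU.

/* Sketch only: temporarily narrow an iovec to the byte window
 * [offset, offset + bytes), the way iov_send_recv() does before handing
 * the adjusted slice to do_send_recv().  The caller must undo the two
 * adjustments afterwards, exactly as the real function does. */
#include <assert.h>
#include <stddef.h>
#include <sys/uio.h>

struct iov_window {
    unsigned first;   /* index of the first element in the window */
    unsigned count;   /* number of elements in the window */
    size_t   head;    /* bytes trimmed from the front of iov[first] */
    size_t   tail;    /* bytes trimmed from the end of the last element */
};

static struct iov_window iov_narrow(struct iovec *iov, unsigned iov_cnt,
                                    size_t offset, size_t bytes)
{
    struct iov_window w = { 0, 0, 0, 0 };
    unsigned si, ei;

    /* Skip whole elements covered by `offset'. */
    for (si = 0; si < iov_cnt && offset >= iov[si].iov_len; ++si) {
        offset -= iov[si].iov_len;
    }
    if (offset) {
        assert(si < iov_cnt);
        iov[si].iov_base = (char *)iov[si].iov_base + offset;
        iov[si].iov_len -= offset;
        w.head = offset;
    }
    /* Keep whole elements covered by `bytes', then trim the last one. */
    for (ei = si; ei < iov_cnt && iov[ei].iov_len <= bytes; ++ei) {
        bytes -= iov[ei].iov_len;
    }
    if (bytes) {
        assert(ei < iov_cnt && iov[ei].iov_len > bytes);
        w.tail = iov[ei].iov_len - bytes;
        iov[ei].iov_len = bytes;
        ++ei;
    }
    w.first = si;
    w.count = ei - si;
    return w;
}

/* Undo afterwards:
 *   iov[w.first].iov_base = (char *)iov[w.first].iov_base - w.head;
 *   iov[w.first].iov_len += w.head;
 *   if (w.tail) iov[w.first + w.count - 1].iov_len += w.tail;
 * The undo step is essential because the caller's iovec is modified in place. */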
void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
FILE *fp, const char *prefix, size_t limit)
{
diff --git a/iov.h b/iov.h
index 94d2f78284..381f37a546 100644
--- a/iov.h
+++ b/iov.h
@@ -1,10 +1,11 @@
/*
- * Helpers for getting linearized buffers from iov / filling buffers into iovs
+ * Helpers for using (partial) iovecs.
*
* Copyright (C) 2010 Red Hat, Inc.
*
* Author(s):
* Amit Shah <amit.shah@redhat.com>
+ * Michael Tokarev <mjt@tls.msk.ru>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
@@ -12,12 +13,76 @@
#include "qemu-common.h"
+/**
+ * Count and return the data size, in bytes, of the iovec
+ * starting at `iov' and containing `iov_cnt' elements.
+ */
+size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
+
+/**
+ * Copy between a single contiguous buffer and a scatter-gather vector of
+ * buffers (iovec), like memcpy() between two contiguous memory regions.
+ * Data in the contiguous buffer starting at address `buf' and
+ * `bytes' bytes long will be copied to/from the iovec `iov' with
+ * `iov_cnt' elements, starting at byte position `offset'
+ * within the iovec. If the iovec does not contain enough space,
+ * only part of the data will be copied, up to the end of the iovec.
+ * The number of bytes actually copied is returned, which is
+ * min(bytes, iov_size(iov) - offset).
+ * `offset' must point inside the iovec.
+ * It is okay to use a very large value for `bytes' since we are
+ * limited by the size of the iovec anyway, provided that the
+ * buffer pointed to by `buf' has enough space. One possible
+ * such "large" value is -1 (since size_t is unsigned),
+ * so specifying -1 as `bytes' means 'up to the end of the iovec'.
+ */
size_t iov_from_buf(struct iovec *iov, unsigned int iov_cnt,
- const void *buf, size_t iov_off, size_t size);
+ size_t offset, const void *buf, size_t bytes);
size_t iov_to_buf(const struct iovec *iov, const unsigned int iov_cnt,
- void *buf, size_t iov_off, size_t size);
-size_t iov_size(const struct iovec *iov, const unsigned int iov_cnt);
-size_t iov_clear(const struct iovec *iov, const unsigned int iov_cnt,
- size_t iov_off, size_t size);
+ size_t offset, void *buf, size_t bytes);
+
+/**
+ * Fill the data bytes of the iovec `iov' of `iov_cnt' elements,
+ * starting at byte offset `offset', with the value `fillc', repeated
+ * `bytes' times. `offset' must point inside the iovec.
+ * If `bytes' is larger than the space available, only the portion of
+ * the iovec up to its end will be filled with the specified value.
+ * The function returns the actual number of bytes processed, which is
+ * min(bytes, iov_size(iov) - offset).
+ * Again, it is okay to use a large value for `bytes' to mean "up to the end".
+ */
+size_t iov_memset(const struct iovec *iov, const unsigned int iov_cnt,
+ size_t offset, int fillc, size_t bytes);
+
+/*
+ * Send/recv data from/to iovec buffers directly
+ *
+ * `offset' bytes at the beginning of the iovec buffer are skipped and
+ * the next `bytes' bytes are used, which must lie within the iovec's data.
+ *
+ * r = iov_send_recv(sockfd, iov, iovcnt, offset, bytes, true);
+ *
+ * is logically equivalent to
+ *
+ * char *buf = malloc(bytes);
+ * iov_to_buf(iov, iovcnt, offset, buf, bytes);
+ * r = send(sockfd, buf, bytes, 0);
+ * free(buf);
+ *
+ * For iov_send_recv() the _whole_ area being sent or received
+ * must lie within the iovec, not only its beginning.
+ */
+ssize_t iov_send_recv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes, bool do_send);
+#define iov_recv(sockfd, iov, iov_cnt, offset, bytes) \
+ iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define iov_send(sockfd, iov, iov_cnt, offset, bytes) \
+ iov_send_recv(sockfd, iov, iov_cnt, offset, bytes, true)
+
+/**
+ * Produce a text hexdump of the iovec `iov' with `iov_cnt' elements,
+ * writing to file `fp', prefixing each line with `prefix' and processing
+ * not more than `limit' data bytes.
+ */
void iov_hexdump(const struct iovec *iov, const unsigned int iov_cnt,
FILE *fp, const char *prefix, size_t limit);
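A small caller-side sketch of the reordered prototypes documented above may help when reviewing conversions elsewhere in this series; the example function below is assumed, not taken from the patch, and relies only on the signatures declared in iov.h as changed here.

/* Sketch: round-tripping data through a two-element iovec with the new
 * offset-first argument order. */
#include <sys/uio.h>
#include "iov.h"   /* iov_from_buf, iov_to_buf, iov_memset, iov_size */

void iov_roundtrip_example(void)
{
    char a[4], b[6];
    struct iovec v[2] = {
        { .iov_base = a, .iov_len = sizeof(a) },
        { .iov_base = b, .iov_len = sizeof(b) },
    };
    char out[8];

    /* Fill the whole vector with zeroes: bytes = -1 means "to the end". */
    iov_memset(v, 2, 0, 0, -1);

    /* Copy 8 bytes in at offset 2 (crossing the a[]/b[] boundary), then
     * read the same window back.  Both calls return
     * min(bytes, iov_size(v) - offset) = min(8, 10 - 2) = 8. */
    iov_from_buf(v, 2, 2, "hello wo", 8);
    iov_to_buf(v, 2, 2, out, 8);

    /* out[] now holds the eight bytes "hello wo" (no terminating NUL). */
}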
diff --git a/kvm-all.c b/kvm-all.c
index f8e432841f..2148b20bdb 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -32,6 +32,7 @@
#include "bswap.h"
#include "memory.h"
#include "exec-memory.h"
+#include "event_notifier.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
@@ -800,23 +801,29 @@ static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
static void kvm_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data,
+ EventNotifier *e)
{
if (section->address_space == get_system_memory()) {
- kvm_mem_ioeventfd_add(section, match_data, data, fd);
+ kvm_mem_ioeventfd_add(section, match_data, data,
+ event_notifier_get_fd(e));
} else {
- kvm_io_ioeventfd_add(section, match_data, data, fd);
+ kvm_io_ioeventfd_add(section, match_data, data,
+ event_notifier_get_fd(e));
}
}
static void kvm_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data,
+ EventNotifier *e)
{
if (section->address_space == get_system_memory()) {
- kvm_mem_ioeventfd_del(section, match_data, data, fd);
+ kvm_mem_ioeventfd_del(section, match_data, data,
+ event_notifier_get_fd(e));
} else {
- kvm_io_ioeventfd_del(section, match_data, data, fd);
+ kvm_io_ioeventfd_del(section, match_data, data,
+ event_notifier_get_fd(e));
}
}
@@ -1142,7 +1149,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
- abort();
+ return -ENOSYS;
}
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
@@ -1156,11 +1163,21 @@ int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq)
return kvm_irqchip_assign_irqfd(s, fd, virq, true);
}
+int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq)
+{
+ return kvm_irqchip_add_irqfd(s, event_notifier_get_fd(n), virq);
+}
+
int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq)
{
return kvm_irqchip_assign_irqfd(s, fd, virq, false);
}
+int kvm_irqchip_remove_irq_notifier(KVMState *s, EventNotifier *n, int virq)
+{
+ return kvm_irqchip_remove_irqfd(s, event_notifier_get_fd(n), virq);
+}
+
static int kvm_irqchip_create(KVMState *s)
{
QemuOptsList *list = qemu_find_opts("machine");
@@ -1655,6 +1672,19 @@ int kvm_allows_irq0_override(void)
return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}
+void *kvm_vmalloc(ram_addr_t size)
+{
+#ifdef TARGET_S390X
+ void *mem;
+
+ mem = kvm_arch_vmalloc(size);
+ if (mem) {
+ return mem;
+ }
+#endif
+ return qemu_vmalloc(size);
+}
+
void kvm_setup_guest_memory(void *start, size_t size)
{
if (!kvm_has_sync_mmu()) {
diff --git a/kvm-stub.c b/kvm-stub.c
index ec9a36454d..d23b11c020 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -147,7 +147,17 @@ int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq)
return -ENOSYS;
}
+int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq)
+{
+ return -ENOSYS;
+}
+
int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq)
{
return -ENOSYS;
}
+
+int kvm_irqchip_remove_irq_notifier(KVMState *s, EventNotifier *n, int virq)
+{
+ return -ENOSYS;
+}
diff --git a/kvm.h b/kvm.h
index 9c7b0ea6ae..2617dd5acd 100644
--- a/kvm.h
+++ b/kvm.h
@@ -70,6 +70,8 @@ int kvm_init_vcpu(CPUArchState *env);
int kvm_cpu_exec(CPUArchState *env);
#if !defined(CONFIG_USER_ONLY)
+void *kvm_vmalloc(ram_addr_t size);
+void *kvm_arch_vmalloc(ram_addr_t size);
void kvm_setup_guest_memory(void *start, size_t size);
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
@@ -218,4 +220,6 @@ void kvm_irqchip_release_virq(KVMState *s, int virq);
int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq);
int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq);
+int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq);
+int kvm_irqchip_remove_irq_notifier(KVMState *s, EventNotifier *n, int virq);
#endif
diff --git a/linux-aio.c b/linux-aio.c
index fa0fbf34aa..ce9b5d4be8 100644
--- a/linux-aio.c
+++ b/linux-aio.c
@@ -63,8 +63,8 @@ static void qemu_laio_process_completion(struct qemu_laio_state *s,
} else if (ret >= 0) {
/* Short reads mean EOF, pad with zeros. */
if (laiocb->is_read) {
- qemu_iovec_memset_skip(laiocb->qiov, 0,
- laiocb->qiov->size - ret, ret);
+ qemu_iovec_memset(laiocb->qiov, ret, 0,
+ laiocb->qiov->size - ret);
} else {
ret = -EINVAL;
}
diff --git a/linux-user/alpha/syscall_nr.h b/linux-user/alpha/syscall_nr.h
index f6284db22f..ac2b6e2c65 100644
--- a/linux-user/alpha/syscall_nr.h
+++ b/linux-user/alpha/syscall_nr.h
@@ -46,7 +46,7 @@
#define TARGET_NR_open 45
#define TARGET_NR_osf_old_sigaction 46 /* not implemented */
#define TARGET_NR_getxgid 47
-#define TARGET_NR_osf_sigprocmask 48
+#define TARGET_NR_sigprocmask 48
#define TARGET_NR_osf_getlogin 49 /* not implemented */
#define TARGET_NR_osf_setlogin 50 /* not implemented */
#define TARGET_NR_acct 51
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index f3b1552e9e..6b622d4ff9 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -787,6 +787,47 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUMBState *env
#endif /* TARGET_MICROBLAZE */
+#ifdef TARGET_OPENRISC
+
+#define ELF_START_MMAP 0x08000000
+
+#define elf_check_arch(x) ((x) == EM_OPENRISC)
+
+#define ELF_ARCH EM_OPENRISC
+#define ELF_CLASS ELFCLASS32
+#define ELF_DATA ELFDATA2MSB
+
+static inline void init_thread(struct target_pt_regs *regs,
+ struct image_info *infop)
+{
+ regs->pc = infop->entry;
+ regs->gpr[1] = infop->start_stack;
+}
+
+#define USE_ELF_CORE_DUMP
+#define ELF_EXEC_PAGESIZE 8192
+
+/* See linux kernel arch/openrisc/include/asm/elf.h. */
+#define ELF_NREG 34 /* gprs and pc, sr */
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+static void elf_core_copy_regs(target_elf_gregset_t *regs,
+ const CPUOpenRISCState *env)
+{
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ (*regs)[i] = tswapl(env->gpr[i]);
+ }
+
+ (*regs)[32] = tswapl(env->pc);
+ (*regs)[33] = tswapl(env->sr);
+}
+#define ELF_HWCAP 0
+#define ELF_PLATFORM NULL
+
+#endif /* TARGET_OPENRISC */
+
#ifdef TARGET_SH4
#define ELF_START_MMAP 0x80000000
diff --git a/linux-user/main.c b/linux-user/main.c
index d0e0e4fc6a..53714de0d4 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -2306,6 +2306,93 @@ done_syscall:
}
#endif
+#ifdef TARGET_OPENRISC
+
+void cpu_loop(CPUOpenRISCState *env)
+{
+ int trapnr, gdbsig;
+
+ for (;;) {
+ trapnr = cpu_exec(env);
+ gdbsig = 0;
+
+ switch (trapnr) {
+ case EXCP_RESET:
+ qemu_log("\nReset request, exit, pc is %#x\n", env->pc);
+ exit(1);
+ break;
+ case EXCP_BUSERR:
+ qemu_log("\nBus error, exit, pc is %#x\n", env->pc);
+ gdbsig = SIGBUS;
+ break;
+ case EXCP_DPF:
+ case EXCP_IPF:
+ cpu_dump_state(env, stderr, fprintf, 0);
+ gdbsig = TARGET_SIGSEGV;
+ break;
+ case EXCP_TICK:
+ qemu_log("\nTick timer interrupt, pc is %#x\n", env->pc);
+ break;
+ case EXCP_ALIGN:
+ qemu_log("\nAlignment pc is %#x\n", env->pc);
+ gdbsig = SIGBUS;
+ break;
+ case EXCP_ILLEGAL:
+ qemu_log("\nIllegal instruction, pc is %#x\n", env->pc);
+ gdbsig = SIGILL;
+ break;
+ case EXCP_INT:
+ qemu_log("\nExternal interrupt, pc is %#x\n", env->pc);
+ break;
+ case EXCP_DTLBMISS:
+ case EXCP_ITLBMISS:
+ qemu_log("\nTLB miss\n");
+ break;
+ case EXCP_RANGE:
+ qemu_log("\nRange\n");
+ gdbsig = SIGSEGV;
+ break;
+ case EXCP_SYSCALL:
+ env->pc += 4; /* 0xc00; */
+ env->gpr[11] = do_syscall(env,
+ env->gpr[11], /* return value */
+ env->gpr[3], /* r3 - r7 are params */
+ env->gpr[4],
+ env->gpr[5],
+ env->gpr[6],
+ env->gpr[7],
+ env->gpr[8], 0, 0);
+ break;
+ case EXCP_FPE:
+ qemu_log("\nFloating point error\n");
+ break;
+ case EXCP_TRAP:
+ qemu_log("\nTrap\n");
+ gdbsig = SIGTRAP;
+ break;
+ case EXCP_NR:
+ qemu_log("\nNR\n");
+ break;
+ default:
+ qemu_log("\nqemu: unhandled CPU exception %#x - aborting\n",
+ trapnr);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ gdbsig = TARGET_SIGILL;
+ break;
+ }
+ if (gdbsig) {
+ gdb_handlesig(env, gdbsig);
+ if (gdbsig != TARGET_SIGTRAP) {
+ exit(1);
+ }
+ }
+
+ process_pending_signals(env);
+ }
+}
+
+#endif /* TARGET_OPENRISC */
+
#ifdef TARGET_SH4
void cpu_loop(CPUSH4State *env)
{
@@ -2759,13 +2846,11 @@ void cpu_loop(CPUAlphaState *env)
break;
}
/* Syscall writes 0 to V0 to bypass error check, similar
- to how this is handled internal to Linux kernel. */
- if (env->ir[IR_V0] == 0) {
- env->ir[IR_V0] = sysret;
- } else {
- env->ir[IR_V0] = (sysret < 0 ? -sysret : sysret);
- env->ir[IR_A3] = (sysret < 0);
- }
+ to how this is handled internally in the Linux kernel.
+ (Ab)use trapnr temporarily as a boolean indicating an error. */
+ trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
+ env->ir[IR_V0] = (trapnr ? -sysret : sysret);
+ env->ir[IR_A3] = trapnr;
break;
case 0x86:
/* IMB */
@@ -2834,6 +2919,9 @@ void cpu_loop(CPUAlphaState *env)
case EXCP_STQ_C:
do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
break;
+ case EXCP_INTERRUPT:
+ /* Just indicate that signals should be handled asap. */
+ break;
default:
printf ("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(env, stderr, fprintf, 0);
@@ -3053,7 +3141,7 @@ static void handle_arg_uname(const char *arg)
static void handle_arg_cpu(const char *arg)
{
cpu_model = strdup(arg);
- if (cpu_model == NULL || strcmp(cpu_model, "?") == 0) {
+ if (cpu_model == NULL || is_help_option(cpu_model)) {
/* XXX: implement xxx_cpu_list for targets that still miss it */
#if defined(cpu_list_id)
cpu_list_id(stdout, &fprintf, "");
@@ -3144,7 +3232,7 @@ struct qemu_argument arg_table[] = {
{"s", "QEMU_STACK_SIZE", true, handle_arg_stack_size,
"size", "set the stack size to 'size' bytes"},
{"cpu", "QEMU_CPU", true, handle_arg_cpu,
- "model", "select CPU (-cpu ? for list)"},
+ "model", "select CPU (-cpu help for list)"},
{"E", "QEMU_SET_ENV", true, handle_arg_set_env,
"var=value", "sets targets environment variable (see below)"},
{"U", "QEMU_UNSET_ENV", true, handle_arg_unset_env,
@@ -3386,6 +3474,8 @@ int main(int argc, char **argv, char **envp)
#else
cpu_model = "24Kf";
#endif
+#elif defined TARGET_OPENRISC
+ cpu_model = "or1200";
#elif defined(TARGET_PPC)
#ifdef TARGET_PPC64
cpu_model = "970fx";
@@ -3788,6 +3878,17 @@ int main(int argc, char **argv, char **envp)
env->hflags |= MIPS_HFLAG_M16;
}
}
+#elif defined(TARGET_OPENRISC)
+ {
+ int i;
+
+ for (i = 0; i < 32; i++) {
+ env->gpr[i] = regs->gpr[i];
+ }
+
+ env->sr = regs->sr;
+ env->pc = regs->pc;
+ }
#elif defined(TARGET_SH4)
{
int i;
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index d9468fea90..b412e3fe0a 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -382,7 +382,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
int flags, int fd, abi_ulong offset)
{
abi_ulong ret, end, real_start, real_end, retaddr, host_offset, host_len;
- unsigned long host_start;
mmap_lock();
#ifdef DEBUG_MMAP
@@ -421,6 +420,19 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
if (len == 0)
goto the_end;
real_start = start & qemu_host_page_mask;
+ host_offset = offset & qemu_host_page_mask;
+
+ /* If the user is asking for the kernel to find a location, do that
+ before we truncate the length for mapping files below. */
+ if (!(flags & MAP_FIXED)) {
+ host_len = len + offset - host_offset;
+ host_len = HOST_PAGE_ALIGN(host_len);
+ start = mmap_find_vma(real_start, host_len);
+ if (start == (abi_ulong)-1) {
+ errno = ENOMEM;
+ goto fail;
+ }
+ }
/* When mapping files into a memory area larger than the file, accesses
to pages beyond the file size will cause a SIGBUS.
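The host_offset/host_len arithmetic introduced above is easier to follow in isolation. A minimal sketch of the same page-rounding computation, using a stand-in 4 KiB page size instead of QEMU's qemu_host_page_mask and HOST_PAGE_ALIGN():

/* Sketch only: how the hunk above derives the host mapping window from a
 * guest (len, offset) request.  4096 is a stand-in host page size. */
#define HOST_PAGE_SIZE     4096UL
#define HOST_PAGE_MASK     (~(HOST_PAGE_SIZE - 1))
#define HOST_PAGE_ALIGN(x) (((x) + HOST_PAGE_SIZE - 1) & HOST_PAGE_MASK)

static void host_window(unsigned long len, unsigned long offset,
                        unsigned long *host_offset, unsigned long *host_len)
{
    /* File offset rounded down to a host page boundary... */
    *host_offset = offset & HOST_PAGE_MASK;
    /* ...so the host mapping must grow by the bytes rounded away,
     * then be rounded up to whole host pages. */
    *host_len = HOST_PAGE_ALIGN(len + offset - *host_offset);
}

The point of moving this computation (and the mmap_find_vma() call) earlier is that the free region must be sized for this full host-page-aligned window, before the file-backed length is truncated further down in target_mmap().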
@@ -453,27 +465,23 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
}
if (!(flags & MAP_FIXED)) {
- abi_ulong mmap_start;
+ unsigned long host_start;
void *p;
- host_offset = offset & qemu_host_page_mask;
+
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
- mmap_start = mmap_find_vma(real_start, host_len);
- if (mmap_start == (abi_ulong)-1) {
- errno = ENOMEM;
- goto fail;
- }
+
/* Note: we prefer to control the mapping address. It is
especially important if qemu_host_page_size >
qemu_real_host_page_size */
- p = mmap(g2h(mmap_start),
- host_len, prot, flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
+ p = mmap(g2h(start), host_len, prot,
+ flags | MAP_FIXED | MAP_ANONYMOUS, -1, 0);
if (p == MAP_FAILED)
goto fail;
/* update start so that it points to the file position at 'offset' */
host_start = (unsigned long)p;
if (!(flags & MAP_ANONYMOUS)) {
- p = mmap(g2h(mmap_start), len, prot,
+ p = mmap(g2h(start), len, prot,
flags | MAP_FIXED, fd, host_offset);
host_start += offset - host_offset;
}
diff --git a/linux-user/openrisc/syscall.h b/linux-user/openrisc/syscall.h
new file mode 100644
index 0000000000..bdbb577fc3
--- /dev/null
+++ b/linux-user/openrisc/syscall.h
@@ -0,0 +1,24 @@
+struct target_pt_regs {
+ union {
+ struct {
+ /* Named registers */
+ uint32_t sr; /* Stored in place of r0 */
+ target_ulong sp; /* r1 */
+ };
+ struct {
+ /* Old style */
+ target_ulong offset[2];
+ target_ulong gprs[30];
+ };
+ struct {
+ /* New style */
+ target_ulong gpr[32];
+ };
+ };
+ target_ulong pc;
+ target_ulong orig_gpr11; /* For restarting system calls */
+ uint32_t syscallno; /* Syscall number (used by strace) */
+ target_ulong dummy; /* Cheap alignment fix */
+};
+
+#define UNAME_MACHINE "openrisc"
diff --git a/linux-user/openrisc/syscall_nr.h b/linux-user/openrisc/syscall_nr.h
new file mode 100644
index 0000000000..f4ac91ef71
--- /dev/null
+++ b/linux-user/openrisc/syscall_nr.h
@@ -0,0 +1,506 @@
+#define TARGET_NR_io_setup 0
+#define TARGET_NR_io_destroy 1
+#define TARGET_NR_io_submit 2
+#define TARGET_NR_io_cancel 3
+#define TARGET_NR_io_getevents 4
+
+/* fs/xattr.c */
+#define TARGET_NR_setxattr 5
+#define TARGET_NR_lsetxattr 6
+#define TARGET_NR_fsetxattr 7
+#define TARGET_NR_getxattr 8
+#define TARGET_NR_lgetxattr 9
+#define TARGET_NR_fgetxattr 10
+#define TARGET_NR_listxattr 11
+#define TARGET_NR_llistxattr 12
+#define TARGET_NR_flistxattr 13
+#define TARGET_NR_removexattr 14
+#define TARGET_NR_lremovexattr 15
+#define TARGET_NR_fremovexattr 16
+
+/* fs/dcache.c */
+#define TARGET_NR_getcwd 17
+
+/* fs/cookies.c */
+#define TARGET_NR_lookup_dcookie 18
+
+/* fs/eventfd.c */
+#define TARGET_NR_eventfd2 19
+
+/* fs/eventpoll.c */
+#define TARGET_NR_epoll_create1 20
+#define TARGET_NR_epoll_ctl 21
+#define TARGET_NR_epoll_pwait 22
+
+/* fs/fcntl.c */
+#define TARGET_NR_dup 23
+#define TARGET_NR_dup3 24
+#define TARGET_NR_3264_fcntl 25
+
+/* fs/inotify_user.c */
+#define TARGET_NR_inotify_init1 26
+#define TARGET_NR_inotify_add_watch 27
+#define TARGET_NR_inotify_rm_watch 28
+
+/* fs/ioctl.c */
+#define TARGET_NR_ioctl 29
+
+/* fs/ioprio.c */
+#define TARGET_NR_ioprio_set 30
+#define TARGET_NR_ioprio_get 31
+
+/* fs/locks.c */
+#define TARGET_NR_flock 32
+
+/* fs/namei.c */
+#define TARGET_NR_mknodat 33
+#define TARGET_NR_mkdirat 34
+#define TARGET_NR_unlinkat 35
+#define TARGET_NR_symlinkat 36
+#define TARGET_NR_linkat 37
+#define TARGET_NR_renameat 38
+
+/* fs/namespace.c */
+#define TARGET_NR_umount2 39
+#define TARGET_NR_mount 40
+#define TARGET_NR_pivot_root 41
+
+/* fs/nfsctl.c */
+#define TARGET_NR_nfsservctl 42
+
+/* fs/open.c */
+#define TARGET_NR_3264_statfs 43
+#define TARGET_NR_3264_fstatfs 44
+#define TARGET_NR_3264_truncate 45
+#define TARGET_NR_3264_ftruncate 46
+
+#define TARGET_NR_fallocate 47
+#define TARGET_NR_faccessat 48
+#define TARGET_NR_chdir 49
+#define TARGET_NR_fchdir 50
+#define TARGET_NR_chroot 51
+#define TARGET_NR_fchmod 52
+#define TARGET_NR_fchmodat 53
+#define TARGET_NR_fchownat 54
+#define TARGET_NR_fchown 55
+#define TARGET_NR_openat 56
+#define TARGET_NR_close 57
+#define TARGET_NR_vhangup 58
+
+/* fs/pipe.c */
+#define TARGET_NR_pipe2 59
+
+/* fs/quota.c */
+#define TARGET_NR_quotactl 60
+
+/* fs/readdir.c */
+#define TARGET_NR_getdents64 61
+
+/* fs/read_write.c */
+#define TARGET_NR_3264_lseek 62
+#define TARGET_NR_read 63
+#define TARGET_NR_write 64
+#define TARGET_NR_readv 65
+#define TARGET_NR_writev 66
+#define TARGET_NR_pread64 67
+#define TARGET_NR_pwrite64 68
+#define TARGET_NR_preadv 69
+#define TARGET_NR_pwritev 70
+
+/* fs/sendfile.c */
+#define TARGET_NR_3264_sendfile 71
+
+/* fs/select.c */
+#define TARGET_NR_pselect6 72
+#define TARGET_NR_ppoll 73
+
+/* fs/signalfd.c */
+#define TARGET_NR_signalfd4 74
+
+/* fs/splice.c */
+#define TARGET_NR_vmsplice 75
+#define TARGET_NR_splice 76
+#define TARGET_NR_tee 77
+
+/* fs/stat.c */
+#define TARGET_NR_readlinkat 78
+#define TARGET_NR_3264_fstatat 79
+#define TARGET_NR_3264_fstat 80
+
+/* fs/sync.c */
+#define TARGET_NR_sync 81
+#define TARGET_NR_fsync 82
+#define TARGET_NR_fdatasync 83
+
+#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
+#define TARGET_NR_sync_file_range2 84
+#else
+#define TARGET_NR_sync_file_range 84
+#endif
+
+/* fs/timerfd.c */
+#define TARGET_NR_timerfd_create 85
+#define TARGET_NR_timerfd_settime 86
+#define TARGET_NR_timerfd_gettime 87
+
+/* fs/utimes.c */
+#define TARGET_NR_utimensat 88
+
+/* kernel/acct.c */
+#define TARGET_NR_acct 89
+
+/* kernel/capability.c */
+#define TARGET_NR_capget 90
+#define TARGET_NR_capset 91
+
+/* kernel/exec_domain.c */
+#define TARGET_NR_personality 92
+
+/* kernel/exit.c */
+#define TARGET_NR_exit 93
+#define TARGET_NR_exit_group 94
+#define TARGET_NR_waitid 95
+
+/* kernel/fork.c */
+#define TARGET_NR_set_tid_address 96
+#define TARGET_NR_unshare 97
+
+/* kernel/futex.c */
+#define TARGET_NR_futex 98
+#define TARGET_NR_set_robust_list 99
+#define TARGET_NR_get_robust_list 100
+
+/* kernel/hrtimer.c */
+#define TARGET_NR_nanosleep 101
+
+/* kernel/itimer.c */
+#define TARGET_NR_getitimer 102
+#define TARGET_NR_setitimer 103
+
+/* kernel/kexec.c */
+#define TARGET_NR_kexec_load 104
+
+/* kernel/module.c */
+#define TARGET_NR_init_module 105
+#define TARGET_NR_delete_module 106
+
+/* kernel/posix-timers.c */
+#define TARGET_NR_timer_create 107
+#define TARGET_NR_timer_gettime 108
+#define TARGET_NR_timer_getoverrun 109
+#define TARGET_NR_timer_settime 110
+#define TARGET_NR_timer_delete 111
+#define TARGET_NR_clock_settime 112
+#define TARGET_NR_clock_gettime 113
+#define TARGET_NR_clock_getres 114
+#define TARGET_NR_clock_nanosleep 115
+
+/* kernel/printk.c */
+#define TARGET_NR_syslog 116
+
+/* kernel/ptrace.c */
+#define TARGET_NR_ptrace 117
+
+/* kernel/sched.c */
+#define TARGET_NR_sched_setparam 118
+#define TARGET_NR_sched_setscheduler 119
+#define TARGET_NR_sched_getscheduler 120
+#define TARGET_NR_sched_getparam 121
+#define TARGET_NR_sched_setaffinity 122
+#define TARGET_NR_sched_getaffinity 123
+#define TARGET_NR_sched_yield 124
+#define TARGET_NR_sched_get_priority_max 125
+#define TARGET_NR_sched_get_priority_min 126
+#define TARGET_NR_sched_rr_get_interval 127
+
+/* kernel/signal.c */
+#define TARGET_NR_restart_syscall 128
+#define TARGET_NR_kill 129
+#define TARGET_NR_tkill 130
+#define TARGET_NR_tgkill 131
+#define TARGET_NR_sigaltstack 132
+#define TARGET_NR_rt_sigsuspend 133
+#define TARGET_NR_rt_sigaction 134
+#define TARGET_NR_rt_sigprocmask 135
+#define TARGET_NR_rt_sigpending 136
+#define TARGET_NR_rt_sigtimedwait 137
+#define TARGET_NR_rt_sigqueueinfo 138
+#define TARGET_NR_rt_sigreturn 139
+
+/* kernel/sys.c */
+#define TARGET_NR_setpriority 140
+#define TARGET_NR_getpriority 141
+#define TARGET_NR_reboot 142
+#define TARGET_NR_setregid 143
+#define TARGET_NR_setgid 144
+#define TARGET_NR_setreuid 145
+#define TARGET_NR_setuid 146
+#define TARGET_NR_setresuid 147
+#define TARGET_NR_getresuid 148
+#define TARGET_NR_setresgid 149
+#define TARGET_NR_getresgid 150
+#define TARGET_NR_setfsuid 151
+#define TARGET_NR_setfsgid 152
+#define TARGET_NR_times 153
+#define TARGET_NR_setpgid 154
+#define TARGET_NR_getpgid 155
+#define TARGET_NR_getsid 156
+#define TARGET_NR_setsid 157
+#define TARGET_NR_getgroups 158
+#define TARGET_NR_setgroups 159
+#define TARGET_NR_uname 160
+#define TARGET_NR_sethostname 161
+#define TARGET_NR_setdomainname 162
+#define TARGET_NR_getrlimit 163
+#define TARGET_NR_setrlimit 164
+#define TARGET_NR_getrusage 165
+#define TARGET_NR_umask 166
+#define TARGET_NR_prctl 167
+#define TARGET_NR_getcpu 168
+
+/* kernel/time.c */
+#define TARGET_NR_gettimeofday 169
+#define TARGET_NR_settimeofday 170
+#define TARGET_NR_adjtimex 171
+
+/* kernel/timer.c */
+#define TARGET_NR_getpid 172
+#define TARGET_NR_getppid 173
+#define TARGET_NR_getuid 174
+#define TARGET_NR_geteuid 175
+#define TARGET_NR_getgid 176
+#define TARGET_NR_getegid 177
+#define TARGET_NR_gettid 178
+#define TARGET_NR_sysinfo 179
+
+/* ipc/mqueue.c */
+#define TARGET_NR_mq_open 180
+#define TARGET_NR_mq_unlink 181
+#define TARGET_NR_mq_timedsend 182
+#define TARGET_NR_mq_timedreceive 183
+#define TARGET_NR_mq_notify 184
+#define TARGET_NR_mq_getsetattr 185
+
+/* ipc/msg.c */
+#define TARGET_NR_msgget 186
+#define TARGET_NR_msgctl 187
+#define TARGET_NR_msgrcv 188
+#define TARGET_NR_msgsnd 189
+
+/* ipc/sem.c */
+#define TARGET_NR_semget 190
+#define TARGET_NR_semctl 191
+#define TARGET_NR_semtimedop 192
+#define TARGET_NR_semop 193
+
+/* ipc/shm.c */
+#define TARGET_NR_shmget 194
+#define TARGET_NR_shmctl 195
+#define TARGET_NR_shmat 196
+#define TARGET_NR_shmdt 197
+
+/* net/socket.c */
+#define TARGET_NR_socket 198
+#define TARGET_NR_socketpair 199
+#define TARGET_NR_bind 200
+#define TARGET_NR_listen 201
+#define TARGET_NR_accept 202
+#define TARGET_NR_connect 203
+#define TARGET_NR_getsockname 204
+#define TARGET_NR_getpeername 205
+#define TARGET_NR_sendto 206
+#define TARGET_NR_recvfrom 207
+#define TARGET_NR_setsockopt 208
+#define TARGET_NR_getsockopt 209
+#define TARGET_NR_shutdown 210
+#define TARGET_NR_sendmsg 211
+#define TARGET_NR_recvmsg 212
+
+/* mm/filemap.c */
+#define TARGET_NR_readahead 213
+
+/* mm/nommu.c, also with MMU */
+#define TARGET_NR_brk 214
+#define TARGET_NR_munmap 215
+#define TARGET_NR_mremap 216
+
+/* security/keys/keyctl.c */
+#define TARGET_NR_add_key 217
+#define TARGET_NR_request_key 218
+#define TARGET_NR_keyctl 219
+
+/* arch/example/kernel/sys_example.c */
+#define TARGET_NR_clone 220
+#define TARGET_NR_execve 221
+
+#define TARGET_NR_3264_mmap 222
+/* mm/fadvise.c */
+#define TARGET_NR_3264_fadvise64 223
+
+/* mm/, CONFIG_MMU only */
+#ifndef __ARCH_NOMMU
+#define TARGET_NR_swapon 224
+#define TARGET_NR_swapoff 225
+#define TARGET_NR_mprotect 226
+#define TARGET_NR_msync 227
+#define TARGET_NR_mlock 228
+#define TARGET_NR_munlock 229
+#define TARGET_NR_mlockall 230
+#define TARGET_NR_munlockall 231
+#define TARGET_NR_mincore 232
+#define TARGET_NR_madvise 233
+#define TARGET_NR_remap_file_pages 234
+#define TARGET_NR_mbind 235
+#define TARGET_NR_get_mempolicy 236
+#define TARGET_NR_set_mempolicy 237
+#define TARGET_NR_migrate_pages 238
+#define TARGET_NR_move_pages 239
+#endif
+
+#define TARGET_NR_rt_tgsigqueueinfo 240
+#define TARGET_NR_perf_event_open 241
+#define TARGET_NR_accept4 242
+#define TARGET_NR_recvmmsg 243
+
+/*
+ * Architectures may provide up to 16 syscalls of their own
+ * starting with this value.
+ */
+#define TARGET_NR_arch_specific_syscall 244
+
+#define TARGET_NR_wait4 260
+#define TARGET_NR_prlimit64 261
+#define TARGET_NR_fanotify_init 262
+#define TARGET_NR_fanotify_mark 263
+#define TARGET_NR_name_to_handle_at 264
+#define TARGET_NR_open_by_handle_at 265
+#define TARGET_NR_clock_adjtime 266
+#define TARGET_NR_syncfs 267
+#define TARGET_NR_setns 268
+#define TARGET_NR_sendmmsg 269
+
+#undef TARGET_NR_syscalls
+#define TARGET_NR_syscalls 270
+
+/*
+ * All syscalls below here should really go away;
+ * they are provided both for review and as a porting
+ * aid for the C library version.
+ *
+ * Last chance: are any of these important enough to
+ * enable by default?
+ */
+#define TARGET_NR_open 1024
+#define TARGET_NR_link 1025
+#define TARGET_NR_unlink 1026
+#define TARGET_NR_mknod 1027
+#define TARGET_NR_chmod 1028
+#define TARGET_NR_chown 1029
+#define TARGET_NR_mkdir 1030
+#define TARGET_NR_rmdir 1031
+#define TARGET_NR_lchown 1032
+#define TARGET_NR_access 1033
+#define TARGET_NR_rename 1034
+#define TARGET_NR_readlink 1035
+#define TARGET_NR_symlink 1036
+#define TARGET_NR_utimes 1037
+#define TARGET_NR_3264_stat 1038
+#define TARGET_NR_3264_lstat 1039
+
+#undef TARGET_NR_syscalls
+#define TARGET_NR_syscalls (TARGET_NR_3264_lstat+1)
+
+#define TARGET_NR_pipe 1040
+#define TARGET_NR_dup2 1041
+#define TARGET_NR_epoll_create 1042
+#define TARGET_NR_inotify_init 1043
+#define TARGET_NR_eventfd 1044
+#define TARGET_NR_signalfd 1045
+
+#undef TARGET_NR_syscalls
+#define TARGET_NR_syscalls (TARGET_NR_signalfd+1)
+
+
+#define TARGET_NR_sendfile 1046
+#define TARGET_NR_ftruncate 1047
+#define TARGET_NR_truncate 1048
+#define TARGET_NR_stat 1049
+#define TARGET_NR_lstat 1050
+#define TARGET_NR_fstat 1051
+#define TARGET_NR_fcntl 1052
+#define TARGET_NR_fadvise64 1053
+#define __ARCH_WANT_SYS_FADVISE64
+#define TARGET_NR_newfstatat 1054
+#define __ARCH_WANT_SYS_NEWFSTATAT
+#define TARGET_NR_fstatfs 1055
+#define TARGET_NR_statfs 1056
+#define TARGET_NR_lseek 1057
+#define TARGET_NR_mmap 1058
+
+#undef TARGET_NR_syscalls
+#define TARGET_NR_syscalls (TARGET_NR_mmap+1)
+
+#define TARGET_NR_alarm 1059
+#define __ARCH_WANT_SYS_ALARM
+#define TARGET_NR_getpgrp 1060
+#define __ARCH_WANT_SYS_GETPGRP
+#define TARGET_NR_pause 1061
+#define __ARCH_WANT_SYS_PAUSE
+#define TARGET_NR_time 1062
+#define __ARCH_WANT_SYS_TIME
+#define __ARCH_WANT_COMPAT_SYS_TIME
+#define TARGET_NR_utime 1063
+#define __ARCH_WANT_SYS_UTIME
+
+#define TARGET_NR_creat 1064
+#define TARGET_NR_getdents 1065
+#define __ARCH_WANT_SYS_GETDENTS
+#define TARGET_NR_futimesat 1066
+#define TARGET_NR_select 1067
+#define __ARCH_WANT_SYS_SELECT
+#define TARGET_NR_poll 1068
+#define TARGET_NR_epoll_wait 1069
+#define TARGET_NR_ustat 1070
+#define TARGET_NR_vfork 1071
+#define TARGET_NR_oldwait4 1072
+#define TARGET_NR_recv 1073
+#define TARGET_NR_send 1074
+#define TARGET_NR_bdflush 1075
+#define TARGET_NR_umount 1076
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define TARGET_NR_uselib 1077
+#define TARGET_NR__sysctl 1078
+
+#define TARGET_NR_fork 1079
+
+#undef TARGET_NR_syscalls
+#define TARGET_NR_syscalls (TARGET_NR_fork+1)
+
+
+/*
+ * 32 bit systems traditionally used different
+ * syscalls for off_t and loff_t arguments, while
+ * 64 bit systems only need the off_t version.
+ * For new 32 bit platforms, there is no need to
+ * implement the old 32 bit off_t syscalls, so
+ * they take different names.
+ * Here we map the numbers so that both versions
+ * use the same syscall table layout.
+ */
+
+#define TARGET_NR_fcntl64 TARGET_NR_3264_fcntl
+#define TARGET_NR_statfs64 TARGET_NR_3264_statfs
+#define TARGET_NR_fstatfs64 TARGET_NR_3264_fstatfs
+#define TARGET_NR_truncate64 TARGET_NR_3264_truncate
+#define TARGET_NR_ftruncate64 TARGET_NR_3264_ftruncate
+#define TARGET_NR_llseek TARGET_NR_3264_lseek
+#define TARGET_NR_sendfile64 TARGET_NR_3264_sendfile
+#define TARGET_NR_fstatat64 TARGET_NR_3264_fstatat
+#define TARGET_NR_fstat64 TARGET_NR_3264_fstat
+#define TARGET_NR_mmap2 TARGET_NR_3264_mmap
+#define TARGET_NR_fadvise64_64 TARGET_NR_3264_fadvise64
+
+#ifdef TARGET_NR_3264_stat
+#define TARGET_NR_stat64 TARGET_NR_3264_stat
+#define TARGET_NR_lstat64 TARGET_NR_3264_lstat
+#endif
diff --git a/linux-user/openrisc/target_signal.h b/linux-user/openrisc/target_signal.h
new file mode 100644
index 0000000000..964aed69f1
--- /dev/null
+++ b/linux-user/openrisc/target_signal.h
@@ -0,0 +1,26 @@
+#ifndef TARGET_SIGNAL_H
+#define TARGET_SIGNAL_H
+
+#include "cpu.h"
+
+/* this struct defines a stack used during syscall handling */
+
+typedef struct target_sigaltstack {
+ abi_long ss_sp;
+ abi_ulong ss_size;
+ abi_long ss_flags;
+} target_stack_t;
+
+/* sigaltstack controls */
+#define TARGET_SS_ONSTACK 1
+#define TARGET_SS_DISABLE 2
+
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_SIGSTKSZ 8192
+
+static inline abi_ulong get_sp_from_cpustate(CPUOpenRISCState *state)
+{
+ return state->gpr[1];
+}
+
+#endif /* TARGET_SIGNAL_H */
diff --git a/linux-user/openrisc/termbits.h b/linux-user/openrisc/termbits.h
new file mode 100644
index 0000000000..373af77215
--- /dev/null
+++ b/linux-user/openrisc/termbits.h
@@ -0,0 +1,294 @@
+typedef unsigned char target_openrisc_cc; /*cc_t*/
+typedef unsigned int target_openrisc_speed; /*speed_t*/
+typedef unsigned int target_openrisc_tcflag; /*tcflag_t*/
+
+#define TARGET_NCCS 19
+struct target_termios {
+ target_openrisc_tcflag c_iflag; /* input mode flags */
+ target_openrisc_tcflag c_oflag; /* output mode flags */
+ target_openrisc_tcflag c_cflag; /* control mode flags */
+ target_openrisc_tcflag c_lflag; /* local mode flags */
+ target_openrisc_cc c_line; /* line discipline */
+ target_openrisc_cc c_cc[TARGET_NCCS]; /* control characters */
+};
+
+struct target_termios2 {
+ target_openrisc_tcflag c_iflag; /* input mode flags */
+ target_openrisc_tcflag c_oflag; /* output mode flags */
+ target_openrisc_tcflag c_cflag; /* control mode flags */
+ target_openrisc_tcflag c_lflag; /* local mode flags */
+ target_openrisc_cc c_line; /* line discipline */
+ target_openrisc_cc c_cc[TARGET_NCCS]; /* control characters */
+ target_openrisc_speed c_ispeed; /* input speed */
+ target_openrisc_speed c_ospeed; /* output speed */
+};
+
+struct target_termios3 {
+ target_openrisc_tcflag c_iflag; /* input mode flags */
+ target_openrisc_tcflag c_oflag; /* output mode flags */
+ target_openrisc_tcflag c_cflag; /* control mode flags */
+ target_openrisc_tcflag c_lflag; /* local mode flags */
+ target_openrisc_cc c_line; /* line discipline */
+ target_openrisc_cc c_cc[TARGET_NCCS]; /* control characters */
+ target_openrisc_speed c_ispeed; /* input speed */
+ target_openrisc_speed c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define TARGET_VINTR 0
+#define TARGET_VQUIT 1
+#define TARGET_VERASE 2
+#define TARGET_VKILL 3
+#define TARGET_VEOF 4
+#define TARGET_VTIME 5
+#define TARGET_VMIN 6
+#define TARGET_VSWTC 7
+#define TARGET_VSTART 8
+#define TARGET_VSTOP 9
+#define TARGET_VSUSP 10
+#define TARGET_VEOL 11
+#define TARGET_VREPRINT 12
+#define TARGET_VDISCARD 13
+#define TARGET_VWERASE 14
+#define TARGET_VLNEXT 15
+#define TARGET_VEOL2 16
+
+/* c_iflag bits */
+#define TARGET_IGNBRK 0000001
+#define TARGET_BRKINT 0000002
+#define TARGET_IGNPAR 0000004
+#define TARGET_PARMRK 0000010
+#define TARGET_INPCK 0000020
+#define TARGET_ISTRIP 0000040
+#define TARGET_INLCR 0000100
+#define TARGET_IGNCR 0000200
+#define TARGET_ICRNL 0000400
+#define TARGET_IUCLC 0001000
+#define TARGET_IXON 0002000
+#define TARGET_IXANY 0004000
+#define TARGET_IXOFF 0010000
+#define TARGET_IMAXBEL 0020000
+#define TARGET_IUTF8 0040000
+
+/* c_oflag bits */
+#define TARGET_OPOST 0000001
+#define TARGET_OLCUC 0000002
+#define TARGET_ONLCR 0000004
+#define TARGET_OCRNL 0000010
+#define TARGET_ONOCR 0000020
+#define TARGET_ONLRET 0000040
+#define TARGET_OFILL 0000100
+#define TARGET_OFDEL 0000200
+#define TARGET_NLDLY 0000400
+#define TARGET_NL0 0000000
+#define TARGET_NL1 0000400
+#define TARGET_CRDLY 0003000
+#define TARGET_CR0 0000000
+#define TARGET_CR1 0001000
+#define TARGET_CR2 0002000
+#define TARGET_CR3 0003000
+#define TARGET_TABDLY 0014000
+#define TARGET_TAB0 0000000
+#define TARGET_TAB1 0004000
+#define TARGET_TAB2 0010000
+#define TARGET_TAB3 0014000
+#define TARGET_XTABS 0014000
+#define TARGET_BSDLY 0020000
+#define TARGET_BS0 0000000
+#define TARGET_BS1 0020000
+#define TARGET_VTDLY 0040000
+#define TARGET_VT0 0000000
+#define TARGET_VT1 0040000
+#define TARGET_FFDLY 0100000
+#define TARGET_FF0 0000000
+#define TARGET_FF1 0100000
+
+/* c_cflag bit meaning */
+#define TARGET_CBAUD 0010017
+#define TARGET_B0 0000000 /* hang up */
+#define TARGET_B50 0000001
+#define TARGET_B75 0000002
+#define TARGET_B110 0000003
+#define TARGET_B134 0000004
+#define TARGET_B150 0000005
+#define TARGET_B200 0000006
+#define TARGET_B300 0000007
+#define TARGET_B600 0000010
+#define TARGET_B1200 0000011
+#define TARGET_B1800 0000012
+#define TARGET_B2400 0000013
+#define TARGET_B4800 0000014
+#define TARGET_B9600 0000015
+#define TARGET_B19200 0000016
+#define TARGET_B38400 0000017
+#define TARGET_EXTA B19200
+#define TARGET_EXTB B38400
+#define TARGET_CSIZE 0000060
+#define TARGET_CS5 0000000
+#define TARGET_CS6 0000020
+#define TARGET_CS7 0000040
+#define TARGET_CS8 0000060
+#define TARGET_CSTOPB 0000100
+#define TARGET_CREAD 0000200
+#define TARGET_PARENB 0000400
+#define TARGET_PARODD 0001000
+#define TARGET_HUPCL 0002000
+#define TARGET_CLOCAL 0004000
+#define TARGET_CBAUDEX 0010000
+#define TARGET_BOTHER 0010000
+#define TARGET_B57600 0010001
+#define TARGET_B115200 0010002
+#define TARGET_B230400 0010003
+#define TARGET_B460800 0010004
+#define TARGET_B500000 0010005
+#define TARGET_B576000 0010006
+#define TARGET_B921600 0010007
+#define TARGET_B1000000 0010010
+#define TARGET_B1152000 0010011
+#define TARGET_B1500000 0010012
+#define TARGET_B2000000 0010013
+#define TARGET_B2500000 0010014
+#define TARGET_B3000000 0010015
+#define TARGET_B3500000 0010016
+#define TARGET_B4000000 0010017
+#define TARGET_CIBAUD 002003600000 /* input baud rate */
+#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */
+#define TARGET_CRTSCTS 020000000000 /* flow control */
+
+#define TARGET_IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define TARGET_ISIG 0000001
+#define TARGET_ICANON 0000002
+#define TARGET_XCASE 0000004
+#define TARGET_ECHO 0000010
+#define TARGET_ECHOE 0000020
+#define TARGET_ECHOK 0000040
+#define TARGET_ECHONL 0000100
+#define TARGET_NOFLSH 0000200
+#define TARGET_TOSTOP 0000400
+#define TARGET_ECHOCTL 0001000
+#define TARGET_ECHOPRT 0002000
+#define TARGET_ECHOKE 0004000
+#define TARGET_FLUSHO 0010000
+#define TARGET_PENDIN 0040000
+#define TARGET_IEXTEN 0100000
+#define TARGET_EXTPROC 0200000
+
+/* tcflow() and TCXONC use these */
+#define TARGET_TCOOFF 0
+#define TARGET_TCOON 1
+#define TARGET_TCIOFF 2
+#define TARGET_TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TARGET_TCIFLUSH 0
+#define TARGET_TCOFLUSH 1
+#define TARGET_TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TARGET_TCSANOW 0
+#define TARGET_TCSADRAIN 1
+#define TARGET_TCSAFLUSH 2
+
+/* ioctls */
+#define TARGET_TCGETS 0x5401
+#define TARGET_TCSETS 0x5402
+#define TARGET_TCSETSW 0x5403
+#define TARGET_TCSETSF 0x5404
+#define TARGET_TCGETA 0x5405
+#define TARGET_TCSETA 0x5406
+#define TARGET_TCSETAW 0x5407
+#define TARGET_TCSETAF 0x5408
+#define TARGET_TCSBRK 0x5409
+#define TARGET_TCXONC 0x540A
+#define TARGET_TCFLSH 0x540B
+#define TARGET_TIOCEXCL 0x540C
+#define TARGET_TIOCNXCL 0x540D
+#define TARGET_TIOCSCTTY 0x540E
+#define TARGET_TIOCGPGRP 0x540F
+#define TARGET_TIOCSPGRP 0x5410
+#define TARGET_TIOCOUTQ 0x5411
+#define TARGET_TIOCSTI 0x5412
+#define TARGET_TIOCGWINSZ 0x5413
+#define TARGET_TIOCSWINSZ 0x5414
+#define TARGET_TIOCMGET 0x5415
+#define TARGET_TIOCMBIS 0x5416
+#define TARGET_TIOCMBIC 0x5417
+#define TARGET_TIOCMSET 0x5418
+#define TARGET_TIOCGSOFTCAR 0x5419
+#define TARGET_TIOCSSOFTCAR 0x541A
+#define TARGET_FIONREAD 0x541B
+#define TARGET_TIOCINQ FIONREAD
+#define TARGET_TIOCLINUX 0x541C
+#define TARGET_TIOCCONS 0x541D
+#define TARGET_TIOCGSERIAL 0x541E
+#define TARGET_TIOCSSERIAL 0x541F
+#define TARGET_TIOCPKT 0x5420
+#define TARGET_FIONBIO 0x5421
+#define TARGET_TIOCNOTTY 0x5422
+#define TARGET_TIOCSETD 0x5423
+#define TARGET_TIOCGETD 0x5424
+#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */
+#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */
+#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TARGET_TCGETS2 TARGET_IOR('T', 0x2A, struct termios2)
+#define TARGET_TCSETS2 TARGET_IOW('T', 0x2B, struct termios2)
+#define TARGET_TCSETSW2 TARGET_IOW('T', 0x2C, struct termios2)
+#define TARGET_TCSETSF2 TARGET_IOW('T', 0x2D, struct termios2)
+#define TARGET_TIOCGRS485 0x542E
+#ifndef TARGET_TIOCSRS485
+#define TARGET_TIOCSRS485 0x542F
+#endif
+/* Get Pty Number (of pty-mux device) */
+#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int)
+/* Lock/unlock Pty */
+#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int)
+/* Get primary device node of /dev/console */
+#define TARGET_TIOCGDEV TARGET_IOR('T', 0x32, unsigned int)
+#define TARGET_TCGETX 0x5432 /* SYS5 TCGETX compatibility */
+#define TARGET_TCSETX 0x5433
+#define TARGET_TCSETXF 0x5434
+#define TARGET_TCSETXW 0x5435
+/* pty: generate signal */
+#define TARGET_TIOCSIG TARGET_IOW('T', 0x36, int)
+#define TARGET_TIOCVHANGUP 0x5437
+
+#define TARGET_FIONCLEX 0x5450
+#define TARGET_FIOCLEX 0x5451
+#define TARGET_FIOASYNC 0x5452
+#define TARGET_TIOCSERCONFIG 0x5453
+#define TARGET_TIOCSERGWILD 0x5454
+#define TARGET_TIOCSERSWILD 0x5455
+#define TARGET_TIOCGLCKTRMIOS 0x5456
+#define TARGET_TIOCSLCKTRMIOS 0x5457
+#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+/* wait for a change on serial input line(s) */
+#define TARGET_TIOCMIWAIT 0x545C
+/* read serial port inline interrupt counts */
+#define TARGET_TIOCGICOUNT 0x545D
+
+/*
+ * Some arches already define TARGET_FIOQSIZE due to a historical
+ * conflict with a Hayes modem-specific ioctl value.
+ */
+#ifndef TARGET_FIOQSIZE
+#define TARGET_FIOQSIZE 0x5460
+#endif
+
+/* Used for packet mode */
+#define TARGET_TIOCPKT_DATA 0
+#define TARGET_TIOCPKT_FLUSHREAD 1
+#define TARGET_TIOCPKT_FLUSHWRITE 2
+#define TARGET_TIOCPKT_STOP 4
+#define TARGET_TIOCPKT_START 8
+#define TARGET_TIOCPKT_NOSTOP 16
+#define TARGET_TIOCPKT_DOSTOP 32
+#define TARGET_TIOCPKT_IOCTL 64
+
+#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */
diff --git a/linux-user/signal.c b/linux-user/signal.c
index 43346dcbcc..78691473fa 100644
--- a/linux-user/signal.c
+++ b/linux-user/signal.c
@@ -1844,7 +1844,7 @@ typedef struct {
} __siginfo_t;
typedef struct {
- unsigned long si_float_regs [32];
+ abi_ulong si_float_regs[32];
unsigned long si_fsr;
unsigned long si_fpqdepth;
struct {
@@ -2056,11 +2056,9 @@ restore_fpu_state(CPUSPARCState *env, qemu_siginfo_fpu_t *fpu)
return -EFAULT;
#endif
-#if 0
/* XXX: incorrect */
- err = __copy_from_user(&env->fpr[0], &fpu->si_float_regs[0],
- (sizeof(unsigned long) * 32));
-#endif
+ err = copy_from_user(&env->fpr[0], fpu->si_float_regs[0],
+ (sizeof(abi_ulong) * 32));
err |= __get_user(env->fsr, &fpu->si_fsr);
#if 0
err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth);
@@ -2849,7 +2847,7 @@ static void setup_rt_frame(int sig, struct target_sigaction *ka,
* Arguments to signal handler:
*
* a0 = signal number
- * a1 = pointer to struct siginfo
+ * a1 = pointer to siginfo_t
* a2 = pointer to struct ucontext
*
* $25 and PC point to the signal handler, $29 points to the
@@ -3255,7 +3253,7 @@ struct target_signal_frame {
};
struct rt_signal_frame {
- struct siginfo info;
+ siginfo_t info;
struct ucontext uc;
uint32_t tramp[2];
};
@@ -3474,9 +3472,9 @@ struct target_signal_frame {
};
struct rt_signal_frame {
- struct siginfo *pinfo;
+ siginfo_t *pinfo;
void *puc;
- struct siginfo info;
+ siginfo_t info;
struct ucontext uc;
uint8_t retcode[8]; /* Trampoline code. */
};
@@ -3629,6 +3627,235 @@ long do_rt_sigreturn(CPUCRISState *env)
return -TARGET_ENOSYS;
}
+#elif defined(TARGET_OPENRISC)
+
+struct target_sigcontext {
+ struct target_pt_regs regs;
+ abi_ulong oldmask;
+ abi_ulong usp;
+};
+
+struct target_ucontext {
+ abi_ulong tuc_flags;
+ abi_ulong tuc_link;
+ target_stack_t tuc_stack;
+ struct target_sigcontext tuc_mcontext;
+ target_sigset_t tuc_sigmask; /* mask last for extensibility */
+};
+
+struct target_rt_sigframe {
+ abi_ulong pinfo;
+ uint64_t puc;
+ struct target_siginfo info;
+ struct target_sigcontext sc;
+ struct target_ucontext uc;
+ unsigned char retcode[16]; /* trampoline code */
+};
+
+/* This is the asm-generic/ucontext.h version */
+#if 0
+static int restore_sigcontext(CPUOpenRISCState *regs,
+ struct target_sigcontext *sc)
+{
+ unsigned int err = 0;
+ unsigned long old_usp;
+
+ /* Always make any pending restarted system call return -EINTR */
+ current_thread_info()->restart_block.fn = do_no_restart_syscall;
+
+ /* restore the regs from &sc->regs (same as sc, since regs is first)
+ * (sc is already checked for VERIFY_READ since the sigframe was
+ * checked in sys_sigreturn previously)
+ */
+
+ if (copy_from_user(regs, &sc, sizeof(struct target_pt_regs))) {
+ goto badframe;
+ }
+
+ /* make sure the U-flag is set so user-mode cannot fool us */
+
+ regs->sr &= ~SR_SM;
+
+ /* restore the old USP as it was before we stacked the sc etc.
+ * (we cannot just pop the sigcontext since we aligned the sp and
+ * stuff after pushing it)
+ */
+
+ err |= __get_user(old_usp, &sc->usp);
+ phx_signal("old_usp 0x%lx", old_usp);
+
+ __PHX__ REALLY /* ??? */
+ wrusp(old_usp);
+ regs->gpr[1] = old_usp;
+
+ /* TODO: the other ports use regs->orig_XX to disable syscall checks
+ * after this completes, but we don't use that mechanism. maybe we can
+ * use it now ?
+ */
+
+ return err;
+
+badframe:
+ return 1;
+}
+#endif
+
+/* Set up a signal frame. */
+
+static int setup_sigcontext(struct target_sigcontext *sc,
+ CPUOpenRISCState *regs,
+ unsigned long mask)
+{
+ int err = 0;
+ unsigned long usp = regs->gpr[1];
+
+ /* copy the regs. they are first in sc so we can use sc directly */
+
+ /*err |= copy_to_user(&sc, regs, sizeof(struct target_pt_regs));*/
+
+ /* Set the frametype to CRIS_FRAME_NORMAL for the execution of
+ the signal handler. The frametype will be restored to its previous
+ value in restore_sigcontext. */
+ /*regs->frametype = CRIS_FRAME_NORMAL;*/
+
+ /* then some other stuff */
+ err |= __put_user(mask, &sc->oldmask);
+ err |= __put_user(usp, &sc->usp);
+
+ return err;
+}
+
+static inline unsigned long align_sigframe(unsigned long sp)
+{
+ unsigned long i;
+ i = sp & ~3UL;
+ return i;
+}
+
+static inline abi_ulong get_sigframe(struct target_sigaction *ka,
+ CPUOpenRISCState *regs,
+ size_t frame_size)
+{
+ unsigned long sp = regs->gpr[1];
+ int onsigstack = on_sig_stack(sp);
+
+ /* redzone */
+ /* This is the X/Open sanctioned signal stack switching. */
+ if ((ka->sa_flags & SA_ONSTACK) != 0 && !onsigstack) {
+ sp = target_sigaltstack_used.ss_sp + target_sigaltstack_used.ss_size;
+ }
+
+ sp = align_sigframe(sp - frame_size);
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+
+ if (onsigstack && !likely(on_sig_stack(sp))) {
+ return -1L;
+ }
+
+ return sp;
+}
+
+static void setup_frame(int sig, struct target_sigaction *ka,
+ target_sigset_t *set, CPUOpenRISCState *env)
+{
+ qemu_log("Not implemented.\n");
+}
+
+static void setup_rt_frame(int sig, struct target_sigaction *ka,
+ target_siginfo_t *info,
+ target_sigset_t *set, CPUOpenRISCState *env)
+{
+ int err = 0;
+ abi_ulong frame_addr;
+ unsigned long return_ip;
+ struct target_rt_sigframe *frame;
+ abi_ulong info_addr, uc_addr;
+
+ frame_addr = get_sigframe(ka, env, sizeof(*frame));
+ if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) {
+ goto give_sigsegv;
+ }
+
+ info_addr = frame_addr + offsetof(struct target_rt_sigframe, info);
+ err |= __put_user(info_addr, &frame->pinfo);
+ uc_addr = frame_addr + offsetof(struct target_rt_sigframe, uc);
+ err |= __put_user(uc_addr, &frame->puc);
+
+ if (ka->sa_flags & SA_SIGINFO) {
+ err |= copy_siginfo_to_user(&frame->info, info);
+ }
+ if (err) {
+ goto give_sigsegv;
+ }
+
+ /*err |= __clear_user(&frame->uc, offsetof(struct ucontext, uc_mcontext));*/
+ err |= __put_user(0, &frame->uc.tuc_flags);
+ err |= __put_user(0, &frame->uc.tuc_link);
+ err |= __put_user(target_sigaltstack_used.ss_sp,
+ &frame->uc.tuc_stack.ss_sp);
+ err |= __put_user(sas_ss_flags(env->gpr[1]), &frame->uc.tuc_stack.ss_flags);
+ err |= __put_user(target_sigaltstack_used.ss_size,
+ &frame->uc.tuc_stack.ss_size);
+ err |= setup_sigcontext(&frame->sc, env, set->sig[0]);
+
+ /*err |= copy_to_user(frame->uc.tuc_sigmask, set, sizeof(*set));*/
+
+ if (err) {
+ goto give_sigsegv;
+ }
+
+ /* trampoline - the desired return ip is the retcode itself */
+ return_ip = (unsigned long)&frame->retcode;
+ /* This is l.ori r11,r0,__NR_sigreturn, l.sys 1 */
+ err |= __put_user(0xa960, (short *)(frame->retcode + 0));
+ err |= __put_user(TARGET_NR_rt_sigreturn, (short *)(frame->retcode + 2));
+ err |= __put_user(0x20000001, (unsigned long *)(frame->retcode + 4));
+ err |= __put_user(0x15000000, (unsigned long *)(frame->retcode + 8));
+
+ if (err) {
+ goto give_sigsegv;
+ }
+
+ /* TODO what is the current->exec_domain stuff and invmap ? */
+
+ /* Set up registers for signal handler */
+ env->pc = (unsigned long)ka->_sa_handler; /* what we enter NOW */
+ env->gpr[9] = (unsigned long)return_ip; /* what we enter LATER */
+ env->gpr[3] = (unsigned long)sig; /* arg 1: signo */
+ env->gpr[4] = (unsigned long)&frame->info; /* arg 2: (siginfo_t*) */
+ env->gpr[5] = (unsigned long)&frame->uc; /* arg 3: ucontext */
+
+ /* actually move the usp to reflect the stacked frame */
+ env->gpr[1] = (unsigned long)frame;
+
+ return;
+
+give_sigsegv:
+ unlock_user_struct(frame, frame_addr, 1);
+ if (sig == TARGET_SIGSEGV) {
+ ka->_sa_handler = TARGET_SIG_DFL;
+ }
+ force_sig(TARGET_SIGSEGV);
+}
+
+long do_sigreturn(CPUOpenRISCState *env)
+{
+
+ qemu_log("do_sigreturn: not implemented\n");
+ return -TARGET_ENOSYS;
+}
+
+long do_rt_sigreturn(CPUOpenRISCState *env)
+{
+ qemu_log("do_rt_sigreturn: not implemented\n");
+ return -TARGET_ENOSYS;
+}
+/* TARGET_OPENRISC */
+
#elif defined(TARGET_S390X)
#define __NUM_GPRS 16
diff --git a/linux-user/strace.c b/linux-user/strace.c
index 05a0d3e9d7..6ec90e8974 100644
--- a/linux-user/strace.c
+++ b/linux-user/strace.c
@@ -371,11 +371,21 @@ UNUSED static struct flags open_flags[] = {
FLAG_TARGET(O_NOCTTY),
FLAG_TARGET(O_NOFOLLOW),
FLAG_TARGET(O_NONBLOCK), /* also O_NDELAY */
- FLAG_TARGET(O_SYNC),
+ FLAG_TARGET(O_DSYNC),
+ FLAG_TARGET(__O_SYNC),
FLAG_TARGET(O_TRUNC),
#ifdef O_DIRECT
FLAG_TARGET(O_DIRECT),
#endif
+#ifdef O_NOATIME
+ FLAG_TARGET(O_NOATIME),
+#endif
+#ifdef O_CLOEXEC
+ FLAG_TARGET(O_CLOEXEC),
+#endif
+#ifdef O_PATH
+ FLAG_TARGET(O_PATH),
+#endif
FLAG_END,
};
diff --git a/linux-user/strace.list b/linux-user/strace.list
index a7eeaef99f..af3c6a0cce 100644
--- a/linux-user/strace.list
+++ b/linux-user/strace.list
@@ -1527,3 +1527,6 @@
#ifdef TARGET_NR_sync_file_range2
{ TARGET_NR_sync_file_range2, "sync_file_range2", NULL, NULL, NULL },
#endif
+#ifdef TARGET_NR_pipe2
+{ TARGET_NR_pipe2, "pipe2", NULL, NULL, NULL },
+#endif
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 539af3f94b..3ba3ef5719 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -218,7 +218,6 @@ _syscall3(int, sys_getdents, uint, fd, struct linux_dirent *, dirp, uint, count)
#if defined(TARGET_NR_getdents64) && defined(__NR_getdents64)
_syscall3(int, sys_getdents64, uint, fd, struct linux_dirent64 *, dirp, uint, count);
#endif
-_syscall2(int, sys_getpriority, int, which, int, who);
#if defined(TARGET_NR__llseek) && defined(__NR_llseek)
_syscall5(int, _llseek, uint, fd, ulong, hi, ulong, lo,
loff_t *, res, uint, wh);
@@ -261,14 +260,27 @@ static bitmask_transtbl fcntl_flags_tbl[] = {
{ TARGET_O_TRUNC, TARGET_O_TRUNC, O_TRUNC, O_TRUNC, },
{ TARGET_O_APPEND, TARGET_O_APPEND, O_APPEND, O_APPEND, },
{ TARGET_O_NONBLOCK, TARGET_O_NONBLOCK, O_NONBLOCK, O_NONBLOCK, },
+ { TARGET_O_SYNC, TARGET_O_DSYNC, O_SYNC, O_DSYNC, },
{ TARGET_O_SYNC, TARGET_O_SYNC, O_SYNC, O_SYNC, },
{ TARGET_FASYNC, TARGET_FASYNC, FASYNC, FASYNC, },
{ TARGET_O_DIRECTORY, TARGET_O_DIRECTORY, O_DIRECTORY, O_DIRECTORY, },
{ TARGET_O_NOFOLLOW, TARGET_O_NOFOLLOW, O_NOFOLLOW, O_NOFOLLOW, },
- { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
#if defined(O_DIRECT)
{ TARGET_O_DIRECT, TARGET_O_DIRECT, O_DIRECT, O_DIRECT, },
#endif
+#if defined(O_NOATIME)
+ { TARGET_O_NOATIME, TARGET_O_NOATIME, O_NOATIME, O_NOATIME },
+#endif
+#if defined(O_CLOEXEC)
+ { TARGET_O_CLOEXEC, TARGET_O_CLOEXEC, O_CLOEXEC, O_CLOEXEC },
+#endif
+#if defined(O_PATH)
+ { TARGET_O_PATH, TARGET_O_PATH, O_PATH, O_PATH },
+#endif
+ /* Don't terminate the list prematurely on 64-bit host+guest. */
+#if TARGET_O_LARGEFILE != 0 || O_LARGEFILE != 0
+ { TARGET_O_LARGEFILE, TARGET_O_LARGEFILE, O_LARGEFILE, O_LARGEFILE, },
+#endif
{ 0, 0, 0, 0 }
};
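The guard added above matters because the flag-translation table is scanned until an all-zero entry. A self-contained sketch of that convention follows; the struct and function here are illustrative stand-ins, not QEMU's actual bitmask_transtbl or target_to_host_bitmask() definitions.

/* Sketch: why a flag whose value is 0 must not appear as a table row.
 * On a 64-bit host and guest both TARGET_O_LARGEFILE and O_LARGEFILE can
 * be 0, so an unconditional row for them would be all zeroes and read as
 * the terminator, cutting the table short -- hence the #if above. */
struct flag_xlat {
    unsigned target_mask, target_bits;  /* guest-side flag */
    unsigned host_mask, host_bits;      /* host-side flag */
};

static unsigned xlat_to_host(unsigned target_flags,
                             const struct flag_xlat *tbl)
{
    unsigned host_flags = 0;

    /* An entry of all zeroes terminates the table. */
    for (; tbl->target_mask || tbl->host_mask; tbl++) {
        if ((target_flags & tbl->target_mask) == tbl->target_bits) {
            host_flags |= tbl->host_bits;
        }
    }
    return host_flags;
}

QEMU's real table walk differs in detail; the sketch only shows why an all-zero row acts as a terminator.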
@@ -5582,7 +5594,8 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
#ifdef TARGET_NR_pipe2
case TARGET_NR_pipe2:
- ret = do_pipe(cpu_env, arg1, arg2, 1);
+ ret = do_pipe(cpu_env, arg1,
+ target_to_host_bitmask(arg2, fcntl_flags_tbl), 1);
break;
#endif
case TARGET_NR_times:
@@ -5867,11 +5880,10 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
target_to_host_old_sigset(&set, &mask);
ret = get_errno(sigprocmask(how, &set, &oldset));
-
if (!is_error(ret)) {
host_to_target_old_sigset(&mask, &oldset);
ret = mask;
- ((CPUAlphaState *)cpu_env)->[IR_V0] = 0; /* force no error */
+ ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0; /* force no error */
}
#else
sigset_t set, oldset, *set_ptr;
@@ -6432,10 +6444,21 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
break;
#endif
case TARGET_NR_getpriority:
- /* libc does special remapping of the return value of
- * sys_getpriority() so it's just easiest to call
- * sys_getpriority() directly rather than through libc. */
- ret = get_errno(sys_getpriority(arg1, arg2));
+ /* Note that negative values are valid for getpriority, so we must
+ differentiate based on errno settings. */
+ errno = 0;
+ ret = getpriority(arg1, arg2);
+ if (ret == -1 && errno != 0) {
+ ret = -host_to_target_errno(errno);
+ break;
+ }
+#ifdef TARGET_ALPHA
+ /* Return value is the unbiased priority. Signal no error. */
+ ((CPUAlphaState *)cpu_env)->ir[IR_V0] = 0;
+#else
+ /* Return value is a biased priority to avoid negative numbers. */
+ ret = 20 - ret;
+#endif
break;
case TARGET_NR_setpriority:
ret = get_errno(setpriority(arg1, arg2, arg3));
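Since getpriority(2) can legitimately return negative values, the only portable error check is via errno, exactly as done above. A minimal standalone illustration of that pattern (plain POSIX, independent of QEMU's host_to_target_errno() or the Alpha special case):

#include <errno.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
    errno = 0;
    int prio = getpriority(PRIO_PROCESS, 0);   /* may validly be negative */
    if (prio == -1 && errno != 0) {
        perror("getpriority");
        return 1;
    }
    /* The kernel ABI (and QEMU's non-Alpha path above) reports this back to
     * userspace biased as 20 - prio so the syscall return is never negative;
     * the C library undoes the bias again. */
    printf("nice value %d, biased syscall-style value %d\n", prio, 20 - prio);
    return 0;
}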
@@ -7377,7 +7400,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
case TARGET_NR_sigaltstack:
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_MIPS) || \
defined(TARGET_SPARC) || defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
- defined(TARGET_M68K) || defined(TARGET_S390X)
+ defined(TARGET_M68K) || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
ret = do_sigaltstack(arg1, arg2, get_sp_from_cpustate((CPUArchState *)cpu_env));
break;
#else
@@ -7699,13 +7722,13 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
ret = -TARGET_EOPNOTSUPP;
switch (arg1) {
case TARGET_SSI_IEEE_FP_CONTROL:
- case TARGET_SSI_IEEE_RAISE_EXCEPTION:
{
uint64_t swcr, fpcr, orig_fpcr;
- if (get_user_u64 (swcr, arg2))
+ if (get_user_u64 (swcr, arg2)) {
goto efault;
- orig_fpcr = cpu_alpha_load_fpcr (cpu_env);
+ }
+ orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
fpcr = orig_fpcr & FPCR_DYN_MASK;
/* Copied from linux ieee_swcr_to_fpcr. */
@@ -7719,16 +7742,57 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
fpcr |= (swcr & SWCR_MAP_UMZ ? FPCR_UNDZ | FPCR_UNFD : 0);
fpcr |= (~swcr & SWCR_TRAP_ENABLE_DNO) << 41;
- cpu_alpha_store_fpcr (cpu_env, fpcr);
+ cpu_alpha_store_fpcr(cpu_env, fpcr);
+ ret = 0;
+ }
+ break;
+
+ case TARGET_SSI_IEEE_RAISE_EXCEPTION:
+ {
+ uint64_t exc, fpcr, orig_fpcr;
+ int si_code;
+
+ if (get_user_u64(exc, arg2)) {
+ goto efault;
+ }
+
+ orig_fpcr = cpu_alpha_load_fpcr(cpu_env);
+
+ /* We only add to the exception status here. */
+ fpcr = orig_fpcr | ((exc & SWCR_STATUS_MASK) << 35);
+
+ cpu_alpha_store_fpcr(cpu_env, fpcr);
ret = 0;
- if (arg1 == TARGET_SSI_IEEE_RAISE_EXCEPTION) {
- /* Old exceptions are not signaled. */
- fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
+ /* Old exceptions are not signaled. */
+ fpcr &= ~(orig_fpcr & FPCR_STATUS_MASK);
- /* If any exceptions set by this call, and are unmasked,
- send a signal. */
- /* ??? FIXME */
+ /* If any exceptions were set by this call and are unmasked,
+ send a signal. */
+ si_code = 0;
+ if ((fpcr & (FPCR_INE | FPCR_INED)) == FPCR_INE) {
+ si_code = TARGET_FPE_FLTRES;
+ }
+ if ((fpcr & (FPCR_UNF | FPCR_UNFD)) == FPCR_UNF) {
+ si_code = TARGET_FPE_FLTUND;
+ }
+ if ((fpcr & (FPCR_OVF | FPCR_OVFD)) == FPCR_OVF) {
+ si_code = TARGET_FPE_FLTOVF;
+ }
+ if ((fpcr & (FPCR_DZE | FPCR_DZED)) == FPCR_DZE) {
+ si_code = TARGET_FPE_FLTDIV;
+ }
+ if ((fpcr & (FPCR_INV | FPCR_INVD)) == FPCR_INV) {
+ si_code = TARGET_FPE_FLTINV;
+ }
+ if (si_code != 0) {
+ target_siginfo_t info;
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info._sifields._sigfault._addr
+ = ((CPUArchState *)cpu_env)->pc;
+ queue_signal((CPUArchState *)cpu_env, info.si_signo, &info);
}
}
break;
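
Each test in the TARGET_SSI_IEEE_RAISE_EXCEPTION case pairs an FPCR status bit
with its trap-disable bit: a SIGFPE si_code is chosen only when the status bit
is newly set and the matching disable bit is clear. The idiom in isolation
(illustrative helper, not part of the patch):

    #include <stdint.h>

    /* True when `status` is raised and its matching disable bit is clear. */
    static inline int fp_exc_unmasked(uint64_t fpcr, uint64_t status,
                                      uint64_t disable)
    {
        return (fpcr & (status | disable)) == status;
    }
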
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
index a79b67df49..ba9a58c814 100644
--- a/linux-user/syscall_defs.h
+++ b/linux-user/syscall_defs.h
@@ -59,7 +59,7 @@
#if defined(TARGET_I386) || defined(TARGET_ARM) || defined(TARGET_SH4) \
|| defined(TARGET_M68K) || defined(TARGET_CRIS) || defined(TARGET_UNICORE32) \
- || defined(TARGET_S390X)
+ || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
#define TARGET_IOC_SIZEBITS 14
#define TARGET_IOC_DIRBITS 2
@@ -323,7 +323,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
|| defined(TARGET_PPC) || defined(TARGET_MIPS) || defined(TARGET_SH4) \
|| defined(TARGET_M68K) || defined(TARGET_ALPHA) || defined(TARGET_CRIS) \
|| defined(TARGET_MICROBLAZE) || defined(TARGET_UNICORE32) \
- || defined(TARGET_S390X)
+ || defined(TARGET_S390X) || defined(TARGET_OPENRISC)
#if defined(TARGET_SPARC)
#define TARGET_SA_NOCLDSTOP 8u
@@ -344,6 +344,14 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#if !defined(TARGET_ABI_MIPSN32) && !defined(TARGET_ABI_MIPSN64)
#define TARGET_SA_RESTORER 0x04000000 /* Only for O32 */
#endif
+#elif defined(TARGET_OPENRISC)
+#define TARGET_SA_NOCLDSTOP 0x00000001
+#define TARGET_SA_NOCLDWAIT 0x00000002
+#define TARGET_SA_SIGINFO 0x00000004
+#define TARGET_SA_ONSTACK 0x08000000
+#define TARGET_SA_RESTART 0x10000000
+#define TARGET_SA_NODEFER 0x40000000
+#define TARGET_SA_RESETHAND 0x80000000
#elif defined(TARGET_ALPHA)
#define TARGET_SA_ONSTACK 0x00000001
#define TARGET_SA_RESTART 0x00000002
@@ -363,7 +371,46 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#define TARGET_SA_RESTORER 0x04000000
#endif
-#if defined(TARGET_SPARC)
+#if defined(TARGET_ALPHA)
+
+#define TARGET_SIGHUP 1
+#define TARGET_SIGINT 2
+#define TARGET_SIGQUIT 3
+#define TARGET_SIGILL 4
+#define TARGET_SIGTRAP 5
+#define TARGET_SIGABRT 6
+#define TARGET_SIGSTKFLT 7 /* actually SIGEMT */
+#define TARGET_SIGFPE 8
+#define TARGET_SIGKILL 9
+#define TARGET_SIGBUS 10
+#define TARGET_SIGSEGV 11
+#define TARGET_SIGSYS 12
+#define TARGET_SIGPIPE 13
+#define TARGET_SIGALRM 14
+#define TARGET_SIGTERM 15
+#define TARGET_SIGURG 16
+#define TARGET_SIGSTOP 17
+#define TARGET_SIGTSTP 18
+#define TARGET_SIGCONT 19
+#define TARGET_SIGCHLD 20
+#define TARGET_SIGTTIN 21
+#define TARGET_SIGTTOU 22
+#define TARGET_SIGIO 23
+#define TARGET_SIGXCPU 24
+#define TARGET_SIGXFSZ 25
+#define TARGET_SIGVTALRM 26
+#define TARGET_SIGPROF 27
+#define TARGET_SIGWINCH 28
+#define TARGET_SIGPWR 29 /* actually SIGINFO */
+#define TARGET_SIGUSR1 30
+#define TARGET_SIGUSR2 31
+#define TARGET_SIGRTMIN 32
+
+#define TARGET_SIG_BLOCK 1
+#define TARGET_SIG_UNBLOCK 2
+#define TARGET_SIG_SETMASK 3
+
+#elif defined(TARGET_SPARC)
#define TARGET_SIGHUP 1
#define TARGET_SIGINT 2
@@ -448,6 +495,7 @@ int do_sigaction(int sig, const struct target_sigaction *act,
#else
+/* OpenRISC uses the generic signal numbers */
#define TARGET_SIGHUP 1
#define TARGET_SIGINT 2
#define TARGET_SIGQUIT 3
@@ -1086,7 +1134,8 @@ struct target_winsize {
#endif
#if (defined(TARGET_I386) && defined(TARGET_ABI32)) || defined(TARGET_ARM) \
- || defined(TARGET_CRIS) || defined(TARGET_UNICORE32)
+ || defined(TARGET_CRIS) || defined(TARGET_UNICORE32) \
+ || defined(TARGET_OPENRISC)
struct target_stat {
unsigned short st_dev;
unsigned short __pad1;
@@ -1783,6 +1832,30 @@ struct target_stat {
abi_long st_blocks;
abi_ulong __unused[3];
};
+#elif defined(TARGET_OPENRISC)
+struct target_stat {
+ abi_ulong st_dev;
+ abi_ulong st_ino;
+ abi_ulong st_nlink;
+
+ unsigned int st_mode;
+ unsigned int st_uid;
+ unsigned int st_gid;
+ unsigned int __pad0;
+ abi_ulong st_rdev;
+ abi_long st_size;
+ abi_long st_blksize;
+ abi_long st_blocks; /* Number of 512-byte blocks allocated. */
+
+ abi_ulong target_st_atime;
+ abi_ulong target_st_atime_nsec;
+ abi_ulong target_st_mtime;
+ abi_ulong target_st_mtime_nsec;
+ abi_ulong target_st_ctime;
+ abi_ulong target_st_ctime_nsec;
+
+ abi_long __unused[3];
+};
#else
#error unsupported CPU
#endif
@@ -1973,135 +2046,125 @@ struct target_statfs64 {
#define TARGET_F_DUPFD_CLOEXEC (TARGET_F_LINUX_SPECIFIC_BASE + 6)
#define TARGET_F_NOTIFY (TARGET_F_LINUX_SPECIFIC_BASE+2)
-#if defined (TARGET_ARM)
-#define TARGET_O_ACCMODE 0003
-#define TARGET_O_RDONLY 00
-#define TARGET_O_WRONLY 01
-#define TARGET_O_RDWR 02
-#define TARGET_O_CREAT 0100 /* not fcntl */
-#define TARGET_O_EXCL 0200 /* not fcntl */
-#define TARGET_O_NOCTTY 0400 /* not fcntl */
-#define TARGET_O_TRUNC 01000 /* not fcntl */
-#define TARGET_O_APPEND 02000
-#define TARGET_O_NONBLOCK 04000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#define TARGET_O_SYNC 010000
-#define TARGET_FASYNC 020000 /* fcntl, for BSD compatibility */
+#if defined(TARGET_ALPHA)
+#define TARGET_O_NONBLOCK 04
+#define TARGET_O_APPEND 010
+#define TARGET_O_CREAT 01000 /* not fcntl */
+#define TARGET_O_TRUNC 02000 /* not fcntl */
+#define TARGET_O_EXCL 04000 /* not fcntl */
+#define TARGET_O_NOCTTY 010000 /* not fcntl */
+#define TARGET_O_DSYNC 040000
+#define TARGET_O_LARGEFILE 0 /* not necessary, always 64-bit */
+#define TARGET_O_DIRECTORY 0100000 /* must be a directory */
+#define TARGET_O_NOFOLLOW 0200000 /* don't follow links */
+#define TARGET_O_DIRECT 02000000 /* direct disk access hint */
+#define TARGET_O_NOATIME 04000000
+#define TARGET_O_CLOEXEC 010000000
+#define TARGET___O_SYNC 020000000
+#define TARGET_O_PATH 040000000
+#elif defined(TARGET_ARM) || defined(TARGET_M68K)
#define TARGET_O_DIRECTORY 040000 /* must be a directory */
#define TARGET_O_NOFOLLOW 0100000 /* don't follow links */
#define TARGET_O_DIRECT 0200000 /* direct disk access hint */
#define TARGET_O_LARGEFILE 0400000
+#elif defined(TARGET_MIPS)
+#define TARGET_O_APPEND 0x0008
+#define TARGET_O_DSYNC 0x0010
+#define TARGET_O_NONBLOCK 0x0080
+#define TARGET_O_CREAT 0x0100 /* not fcntl */
+#define TARGET_O_TRUNC 0x0200 /* not fcntl */
+#define TARGET_O_EXCL 0x0400 /* not fcntl */
+#define TARGET_O_NOCTTY 0x0800 /* not fcntl */
+#define TARGET_FASYNC 0x1000 /* fcntl, for BSD compatibility */
+#define TARGET_O_LARGEFILE 0x2000 /* allow large file opens */
+#define TARGET___O_SYNC 0x4000
+#define TARGET_O_DIRECT 0x8000 /* direct disk access hint */
#elif defined (TARGET_PPC)
-#define TARGET_O_ACCMODE 0003
-#define TARGET_O_RDONLY 00
-#define TARGET_O_WRONLY 01
-#define TARGET_O_RDWR 02
-#define TARGET_O_CREAT 0100 /* not fcntl */
-#define TARGET_O_EXCL 0200 /* not fcntl */
-#define TARGET_O_NOCTTY 0400 /* not fcntl */
-#define TARGET_O_TRUNC 01000 /* not fcntl */
-#define TARGET_O_APPEND 02000
-#define TARGET_O_NONBLOCK 04000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#define TARGET_O_SYNC 010000
-#define TARGET_FASYNC 020000 /* fcntl, for BSD compatibility */
-#define TARGET_O_DIRECTORY 040000 /* must be a directory */
-#define TARGET_O_NOFOLLOW 0100000 /* don't follow links */
-#define TARGET_O_LARGEFILE 0200000
-#define TARGET_O_DIRECT 0400000 /* direct disk access hint */
-#elif defined (TARGET_MICROBLAZE)
-#define TARGET_O_ACCMODE 0003
-#define TARGET_O_RDONLY 00
-#define TARGET_O_WRONLY 01
-#define TARGET_O_RDWR 02
-#define TARGET_O_CREAT 0100 /* not fcntl */
-#define TARGET_O_EXCL 0200 /* not fcntl */
-#define TARGET_O_NOCTTY 0400 /* not fcntl */
-#define TARGET_O_TRUNC 01000 /* not fcntl */
-#define TARGET_O_APPEND 02000
-#define TARGET_O_NONBLOCK 04000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#define TARGET_O_SYNC 010000
-#define TARGET_FASYNC 020000 /* fcntl, for BSD compatibility */
#define TARGET_O_DIRECTORY 040000 /* must be a directory */
#define TARGET_O_NOFOLLOW 0100000 /* don't follow links */
#define TARGET_O_LARGEFILE 0200000
#define TARGET_O_DIRECT 0400000 /* direct disk access hint */
#elif defined (TARGET_SPARC)
-#define TARGET_O_RDONLY 0x0000
-#define TARGET_O_WRONLY 0x0001
-#define TARGET_O_RDWR 0x0002
-#define TARGET_O_ACCMODE 0x0003
-#define TARGET_O_APPEND 0x0008
-#define TARGET_FASYNC 0x0040 /* fcntl, for BSD compatibility */
-#define TARGET_O_CREAT 0x0200 /* not fcntl */
-#define TARGET_O_TRUNC 0x0400 /* not fcntl */
-#define TARGET_O_EXCL 0x0800 /* not fcntl */
-#define TARGET_O_SYNC 0x2000
-#define TARGET_O_NONBLOCK 0x4000
-#define TARGET_O_NDELAY (0x0004 | TARGET_O_NONBLOCK)
-#define TARGET_O_NOCTTY 0x8000 /* not fcntl */
-#define TARGET_O_DIRECTORY 0x10000 /* must be a directory */
-#define TARGET_O_NOFOLLOW 0x20000 /* don't follow links */
+#define TARGET_O_APPEND 0x0008
+#define TARGET_FASYNC 0x0040 /* fcntl, for BSD compatibility */
+#define TARGET_O_CREAT 0x0200 /* not fcntl */
+#define TARGET_O_TRUNC 0x0400 /* not fcntl */
+#define TARGET_O_EXCL 0x0800 /* not fcntl */
+#define TARGET_O_DSYNC 0x2000
+#define TARGET_O_NONBLOCK 0x4000
+# ifdef TARGET_SPARC64
+# define TARGET_O_NDELAY 0x0004
+# else
+# define TARGET_O_NDELAY (0x0004 | TARGET_O_NONBLOCK)
+# endif
+#define TARGET_O_NOCTTY 0x8000 /* not fcntl */
#define TARGET_O_LARGEFILE 0x40000
-#define TARGET_O_DIRECT 0x100000 /* direct disk access hint */
-#elif defined(TARGET_MIPS)
-#define TARGET_O_ACCMODE 0x0003
-#define TARGET_O_RDONLY 0x0000
-#define TARGET_O_WRONLY 0x0001
-#define TARGET_O_RDWR 0x0002
-#define TARGET_O_APPEND 0x0008
-#define TARGET_O_SYNC 0x0010
-#define TARGET_O_NONBLOCK 0x0080
-#define TARGET_O_CREAT 0x0100 /* not fcntl */
-#define TARGET_O_TRUNC 0x0200 /* not fcntl */
-#define TARGET_O_EXCL 0x0400 /* not fcntl */
-#define TARGET_O_NOCTTY 0x0800 /* not fcntl */
-#define TARGET_FASYNC 0x1000 /* fcntl, for BSD compatibility */
-#define TARGET_O_LARGEFILE 0x2000 /* allow large file opens */
-#define TARGET_O_DIRECT 0x8000 /* direct disk access hint */
-#define TARGET_O_DIRECTORY 0x10000 /* must be a directory */
-#define TARGET_O_NOFOLLOW 0x20000 /* don't follow links */
-#define TARGET_O_NOATIME 0x40000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#elif defined(TARGET_ALPHA)
-#define TARGET_O_ACCMODE 0x0003
-#define TARGET_O_RDONLY 0x0000
-#define TARGET_O_WRONLY 0x0001
-#define TARGET_O_RDWR 0x0002
-#define TARGET_O_APPEND 0x0008
-#define TARGET_O_SYNC 0x4000
-#define TARGET_O_NONBLOCK 0x0004
-#define TARGET_O_CREAT 0x0200 /* not fcntl */
-#define TARGET_O_TRUNC 0x0400 /* not fcntl */
-#define TARGET_O_EXCL 0x0800 /* not fcntl */
-#define TARGET_O_NOCTTY 0x1000 /* not fcntl */
-#define TARGET_FASYNC 0x2000 /* fcntl, for BSD compatibility */
-#define TARGET_O_LARGEFILE 0x0000 /* not necessary, always 64-bit */
-#define TARGET_O_DIRECT 0x80000 /* direct disk access hint */
-#define TARGET_O_DIRECTORY 0x8000 /* must be a directory */
-#define TARGET_O_NOFOLLOW 0x10000 /* don't follow links */
-#define TARGET_O_NOATIME 0x100000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#else
+#define TARGET_O_DIRECT 0x100000 /* direct disk access hint */
+#define TARGET_O_NOATIME 0x200000
+#define TARGET_O_CLOEXEC 0x400000
+#define TARGET___O_SYNC 0x800000
+#define TARGET_O_PATH 0x1000000
+#endif
+
+/* <asm-generic/fcntl.h> values follow. */
#define TARGET_O_ACCMODE 0003
#define TARGET_O_RDONLY 00
#define TARGET_O_WRONLY 01
#define TARGET_O_RDWR 02
+#ifndef TARGET_O_CREAT
#define TARGET_O_CREAT 0100 /* not fcntl */
+#endif
+#ifndef TARGET_O_EXCL
#define TARGET_O_EXCL 0200 /* not fcntl */
+#endif
+#ifndef TARGET_O_NOCTTY
#define TARGET_O_NOCTTY 0400 /* not fcntl */
+#endif
+#ifndef TARGET_O_TRUNC
#define TARGET_O_TRUNC 01000 /* not fcntl */
+#endif
+#ifndef TARGET_O_APPEND
#define TARGET_O_APPEND 02000
+#endif
+#ifndef TARGET_O_NONBLOCK
#define TARGET_O_NONBLOCK 04000
-#define TARGET_O_NDELAY TARGET_O_NONBLOCK
-#define TARGET_O_SYNC 010000
+#endif
+#ifndef TARGET_O_DSYNC
+#define TARGET_O_DSYNC 010000
+#endif
+#ifndef TARGET_FASYNC
#define TARGET_FASYNC 020000 /* fcntl, for BSD compatibility */
+#endif
+#ifndef TARGET_O_DIRECT
#define TARGET_O_DIRECT 040000 /* direct disk access hint */
+#endif
+#ifndef TARGET_O_LARGEFILE
#define TARGET_O_LARGEFILE 0100000
+#endif
+#ifndef TARGET_O_DIRECTORY
#define TARGET_O_DIRECTORY 0200000 /* must be a directory */
+#endif
+#ifndef TARGET_O_NOFOLLOW
#define TARGET_O_NOFOLLOW 0400000 /* don't follow links */
#endif
+#ifndef TARGET_O_NOATIME
+#define TARGET_O_NOATIME 01000000
+#endif
+#ifndef TARGET_O_CLOEXEC
+#define TARGET_O_CLOEXEC 02000000
+#endif
+#ifndef TARGET___O_SYNC
+#define TARGET___O_SYNC 04000000
+#endif
+#ifndef TARGET_O_PATH
+#define TARGET_O_PATH 010000000
+#endif
+#ifndef TARGET_O_NDELAY
+#define TARGET_O_NDELAY TARGET_O_NONBLOCK
+#endif
+#ifndef TARGET_O_SYNC
+#define TARGET_O_SYNC (TARGET___O_SYNC | TARGET_O_DSYNC)
+#endif
struct target_flock {
short l_type;
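
The open-flag restructuring above lets each target header define only the
values that differ from <asm-generic/fcntl.h>, with the trailing #ifndef block
supplying the generic octal defaults (including the modern composition
TARGET_O_SYNC = TARGET___O_SYNC | TARGET_O_DSYNC). The pattern reduced to a
single flag, using a hypothetical target for illustration:

    #if defined(TARGET_FOO)              /* hypothetical per-target override */
    #define TARGET_O_NONBLOCK 0x0080     /* differs from the generic ABI */
    #endif

    #ifndef TARGET_O_NONBLOCK
    #define TARGET_O_NONBLOCK 04000      /* <asm-generic/fcntl.h> default */
    #endif
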
diff --git a/memory.c b/memory.c
index aab4a31323..643871bafa 100644
--- a/memory.c
+++ b/memory.c
@@ -156,7 +156,7 @@ struct MemoryRegionIoeventfd {
AddrRange addr;
bool match_data;
uint64_t data;
- int fd;
+ EventNotifier *e;
};
static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
@@ -181,9 +181,9 @@ static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
return false;
}
}
- if (a.fd < b.fd) {
+ if (a.e < b.e) {
return true;
- } else if (a.fd > b.fd) {
+ } else if (a.e > b.e) {
return false;
}
return false;
@@ -597,7 +597,7 @@ static void address_space_add_del_ioeventfds(AddressSpace *as,
.size = int128_get64(fd->addr.size),
};
MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
- fd->match_data, fd->data, fd->fd);
+ fd->match_data, fd->data, fd->e);
++iold;
} else if (inew < fds_new_nb
&& (iold == fds_old_nb
@@ -610,7 +610,7 @@ static void address_space_add_del_ioeventfds(AddressSpace *as,
.size = int128_get64(fd->addr.size),
};
MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
- fd->match_data, fd->data, fd->fd);
+ fd->match_data, fd->data, fd->e);
++inew;
} else {
++iold;
@@ -1195,14 +1195,14 @@ void memory_region_add_eventfd(MemoryRegion *mr,
unsigned size,
bool match_data,
uint64_t data,
- int fd)
+ EventNotifier *e)
{
MemoryRegionIoeventfd mrfd = {
.addr.start = int128_make64(addr),
.addr.size = int128_make64(size),
.match_data = match_data,
.data = data,
- .fd = fd,
+ .e = e,
};
unsigned i;
@@ -1225,14 +1225,14 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned size,
bool match_data,
uint64_t data,
- int fd)
+ EventNotifier *e)
{
MemoryRegionIoeventfd mrfd = {
.addr.start = int128_make64(addr),
.addr.size = int128_make64(size),
.match_data = match_data,
.data = data,
- .fd = fd,
+ .e = e,
};
unsigned i;
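
memory.c now threads an EventNotifier pointer instead of a raw file descriptor
through the ioeventfd bookkeeping. A listener that still needs the descriptor
can recover it; a minimal sketch, assuming QEMU's memory.h types and the
event_notifier_get_fd() accessor from event_notifier.h:

    static void my_eventfd_add(MemoryListener *listener,
                               MemoryRegionSection *section,
                               bool match_data, uint64_t data,
                               EventNotifier *e)
    {
        int fd = event_notifier_get_fd(e);   /* raw fd for backends that want one */

        /* ... register fd with the backend, e.g. a kvm ioeventfd ... */
        (void)listener; (void)section; (void)match_data; (void)data; (void)fd;
    }
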
diff --git a/memory.h b/memory.h
index 740c48e8e5..bd1bbaeabe 100644
--- a/memory.h
+++ b/memory.h
@@ -198,9 +198,9 @@ struct MemoryListener {
void (*log_global_start)(MemoryListener *listener);
void (*log_global_stop)(MemoryListener *listener);
void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd);
+ bool match_data, uint64_t data, EventNotifier *e);
void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd);
+ bool match_data, uint64_t data, EventNotifier *e);
/* Lower = earlier (during add), later (during del) */
unsigned priority;
MemoryRegion *address_space_filter;
@@ -541,7 +541,7 @@ void memory_region_add_eventfd(MemoryRegion *mr,
unsigned size,
bool match_data,
uint64_t data,
- int fd);
+ EventNotifier *e);
/**
* memory_region_del_eventfd: Cancel an eventfd.
@@ -561,7 +561,8 @@ void memory_region_del_eventfd(MemoryRegion *mr,
unsigned size,
bool match_data,
uint64_t data,
- int fd);
+ EventNotifier *e);
+
/**
* memory_region_add_subregion: Add a subregion to a container.
*
diff --git a/migration.c b/migration.c
index 3f485d33a5..8db1b433f0 100644
--- a/migration.c
+++ b/migration.c
@@ -131,6 +131,8 @@ MigrationInfo *qmp_query_migrate(Error **errp)
info->ram->transferred = ram_bytes_transferred();
info->ram->remaining = ram_bytes_remaining();
info->ram->total = ram_bytes_total();
+ info->ram->total_time = qemu_get_clock_ms(rt_clock)
+ - s->total_time;
if (blk_mig_active()) {
info->has_disk = true;
@@ -143,6 +145,13 @@ MigrationInfo *qmp_query_migrate(Error **errp)
case MIG_STATE_COMPLETED:
info->has_status = true;
info->status = g_strdup("completed");
+
+ info->has_ram = true;
+ info->ram = g_malloc0(sizeof(*info->ram));
+ info->ram->transferred = ram_bytes_transferred();
+ info->ram->remaining = 0;
+ info->ram->total = ram_bytes_total();
+ info->ram->total_time = s->total_time;
break;
case MIG_STATE_ERROR:
info->has_status = true;
@@ -260,6 +269,7 @@ static void migrate_fd_put_ready(void *opaque)
} else {
migrate_fd_completed(s);
}
+ s->total_time = qemu_get_clock_ms(rt_clock) - s->total_time;
if (s->state != MIG_STATE_COMPLETED) {
if (old_vm_running) {
vm_start();
@@ -352,7 +362,7 @@ void migrate_fd_connect(MigrationState *s)
migrate_fd_close);
DPRINTF("beginning savevm\n");
- ret = qemu_savevm_state_begin(s->file, s->blk, s->shared);
+ ret = qemu_savevm_state_begin(s->file, &s->params);
if (ret < 0) {
DPRINTF("failed, %d\n", ret);
migrate_fd_error(s);
@@ -361,18 +371,18 @@ void migrate_fd_connect(MigrationState *s)
migrate_fd_put_ready(s);
}
-static MigrationState *migrate_init(int blk, int inc)
+static MigrationState *migrate_init(const MigrationParams *params)
{
MigrationState *s = migrate_get_current();
int64_t bandwidth_limit = s->bandwidth_limit;
memset(s, 0, sizeof(*s));
s->bandwidth_limit = bandwidth_limit;
- s->blk = blk;
- s->shared = inc;
+ s->params = *params;
s->bandwidth_limit = bandwidth_limit;
s->state = MIG_STATE_SETUP;
+ s->total_time = qemu_get_clock_ms(rt_clock);
return s;
}
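
The total_time field added above holds the start timestamp while migration is
active and is overwritten with the elapsed value once migration finishes, so
query-migrate can report either a running or a final duration. The three sites
condensed (field and clock names as in the patch):

    /* migrate_init(): remember when migration started */
    s->total_time = qemu_get_clock_ms(rt_clock);

    /* qmp_query_migrate(), while active: report the running total */
    info->ram->total_time = qemu_get_clock_ms(rt_clock) - s->total_time;

    /* migrate_fd_put_ready(), on completion: convert to elapsed milliseconds */
    s->total_time = qemu_get_clock_ms(rt_clock) - s->total_time;
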
@@ -394,9 +404,13 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
Error **errp)
{
MigrationState *s = migrate_get_current();
+ MigrationParams params;
const char *p;
int ret;
+ params.blk = blk;
+ params.shared = inc;
+
if (s->state == MIG_STATE_ACTIVE) {
error_set(errp, QERR_MIGRATION_ACTIVE);
return;
@@ -411,7 +425,7 @@ void qmp_migrate(const char *uri, bool has_blk, bool blk,
return;
}
- s = migrate_init(blk, inc);
+ s = migrate_init(&params);
if (strstart(uri, "tcp:", &p)) {
ret = tcp_start_outgoing_migration(s, p, errp);
diff --git a/migration.h b/migration.h
index 2e9ca2edf2..57572a61e9 100644
--- a/migration.h
+++ b/migration.h
@@ -18,6 +18,12 @@
#include "qemu-common.h"
#include "notify.h"
#include "error.h"
+#include "vmstate.h"
+
+struct MigrationParams {
+ bool blk;
+ bool shared;
+};
typedef struct MigrationState MigrationState;
@@ -31,8 +37,8 @@ struct MigrationState
int (*close)(MigrationState *s);
int (*write)(MigrationState *s, const void *buff, size_t size);
void *opaque;
- int blk;
- int shared;
+ MigrationParams params;
+ int64_t total_time;
};
void process_incoming_migration(QEMUFile *f);
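
Callers now hand block-migration options to migrate_init() as a single
MigrationParams instead of separate blk/shared ints. The qmp_migrate() call
site above, condensed into a designated initializer (same fields, sketch only):

    MigrationParams params = {
        .blk    = blk,   /* -b: migrate with a full copy of block devices */
        .shared = inc,   /* -i: incremental copy, storage is shared */
    };

    s = migrate_init(&params);
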
@@ -76,8 +82,7 @@ uint64_t ram_bytes_remaining(void);
uint64_t ram_bytes_transferred(void);
uint64_t ram_bytes_total(void);
-int ram_save_live(QEMUFile *f, int stage, void *opaque);
-int ram_load(QEMUFile *f, void *opaque, int version_id);
+extern SaveVMHandlers savevm_ram_handlers;
/**
* @migrate_add_blocker - prevent migration from proceeding
diff --git a/monitor.c b/monitor.c
index f6107badb6..49dccfe854 100644
--- a/monitor.c
+++ b/monitor.c
@@ -941,13 +941,6 @@ static void do_info_cpu_stats(Monitor *mon)
}
#endif
-#if defined(CONFIG_TRACE_SIMPLE)
-static void do_info_trace(Monitor *mon)
-{
- st_print_trace((FILE *)mon, &monitor_fprintf);
-}
-#endif
-
static void do_trace_print_events(Monitor *mon)
{
trace_print_events((FILE *)mon, &monitor_fprintf);
@@ -1262,45 +1255,24 @@ static void do_print(Monitor *mon, const QDict *qdict)
int format = qdict_get_int(qdict, "format");
target_phys_addr_t val = qdict_get_int(qdict, "val");
-#if TARGET_PHYS_ADDR_BITS == 32
- switch(format) {
- case 'o':
- monitor_printf(mon, "%#o", val);
- break;
- case 'x':
- monitor_printf(mon, "%#x", val);
- break;
- case 'u':
- monitor_printf(mon, "%u", val);
- break;
- default:
- case 'd':
- monitor_printf(mon, "%d", val);
- break;
- case 'c':
- monitor_printc(mon, val);
- break;
- }
-#else
switch(format) {
case 'o':
- monitor_printf(mon, "%#" PRIo64, val);
+ monitor_printf(mon, "%#" TARGET_PRIoPHYS, val);
break;
case 'x':
- monitor_printf(mon, "%#" PRIx64, val);
+ monitor_printf(mon, "%#" TARGET_PRIxPHYS, val);
break;
case 'u':
- monitor_printf(mon, "%" PRIu64, val);
+ monitor_printf(mon, "%" TARGET_PRIuPHYS, val);
break;
default:
case 'd':
- monitor_printf(mon, "%" PRId64, val);
+ monitor_printf(mon, "%" TARGET_PRIdPHYS, val);
break;
case 'c':
monitor_printc(mon, val);
break;
}
-#endif
monitor_printf(mon, "\n");
}
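
The do_print() rewrite above relies on TARGET_PRI*PHYS format macros so a
single switch serves both 32- and 64-bit physical-address builds. They are
expected to expand roughly as follows (illustrative sketch mirroring the
<inttypes.h> conventions, not necessarily the exact definitions):

    #include <inttypes.h>

    #if TARGET_PHYS_ADDR_BITS == 64
    #define TARGET_PRIxPHYS PRIx64
    #define TARGET_PRIdPHYS PRId64
    #else
    #define TARGET_PRIxPHYS PRIx32
    #define TARGET_PRIdPHYS PRId32
    #endif
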
@@ -2328,48 +2300,45 @@ static void do_inject_mce(Monitor *mon, const QDict *qdict)
}
#endif
-static int do_getfd(Monitor *mon, const QDict *qdict, QObject **ret_data)
+void qmp_getfd(const char *fdname, Error **errp)
{
- const char *fdname = qdict_get_str(qdict, "fdname");
mon_fd_t *monfd;
int fd;
- fd = qemu_chr_fe_get_msgfd(mon->chr);
+ fd = qemu_chr_fe_get_msgfd(cur_mon->chr);
if (fd == -1) {
- qerror_report(QERR_FD_NOT_SUPPLIED);
- return -1;
+ error_set(errp, QERR_FD_NOT_SUPPLIED);
+ return;
}
if (qemu_isdigit(fdname[0])) {
- qerror_report(QERR_INVALID_PARAMETER_VALUE, "fdname",
- "a name not starting with a digit");
- return -1;
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE, "fdname",
+ "a name not starting with a digit");
+ return;
}
- QLIST_FOREACH(monfd, &mon->fds, next) {
+ QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
close(monfd->fd);
monfd->fd = fd;
- return 0;
+ return;
}
monfd = g_malloc0(sizeof(mon_fd_t));
monfd->name = g_strdup(fdname);
monfd->fd = fd;
- QLIST_INSERT_HEAD(&mon->fds, monfd, next);
- return 0;
+ QLIST_INSERT_HEAD(&cur_mon->fds, monfd, next);
}
-static int do_closefd(Monitor *mon, const QDict *qdict, QObject **ret_data)
+void qmp_closefd(const char *fdname, Error **errp)
{
- const char *fdname = qdict_get_str(qdict, "fdname");
mon_fd_t *monfd;
- QLIST_FOREACH(monfd, &mon->fds, next) {
+ QLIST_FOREACH(monfd, &cur_mon->fds, next) {
if (strcmp(monfd->name, fdname) != 0) {
continue;
}
@@ -2378,11 +2347,10 @@ static int do_closefd(Monitor *mon, const QDict *qdict, QObject **ret_data)
close(monfd->fd);
g_free(monfd->name);
g_free(monfd);
- return 0;
+ return;
}
- qerror_report(QERR_FD_NOT_FOUND, fdname);
- return -1;
+ error_set(errp, QERR_FD_NOT_FOUND, fdname);
}
static void do_loadvm(Monitor *mon, const QDict *qdict)
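
do_getfd/do_closefd become qmp_getfd/qmp_closefd above, reporting failure
through an Error ** instead of qerror_report() plus a -1 return. A sketch of
the calling convention a consumer of these QMP handlers would follow:

    Error *local_err = NULL;

    qmp_getfd("migration-fd", &local_err);
    if (local_err) {
        /* the QMP dispatcher turns this into an error reply; an HMP
           wrapper would print the message and free the error */
        error_free(local_err);
    }
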
@@ -2714,15 +2682,6 @@ static mon_cmd_t info_cmds[] = {
.help = "show roms",
.mhandler.info = do_info_roms,
},
-#if defined(CONFIG_TRACE_SIMPLE)
- {
- .name = "trace",
- .args_type = "",
- .params = "",
- .help = "show current contents of trace buffer",
- .mhandler.info = do_info_trace,
- },
-#endif
{
.name = "trace-events",
.args_type = "",
diff --git a/net.c b/net.c
index 4aa416cffb..60043ddec6 100644
--- a/net.c
+++ b/net.c
@@ -30,6 +30,7 @@
#include "net/dump.h"
#include "net/slirp.h"
#include "net/vde.h"
+#include "net/hub.h"
#include "net/util.h"
#include "monitor.h"
#include "qemu-common.h"
@@ -37,14 +38,16 @@
#include "qmp-commands.h"
#include "hw/qdev.h"
#include "iov.h"
+#include "qapi-visit.h"
+#include "qapi/opts-visitor.h"
+#include "qapi/qapi-dealloc-visitor.h"
/* Net bridge is currently not supported for W32. */
#if !defined(_WIN32)
# define CONFIG_NET_BRIDGE
#endif
-static QTAILQ_HEAD(, VLANState) vlans;
-static QTAILQ_HEAD(, VLANClientState) non_vlan_clients;
+static QTAILQ_HEAD(, NetClientState) net_clients;
int default_net = 1;
@@ -129,11 +132,11 @@ int parse_host_port(struct sockaddr_in *saddr, const char *str)
return 0;
}
-void qemu_format_nic_info_str(VLANClientState *vc, uint8_t macaddr[6])
+void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6])
{
- snprintf(vc->info_str, sizeof(vc->info_str),
+ snprintf(nc->info_str, sizeof(nc->info_str),
"model=%s,macaddr=%02x:%02x:%02x:%02x:%02x:%02x",
- vc->model,
+ nc->model,
macaddr[0], macaddr[1], macaddr[2],
macaddr[3], macaddr[4], macaddr[5]);
}
@@ -153,23 +156,25 @@ void qemu_macaddr_default_if_unset(MACAddr *macaddr)
macaddr->a[5] = 0x56 + index++;
}
-static char *assign_name(VLANClientState *vc1, const char *model)
+/**
+ * Generate a name for a net client
+ *
+ * Only net clients created with the legacy -net option need this. Naming is
+ * mandatory for net clients created with -netdev.
+ */
+static char *assign_name(NetClientState *nc1, const char *model)
{
- VLANState *vlan;
- VLANClientState *vc;
+ NetClientState *nc;
char buf[256];
int id = 0;
- QTAILQ_FOREACH(vlan, &vlans, next) {
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- if (vc != vc1 && strcmp(vc->model, model) == 0) {
- id++;
- }
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ if (nc == nc1) {
+ continue;
}
- }
-
- QTAILQ_FOREACH(vc, &non_vlan_clients, next) {
- if (vc != vc1 && strcmp(vc->model, model) == 0) {
+ /* For compatibility, only bump the id for net clients on a vlan */
+ if (strcmp(nc->model, model) == 0 &&
+ net_hub_id_for_client(nc, NULL) == 0) {
id++;
}
}
@@ -179,55 +184,35 @@ static char *assign_name(VLANClientState *vc1, const char *model)
return g_strdup(buf);
}
-static ssize_t qemu_deliver_packet(VLANClientState *sender,
- unsigned flags,
- const uint8_t *data,
- size_t size,
- void *opaque);
-static ssize_t qemu_deliver_packet_iov(VLANClientState *sender,
- unsigned flags,
- const struct iovec *iov,
- int iovcnt,
- void *opaque);
-
-VLANClientState *qemu_new_net_client(NetClientInfo *info,
- VLANState *vlan,
- VLANClientState *peer,
- const char *model,
- const char *name)
+NetClientState *qemu_new_net_client(NetClientInfo *info,
+ NetClientState *peer,
+ const char *model,
+ const char *name)
{
- VLANClientState *vc;
+ NetClientState *nc;
- assert(info->size >= sizeof(VLANClientState));
+ assert(info->size >= sizeof(NetClientState));
- vc = g_malloc0(info->size);
+ nc = g_malloc0(info->size);
- vc->info = info;
- vc->model = g_strdup(model);
+ nc->info = info;
+ nc->model = g_strdup(model);
if (name) {
- vc->name = g_strdup(name);
+ nc->name = g_strdup(name);
} else {
- vc->name = assign_name(vc, model);
+ nc->name = assign_name(nc, model);
}
- if (vlan) {
- assert(!peer);
- vc->vlan = vlan;
- QTAILQ_INSERT_TAIL(&vc->vlan->clients, vc, next);
- } else {
- if (peer) {
- assert(!peer->peer);
- vc->peer = peer;
- peer->peer = vc;
- }
- QTAILQ_INSERT_TAIL(&non_vlan_clients, vc, next);
-
- vc->send_queue = qemu_new_net_queue(qemu_deliver_packet,
- qemu_deliver_packet_iov,
- vc);
+ if (peer) {
+ assert(!peer->peer);
+ nc->peer = peer;
+ peer->peer = nc;
}
+ QTAILQ_INSERT_TAIL(&net_clients, nc, next);
- return vc;
+ nc->send_queue = qemu_new_net_queue(nc);
+
+ return nc;
}
NICState *qemu_new_nic(NetClientInfo *info,
@@ -236,13 +221,13 @@ NICState *qemu_new_nic(NetClientInfo *info,
const char *name,
void *opaque)
{
- VLANClientState *nc;
+ NetClientState *nc;
NICState *nic;
- assert(info->type == NET_CLIENT_TYPE_NIC);
+ assert(info->type == NET_CLIENT_OPTIONS_KIND_NIC);
assert(info->size >= sizeof(NICState));
- nc = qemu_new_net_client(info, conf->vlan, conf->peer, model, name);
+ nc = qemu_new_net_client(info, conf->peer, model, name);
nic = DO_UPCAST(NICState, nc, nc);
nic->conf = conf;
@@ -251,250 +236,131 @@ NICState *qemu_new_nic(NetClientInfo *info,
return nic;
}
-static void qemu_cleanup_vlan_client(VLANClientState *vc)
+static void qemu_cleanup_net_client(NetClientState *nc)
{
- if (vc->vlan) {
- QTAILQ_REMOVE(&vc->vlan->clients, vc, next);
- } else {
- QTAILQ_REMOVE(&non_vlan_clients, vc, next);
- }
+ QTAILQ_REMOVE(&net_clients, nc, next);
- if (vc->info->cleanup) {
- vc->info->cleanup(vc);
+ if (nc->info->cleanup) {
+ nc->info->cleanup(nc);
}
}
-static void qemu_free_vlan_client(VLANClientState *vc)
+static void qemu_free_net_client(NetClientState *nc)
{
- if (!vc->vlan) {
- if (vc->send_queue) {
- qemu_del_net_queue(vc->send_queue);
- }
- if (vc->peer) {
- vc->peer->peer = NULL;
- }
+ if (nc->send_queue) {
+ qemu_del_net_queue(nc->send_queue);
}
- g_free(vc->name);
- g_free(vc->model);
- g_free(vc);
+ if (nc->peer) {
+ nc->peer->peer = NULL;
+ }
+ g_free(nc->name);
+ g_free(nc->model);
+ g_free(nc);
}
-void qemu_del_vlan_client(VLANClientState *vc)
+void qemu_del_net_client(NetClientState *nc)
{
/* If there is a peer NIC, delete and cleanup client, but do not free. */
- if (!vc->vlan && vc->peer && vc->peer->info->type == NET_CLIENT_TYPE_NIC) {
- NICState *nic = DO_UPCAST(NICState, nc, vc->peer);
+ if (nc->peer && nc->peer->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
+ NICState *nic = DO_UPCAST(NICState, nc, nc->peer);
if (nic->peer_deleted) {
return;
}
nic->peer_deleted = true;
/* Let NIC know peer is gone. */
- vc->peer->link_down = true;
- if (vc->peer->info->link_status_changed) {
- vc->peer->info->link_status_changed(vc->peer);
+ nc->peer->link_down = true;
+ if (nc->peer->info->link_status_changed) {
+ nc->peer->info->link_status_changed(nc->peer);
}
- qemu_cleanup_vlan_client(vc);
+ qemu_cleanup_net_client(nc);
return;
}
/* If this is a peer NIC and peer has already been deleted, free it now. */
- if (!vc->vlan && vc->peer && vc->info->type == NET_CLIENT_TYPE_NIC) {
- NICState *nic = DO_UPCAST(NICState, nc, vc);
+ if (nc->peer && nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
+ NICState *nic = DO_UPCAST(NICState, nc, nc);
if (nic->peer_deleted) {
- qemu_free_vlan_client(vc->peer);
- }
- }
-
- qemu_cleanup_vlan_client(vc);
- qemu_free_vlan_client(vc);
-}
-
-VLANClientState *
-qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
- const char *client_str)
-{
- VLANState *vlan;
- VLANClientState *vc;
-
- vlan = qemu_find_vlan(vlan_id, 0);
- if (!vlan) {
- monitor_printf(mon, "unknown VLAN %d\n", vlan_id);
- return NULL;
- }
-
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- if (!strcmp(vc->name, client_str)) {
- break;
+ qemu_free_net_client(nc->peer);
}
}
- if (!vc) {
- monitor_printf(mon, "can't find device %s on VLAN %d\n",
- client_str, vlan_id);
- }
- return vc;
+ qemu_cleanup_net_client(nc);
+ qemu_free_net_client(nc);
}
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque)
{
- VLANClientState *nc;
- VLANState *vlan;
+ NetClientState *nc;
- QTAILQ_FOREACH(nc, &non_vlan_clients, next) {
- if (nc->info->type == NET_CLIENT_TYPE_NIC) {
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC) {
func(DO_UPCAST(NICState, nc, nc), opaque);
}
}
-
- QTAILQ_FOREACH(vlan, &vlans, next) {
- QTAILQ_FOREACH(nc, &vlan->clients, next) {
- if (nc->info->type == NET_CLIENT_TYPE_NIC) {
- func(DO_UPCAST(NICState, nc, nc), opaque);
- }
- }
- }
}
-int qemu_can_send_packet(VLANClientState *sender)
+int qemu_can_send_packet(NetClientState *sender)
{
- VLANState *vlan = sender->vlan;
- VLANClientState *vc;
-
- if (sender->peer) {
- if (sender->peer->receive_disabled) {
- return 0;
- } else if (sender->peer->info->can_receive &&
- !sender->peer->info->can_receive(sender->peer)) {
- return 0;
- } else {
- return 1;
- }
- }
-
- if (!sender->vlan) {
+ if (!sender->peer) {
return 1;
}
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- if (vc == sender) {
- continue;
- }
-
- /* no can_receive() handler, they can always receive */
- if (vc->info->can_receive && !vc->info->can_receive(vc)) {
- return 0;
- }
+ if (sender->peer->receive_disabled) {
+ return 0;
+ } else if (sender->peer->info->can_receive &&
+ !sender->peer->info->can_receive(sender->peer)) {
+ return 0;
}
return 1;
}
-static ssize_t qemu_deliver_packet(VLANClientState *sender,
- unsigned flags,
- const uint8_t *data,
- size_t size,
- void *opaque)
+ssize_t qemu_deliver_packet(NetClientState *sender,
+ unsigned flags,
+ const uint8_t *data,
+ size_t size,
+ void *opaque)
{
- VLANClientState *vc = opaque;
+ NetClientState *nc = opaque;
ssize_t ret;
- if (vc->link_down) {
+ if (nc->link_down) {
return size;
}
- if (vc->receive_disabled) {
+ if (nc->receive_disabled) {
return 0;
}
- if (flags & QEMU_NET_PACKET_FLAG_RAW && vc->info->receive_raw) {
- ret = vc->info->receive_raw(vc, data, size);
+ if (flags & QEMU_NET_PACKET_FLAG_RAW && nc->info->receive_raw) {
+ ret = nc->info->receive_raw(nc, data, size);
} else {
- ret = vc->info->receive(vc, data, size);
+ ret = nc->info->receive(nc, data, size);
}
if (ret == 0) {
- vc->receive_disabled = 1;
+ nc->receive_disabled = 1;
};
return ret;
}
-static ssize_t qemu_vlan_deliver_packet(VLANClientState *sender,
- unsigned flags,
- const uint8_t *buf,
- size_t size,
- void *opaque)
+void qemu_purge_queued_packets(NetClientState *nc)
{
- VLANState *vlan = opaque;
- VLANClientState *vc;
- ssize_t ret = -1;
-
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- ssize_t len;
-
- if (vc == sender) {
- continue;
- }
-
- if (vc->link_down) {
- ret = size;
- continue;
- }
-
- if (vc->receive_disabled) {
- ret = 0;
- continue;
- }
-
- if (flags & QEMU_NET_PACKET_FLAG_RAW && vc->info->receive_raw) {
- len = vc->info->receive_raw(vc, buf, size);
- } else {
- len = vc->info->receive(vc, buf, size);
- }
-
- if (len == 0) {
- vc->receive_disabled = 1;
- }
-
- ret = (ret >= 0) ? ret : len;
-
- }
-
- return ret;
-}
-
-void qemu_purge_queued_packets(VLANClientState *vc)
-{
- NetQueue *queue;
-
- if (!vc->peer && !vc->vlan) {
+ if (!nc->peer) {
return;
}
- if (vc->peer) {
- queue = vc->peer->send_queue;
- } else {
- queue = vc->vlan->send_queue;
- }
-
- qemu_net_queue_purge(queue, vc);
+ qemu_net_queue_purge(nc->peer->send_queue, nc);
}
-void qemu_flush_queued_packets(VLANClientState *vc)
+void qemu_flush_queued_packets(NetClientState *nc)
{
- NetQueue *queue;
-
- vc->receive_disabled = 0;
+ nc->receive_disabled = 0;
- if (vc->vlan) {
- queue = vc->vlan->send_queue;
- } else {
- queue = vc->send_queue;
- }
-
- qemu_net_queue_flush(queue);
+ qemu_net_queue_flush(nc->send_queue);
}
-static ssize_t qemu_send_packet_async_with_flags(VLANClientState *sender,
+static ssize_t qemu_send_packet_async_with_flags(NetClientState *sender,
unsigned flags,
const uint8_t *buf, int size,
NetPacketSent *sent_cb)
@@ -506,20 +372,16 @@ static ssize_t qemu_send_packet_async_with_flags(VLANClientState *sender,
hex_dump(stdout, buf, size);
#endif
- if (sender->link_down || (!sender->peer && !sender->vlan)) {
+ if (sender->link_down || !sender->peer) {
return size;
}
- if (sender->peer) {
- queue = sender->peer->send_queue;
- } else {
- queue = sender->vlan->send_queue;
- }
+ queue = sender->peer->send_queue;
return qemu_net_queue_send(queue, sender, flags, buf, size, sent_cb);
}
-ssize_t qemu_send_packet_async(VLANClientState *sender,
+ssize_t qemu_send_packet_async(NetClientState *sender,
const uint8_t *buf, int size,
NetPacketSent *sent_cb)
{
@@ -527,98 +389,58 @@ ssize_t qemu_send_packet_async(VLANClientState *sender,
buf, size, sent_cb);
}
-void qemu_send_packet(VLANClientState *vc, const uint8_t *buf, int size)
+void qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size)
{
- qemu_send_packet_async(vc, buf, size, NULL);
+ qemu_send_packet_async(nc, buf, size, NULL);
}
-ssize_t qemu_send_packet_raw(VLANClientState *vc, const uint8_t *buf, int size)
+ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size)
{
- return qemu_send_packet_async_with_flags(vc, QEMU_NET_PACKET_FLAG_RAW,
+ return qemu_send_packet_async_with_flags(nc, QEMU_NET_PACKET_FLAG_RAW,
buf, size, NULL);
}
-static ssize_t vc_sendv_compat(VLANClientState *vc, const struct iovec *iov,
+static ssize_t nc_sendv_compat(NetClientState *nc, const struct iovec *iov,
int iovcnt)
{
uint8_t buffer[4096];
size_t offset;
- offset = iov_to_buf(iov, iovcnt, buffer, 0, sizeof(buffer));
+ offset = iov_to_buf(iov, iovcnt, 0, buffer, sizeof(buffer));
- return vc->info->receive(vc, buffer, offset);
+ return nc->info->receive(nc, buffer, offset);
}
-static ssize_t qemu_deliver_packet_iov(VLANClientState *sender,
- unsigned flags,
- const struct iovec *iov,
- int iovcnt,
- void *opaque)
+ssize_t qemu_deliver_packet_iov(NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ void *opaque)
{
- VLANClientState *vc = opaque;
+ NetClientState *nc = opaque;
- if (vc->link_down) {
+ if (nc->link_down) {
return iov_size(iov, iovcnt);
}
- if (vc->info->receive_iov) {
- return vc->info->receive_iov(vc, iov, iovcnt);
+ if (nc->info->receive_iov) {
+ return nc->info->receive_iov(nc, iov, iovcnt);
} else {
- return vc_sendv_compat(vc, iov, iovcnt);
- }
-}
-
-static ssize_t qemu_vlan_deliver_packet_iov(VLANClientState *sender,
- unsigned flags,
- const struct iovec *iov,
- int iovcnt,
- void *opaque)
-{
- VLANState *vlan = opaque;
- VLANClientState *vc;
- ssize_t ret = -1;
-
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- ssize_t len;
-
- if (vc == sender) {
- continue;
- }
-
- if (vc->link_down) {
- ret = iov_size(iov, iovcnt);
- continue;
- }
-
- assert(!(flags & QEMU_NET_PACKET_FLAG_RAW));
-
- if (vc->info->receive_iov) {
- len = vc->info->receive_iov(vc, iov, iovcnt);
- } else {
- len = vc_sendv_compat(vc, iov, iovcnt);
- }
-
- ret = (ret >= 0) ? ret : len;
+ return nc_sendv_compat(nc, iov, iovcnt);
}
-
- return ret;
}
-ssize_t qemu_sendv_packet_async(VLANClientState *sender,
+ssize_t qemu_sendv_packet_async(NetClientState *sender,
const struct iovec *iov, int iovcnt,
NetPacketSent *sent_cb)
{
NetQueue *queue;
- if (sender->link_down || (!sender->peer && !sender->vlan)) {
+ if (sender->link_down || !sender->peer) {
return iov_size(iov, iovcnt);
}
- if (sender->peer) {
- queue = sender->peer->send_queue;
- } else {
- queue = sender->vlan->send_queue;
- }
+ queue = sender->peer->send_queue;
return qemu_net_queue_send_iov(queue, sender,
QEMU_NET_PACKET_FLAG_NONE,
@@ -626,48 +448,20 @@ ssize_t qemu_sendv_packet_async(VLANClientState *sender,
}
ssize_t
-qemu_sendv_packet(VLANClientState *vc, const struct iovec *iov, int iovcnt)
-{
- return qemu_sendv_packet_async(vc, iov, iovcnt, NULL);
-}
-
-/* find or alloc a new VLAN */
-VLANState *qemu_find_vlan(int id, int allocate)
+qemu_sendv_packet(NetClientState *nc, const struct iovec *iov, int iovcnt)
{
- VLANState *vlan;
-
- QTAILQ_FOREACH(vlan, &vlans, next) {
- if (vlan->id == id) {
- return vlan;
- }
- }
-
- if (!allocate) {
- return NULL;
- }
-
- vlan = g_malloc0(sizeof(VLANState));
- vlan->id = id;
- QTAILQ_INIT(&vlan->clients);
-
- vlan->send_queue = qemu_new_net_queue(qemu_vlan_deliver_packet,
- qemu_vlan_deliver_packet_iov,
- vlan);
-
- QTAILQ_INSERT_TAIL(&vlans, vlan, next);
-
- return vlan;
+ return qemu_sendv_packet_async(nc, iov, iovcnt, NULL);
}
-VLANClientState *qemu_find_netdev(const char *id)
+NetClientState *qemu_find_netdev(const char *id)
{
- VLANClientState *vc;
+ NetClientState *nc;
- QTAILQ_FOREACH(vc, &non_vlan_clients, next) {
- if (vc->info->type == NET_CLIENT_TYPE_NIC)
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ if (nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC)
continue;
- if (!strcmp(vc->name, id)) {
- return vc;
+ if (!strcmp(nc->name, id)) {
+ return nc;
}
}
@@ -688,8 +482,9 @@ int qemu_show_nic_models(const char *arg, const char *const *models)
{
int i;
- if (!arg || strcmp(arg, "?"))
+ if (!arg || !is_help_option(arg)) {
return 0;
+ }
fprintf(stderr, "qemu: Supported NIC models: ");
for (i = 0 ; models[i]; i++)
@@ -745,11 +540,15 @@ int net_handle_fd_param(Monitor *mon, const char *param)
return fd;
}
-static int net_init_nic(QemuOpts *opts, const char *name, VLANState *vlan)
+static int net_init_nic(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
int idx;
NICInfo *nd;
- const char *netdev;
+ const NetLegacyNicOptions *nic;
+
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_NIC);
+ nic = opts->nic;
idx = nic_get_free_idx();
if (idx == -1 || nb_nics >= MAX_NICS) {
@@ -761,39 +560,41 @@ static int net_init_nic(QemuOpts *opts, const char *name, VLANState *vlan)
memset(nd, 0, sizeof(*nd));
- if ((netdev = qemu_opt_get(opts, "netdev"))) {
- nd->netdev = qemu_find_netdev(netdev);
+ if (nic->has_netdev) {
+ nd->netdev = qemu_find_netdev(nic->netdev);
if (!nd->netdev) {
- error_report("netdev '%s' not found", netdev);
+ error_report("netdev '%s' not found", nic->netdev);
return -1;
}
} else {
- assert(vlan);
- nd->vlan = vlan;
+ assert(peer);
+ nd->netdev = peer;
}
if (name) {
nd->name = g_strdup(name);
}
- if (qemu_opt_get(opts, "model")) {
- nd->model = g_strdup(qemu_opt_get(opts, "model"));
+ if (nic->has_model) {
+ nd->model = g_strdup(nic->model);
}
- if (qemu_opt_get(opts, "addr")) {
- nd->devaddr = g_strdup(qemu_opt_get(opts, "addr"));
+ if (nic->has_addr) {
+ nd->devaddr = g_strdup(nic->addr);
}
- if (qemu_opt_get(opts, "macaddr") &&
- net_parse_macaddr(nd->macaddr.a, qemu_opt_get(opts, "macaddr")) < 0) {
+ if (nic->has_macaddr &&
+ net_parse_macaddr(nd->macaddr.a, nic->macaddr) < 0) {
error_report("invalid syntax for ethernet address");
return -1;
}
qemu_macaddr_default_if_unset(&nd->macaddr);
- nd->nvectors = qemu_opt_get_number(opts, "vectors",
- DEV_NVECTORS_UNSPECIFIED);
- if (nd->nvectors != DEV_NVECTORS_UNSPECIFIED &&
- (nd->nvectors < 0 || nd->nvectors > 0x7ffffff)) {
- error_report("invalid # of vectors: %d", nd->nvectors);
- return -1;
+ if (nic->has_vectors) {
+ if (nic->vectors > 0x7ffffff) {
+ error_report("invalid # of vectors: %"PRIu32, nic->vectors);
+ return -1;
+ }
+ nd->nvectors = nic->vectors;
+ } else {
+ nd->nvectors = DEV_NVECTORS_UNSPECIFIED;
}
nd->used = 1;
@@ -802,371 +603,130 @@ static int net_init_nic(QemuOpts *opts, const char *name, VLANState *vlan)
return idx;
}
-#define NET_COMMON_PARAMS_DESC \
- { \
- .name = "type", \
- .type = QEMU_OPT_STRING, \
- .help = "net client type (nic, tap etc.)", \
- }, { \
- .name = "vlan", \
- .type = QEMU_OPT_NUMBER, \
- .help = "vlan number", \
- }, { \
- .name = "name", \
- .type = QEMU_OPT_STRING, \
- .help = "identifier for monitor commands", \
- }
-
-typedef int (*net_client_init_func)(QemuOpts *opts,
- const char *name,
- VLANState *vlan);
-
-/* magic number, but compiler will warn if too small */
-#define NET_MAX_DESC 20
-
-static const struct {
- const char *type;
- net_client_init_func init;
- QemuOptDesc desc[NET_MAX_DESC];
-} net_client_types[NET_CLIENT_TYPE_MAX] = {
- [NET_CLIENT_TYPE_NONE] = {
- .type = "none",
- .desc = {
- NET_COMMON_PARAMS_DESC,
- { /* end of list */ }
- },
- },
- [NET_CLIENT_TYPE_NIC] = {
- .type = "nic",
- .init = net_init_nic,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "netdev",
- .type = QEMU_OPT_STRING,
- .help = "id of -netdev to connect to",
- },
- {
- .name = "macaddr",
- .type = QEMU_OPT_STRING,
- .help = "MAC address",
- }, {
- .name = "model",
- .type = QEMU_OPT_STRING,
- .help = "device model (e1000, rtl8139, virtio etc.)",
- }, {
- .name = "addr",
- .type = QEMU_OPT_STRING,
- .help = "PCI device address",
- }, {
- .name = "vectors",
- .type = QEMU_OPT_NUMBER,
- .help = "number of MSI-x vectors, 0 to disable MSI-X",
- },
- { /* end of list */ }
- },
- },
+
+static int (* const net_client_init_fun[NET_CLIENT_OPTIONS_KIND_MAX])(
+ const NetClientOptions *opts,
+ const char *name,
+ NetClientState *peer) = {
+ [NET_CLIENT_OPTIONS_KIND_NIC] = net_init_nic,
#ifdef CONFIG_SLIRP
- [NET_CLIENT_TYPE_USER] = {
- .type = "user",
- .init = net_init_slirp,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "hostname",
- .type = QEMU_OPT_STRING,
- .help = "client hostname reported by the builtin DHCP server",
- }, {
- .name = "restrict",
- .type = QEMU_OPT_STRING,
- .help = "isolate the guest from the host (y|yes|n|no)",
- }, {
- .name = "ip",
- .type = QEMU_OPT_STRING,
- .help = "legacy parameter, use net= instead",
- }, {
- .name = "net",
- .type = QEMU_OPT_STRING,
- .help = "IP address and optional netmask",
- }, {
- .name = "host",
- .type = QEMU_OPT_STRING,
- .help = "guest-visible address of the host",
- }, {
- .name = "tftp",
- .type = QEMU_OPT_STRING,
- .help = "root directory of the built-in TFTP server",
- }, {
- .name = "bootfile",
- .type = QEMU_OPT_STRING,
- .help = "BOOTP filename, for use with tftp=",
- }, {
- .name = "dhcpstart",
- .type = QEMU_OPT_STRING,
- .help = "the first of the 16 IPs the built-in DHCP server can assign",
- }, {
- .name = "dns",
- .type = QEMU_OPT_STRING,
- .help = "guest-visible address of the virtual nameserver",
- }, {
- .name = "smb",
- .type = QEMU_OPT_STRING,
- .help = "root directory of the built-in SMB server",
- }, {
- .name = "smbserver",
- .type = QEMU_OPT_STRING,
- .help = "IP address of the built-in SMB server",
- }, {
- .name = "hostfwd",
- .type = QEMU_OPT_STRING,
- .help = "guest port number to forward incoming TCP or UDP connections",
- }, {
- .name = "guestfwd",
- .type = QEMU_OPT_STRING,
- .help = "IP address and port to forward guest TCP connections",
- },
- { /* end of list */ }
- },
- },
+ [NET_CLIENT_OPTIONS_KIND_USER] = net_init_slirp,
#endif
- [NET_CLIENT_TYPE_TAP] = {
- .type = "tap",
- .init = net_init_tap,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "ifname",
- .type = QEMU_OPT_STRING,
- .help = "interface name",
- },
-#ifndef _WIN32
- {
- .name = "fd",
- .type = QEMU_OPT_STRING,
- .help = "file descriptor of an already opened tap",
- }, {
- .name = "script",
- .type = QEMU_OPT_STRING,
- .help = "script to initialize the interface",
- }, {
- .name = "downscript",
- .type = QEMU_OPT_STRING,
- .help = "script to shut down the interface",
- }, {
-#ifdef CONFIG_NET_BRIDGE
- .name = "helper",
- .type = QEMU_OPT_STRING,
- .help = "command to execute to configure bridge",
- }, {
-#endif
- .name = "sndbuf",
- .type = QEMU_OPT_SIZE,
- .help = "send buffer limit"
- }, {
- .name = "vnet_hdr",
- .type = QEMU_OPT_BOOL,
- .help = "enable the IFF_VNET_HDR flag on the tap interface"
- }, {
- .name = "vhost",
- .type = QEMU_OPT_BOOL,
- .help = "enable vhost-net network accelerator",
- }, {
- .name = "vhostfd",
- .type = QEMU_OPT_STRING,
- .help = "file descriptor of an already opened vhost net device",
- }, {
- .name = "vhostforce",
- .type = QEMU_OPT_BOOL,
- .help = "force vhost on for non-MSIX virtio guests",
- },
-#endif /* _WIN32 */
- { /* end of list */ }
- },
- },
- [NET_CLIENT_TYPE_SOCKET] = {
- .type = "socket",
- .init = net_init_socket,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "fd",
- .type = QEMU_OPT_STRING,
- .help = "file descriptor of an already opened socket",
- }, {
- .name = "listen",
- .type = QEMU_OPT_STRING,
- .help = "port number, and optional hostname, to listen on",
- }, {
- .name = "connect",
- .type = QEMU_OPT_STRING,
- .help = "port number, and optional hostname, to connect to",
- }, {
- .name = "mcast",
- .type = QEMU_OPT_STRING,
- .help = "UDP multicast address and port number",
- }, {
- .name = "localaddr",
- .type = QEMU_OPT_STRING,
- .help = "source address and port for multicast and udp packets",
- }, {
- .name = "udp",
- .type = QEMU_OPT_STRING,
- .help = "UDP unicast address and port number",
- },
- { /* end of list */ }
- },
- },
+ [NET_CLIENT_OPTIONS_KIND_TAP] = net_init_tap,
+ [NET_CLIENT_OPTIONS_KIND_SOCKET] = net_init_socket,
#ifdef CONFIG_VDE
- [NET_CLIENT_TYPE_VDE] = {
- .type = "vde",
- .init = net_init_vde,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "sock",
- .type = QEMU_OPT_STRING,
- .help = "socket path",
- }, {
- .name = "port",
- .type = QEMU_OPT_NUMBER,
- .help = "port number",
- }, {
- .name = "group",
- .type = QEMU_OPT_STRING,
- .help = "group owner of socket",
- }, {
- .name = "mode",
- .type = QEMU_OPT_NUMBER,
- .help = "permissions for socket",
- },
- { /* end of list */ }
- },
- },
+ [NET_CLIENT_OPTIONS_KIND_VDE] = net_init_vde,
#endif
- [NET_CLIENT_TYPE_DUMP] = {
- .type = "dump",
- .init = net_init_dump,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "len",
- .type = QEMU_OPT_SIZE,
- .help = "per-packet size limit (64k default)",
- }, {
- .name = "file",
- .type = QEMU_OPT_STRING,
- .help = "dump file path (default is qemu-vlan0.pcap)",
- },
- { /* end of list */ }
- },
- },
+ [NET_CLIENT_OPTIONS_KIND_DUMP] = net_init_dump,
#ifdef CONFIG_NET_BRIDGE
- [NET_CLIENT_TYPE_BRIDGE] = {
- .type = "bridge",
- .init = net_init_bridge,
- .desc = {
- NET_COMMON_PARAMS_DESC,
- {
- .name = "br",
- .type = QEMU_OPT_STRING,
- .help = "bridge name",
- }, {
- .name = "helper",
- .type = QEMU_OPT_STRING,
- .help = "command to execute to configure bridge",
- },
- { /* end of list */ }
- },
- },
-#endif /* CONFIG_NET_BRIDGE */
+ [NET_CLIENT_OPTIONS_KIND_BRIDGE] = net_init_bridge,
+#endif
+ [NET_CLIENT_OPTIONS_KIND_HUBPORT] = net_init_hubport,
};
-int net_client_init(QemuOpts *opts, int is_netdev, Error **errp)
+
+static int net_client_init1(const void *object, int is_netdev, Error **errp)
{
+ union {
+ const Netdev *netdev;
+ const NetLegacy *net;
+ } u;
+ const NetClientOptions *opts;
const char *name;
- const char *type;
- int i;
-
- type = qemu_opt_get(opts, "type");
- if (!type) {
- error_set(errp, QERR_MISSING_PARAMETER, "type");
- return -1;
- }
if (is_netdev) {
- if (strcmp(type, "tap") != 0 &&
-#ifdef CONFIG_NET_BRIDGE
- strcmp(type, "bridge") != 0 &&
-#endif
+ u.netdev = object;
+ opts = u.netdev->opts;
+ name = u.netdev->id;
+
+ switch (opts->kind) {
#ifdef CONFIG_SLIRP
- strcmp(type, "user") != 0 &&
+ case NET_CLIENT_OPTIONS_KIND_USER:
#endif
+ case NET_CLIENT_OPTIONS_KIND_TAP:
+ case NET_CLIENT_OPTIONS_KIND_SOCKET:
#ifdef CONFIG_VDE
- strcmp(type, "vde") != 0 &&
+ case NET_CLIENT_OPTIONS_KIND_VDE:
+#endif
+#ifdef CONFIG_NET_BRIDGE
+ case NET_CLIENT_OPTIONS_KIND_BRIDGE:
#endif
- strcmp(type, "socket") != 0) {
+ case NET_CLIENT_OPTIONS_KIND_HUBPORT:
+ break;
+
+ default:
error_set(errp, QERR_INVALID_PARAMETER_VALUE, "type",
"a netdev backend type");
return -1;
}
+ } else {
+ u.net = object;
+ opts = u.net->opts;
+ /* missing optional values have been initialized to "all bits zero" */
+ name = u.net->has_id ? u.net->id : u.net->name;
+ }
- if (qemu_opt_get(opts, "vlan")) {
- error_set(errp, QERR_INVALID_PARAMETER, "vlan");
- return -1;
- }
- if (qemu_opt_get(opts, "name")) {
- error_set(errp, QERR_INVALID_PARAMETER, "name");
- return -1;
+ if (net_client_init_fun[opts->kind]) {
+ NetClientState *peer = NULL;
+
+ /* Do not add to a vlan if it's a -netdev or a nic with a netdev=
+ * parameter. */
+ if (!is_netdev &&
+ (opts->kind != NET_CLIENT_OPTIONS_KIND_NIC ||
+ !opts->nic->has_netdev)) {
+ peer = net_hub_add_port(u.net->has_vlan ? u.net->vlan : 0, NULL);
}
- if (!qemu_opts_id(opts)) {
- error_set(errp, QERR_MISSING_PARAMETER, "id");
+
+ if (net_client_init_fun[opts->kind](opts, name, peer) < 0) {
+ /* TODO push error reporting into init() methods */
+ error_set(errp, QERR_DEVICE_INIT_FAILED,
+ NetClientOptionsKind_lookup[opts->kind]);
return -1;
}
}
+ return 0;
+}
- name = qemu_opts_id(opts);
- if (!name) {
- name = qemu_opt_get(opts, "name");
+
+static void net_visit(Visitor *v, int is_netdev, void **object, Error **errp)
+{
+ if (is_netdev) {
+ visit_type_Netdev(v, (Netdev **)object, NULL, errp);
+ } else {
+ visit_type_NetLegacy(v, (NetLegacy **)object, NULL, errp);
}
+}
- for (i = 0; i < NET_CLIENT_TYPE_MAX; i++) {
- if (net_client_types[i].type != NULL &&
- !strcmp(net_client_types[i].type, type)) {
- Error *local_err = NULL;
- VLANState *vlan = NULL;
- int ret;
- qemu_opts_validate(opts, &net_client_types[i].desc[0], &local_err);
- if (error_is_set(&local_err)) {
- error_propagate(errp, local_err);
- return -1;
- }
+int net_client_init(QemuOpts *opts, int is_netdev, Error **errp)
+{
+ void *object = NULL;
+ Error *err = NULL;
+ int ret = -1;
- /* Do not add to a vlan if it's a -netdev or a nic with a
- * netdev= parameter. */
- if (!(is_netdev ||
- (strcmp(type, "nic") == 0 && qemu_opt_get(opts, "netdev")))) {
- vlan = qemu_find_vlan(qemu_opt_get_number(opts, "vlan", 0), 1);
- }
+ {
+ OptsVisitor *ov = opts_visitor_new(opts);
- ret = 0;
- if (net_client_types[i].init) {
- ret = net_client_types[i].init(opts, name, vlan);
- if (ret < 0) {
- /* TODO push error reporting into init() methods */
- error_set(errp, QERR_DEVICE_INIT_FAILED, type);
- return -1;
- }
- }
- return ret;
- }
+ net_visit(opts_get_visitor(ov), is_netdev, &object, &err);
+ opts_visitor_cleanup(ov);
}
- error_set(errp, QERR_INVALID_PARAMETER_VALUE, "type",
- "a network client type");
- return -1;
+ if (!err) {
+ ret = net_client_init1(object, is_netdev, &err);
+ }
+
+ if (object) {
+ QapiDeallocVisitor *dv = qapi_dealloc_visitor_new();
+
+ net_visit(qapi_dealloc_get_visitor(dv), is_netdev, &object, NULL);
+ qapi_dealloc_visitor_cleanup(dv);
+ }
+
+ error_propagate(errp, err);
+ return ret;
}
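
net_client_init() above now parses -net/-netdev through the QAPI options
visitor into a generated Netdev or NetLegacy object, replacing the hand-written
QemuOptDesc tables. The -netdev parse step in isolation (types and visitor
calls as used in the patch; error handling and deallocation trimmed):

    OptsVisitor *ov = opts_visitor_new(opts);
    Netdev *netdev = NULL;
    Error *err = NULL;

    visit_type_Netdev(opts_get_visitor(ov), &netdev, NULL, &err);
    opts_visitor_cleanup(ov);

    if (!err) {
        /* netdev->id and netdev->opts->kind drive net_client_init_fun[] */
    }
    /* a QapiDeallocVisitor pass frees the object afterwards, as in the hunk */
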
+
static int net_host_check_device(const char *device)
{
int i;
@@ -1219,19 +779,19 @@ void net_host_device_add(Monitor *mon, const QDict *qdict)
void net_host_device_remove(Monitor *mon, const QDict *qdict)
{
- VLANClientState *vc;
+ NetClientState *nc;
int vlan_id = qdict_get_int(qdict, "vlan_id");
const char *device = qdict_get_str(qdict, "device");
- vc = qemu_find_vlan_client_by_name(mon, vlan_id, device);
- if (!vc) {
+ nc = net_hub_find_client_by_name(vlan_id, device);
+ if (!nc) {
return;
}
- if (!net_host_check_device(vc->model)) {
+ if (!net_host_check_device(nc->model)) {
monitor_printf(mon, "invalid host network device %s\n", device);
return;
}
- qemu_del_vlan_client(vc);
+ qemu_del_net_client(nc);
}
void netdev_add(QemuOpts *opts, Error **errp)
@@ -1271,48 +831,45 @@ exit_err:
void qmp_netdev_del(const char *id, Error **errp)
{
- VLANClientState *vc;
+ NetClientState *nc;
- vc = qemu_find_netdev(id);
- if (!vc) {
+ nc = qemu_find_netdev(id);
+ if (!nc) {
error_set(errp, QERR_DEVICE_NOT_FOUND, id);
return;
}
- qemu_del_vlan_client(vc);
+ qemu_del_net_client(nc);
qemu_opts_del(qemu_opts_find(qemu_find_opts_err("netdev", errp), id));
}
-static void print_net_client(Monitor *mon, VLANClientState *vc)
+void print_net_client(Monitor *mon, NetClientState *nc)
{
- monitor_printf(mon, "%s: type=%s,%s\n", vc->name,
- net_client_types[vc->info->type].type, vc->info_str);
+ monitor_printf(mon, "%s: type=%s,%s\n", nc->name,
+ NetClientOptionsKind_lookup[nc->info->type], nc->info_str);
}
void do_info_network(Monitor *mon)
{
- VLANState *vlan;
- VLANClientState *vc, *peer;
- net_client_type type;
+ NetClientState *nc, *peer;
+ NetClientOptionsKind type;
+
+ net_hub_info(mon);
- QTAILQ_FOREACH(vlan, &vlans, next) {
- monitor_printf(mon, "VLAN %d devices:\n", vlan->id);
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ peer = nc->peer;
+ type = nc->info->type;
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- monitor_printf(mon, " ");
- print_net_client(mon, vc);
+ /* Skip if already printed in hub info */
+ if (net_hub_id_for_client(nc, NULL) == 0) {
+ continue;
}
- }
- monitor_printf(mon, "Devices not on any VLAN:\n");
- QTAILQ_FOREACH(vc, &non_vlan_clients, next) {
- peer = vc->peer;
- type = vc->info->type;
- if (!peer || type == NET_CLIENT_TYPE_NIC) {
- monitor_printf(mon, " ");
- print_net_client(mon, vc);
+
+ if (!peer || type == NET_CLIENT_OPTIONS_KIND_NIC) {
+ print_net_client(mon, nc);
} /* else it's a netdev connected to a NIC, printed with the NIC */
- if (peer && type == NET_CLIENT_TYPE_NIC) {
- monitor_printf(mon, " \\ ");
+ if (peer && type == NET_CLIENT_OPTIONS_KIND_NIC) {
+ monitor_printf(mon, " \\ ");
print_net_client(mon, peer);
}
}
@@ -1320,32 +877,23 @@ void do_info_network(Monitor *mon)
void qmp_set_link(const char *name, bool up, Error **errp)
{
- VLANState *vlan;
- VLANClientState *vc = NULL;
+ NetClientState *nc = NULL;
- QTAILQ_FOREACH(vlan, &vlans, next) {
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- if (strcmp(vc->name, name) == 0) {
- goto done;
- }
- }
- }
- QTAILQ_FOREACH(vc, &non_vlan_clients, next) {
- if (!strcmp(vc->name, name)) {
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ if (!strcmp(nc->name, name)) {
goto done;
}
}
done:
-
- if (!vc) {
+ if (!nc) {
error_set(errp, QERR_DEVICE_NOT_FOUND, name);
return;
}
- vc->link_down = !up;
+ nc->link_down = !up;
- if (vc->info->link_status_changed) {
- vc->info->link_status_changed(vc);
+ if (nc->info->link_status_changed) {
+ nc->info->link_status_changed(nc);
}
/* Notify peer. Don't update peer link status: this makes it possible to
@@ -1355,31 +903,23 @@ done:
* Current behaviour is compatible with qemu vlans where there could be
* multiple clients that can still communicate with each other in
* disconnected mode. For now maintain this compatibility. */
- if (vc->peer && vc->peer->info->link_status_changed) {
- vc->peer->info->link_status_changed(vc->peer);
+ if (nc->peer && nc->peer->info->link_status_changed) {
+ nc->peer->info->link_status_changed(nc->peer);
}
}
void net_cleanup(void)
{
- VLANState *vlan;
- VLANClientState *vc, *next_vc;
-
- QTAILQ_FOREACH(vlan, &vlans, next) {
- QTAILQ_FOREACH_SAFE(vc, &vlan->clients, next, next_vc) {
- qemu_del_vlan_client(vc);
- }
- }
+ NetClientState *nc, *next_vc;
- QTAILQ_FOREACH_SAFE(vc, &non_vlan_clients, next, next_vc) {
- qemu_del_vlan_client(vc);
+ QTAILQ_FOREACH_SAFE(nc, &net_clients, next, next_vc) {
+ qemu_del_net_client(nc);
}
}
void net_check_clients(void)
{
- VLANState *vlan;
- VLANClientState *vc;
+ NetClientState *nc;
int i;
/* Don't warn about the default network setup that you get if
@@ -1394,35 +934,13 @@ void net_check_clients(void)
return;
}
- QTAILQ_FOREACH(vlan, &vlans, next) {
- int has_nic = 0, has_host_dev = 0;
-
- QTAILQ_FOREACH(vc, &vlan->clients, next) {
- switch (vc->info->type) {
- case NET_CLIENT_TYPE_NIC:
- has_nic = 1;
- break;
- case NET_CLIENT_TYPE_USER:
- case NET_CLIENT_TYPE_TAP:
- case NET_CLIENT_TYPE_SOCKET:
- case NET_CLIENT_TYPE_VDE:
- has_host_dev = 1;
- break;
- default: ;
- }
- }
- if (has_host_dev && !has_nic)
- fprintf(stderr, "Warning: vlan %d with no nics\n", vlan->id);
- if (has_nic && !has_host_dev)
- fprintf(stderr,
- "Warning: vlan %d is not connected to host network\n",
- vlan->id);
- }
- QTAILQ_FOREACH(vc, &non_vlan_clients, next) {
- if (!vc->peer) {
+ net_hub_check_clients();
+
+ QTAILQ_FOREACH(nc, &net_clients, next) {
+ if (!nc->peer) {
fprintf(stderr, "Warning: %s %s has no peer\n",
- vc->info->type == NET_CLIENT_TYPE_NIC ? "nic" : "netdev",
- vc->name);
+ nc->info->type == NET_CLIENT_OPTIONS_KIND_NIC ?
+ "nic" : "netdev", nc->name);
}
}
@@ -1482,8 +1000,7 @@ int net_init_clients(void)
#endif
}
- QTAILQ_INIT(&vlans);
- QTAILQ_INIT(&non_vlan_clients);
+ QTAILQ_INIT(&net_clients);
if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1)
return -1;
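The net.c hunks above collapse the per-vlan client lists and the separate non_vlan_clients list into a single net_clients list, so monitor commands such as set_link do one linear lookup and then notify the peer without touching the peer's own link state. The stand-alone sketch below mirrors that pattern with simplified stand-in types (Client, set_link); it is illustrative only and not QEMU code.

/* Illustrative sketch only -- simplified stand-ins, not QEMU code. It mirrors
 * the single-list lookup and link-toggle pattern used by qmp_set_link() above:
 * find a client by name, flip its own link state, and notify the peer without
 * touching the peer's link_down flag. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef struct Client Client;
struct Client {
    const char *name;
    bool link_down;
    Client *peer;
    void (*link_status_changed)(Client *c);
    Client *next;                 /* single global list, like net_clients */
};

static void report(Client *c)
{
    printf("%s: link is now %s\n", c->name, c->link_down ? "down" : "up");
}

static int set_link(Client *head, const char *name, bool up)
{
    Client *c;

    for (c = head; c; c = c->next) {    /* one list instead of per-vlan lists */
        if (strcmp(c->name, name) == 0) {
            break;
        }
    }
    if (!c) {
        return -1;                      /* caller reports DeviceNotFound */
    }

    c->link_down = !up;
    if (c->link_status_changed) {
        c->link_status_changed(c);
    }
    /* Notify the peer, but leave its own link_down untouched. */
    if (c->peer && c->peer->link_status_changed) {
        c->peer->link_status_changed(c->peer);
    }
    return 0;
}

int main(void)
{
    Client nic = { "e1000.0", false, NULL, report, NULL };
    Client tap = { "tap0", false, &nic, report, &nic };

    nic.peer = &tap;
    set_link(&tap, "e1000.0", false);   /* brings the NIC down, notifies tap0 */
    return 0;
}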
diff --git a/net.h b/net.h
index bdc2a0602d..29750567ed 100644
--- a/net.h
+++ b/net.h
@@ -7,6 +7,7 @@
#include "qemu-option.h"
#include "net/queue.h"
#include "vmstate.h"
+#include "qapi-types.h"
struct MACAddr {
uint8_t a[6];
@@ -16,41 +17,27 @@ struct MACAddr {
typedef struct NICConf {
MACAddr macaddr;
- VLANState *vlan;
- VLANClientState *peer;
+ NetClientState *peer;
int32_t bootindex;
} NICConf;
#define DEFINE_NIC_PROPERTIES(_state, _conf) \
DEFINE_PROP_MACADDR("mac", _state, _conf.macaddr), \
- DEFINE_PROP_VLAN("vlan", _state, _conf.vlan), \
+ DEFINE_PROP_VLAN("vlan", _state, _conf.peer), \
DEFINE_PROP_NETDEV("netdev", _state, _conf.peer), \
DEFINE_PROP_INT32("bootindex", _state, _conf.bootindex, -1)
-/* VLANs support */
+/* Net clients */
-typedef enum {
- NET_CLIENT_TYPE_NONE,
- NET_CLIENT_TYPE_NIC,
- NET_CLIENT_TYPE_USER,
- NET_CLIENT_TYPE_TAP,
- NET_CLIENT_TYPE_SOCKET,
- NET_CLIENT_TYPE_VDE,
- NET_CLIENT_TYPE_DUMP,
- NET_CLIENT_TYPE_BRIDGE,
-
- NET_CLIENT_TYPE_MAX
-} net_client_type;
-
-typedef void (NetPoll)(VLANClientState *, bool enable);
-typedef int (NetCanReceive)(VLANClientState *);
-typedef ssize_t (NetReceive)(VLANClientState *, const uint8_t *, size_t);
-typedef ssize_t (NetReceiveIOV)(VLANClientState *, const struct iovec *, int);
-typedef void (NetCleanup) (VLANClientState *);
-typedef void (LinkStatusChanged)(VLANClientState *);
+typedef void (NetPoll)(NetClientState *, bool enable);
+typedef int (NetCanReceive)(NetClientState *);
+typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t);
+typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int);
+typedef void (NetCleanup) (NetClientState *);
+typedef void (LinkStatusChanged)(NetClientState *);
typedef struct NetClientInfo {
- net_client_type type;
+ NetClientOptionsKind type;
size_t size;
NetReceive *receive;
NetReceive *receive_raw;
@@ -61,12 +48,11 @@ typedef struct NetClientInfo {
NetPoll *poll;
} NetClientInfo;
-struct VLANClientState {
+struct NetClientState {
NetClientInfo *info;
int link_down;
- QTAILQ_ENTRY(VLANClientState) next;
- struct VLANState *vlan;
- VLANClientState *peer;
+ QTAILQ_ENTRY(NetClientState) next;
+ NetClientState *peer;
NetQueue *send_queue;
char *model;
char *name;
@@ -75,54 +61,57 @@ struct VLANClientState {
};
typedef struct NICState {
- VLANClientState nc;
+ NetClientState nc;
NICConf *conf;
void *opaque;
bool peer_deleted;
} NICState;
-struct VLANState {
- int id;
- QTAILQ_HEAD(, VLANClientState) clients;
- QTAILQ_ENTRY(VLANState) next;
- NetQueue *send_queue;
-};
-
-VLANState *qemu_find_vlan(int id, int allocate);
-VLANClientState *qemu_find_netdev(const char *id);
-VLANClientState *qemu_new_net_client(NetClientInfo *info,
- VLANState *vlan,
- VLANClientState *peer,
- const char *model,
- const char *name);
+NetClientState *qemu_find_netdev(const char *id);
+NetClientState *qemu_new_net_client(NetClientInfo *info,
+ NetClientState *peer,
+ const char *model,
+ const char *name);
NICState *qemu_new_nic(NetClientInfo *info,
NICConf *conf,
const char *model,
const char *name,
void *opaque);
-void qemu_del_vlan_client(VLANClientState *vc);
-VLANClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
- const char *client_str);
+void qemu_del_net_client(NetClientState *nc);
+NetClientState *qemu_find_vlan_client_by_name(Monitor *mon, int vlan_id,
+ const char *client_str);
typedef void (*qemu_nic_foreach)(NICState *nic, void *opaque);
void qemu_foreach_nic(qemu_nic_foreach func, void *opaque);
-int qemu_can_send_packet(VLANClientState *vc);
-ssize_t qemu_sendv_packet(VLANClientState *vc, const struct iovec *iov,
+int qemu_can_send_packet(NetClientState *nc);
+ssize_t qemu_sendv_packet(NetClientState *nc, const struct iovec *iov,
int iovcnt);
-ssize_t qemu_sendv_packet_async(VLANClientState *vc, const struct iovec *iov,
+ssize_t qemu_sendv_packet_async(NetClientState *nc, const struct iovec *iov,
int iovcnt, NetPacketSent *sent_cb);
-void qemu_send_packet(VLANClientState *vc, const uint8_t *buf, int size);
-ssize_t qemu_send_packet_raw(VLANClientState *vc, const uint8_t *buf, int size);
-ssize_t qemu_send_packet_async(VLANClientState *vc, const uint8_t *buf,
+void qemu_send_packet(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_send_packet_raw(NetClientState *nc, const uint8_t *buf, int size);
+ssize_t qemu_send_packet_async(NetClientState *nc, const uint8_t *buf,
int size, NetPacketSent *sent_cb);
-void qemu_purge_queued_packets(VLANClientState *vc);
-void qemu_flush_queued_packets(VLANClientState *vc);
-void qemu_format_nic_info_str(VLANClientState *vc, uint8_t macaddr[6]);
+void qemu_purge_queued_packets(NetClientState *nc);
+void qemu_flush_queued_packets(NetClientState *nc);
+void qemu_format_nic_info_str(NetClientState *nc, uint8_t macaddr[6]);
void qemu_macaddr_default_if_unset(MACAddr *macaddr);
int qemu_show_nic_models(const char *arg, const char *const *models);
void qemu_check_nic_model(NICInfo *nd, const char *model);
int qemu_find_nic_model(NICInfo *nd, const char * const *models,
const char *default_model);
+ssize_t qemu_deliver_packet(NetClientState *sender,
+ unsigned flags,
+ const uint8_t *data,
+ size_t size,
+ void *opaque);
+ssize_t qemu_deliver_packet_iov(NetClientState *sender,
+ unsigned flags,
+ const struct iovec *iov,
+ int iovcnt,
+ void *opaque);
+
+void print_net_client(Monitor *mon, NetClientState *nc);
void do_info_network(Monitor *mon);
/* NIC info */
@@ -134,8 +123,7 @@ struct NICInfo {
char *model;
char *name;
char *devaddr;
- VLANState *vlan;
- VLANClientState *netdev;
+ NetClientState *netdev;
int used; /* is this slot in nd_table[] being used? */
int instantiated; /* does this NICInfo correspond to an instantiated NIC? */
int nvectors;
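net.h now types every callback on NetClientState and keys NetClientInfo.type to the QAPI-generated NetClientOptionsKind enum; each backend embeds a NetClientState as its first member, is allocated at info->size, and is recovered with DO_UPCAST. The sketch below reproduces that shape with invented stand-in names (ClientInfo, DummyState, UPCAST); it is a hedged illustration, not QEMU's actual definitions.

/* Minimal stand-alone sketch (not QEMU code) of the pattern net.h sets up:
 * a base client struct embedded as the first member of each backend state,
 * an info table of callbacks keyed by a kind enum, and a container_of-style
 * upcast from base to backend. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>

typedef enum { KIND_NIC, KIND_DUMMY } ClientKind;

typedef struct ClientState ClientState;

typedef struct ClientInfo {
    ClientKind kind;
    size_t size;                              /* full backend struct size */
    ssize_t (*receive)(ClientState *, const unsigned char *, size_t);
} ClientInfo;

struct ClientState {
    const ClientInfo *info;
    char name[32];
};

typedef struct DummyState {
    ClientState cs;                           /* must stay the first member */
    size_t bytes_seen;
} DummyState;

#define UPCAST(type, field, ptr) \
    ((type *)((char *)(ptr) - offsetof(type, field)))

static ssize_t dummy_receive(ClientState *cs, const unsigned char *buf, size_t len)
{
    DummyState *s = UPCAST(DummyState, cs, cs);
    (void)buf;
    s->bytes_seen += len;
    return (ssize_t)len;
}

static const ClientInfo dummy_info = {
    .kind = KIND_DUMMY,
    .size = sizeof(DummyState),
    .receive = dummy_receive,
};

static ClientState *new_client(const ClientInfo *info, const char *name)
{
    ClientState *cs = calloc(1, info->size);  /* allocate the whole backend */
    cs->info = info;
    snprintf(cs->name, sizeof(cs->name), "%s", name);
    return cs;
}

int main(void)
{
    ClientState *cs = new_client(&dummy_info, "dummy0");
    unsigned char pkt[64] = { 0 };

    cs->info->receive(cs, pkt, sizeof(pkt));
    printf("%s saw %zu bytes\n", cs->name,
           UPCAST(DummyState, cs, cs)->bytes_seen);
    free(cs);
    return 0;
}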
diff --git a/net/Makefile.objs b/net/Makefile.objs
index 72f50bc903..cf04187717 100644
--- a/net/Makefile.objs
+++ b/net/Makefile.objs
@@ -1,4 +1,4 @@
-common-obj-y = queue.o checksum.o util.o
+common-obj-y = queue.o checksum.o util.o hub.o
common-obj-y += socket.o
common-obj-y += dump.o
common-obj-$(CONFIG_POSIX) += tap.o
diff --git a/net/dump.c b/net/dump.c
index f835c51187..004231d481 100644
--- a/net/dump.c
+++ b/net/dump.c
@@ -27,9 +27,10 @@
#include "qemu-error.h"
#include "qemu-log.h"
#include "qemu-timer.h"
+#include "hub.h"
typedef struct DumpState {
- VLANClientState nc;
+ NetClientState nc;
int64_t start_ts;
int fd;
int pcap_caplen;
@@ -56,7 +57,7 @@ struct pcap_sf_pkthdr {
uint32_t len;
};
-static ssize_t dump_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t dump_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
DumpState *s = DO_UPCAST(DumpState, nc, nc);
struct pcap_sf_pkthdr hdr;
@@ -85,7 +86,7 @@ static ssize_t dump_receive(VLANClientState *nc, const uint8_t *buf, size_t size
return size;
}
-static void dump_cleanup(VLANClientState *nc)
+static void dump_cleanup(NetClientState *nc)
{
DumpState *s = DO_UPCAST(DumpState, nc, nc);
@@ -93,17 +94,17 @@ static void dump_cleanup(VLANClientState *nc)
}
static NetClientInfo net_dump_info = {
- .type = NET_CLIENT_TYPE_DUMP,
+ .type = NET_CLIENT_OPTIONS_KIND_DUMP,
.size = sizeof(DumpState),
.receive = dump_receive,
.cleanup = dump_cleanup,
};
-static int net_dump_init(VLANState *vlan, const char *device,
+static int net_dump_init(NetClientState *peer, const char *device,
const char *name, const char *filename, int len)
{
struct pcap_file_hdr hdr;
- VLANClientState *nc;
+ NetClientState *nc;
DumpState *s;
struct tm tm;
int fd;
@@ -128,7 +129,7 @@ static int net_dump_init(VLANState *vlan, const char *device,
return -1;
}
- nc = qemu_new_net_client(&net_dump_info, vlan, NULL, device, name);
+ nc = qemu_new_net_client(&net_dump_info, peer, device, name);
snprintf(nc->info_str, sizeof(nc->info_str),
"dump to %s (len=%d)", filename, len);
@@ -144,21 +145,41 @@ static int net_dump_init(VLANState *vlan, const char *device,
return 0;
}
-int net_init_dump(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_dump(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
int len;
const char *file;
char def_file[128];
+ const NetdevDumpOptions *dump;
- assert(vlan);
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_DUMP);
+ dump = opts->dump;
- file = qemu_opt_get(opts, "file");
- if (!file) {
- snprintf(def_file, sizeof(def_file), "qemu-vlan%d.pcap", vlan->id);
+ assert(peer);
+
+ if (dump->has_file) {
+ file = dump->file;
+ } else {
+ int id;
+ int ret;
+
+ ret = net_hub_id_for_client(peer, &id);
+ assert(ret == 0); /* peer must be on a hub */
+
+ snprintf(def_file, sizeof(def_file), "qemu-vlan%d.pcap", id);
file = def_file;
}
- len = qemu_opt_get_size(opts, "len", 65536);
+ if (dump->has_len) {
+ if (dump->len > INT_MAX) {
+ error_report("invalid length: %"PRIu64, dump->len);
+ return -1;
+ }
+ len = dump->len;
+ } else {
+ len = 65536;
+ }
- return net_dump_init(vlan, "dump", name, file, len);
+ return net_dump_init(peer, "dump", name, file, len);
}
diff --git a/net/dump.h b/net/dump.h
index 2b5d9ba644..33f152b460 100644
--- a/net/dump.h
+++ b/net/dump.h
@@ -25,8 +25,9 @@
#define QEMU_NET_DUMP_H
#include "net.h"
-#include "qemu-common.h"
+#include "qapi-types.h"
-int net_init_dump(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_dump(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
#endif /* QEMU_NET_DUMP_H */
diff --git a/net/hub.c b/net/hub.c
new file mode 100644
index 0000000000..ac157e32ee
--- /dev/null
+++ b/net/hub.c
@@ -0,0 +1,339 @@
+/*
+ * Hub net client
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "monitor.h"
+#include "net.h"
+#include "hub.h"
+#include "iov.h"
+
+/*
+ * A hub broadcasts incoming packets to all its ports except the source port.
+ * Hubs can be used to provide independent network segments, also confusingly
+ * named the QEMU 'vlan' feature.
+ */
+
+typedef struct NetHub NetHub;
+
+typedef struct NetHubPort {
+ NetClientState nc;
+ QLIST_ENTRY(NetHubPort) next;
+ NetHub *hub;
+ int id;
+} NetHubPort;
+
+struct NetHub {
+ int id;
+ QLIST_ENTRY(NetHub) next;
+ int num_ports;
+ QLIST_HEAD(, NetHubPort) ports;
+};
+
+static QLIST_HEAD(, NetHub) hubs = QLIST_HEAD_INITIALIZER(&hubs);
+
+static ssize_t net_hub_receive(NetHub *hub, NetHubPort *source_port,
+ const uint8_t *buf, size_t len)
+{
+ NetHubPort *port;
+
+ QLIST_FOREACH(port, &hub->ports, next) {
+ if (port == source_port) {
+ continue;
+ }
+
+ qemu_send_packet(&port->nc, buf, len);
+ }
+ return len;
+}
+
+static ssize_t net_hub_receive_iov(NetHub *hub, NetHubPort *source_port,
+ const struct iovec *iov, int iovcnt)
+{
+ NetHubPort *port;
+ ssize_t len = iov_size(iov, iovcnt);
+
+ QLIST_FOREACH(port, &hub->ports, next) {
+ if (port == source_port) {
+ continue;
+ }
+
+ qemu_sendv_packet(&port->nc, iov, iovcnt);
+ }
+ return len;
+}
+
+static NetHub *net_hub_new(int id)
+{
+ NetHub *hub;
+
+ hub = g_malloc(sizeof(*hub));
+ hub->id = id;
+ hub->num_ports = 0;
+ QLIST_INIT(&hub->ports);
+
+ QLIST_INSERT_HEAD(&hubs, hub, next);
+
+ return hub;
+}
+
+static int net_hub_port_can_receive(NetClientState *nc)
+{
+ NetHubPort *port;
+ NetHubPort *src_port = DO_UPCAST(NetHubPort, nc, nc);
+ NetHub *hub = src_port->hub;
+
+ QLIST_FOREACH(port, &hub->ports, next) {
+ if (port == src_port) {
+ continue;
+ }
+
+ if (!qemu_can_send_packet(&port->nc)) {
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+static ssize_t net_hub_port_receive(NetClientState *nc,
+ const uint8_t *buf, size_t len)
+{
+ NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+
+ return net_hub_receive(port->hub, port, buf, len);
+}
+
+static ssize_t net_hub_port_receive_iov(NetClientState *nc,
+ const struct iovec *iov, int iovcnt)
+{
+ NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+
+ return net_hub_receive_iov(port->hub, port, iov, iovcnt);
+}
+
+static void net_hub_port_cleanup(NetClientState *nc)
+{
+ NetHubPort *port = DO_UPCAST(NetHubPort, nc, nc);
+
+ QLIST_REMOVE(port, next);
+}
+
+static NetClientInfo net_hub_port_info = {
+ .type = NET_CLIENT_OPTIONS_KIND_HUBPORT,
+ .size = sizeof(NetHubPort),
+ .can_receive = net_hub_port_can_receive,
+ .receive = net_hub_port_receive,
+ .receive_iov = net_hub_port_receive_iov,
+ .cleanup = net_hub_port_cleanup,
+};
+
+static NetHubPort *net_hub_port_new(NetHub *hub, const char *name)
+{
+ NetClientState *nc;
+ NetHubPort *port;
+ int id = hub->num_ports++;
+ char default_name[128];
+
+ if (!name) {
+ snprintf(default_name, sizeof(default_name),
+ "hub%dport%d", hub->id, id);
+ name = default_name;
+ }
+
+ nc = qemu_new_net_client(&net_hub_port_info, NULL, "hub", name);
+ port = DO_UPCAST(NetHubPort, nc, nc);
+ port->id = id;
+ port->hub = hub;
+
+ QLIST_INSERT_HEAD(&hub->ports, port, next);
+
+ return port;
+}
+
+/**
+ * Create a port on a given hub
+ * @name: Net client name or NULL for default name.
+ *
+ * If there is no existing hub with the given id then a new hub is created.
+ */
+NetClientState *net_hub_add_port(int hub_id, const char *name)
+{
+ NetHub *hub;
+ NetHubPort *port;
+
+ QLIST_FOREACH(hub, &hubs, next) {
+ if (hub->id == hub_id) {
+ break;
+ }
+ }
+
+ if (!hub) {
+ hub = net_hub_new(hub_id);
+ }
+
+ port = net_hub_port_new(hub, name);
+ return &port->nc;
+}
+
+/**
+ * Find a specific client on a hub
+ */
+NetClientState *net_hub_find_client_by_name(int hub_id, const char *name)
+{
+ NetHub *hub;
+ NetHubPort *port;
+ NetClientState *peer;
+
+ QLIST_FOREACH(hub, &hubs, next) {
+ if (hub->id == hub_id) {
+ QLIST_FOREACH(port, &hub->ports, next) {
+ peer = port->nc.peer;
+
+ if (peer && strcmp(peer->name, name) == 0) {
+ return peer;
+ }
+ }
+ }
+ }
+ return NULL;
+}
+
+/**
+ * Find an available port on a hub, or create a new one if none is free
+ */
+NetClientState *net_hub_port_find(int hub_id)
+{
+ NetHub *hub;
+ NetHubPort *port;
+ NetClientState *nc;
+
+ QLIST_FOREACH(hub, &hubs, next) {
+ if (hub->id == hub_id) {
+ QLIST_FOREACH(port, &hub->ports, next) {
+ nc = port->nc.peer;
+ if (!nc) {
+ return &(port->nc);
+ }
+ }
+ break;
+ }
+ }
+
+ nc = net_hub_add_port(hub_id, NULL);
+ return nc;
+}
+
+/**
+ * Print hub configuration
+ */
+void net_hub_info(Monitor *mon)
+{
+ NetHub *hub;
+ NetHubPort *port;
+
+ QLIST_FOREACH(hub, &hubs, next) {
+ monitor_printf(mon, "hub %d\n", hub->id);
+ QLIST_FOREACH(port, &hub->ports, next) {
+ if (port->nc.peer) {
+ monitor_printf(mon, " \\ ");
+ print_net_client(mon, port->nc.peer);
+ }
+ }
+ }
+}
+
+/**
+ * Get the hub id that a client is connected to
+ *
+ * @id: Pointer for hub id output; may be NULL
+ */
+int net_hub_id_for_client(NetClientState *nc, int *id)
+{
+ NetHubPort *port;
+
+ if (nc->info->type == NET_CLIENT_OPTIONS_KIND_HUBPORT) {
+ port = DO_UPCAST(NetHubPort, nc, nc);
+ } else if (nc->peer != NULL && nc->peer->info->type ==
+ NET_CLIENT_OPTIONS_KIND_HUBPORT) {
+ port = DO_UPCAST(NetHubPort, nc, nc->peer);
+ } else {
+ return -ENOENT;
+ }
+
+ if (id) {
+ *id = port->hub->id;
+ }
+ return 0;
+}
+
+int net_init_hubport(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
+{
+ const NetdevHubPortOptions *hubport;
+
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_HUBPORT);
+ hubport = opts->hubport;
+
+ /* Treat the hub port like a backend; the NIC must be the one that peers with it */
+ if (peer) {
+ return -EINVAL;
+ }
+
+ net_hub_add_port(hubport->hubid, name);
+ return 0;
+}
+
+/**
+ * Warn if hub configurations are likely wrong
+ */
+void net_hub_check_clients(void)
+{
+ NetHub *hub;
+ NetHubPort *port;
+ NetClientState *peer;
+
+ QLIST_FOREACH(hub, &hubs, next) {
+ int has_nic = 0, has_host_dev = 0;
+
+ QLIST_FOREACH(port, &hub->ports, next) {
+ peer = port->nc.peer;
+ if (!peer) {
+ fprintf(stderr, "Warning: hub port %s has no peer\n",
+ port->nc.name);
+ continue;
+ }
+
+ switch (peer->info->type) {
+ case NET_CLIENT_OPTIONS_KIND_NIC:
+ has_nic = 1;
+ break;
+ case NET_CLIENT_OPTIONS_KIND_USER:
+ case NET_CLIENT_OPTIONS_KIND_TAP:
+ case NET_CLIENT_OPTIONS_KIND_SOCKET:
+ case NET_CLIENT_OPTIONS_KIND_VDE:
+ has_host_dev = 1;
+ break;
+ default:
+ break;
+ }
+ }
+ if (has_host_dev && !has_nic) {
+ fprintf(stderr, "Warning: vlan %d with no nics\n", hub->id);
+ }
+ if (has_nic && !has_host_dev) {
+ fprintf(stderr,
+ "Warning: vlan %d is not connected to host network\n",
+ hub->id);
+ }
+ }
+}
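The core of the new hub is net_hub_receive(): every frame arriving on one port is repeated to every other port of the same hub, never back to the source. Below is a minimal stand-alone toy of that forwarding rule, with hypothetical names; it is not QEMU code.

/* Stand-alone toy (not QEMU code) showing the forwarding rule implemented by
 * net_hub_receive() above: a frame arriving on one port is repeated to every
 * other port on the same hub, never back to the source. */
#include <stdio.h>

#define NUM_PORTS 3

static void port_send(int port, const char *frame)
{
    printf("port %d <- %s\n", port, frame);
}

static void hub_receive(int source_port, const char *frame)
{
    for (int port = 0; port < NUM_PORTS; port++) {
        if (port == source_port) {
            continue;                 /* never reflect back to the sender */
        }
        port_send(port, frame);
    }
}

int main(void)
{
    hub_receive(1, "ARP who-has 10.0.2.2");   /* ports 0 and 2 get a copy */
    return 0;
}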
diff --git a/net/hub.h b/net/hub.h
new file mode 100644
index 0000000000..26a1ade1f9
--- /dev/null
+++ b/net/hub.h
@@ -0,0 +1,29 @@
+/*
+ * Hub net client
+ *
+ * Copyright IBM, Corp. 2012
+ *
+ * Authors:
+ * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+ * Zhi Yong Wu <wuzhy@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef NET_HUB_H
+#define NET_HUB_H
+
+#include "qemu-common.h"
+
+int net_init_hubport(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
+NetClientState *net_hub_add_port(int hub_id, const char *name);
+NetClientState *net_hub_find_client_by_name(int hub_id, const char *name);
+void net_hub_info(Monitor *mon);
+int net_hub_id_for_client(NetClientState *nc, int *id);
+void net_hub_check_clients(void);
+NetClientState *net_hub_port_find(int hub_id);
+
+#endif /* NET_HUB_H */
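The header's net_hub_id_for_client() contract is that a client maps to a hub either because it is a hub port itself or because its peer is one; anything else counts as "not on a hub". The following simplified sketch of that rule uses made-up types and is not QEMU code.

/* Illustrative sketch (simplified types, not QEMU code) of the lookup rule in
 * net_hub_id_for_client(): a client belongs to a hub if it is itself a hub
 * port, or if its peer is one; anything else reports "not on a hub". */
#include <stdbool.h>
#include <stdio.h>

typedef struct Client {
    bool is_hub_port;
    int hub_id;                  /* valid only when is_hub_port is true */
    struct Client *peer;
} Client;

static int hub_id_for_client(const Client *c, int *id)
{
    const Client *port = NULL;

    if (c->is_hub_port) {
        port = c;
    } else if (c->peer && c->peer->is_hub_port) {
        port = c->peer;
    } else {
        return -1;               /* stands in for -ENOENT */
    }
    if (id) {
        *id = port->hub_id;
    }
    return 0;
}

int main(void)
{
    Client port = { true, 0, NULL };
    Client nic  = { false, 0, &port };
    int id;

    if (hub_id_for_client(&nic, &id) == 0) {
        printf("nic is on hub %d\n", id);
    }
    return 0;
}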
diff --git a/net/queue.c b/net/queue.c
index 1ab5247a32..e8030aafe4 100644
--- a/net/queue.c
+++ b/net/queue.c
@@ -23,6 +23,7 @@
#include "net/queue.h"
#include "qemu-queue.h"
+#include "net.h"
/* The delivery handler may only return zero if it will call
* qemu_net_queue_flush() when it determines that it is once again able
@@ -40,7 +41,7 @@
struct NetPacket {
QTAILQ_ENTRY(NetPacket) entry;
- VLANClientState *sender;
+ NetClientState *sender;
unsigned flags;
int size;
NetPacketSent *sent_cb;
@@ -48,8 +49,6 @@ struct NetPacket {
};
struct NetQueue {
- NetPacketDeliver *deliver;
- NetPacketDeliverIOV *deliver_iov;
void *opaque;
QTAILQ_HEAD(packets, NetPacket) packets;
@@ -57,16 +56,12 @@ struct NetQueue {
unsigned delivering : 1;
};
-NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver,
- NetPacketDeliverIOV *deliver_iov,
- void *opaque)
+NetQueue *qemu_new_net_queue(void *opaque)
{
NetQueue *queue;
queue = g_malloc0(sizeof(NetQueue));
- queue->deliver = deliver;
- queue->deliver_iov = deliver_iov;
queue->opaque = opaque;
QTAILQ_INIT(&queue->packets);
@@ -89,7 +84,7 @@ void qemu_del_net_queue(NetQueue *queue)
}
static ssize_t qemu_net_queue_append(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const uint8_t *buf,
size_t size,
@@ -110,7 +105,7 @@ static ssize_t qemu_net_queue_append(NetQueue *queue,
}
static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const struct iovec *iov,
int iovcnt,
@@ -143,7 +138,7 @@ static ssize_t qemu_net_queue_append_iov(NetQueue *queue,
}
static ssize_t qemu_net_queue_deliver(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const uint8_t *data,
size_t size)
@@ -151,14 +146,14 @@ static ssize_t qemu_net_queue_deliver(NetQueue *queue,
ssize_t ret = -1;
queue->delivering = 1;
- ret = queue->deliver(sender, flags, data, size, queue->opaque);
+ ret = qemu_deliver_packet(sender, flags, data, size, queue->opaque);
queue->delivering = 0;
return ret;
}
static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const struct iovec *iov,
int iovcnt)
@@ -166,14 +161,14 @@ static ssize_t qemu_net_queue_deliver_iov(NetQueue *queue,
ssize_t ret = -1;
queue->delivering = 1;
- ret = queue->deliver_iov(sender, flags, iov, iovcnt, queue->opaque);
+ ret = qemu_deliver_packet_iov(sender, flags, iov, iovcnt, queue->opaque);
queue->delivering = 0;
return ret;
}
ssize_t qemu_net_queue_send(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const uint8_t *data,
size_t size,
@@ -181,8 +176,8 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering) {
- return qemu_net_queue_append(queue, sender, flags, data, size, NULL);
+ if (queue->delivering || !qemu_can_send_packet(sender)) {
+ return qemu_net_queue_append(queue, sender, flags, data, size, sent_cb);
}
ret = qemu_net_queue_deliver(queue, sender, flags, data, size);
@@ -197,7 +192,7 @@ ssize_t qemu_net_queue_send(NetQueue *queue,
}
ssize_t qemu_net_queue_send_iov(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const struct iovec *iov,
int iovcnt,
@@ -205,8 +200,9 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
{
ssize_t ret;
- if (queue->delivering) {
- return qemu_net_queue_append_iov(queue, sender, flags, iov, iovcnt, NULL);
+ if (queue->delivering || !qemu_can_send_packet(sender)) {
+ return qemu_net_queue_append_iov(queue, sender, flags,
+ iov, iovcnt, sent_cb);
}
ret = qemu_net_queue_deliver_iov(queue, sender, flags, iov, iovcnt);
@@ -220,7 +216,7 @@ ssize_t qemu_net_queue_send_iov(NetQueue *queue,
return ret;
}
-void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from)
+void qemu_net_queue_purge(NetQueue *queue, NetClientState *from)
{
NetPacket *packet, *next;
diff --git a/net/queue.h b/net/queue.h
index a31958e3c6..9d44a9b3b8 100644
--- a/net/queue.h
+++ b/net/queue.h
@@ -29,43 +29,30 @@
typedef struct NetPacket NetPacket;
typedef struct NetQueue NetQueue;
-typedef void (NetPacketSent) (VLANClientState *sender, ssize_t ret);
-
-typedef ssize_t (NetPacketDeliver) (VLANClientState *sender,
- unsigned flags,
- const uint8_t *buf,
- size_t size,
- void *opaque);
-
-typedef ssize_t (NetPacketDeliverIOV) (VLANClientState *sender,
- unsigned flags,
- const struct iovec *iov,
- int iovcnt,
- void *opaque);
+typedef void (NetPacketSent) (NetClientState *sender, ssize_t ret);
#define QEMU_NET_PACKET_FLAG_NONE 0
#define QEMU_NET_PACKET_FLAG_RAW (1<<0)
-NetQueue *qemu_new_net_queue(NetPacketDeliver *deliver,
- NetPacketDeliverIOV *deliver_iov,
- void *opaque);
+NetQueue *qemu_new_net_queue(void *opaque);
+
void qemu_del_net_queue(NetQueue *queue);
ssize_t qemu_net_queue_send(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const uint8_t *data,
size_t size,
NetPacketSent *sent_cb);
ssize_t qemu_net_queue_send_iov(NetQueue *queue,
- VLANClientState *sender,
+ NetClientState *sender,
unsigned flags,
const struct iovec *iov,
int iovcnt,
NetPacketSent *sent_cb);
-void qemu_net_queue_purge(NetQueue *queue, VLANClientState *from);
+void qemu_net_queue_purge(NetQueue *queue, NetClientState *from);
void qemu_net_queue_flush(NetQueue *queue);
#endif /* QEMU_NET_QUEUE_H */
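The queue changes above do two related things: qemu_net_queue_send() and qemu_net_queue_send_iov() now also queue when the sender currently cannot deliver (not only while another delivery is in flight), and the packet's completion callback is preserved in the queued entry instead of being dropped. The toy model below illustrates that behaviour with invented names; it is not QEMU code.

/* Toy sketch (not QEMU code) of the queueing change above: when the packet
 * cannot be delivered right now, it is appended together with its completion
 * callback instead of being sent anyway or losing the callback; a later flush
 * delivers it and fires the callback. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

typedef void (*SentCb)(const char *pkt, long ret);

typedef struct {
    char pkt[64];
    SentCb cb;
    bool used;
} QueuedPacket;

static QueuedPacket backlog;            /* one-slot queue keeps the toy small */
static bool receiver_busy = true;

static long deliver(const char *pkt)
{
    printf("delivered: %s\n", pkt);
    return (long)strlen(pkt);
}

static long queue_send(const char *pkt, SentCb cb)
{
    if (receiver_busy) {                /* can't send now: keep pkt *and* cb */
        snprintf(backlog.pkt, sizeof(backlog.pkt), "%s", pkt);
        backlog.cb = cb;
        backlog.used = true;
        return 0;                       /* 0 means "queued, wait for callback" */
    }
    return deliver(pkt);
}

static void queue_flush(void)
{
    if (backlog.used) {
        long ret = deliver(backlog.pkt);
        if (backlog.cb) {
            backlog.cb(backlog.pkt, ret);
        }
        backlog.used = false;
    }
}

static void sent_cb(const char *pkt, long ret)
{
    printf("callback: %s (%ld bytes)\n", pkt, ret);
}

int main(void)
{
    queue_send("hello", sent_cb);       /* receiver busy: packet is queued */
    receiver_busy = false;
    queue_flush();                      /* now delivered, callback fires */
    return 0;
}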
diff --git a/net/slirp.c b/net/slirp.c
index 37b6ccfde9..8db66ea539 100644
--- a/net/slirp.c
+++ b/net/slirp.c
@@ -26,9 +26,11 @@
#include "config-host.h"
#ifndef _WIN32
+#include <pwd.h>
#include <sys/wait.h>
#endif
#include "net.h"
+#include "net/hub.h"
#include "monitor.h"
#include "qemu_socket.h"
#include "slirp/libslirp.h"
@@ -66,7 +68,7 @@ struct slirp_config_str {
};
typedef struct SlirpState {
- VLANClientState nc;
+ NetClientState nc;
QTAILQ_ENTRY(SlirpState) entry;
Slirp *slirp;
#ifndef _WIN32
@@ -95,13 +97,6 @@ static void slirp_smb_cleanup(SlirpState *s);
static inline void slirp_smb_cleanup(SlirpState *s) { }
#endif
-int slirp_can_output(void *opaque)
-{
- SlirpState *s = opaque;
-
- return qemu_can_send_packet(&s->nc);
-}
-
void slirp_output(void *opaque, const uint8_t *pkt, int pkt_len)
{
SlirpState *s = opaque;
@@ -109,7 +104,7 @@ void slirp_output(void *opaque, const uint8_t *pkt, int pkt_len)
qemu_send_packet(&s->nc, pkt, pkt_len);
}
-static ssize_t net_slirp_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t net_slirp_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
SlirpState *s = DO_UPCAST(SlirpState, nc, nc);
@@ -118,7 +113,7 @@ static ssize_t net_slirp_receive(VLANClientState *nc, const uint8_t *buf, size_t
return size;
}
-static void net_slirp_cleanup(VLANClientState *nc)
+static void net_slirp_cleanup(NetClientState *nc)
{
SlirpState *s = DO_UPCAST(SlirpState, nc, nc);
@@ -128,13 +123,13 @@ static void net_slirp_cleanup(VLANClientState *nc)
}
static NetClientInfo net_slirp_info = {
- .type = NET_CLIENT_TYPE_USER,
+ .type = NET_CLIENT_OPTIONS_KIND_USER,
.size = sizeof(SlirpState),
.receive = net_slirp_receive,
.cleanup = net_slirp_cleanup,
};
-static int net_slirp_init(VLANState *vlan, const char *model,
+static int net_slirp_init(NetClientState *peer, const char *model,
const char *name, int restricted,
const char *vnetwork, const char *vhost,
const char *vhostname, const char *tftp_export,
@@ -151,7 +146,7 @@ static int net_slirp_init(VLANState *vlan, const char *model,
#ifndef _WIN32
struct in_addr smbsrv = { .s_addr = 0 };
#endif
- VLANClientState *nc;
+ NetClientState *nc;
SlirpState *s;
char buf[20];
uint32_t addr;
@@ -237,7 +232,7 @@ static int net_slirp_init(VLANState *vlan, const char *model,
}
#endif
- nc = qemu_new_net_client(&net_slirp_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_slirp_info, peer, model, name);
snprintf(nc->info_str, sizeof(nc->info_str),
"net=%s,restrict=%s", inet_ntoa(net),
@@ -273,7 +268,7 @@ static int net_slirp_init(VLANState *vlan, const char *model,
return 0;
error:
- qemu_del_vlan_client(nc);
+ qemu_del_net_client(nc);
return -1;
}
@@ -282,8 +277,8 @@ static SlirpState *slirp_lookup(Monitor *mon, const char *vlan,
{
if (vlan) {
- VLANClientState *nc;
- nc = qemu_find_vlan_client_by_name(mon, strtol(vlan, NULL, 0), stack);
+ NetClientState *nc;
+ nc = net_hub_find_client_by_name(strtol(vlan, NULL, 0), stack);
if (!nc) {
return NULL;
}
@@ -487,8 +482,27 @@ static int slirp_smb(SlirpState* s, const char *exported_dir,
static int instance;
char smb_conf[128];
char smb_cmdline[128];
+ struct passwd *passwd;
FILE *f;
+ passwd = getpwuid(geteuid());
+ if (!passwd) {
+ error_report("failed to retrieve user name");
+ return -1;
+ }
+
+ if (access(CONFIG_SMBD_COMMAND, F_OK)) {
+ error_report("could not find '%s', please install it",
+ CONFIG_SMBD_COMMAND);
+ return -1;
+ }
+
+ if (access(exported_dir, R_OK | X_OK)) {
+ error_report("error accessing shared directory '%s': %s",
+ exported_dir, strerror(errno));
+ return -1;
+ }
+
snprintf(s->smb_dir, sizeof(s->smb_dir), "/tmp/qemu-smb.%ld-%d",
(long)getpid(), instance++);
if (mkdir(s->smb_dir, 0700) < 0) {
@@ -517,14 +531,16 @@ static int slirp_smb(SlirpState* s, const char *exported_dir,
"[qemu]\n"
"path=%s\n"
"read only=no\n"
- "guest ok=yes\n",
+ "guest ok=yes\n"
+ "force user=%s\n",
s->smb_dir,
s->smb_dir,
s->smb_dir,
s->smb_dir,
s->smb_dir,
s->smb_dir,
- exported_dir
+ exported_dir,
+ passwd->pw_name
);
fclose(f);
@@ -616,25 +632,35 @@ static int slirp_guestfwd(SlirpState *s, const char *config_str,
fwd = g_malloc(sizeof(struct GuestFwd));
snprintf(buf, sizeof(buf), "guestfwd.tcp.%d", port);
- fwd->hd = qemu_chr_new(buf, p, NULL);
- if (!fwd->hd) {
- error_report("could not open guest forwarding device '%s'", buf);
- g_free(fwd);
- return -1;
- }
- if (slirp_add_exec(s->slirp, 3, fwd->hd, &server, port) < 0) {
- error_report("conflicting/invalid host:port in guest forwarding "
- "rule '%s'", config_str);
- g_free(fwd);
- return -1;
- }
- fwd->server = server;
- fwd->port = port;
- fwd->slirp = s->slirp;
+ if ((strlen(p) > 4) && !strncmp(p, "cmd:", 4)) {
+ if (slirp_add_exec(s->slirp, 0, &p[4], &server, port) < 0) {
+ error_report("conflicting/invalid host:port in guest forwarding "
+ "rule '%s'", config_str);
+ g_free(fwd);
+ return -1;
+ }
+ } else {
+ fwd->hd = qemu_chr_new(buf, p, NULL);
+ if (!fwd->hd) {
+ error_report("could not open guest forwarding device '%s'", buf);
+ g_free(fwd);
+ return -1;
+ }
- qemu_chr_add_handlers(fwd->hd, guestfwd_can_read, guestfwd_read,
- NULL, fwd);
+ if (slirp_add_exec(s->slirp, 3, fwd->hd, &server, port) < 0) {
+ error_report("conflicting/invalid host:port in guest forwarding "
+ "rule '%s'", config_str);
+ g_free(fwd);
+ return -1;
+ }
+ fwd->server = server;
+ fwd->port = port;
+ fwd->slirp = s->slirp;
+
+ qemu_chr_add_handlers(fwd->hd, guestfwd_can_read, guestfwd_read,
+ NULL, fwd);
+ }
return 0;
fail_syntax:
@@ -647,95 +673,55 @@ void do_info_usernet(Monitor *mon)
SlirpState *s;
QTAILQ_FOREACH(s, &slirp_stacks, entry) {
+ int id;
+ bool got_vlan_id = net_hub_id_for_client(&s->nc, &id) == 0;
monitor_printf(mon, "VLAN %d (%s):\n",
- s->nc.vlan ? s->nc.vlan->id : -1,
+ got_vlan_id ? id : -1,
s->nc.name);
slirp_connection_info(s->slirp, mon);
}
}
-static int net_init_slirp_configs(const char *name, const char *value, void *opaque)
+static void
+net_init_slirp_configs(const StringList *fwd, int flags)
{
- struct slirp_config_str *config;
-
- if (strcmp(name, "hostfwd") != 0 && strcmp(name, "guestfwd") != 0) {
- return 0;
- }
-
- config = g_malloc0(sizeof(*config));
+ while (fwd) {
+ struct slirp_config_str *config;
- pstrcpy(config->str, sizeof(config->str), value);
+ config = g_malloc0(sizeof(*config));
+ pstrcpy(config->str, sizeof(config->str), fwd->value->str);
+ config->flags = flags;
+ config->next = slirp_configs;
+ slirp_configs = config;
- if (!strcmp(name, "hostfwd")) {
- config->flags = SLIRP_CFG_HOSTFWD;
+ fwd = fwd->next;
}
-
- config->next = slirp_configs;
- slirp_configs = config;
-
- return 0;
}
-int net_init_slirp(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_slirp(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
struct slirp_config_str *config;
- const char *vhost;
- const char *vhostname;
- const char *vdhcp_start;
- const char *vnamesrv;
- const char *tftp_export;
- const char *bootfile;
- const char *smb_export;
- const char *vsmbsrv;
- const char *restrict_opt;
- char *vnet = NULL;
- int restricted = 0;
+ char *vnet;
int ret;
+ const NetdevUserOptions *user;
- vhost = qemu_opt_get(opts, "host");
- vhostname = qemu_opt_get(opts, "hostname");
- vdhcp_start = qemu_opt_get(opts, "dhcpstart");
- vnamesrv = qemu_opt_get(opts, "dns");
- tftp_export = qemu_opt_get(opts, "tftp");
- bootfile = qemu_opt_get(opts, "bootfile");
- smb_export = qemu_opt_get(opts, "smb");
- vsmbsrv = qemu_opt_get(opts, "smbserver");
-
- restrict_opt = qemu_opt_get(opts, "restrict");
- if (restrict_opt) {
- if (!strcmp(restrict_opt, "on") ||
- !strcmp(restrict_opt, "yes") || !strcmp(restrict_opt, "y")) {
- restricted = 1;
- } else if (strcmp(restrict_opt, "off") &&
- strcmp(restrict_opt, "no") && strcmp(restrict_opt, "n")) {
- error_report("invalid option: 'restrict=%s'", restrict_opt);
- return -1;
- }
- }
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_USER);
+ user = opts->user;
- if (qemu_opt_get(opts, "ip")) {
- const char *ip = qemu_opt_get(opts, "ip");
- int l = strlen(ip) + strlen("/24") + 1;
+ vnet = user->has_net ? g_strdup(user->net) :
+ user->has_ip ? g_strdup_printf("%s/24", user->ip) :
+ NULL;
- vnet = g_malloc(l);
-
- /* emulate legacy ip= parameter */
- pstrcpy(vnet, l, ip);
- pstrcat(vnet, l, "/24");
- }
-
- if (qemu_opt_get(opts, "net")) {
- if (vnet) {
- g_free(vnet);
- }
- vnet = g_strdup(qemu_opt_get(opts, "net"));
- }
+ /* all optional fields are initialized to "all bits zero" */
- qemu_opt_foreach(opts, net_init_slirp_configs, NULL, 0);
+ net_init_slirp_configs(user->hostfwd, SLIRP_CFG_HOSTFWD);
+ net_init_slirp_configs(user->guestfwd, 0);
- ret = net_slirp_init(vlan, "user", name, restricted, vnet, vhost,
- vhostname, tftp_export, bootfile, vdhcp_start,
- vnamesrv, smb_export, vsmbsrv);
+ ret = net_slirp_init(peer, "user", name, user->q_restrict, vnet,
+ user->host, user->hostname, user->tftp,
+ user->bootfile, user->dhcpstart, user->dns, user->smb,
+ user->smbserver);
while (slirp_configs) {
config = slirp_configs;
diff --git a/net/slirp.h b/net/slirp.h
index 53fe95dc12..5f685c4fb1 100644
--- a/net/slirp.h
+++ b/net/slirp.h
@@ -27,10 +27,12 @@
#include "qemu-common.h"
#include "qdict.h"
#include "qemu-option.h"
+#include "qapi-types.h"
#ifdef CONFIG_SLIRP
-int net_init_slirp(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_slirp(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
void net_slirp_hostfwd_add(Monitor *mon, const QDict *qdict);
void net_slirp_hostfwd_remove(Monitor *mon, const QDict *qdict);
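In net_init_slirp() the QAPI options replace the old qemu_opt_get() calls, and the virtual-network string is normalized so that an explicit net= wins, the legacy ip= form gets "/24" appended, and with neither option present the value stays NULL so slirp falls back to its built-in default. Below is a small stand-alone sketch of that normalization with a hypothetical helper name; it is not QEMU code.

/* Small stand-alone sketch (not QEMU code) of the option normalization done in
 * net_init_slirp() above: an explicit net= wins, a legacy ip= is turned into a
 * /24 network string, and with neither option the result stays NULL. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *build_vnet(const char *net, const char *ip)
{
    if (net) {
        return strdup(net);
    }
    if (ip) {
        char *s = malloc(strlen(ip) + sizeof("/24"));
        sprintf(s, "%s/24", ip);        /* emulate the legacy ip= parameter */
        return s;
    }
    return NULL;
}

int main(void)
{
    char *a = build_vnet(NULL, "10.0.2.0");
    char *b = build_vnet("192.168.76.0/26", "ignored");

    printf("%s\n%s\n", a, b);           /* 10.0.2.0/24 and 192.168.76.0/26 */
    free(a);
    free(b);
    return 0;
}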
diff --git a/net/socket.c b/net/socket.c
index fcd0a3c162..c172c249be 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -34,7 +34,8 @@
#include "qemu_socket.h"
typedef struct NetSocketState {
- VLANClientState nc;
+ NetClientState nc;
+ int listen_fd;
int fd;
int state; /* 0 = getting length, 1 = getting data */
unsigned int index;
@@ -43,15 +44,10 @@ typedef struct NetSocketState {
struct sockaddr_in dgram_dst; /* contains inet host and port destination iff connectionless (SOCK_DGRAM) */
} NetSocketState;
-typedef struct NetSocketListenState {
- VLANState *vlan;
- char *model;
- char *name;
- int fd;
-} NetSocketListenState;
+static void net_socket_accept(void *opaque);
/* XXX: we consider we can send the whole packet without blocking */
-static ssize_t net_socket_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t net_socket_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
uint32_t len;
@@ -61,7 +57,7 @@ static ssize_t net_socket_receive(VLANClientState *nc, const uint8_t *buf, size_
return send_all(s->fd, buf, size);
}
-static ssize_t net_socket_receive_dgram(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t net_socket_receive_dgram(NetClientState *nc, const uint8_t *buf, size_t size)
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
@@ -86,7 +82,19 @@ static void net_socket_send(void *opaque)
/* end of connection */
eoc:
qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ if (s->listen_fd != -1) {
+ qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
+ }
closesocket(s->fd);
+
+ s->fd = -1;
+ s->state = 0;
+ s->index = 0;
+ s->packet_len = 0;
+ s->nc.link_down = true;
+ memset(s->buf, 0, sizeof(s->buf));
+ memset(s->nc.info_str, 0, sizeof(s->nc.info_str));
+
return;
}
buf = buf1;
@@ -231,21 +239,29 @@ fail:
return -1;
}
-static void net_socket_cleanup(VLANClientState *nc)
+static void net_socket_cleanup(NetClientState *nc)
{
NetSocketState *s = DO_UPCAST(NetSocketState, nc, nc);
- qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
- close(s->fd);
+ if (s->fd != -1) {
+ qemu_set_fd_handler(s->fd, NULL, NULL, NULL);
+ close(s->fd);
+ s->fd = -1;
+ }
+ if (s->listen_fd != -1) {
+ qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL);
+ closesocket(s->listen_fd);
+ s->listen_fd = -1;
+ }
}
static NetClientInfo net_dgram_socket_info = {
- .type = NET_CLIENT_TYPE_SOCKET,
+ .type = NET_CLIENT_OPTIONS_KIND_SOCKET,
.size = sizeof(NetSocketState),
.receive = net_socket_receive_dgram,
.cleanup = net_socket_cleanup,
};
-static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan,
+static NetSocketState *net_socket_fd_init_dgram(NetClientState *peer,
const char *model,
const char *name,
int fd, int is_connected)
@@ -253,7 +269,7 @@ static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan,
struct sockaddr_in saddr;
int newfd;
socklen_t saddr_len;
- VLANClientState *nc;
+ NetClientState *nc;
NetSocketState *s;
/* fd passed: multicast: "learn" dgram_dst address from bound address and save it
@@ -287,7 +303,7 @@ static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan,
}
}
- nc = qemu_new_net_client(&net_dgram_socket_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_dgram_socket_info, peer, model, name);
snprintf(nc->info_str, sizeof(nc->info_str),
"socket: fd=%d (%s mcast=%s:%d)",
@@ -297,11 +313,14 @@ static NetSocketState *net_socket_fd_init_dgram(VLANState *vlan,
s = DO_UPCAST(NetSocketState, nc, nc);
s->fd = fd;
+ s->listen_fd = -1;
qemu_set_fd_handler(s->fd, net_socket_send_dgram, NULL, s);
/* mcast: save bound address as dst */
- if (is_connected) s->dgram_dst=saddr;
+ if (is_connected) {
+ s->dgram_dst = saddr;
+ }
return s;
@@ -317,27 +336,28 @@ static void net_socket_connect(void *opaque)
}
static NetClientInfo net_socket_info = {
- .type = NET_CLIENT_TYPE_SOCKET,
+ .type = NET_CLIENT_OPTIONS_KIND_SOCKET,
.size = sizeof(NetSocketState),
.receive = net_socket_receive,
.cleanup = net_socket_cleanup,
};
-static NetSocketState *net_socket_fd_init_stream(VLANState *vlan,
+static NetSocketState *net_socket_fd_init_stream(NetClientState *peer,
const char *model,
const char *name,
int fd, int is_connected)
{
- VLANClientState *nc;
+ NetClientState *nc;
NetSocketState *s;
- nc = qemu_new_net_client(&net_socket_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_socket_info, peer, model, name);
snprintf(nc->info_str, sizeof(nc->info_str), "socket: fd=%d", fd);
s = DO_UPCAST(NetSocketState, nc, nc);
s->fd = fd;
+ s->listen_fd = -1;
if (is_connected) {
net_socket_connect(s);
@@ -347,7 +367,7 @@ static NetSocketState *net_socket_fd_init_stream(VLANState *vlan,
return s;
}
-static NetSocketState *net_socket_fd_init(VLANState *vlan,
+static NetSocketState *net_socket_fd_init(NetClientState *peer,
const char *model, const char *name,
int fd, int is_connected)
{
@@ -362,60 +382,59 @@ static NetSocketState *net_socket_fd_init(VLANState *vlan,
}
switch(so_type) {
case SOCK_DGRAM:
- return net_socket_fd_init_dgram(vlan, model, name, fd, is_connected);
+ return net_socket_fd_init_dgram(peer, model, name, fd, is_connected);
case SOCK_STREAM:
- return net_socket_fd_init_stream(vlan, model, name, fd, is_connected);
+ return net_socket_fd_init_stream(peer, model, name, fd, is_connected);
default:
/* who knows ... this could be e.g. a pty, so warn and continue as stream */
fprintf(stderr, "qemu: warning: socket type=%d for fd=%d is not SOCK_DGRAM or SOCK_STREAM\n", so_type, fd);
- return net_socket_fd_init_stream(vlan, model, name, fd, is_connected);
+ return net_socket_fd_init_stream(peer, model, name, fd, is_connected);
}
return NULL;
}
static void net_socket_accept(void *opaque)
{
- NetSocketListenState *s = opaque;
- NetSocketState *s1;
+ NetSocketState *s = opaque;
struct sockaddr_in saddr;
socklen_t len;
int fd;
for(;;) {
len = sizeof(saddr);
- fd = qemu_accept(s->fd, (struct sockaddr *)&saddr, &len);
+ fd = qemu_accept(s->listen_fd, (struct sockaddr *)&saddr, &len);
if (fd < 0 && errno != EINTR) {
return;
} else if (fd >= 0) {
+ qemu_set_fd_handler(s->listen_fd, NULL, NULL, NULL);
break;
}
}
- s1 = net_socket_fd_init(s->vlan, s->model, s->name, fd, 1);
- if (s1) {
- snprintf(s1->nc.info_str, sizeof(s1->nc.info_str),
- "socket: connection from %s:%d",
- inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));
- }
+
+ s->fd = fd;
+ s->nc.link_down = false;
+ net_socket_connect(s);
+ snprintf(s->nc.info_str, sizeof(s->nc.info_str),
+ "socket: connection from %s:%d",
+ inet_ntoa(saddr.sin_addr), ntohs(saddr.sin_port));
}
-static int net_socket_listen_init(VLANState *vlan,
+static int net_socket_listen_init(NetClientState *peer,
const char *model,
const char *name,
const char *host_str)
{
- NetSocketListenState *s;
- int fd, val, ret;
+ NetClientState *nc;
+ NetSocketState *s;
struct sockaddr_in saddr;
+ int fd, val, ret;
if (parse_host_port(&saddr, host_str) < 0)
return -1;
- s = g_malloc0(sizeof(NetSocketListenState));
-
fd = qemu_socket(PF_INET, SOCK_STREAM, 0);
if (fd < 0) {
perror("socket");
- g_free(s);
return -1;
}
socket_set_nonblock(fd);
@@ -427,26 +446,27 @@ static int net_socket_listen_init(VLANState *vlan,
ret = bind(fd, (struct sockaddr *)&saddr, sizeof(saddr));
if (ret < 0) {
perror("bind");
- g_free(s);
closesocket(fd);
return -1;
}
ret = listen(fd, 0);
if (ret < 0) {
perror("listen");
- g_free(s);
closesocket(fd);
return -1;
}
- s->vlan = vlan;
- s->model = g_strdup(model);
- s->name = name ? g_strdup(name) : NULL;
- s->fd = fd;
- qemu_set_fd_handler(fd, net_socket_accept, NULL, s);
+
+ nc = qemu_new_net_client(&net_socket_info, peer, model, name);
+ s = DO_UPCAST(NetSocketState, nc, nc);
+ s->fd = -1;
+ s->listen_fd = fd;
+ s->nc.link_down = true;
+
+ qemu_set_fd_handler(s->listen_fd, net_socket_accept, NULL, s);
return 0;
}
-static int net_socket_connect_init(VLANState *vlan,
+static int net_socket_connect_init(NetClientState *peer,
const char *model,
const char *name,
const char *host_str)
@@ -487,7 +507,7 @@ static int net_socket_connect_init(VLANState *vlan,
break;
}
}
- s = net_socket_fd_init(vlan, model, name, fd, connected);
+ s = net_socket_fd_init(peer, model, name, fd, connected);
if (!s)
return -1;
snprintf(s->nc.info_str, sizeof(s->nc.info_str),
@@ -496,7 +516,7 @@ static int net_socket_connect_init(VLANState *vlan,
return 0;
}
-static int net_socket_mcast_init(VLANState *vlan,
+static int net_socket_mcast_init(NetClientState *peer,
const char *model,
const char *name,
const char *host_str,
@@ -522,7 +542,7 @@ static int net_socket_mcast_init(VLANState *vlan,
if (fd < 0)
return -1;
- s = net_socket_fd_init(vlan, model, name, fd, 0);
+ s = net_socket_fd_init(peer, model, name, fd, 0);
if (!s)
return -1;
@@ -535,7 +555,7 @@ static int net_socket_mcast_init(VLANState *vlan,
}
-static int net_socket_udp_init(VLANState *vlan,
+static int net_socket_udp_init(NetClientState *peer,
const char *model,
const char *name,
const char *rhost,
@@ -573,7 +593,7 @@ static int net_socket_udp_init(VLANState *vlan,
return -1;
}
- s = net_socket_fd_init(vlan, model, name, fd, 0);
+ s = net_socket_fd_init(peer, model, name, fd, 0);
if (!s) {
return -1;
}
@@ -586,100 +606,68 @@ static int net_socket_udp_init(VLANState *vlan,
return 0;
}
-int net_init_socket(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_socket(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
- if (qemu_opt_get(opts, "fd")) {
- int fd;
+ const NetdevSocketOptions *sock;
- if (qemu_opt_get(opts, "listen") ||
- qemu_opt_get(opts, "connect") ||
- qemu_opt_get(opts, "mcast") ||
- qemu_opt_get(opts, "localaddr")) {
- error_report("listen=, connect=, mcast= and localaddr= is invalid with fd=");
- return -1;
- }
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_SOCKET);
+ sock = opts->socket;
- fd = net_handle_fd_param(cur_mon, qemu_opt_get(opts, "fd"));
- if (fd == -1) {
- return -1;
- }
+ if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast +
+ sock->has_udp != 1) {
+ error_report("exactly one of fd=, listen=, connect=, mcast= or udp="
+ " is required");
+ return -1;
+ }
- if (!net_socket_fd_init(vlan, "socket", name, fd, 1)) {
- return -1;
- }
- } else if (qemu_opt_get(opts, "listen")) {
- const char *listen;
-
- if (qemu_opt_get(opts, "fd") ||
- qemu_opt_get(opts, "connect") ||
- qemu_opt_get(opts, "mcast") ||
- qemu_opt_get(opts, "localaddr")) {
- error_report("fd=, connect=, mcast= and localaddr= is invalid with listen=");
- return -1;
- }
+ if (sock->has_localaddr && !sock->has_mcast && !sock->has_udp) {
+ error_report("localaddr= is only valid with mcast= or udp=");
+ return -1;
+ }
- listen = qemu_opt_get(opts, "listen");
+ if (sock->has_fd) {
+ int fd;
- if (net_socket_listen_init(vlan, "socket", name, listen) == -1) {
- return -1;
- }
- } else if (qemu_opt_get(opts, "connect")) {
- const char *connect;
-
- if (qemu_opt_get(opts, "fd") ||
- qemu_opt_get(opts, "listen") ||
- qemu_opt_get(opts, "mcast") ||
- qemu_opt_get(opts, "localaddr")) {
- error_report("fd=, listen=, mcast= and localaddr= is invalid with connect=");
+ fd = net_handle_fd_param(cur_mon, sock->fd);
+ if (fd == -1 || !net_socket_fd_init(peer, "socket", name, fd, 1)) {
return -1;
}
+ return 0;
+ }
- connect = qemu_opt_get(opts, "connect");
-
- if (net_socket_connect_init(vlan, "socket", name, connect) == -1) {
+ if (sock->has_listen) {
+ if (net_socket_listen_init(peer, "socket", name, sock->listen) == -1) {
return -1;
}
- } else if (qemu_opt_get(opts, "mcast")) {
- const char *mcast, *localaddr;
+ return 0;
+ }
- if (qemu_opt_get(opts, "fd") ||
- qemu_opt_get(opts, "connect") ||
- qemu_opt_get(opts, "listen")) {
- error_report("fd=, connect= and listen= is invalid with mcast=");
+ if (sock->has_connect) {
+ if (net_socket_connect_init(peer, "socket", name, sock->connect) ==
+ -1) {
return -1;
}
+ return 0;
+ }
- mcast = qemu_opt_get(opts, "mcast");
- localaddr = qemu_opt_get(opts, "localaddr");
-
- if (net_socket_mcast_init(vlan, "socket", name, mcast, localaddr) == -1) {
- return -1;
- }
- } else if (qemu_opt_get(opts, "udp")) {
- const char *udp, *localaddr;
-
- if (qemu_opt_get(opts, "fd") ||
- qemu_opt_get(opts, "connect") ||
- qemu_opt_get(opts, "listen") ||
- qemu_opt_get(opts, "mcast")) {
- error_report("fd=, connect=, listen="
- " and mcast= is invalid with udp=");
+ if (sock->has_mcast) {
+ /* if sock->localaddr is missing, it has been initialized to "all bits
+ * zero" */
+ if (net_socket_mcast_init(peer, "socket", name, sock->mcast,
+ sock->localaddr) == -1) {
return -1;
}
+ return 0;
+ }
- udp = qemu_opt_get(opts, "udp");
- localaddr = qemu_opt_get(opts, "localaddr");
- if (localaddr == NULL) {
- error_report("localaddr= is mandatory with udp=");
- return -1;
- }
-
- if (net_socket_udp_init(vlan, "udp", name, udp, localaddr) == -1) {
- return -1;
- }
- } else {
- error_report("-socket requires fd=, listen=,"
- " connect=, mcast= or udp=");
+ assert(sock->has_udp);
+ if (!sock->has_localaddr) {
+ error_report("localaddr= is mandatory with udp=");
+ return -1;
+ }
+ if (net_socket_udp_init(peer, "udp", name, sock->udp, sock->localaddr) ==
+ -1) {
return -1;
}
return 0;
diff --git a/net/socket.h b/net/socket.h
index e1fe959412..3f8a092459 100644
--- a/net/socket.h
+++ b/net/socket.h
@@ -25,8 +25,9 @@
#define QEMU_NET_SOCKET_H
#include "net.h"
-#include "qemu-common.h"
+#include "qapi-types.h"
-int net_init_socket(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_socket(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
#endif /* QEMU_NET_SOCKET_H */
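net_init_socket() above replaces the per-branch pairwise checks with one up-front rule: the has_* flags of the mutually exclusive options must sum to exactly 1, and localaddr= is only accepted together with mcast= or udp=. The stand-alone sketch below shows that validation with an invented struct, not the QAPI-generated type.

/* Stand-alone sketch (not QEMU code) of the validation trick used by
 * net_init_socket() above: the has_* flags of mutually exclusive options are
 * summed, and anything other than exactly 1 is rejected up front instead of
 * being caught by pairwise checks in every branch. */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    bool has_fd, has_listen, has_connect, has_mcast, has_udp;
    bool has_localaddr;
} SocketOpts;

static int validate(const SocketOpts *o)
{
    if (o->has_fd + o->has_listen + o->has_connect + o->has_mcast +
        o->has_udp != 1) {
        fprintf(stderr, "exactly one of fd=, listen=, connect=, mcast= or udp= is required\n");
        return -1;
    }
    if (o->has_localaddr && !o->has_mcast && !o->has_udp) {
        fprintf(stderr, "localaddr= is only valid with mcast= or udp=\n");
        return -1;
    }
    return 0;
}

int main(void)
{
    SocketOpts ok = { .has_mcast = true, .has_localaddr = true };
    SocketOpts bad = { .has_fd = true, .has_listen = true };

    printf("ok: %d, bad: %d\n", validate(&ok), validate(&bad));
    return 0;
}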
diff --git a/net/tap-aix.c b/net/tap-aix.c
index e19aaba110..f27c17729e 100644
--- a/net/tap-aix.c
+++ b/net/tap-aix.c
@@ -31,7 +31,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required
return -1;
}
-int tap_set_sndbuf(int fd, QemuOpts *opts)
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap)
{
return 0;
}
diff --git a/net/tap-bsd.c b/net/tap-bsd.c
index 937a94b11f..a3b717dd1c 100644
--- a/net/tap-bsd.c
+++ b/net/tap-bsd.c
@@ -117,7 +117,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required
return fd;
}
-int tap_set_sndbuf(int fd, QemuOpts *opts)
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap)
{
return 0;
}
diff --git a/net/tap-haiku.c b/net/tap-haiku.c
index 91dda8ebc0..34739d1562 100644
--- a/net/tap-haiku.c
+++ b/net/tap-haiku.c
@@ -31,7 +31,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required
return -1;
}
-int tap_set_sndbuf(int fd, QemuOpts *opts)
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap)
{
return 0;
}
diff --git a/net/tap-linux.c b/net/tap-linux.c
index 41d581b734..c6521bec34 100644
--- a/net/tap-linux.c
+++ b/net/tap-linux.c
@@ -98,16 +98,19 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required
*/
#define TAP_DEFAULT_SNDBUF 0
-int tap_set_sndbuf(int fd, QemuOpts *opts)
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap)
{
int sndbuf;
- sndbuf = qemu_opt_get_size(opts, "sndbuf", TAP_DEFAULT_SNDBUF);
+ sndbuf = !tap->has_sndbuf ? TAP_DEFAULT_SNDBUF :
+ tap->sndbuf > INT_MAX ? INT_MAX :
+ tap->sndbuf;
+
if (!sndbuf) {
sndbuf = INT_MAX;
}
- if (ioctl(fd, TUNSETSNDBUF, &sndbuf) == -1 && qemu_opt_get(opts, "sndbuf")) {
+ if (ioctl(fd, TUNSETSNDBUF, &sndbuf) == -1 && tap->has_sndbuf) {
error_report("TUNSETSNDBUF ioctl failed: %s", strerror(errno));
return -1;
}
diff --git a/net/tap-solaris.c b/net/tap-solaris.c
index cf764634ef..5d6ac42f24 100644
--- a/net/tap-solaris.c
+++ b/net/tap-solaris.c
@@ -197,7 +197,7 @@ int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required
return fd;
}
-int tap_set_sndbuf(int fd, QemuOpts *opts)
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap)
{
return 0;
}
diff --git a/net/tap-win32.c b/net/tap-win32.c
index a801a553c4..c0ea954ca1 100644
--- a/net/tap-win32.c
+++ b/net/tap-win32.c
@@ -630,11 +630,11 @@ static int tap_win32_open(tap_win32_overlapped_t **phandle,
/********************************************/
typedef struct TAPState {
- VLANClientState nc;
+ NetClientState nc;
tap_win32_overlapped_t *handle;
} TAPState;
-static void tap_cleanup(VLANClientState *nc)
+static void tap_cleanup(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -645,7 +645,7 @@ static void tap_cleanup(VLANClientState *nc)
*/
}
-static ssize_t tap_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t tap_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -667,16 +667,16 @@ static void tap_win32_send(void *opaque)
}
static NetClientInfo net_tap_win32_info = {
- .type = NET_CLIENT_TYPE_TAP,
+ .type = NET_CLIENT_OPTIONS_KIND_TAP,
.size = sizeof(TAPState),
.receive = tap_receive,
.cleanup = tap_cleanup,
};
-static int tap_win32_init(VLANState *vlan, const char *model,
+static int tap_win32_init(NetClientState *peer, const char *model,
const char *name, const char *ifname)
{
- VLANClientState *nc;
+ NetClientState *nc;
TAPState *s;
tap_win32_overlapped_t *handle;
@@ -685,7 +685,7 @@ static int tap_win32_init(VLANState *vlan, const char *model,
return -1;
}
- nc = qemu_new_net_client(&net_tap_win32_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_tap_win32_info, peer, model, name);
s = DO_UPCAST(TAPState, nc, nc);
@@ -699,30 +699,32 @@ static int tap_win32_init(VLANState *vlan, const char *model,
return 0;
}
-int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_tap(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
- const char *ifname;
+ const NetdevTapOptions *tap;
- ifname = qemu_opt_get(opts, "ifname");
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_TAP);
+ tap = opts->tap;
- if (!ifname) {
+ if (!tap->has_ifname) {
error_report("tap: no interface name");
return -1;
}
- if (tap_win32_init(vlan, "tap", name, ifname) == -1) {
+ if (tap_win32_init(peer, "tap", name, tap->ifname) == -1) {
return -1;
}
return 0;
}
-int tap_has_ufo(VLANClientState *vc)
+int tap_has_ufo(NetClientState *nc)
{
return 0;
}
-int tap_has_vnet_hdr(VLANClientState *vc)
+int tap_has_vnet_hdr(NetClientState *nc)
{
return 0;
}
@@ -736,16 +738,16 @@ void tap_fd_set_vnet_hdr_len(int fd, int len)
{
}
-void tap_using_vnet_hdr(VLANClientState *vc, int using_vnet_hdr)
+void tap_using_vnet_hdr(NetClientState *nc, int using_vnet_hdr)
{
}
-void tap_set_offload(VLANClientState *vc, int csum, int tso4,
+void tap_set_offload(NetClientState *nc, int csum, int tso4,
int tso6, int ecn, int ufo)
{
}
-struct vhost_net *tap_get_vhost_net(VLANClientState *nc)
+struct vhost_net *tap_get_vhost_net(NetClientState *nc)
{
return NULL;
}
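The tap_set_sndbuf() signature change pushes option parsing into the QAPI structure: on Linux an absent sndbuf falls back to the default, an oversized value is clamped to INT_MAX, and 0 still means "unlimited". Below is a stand-alone sketch of that selection logic with a hypothetical function name; it is not QEMU code.

/* Stand-alone sketch (not QEMU code) of the optional-field handling that
 * tap_set_sndbuf() uses on Linux above: absent sndbuf falls back to the
 * default, an oversized value is clamped to INT_MAX, and 0 also means
 * "unlimited" (INT_MAX). */
#include <limits.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TAP_DEFAULT_SNDBUF 0

static int effective_sndbuf(bool has_sndbuf, uint64_t sndbuf)
{
    int val = !has_sndbuf ? TAP_DEFAULT_SNDBUF :
              sndbuf > INT_MAX ? INT_MAX :
              (int)sndbuf;

    if (!val) {
        val = INT_MAX;                   /* 0 keeps the old "no limit" meaning */
    }
    return val;
}

int main(void)
{
    printf("%d %d %d\n",
           effective_sndbuf(false, 0),            /* default -> INT_MAX */
           effective_sndbuf(true, 1048576),       /* explicit 1 MiB */
           effective_sndbuf(true, UINT64_MAX));   /* clamped to INT_MAX */
    return 0;
}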
diff --git a/net/tap.c b/net/tap.c
index 17e91355ce..1971525794 100644
--- a/net/tap.c
+++ b/net/tap.c
@@ -50,7 +50,7 @@
#define TAP_BUFSIZE (4096 + 65536)
typedef struct TAPState {
- VLANClientState nc;
+ NetClientState nc;
int fd;
char down_script[1024];
char down_script_arg[128];
@@ -115,7 +115,7 @@ static ssize_t tap_write_packet(TAPState *s, const struct iovec *iov, int iovcnt
return len;
}
-static ssize_t tap_receive_iov(VLANClientState *nc, const struct iovec *iov,
+static ssize_t tap_receive_iov(NetClientState *nc, const struct iovec *iov,
int iovcnt)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -134,7 +134,7 @@ static ssize_t tap_receive_iov(VLANClientState *nc, const struct iovec *iov,
return tap_write_packet(s, iovp, iovcnt);
}
-static ssize_t tap_receive_raw(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t tap_receive_raw(NetClientState *nc, const uint8_t *buf, size_t size)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
struct iovec iov[2];
@@ -154,7 +154,7 @@ static ssize_t tap_receive_raw(VLANClientState *nc, const uint8_t *buf, size_t s
return tap_write_packet(s, iov, iovcnt);
}
-static ssize_t tap_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t tap_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
struct iovec iov[1];
@@ -183,7 +183,7 @@ ssize_t tap_read_packet(int tapfd, uint8_t *buf, int maxlen)
}
#endif
-static void tap_send_completed(VLANClientState *nc, ssize_t len)
+static void tap_send_completed(NetClientState *nc, ssize_t len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
tap_read_poll(s, 1);
@@ -214,38 +214,38 @@ static void tap_send(void *opaque)
} while (size > 0 && qemu_can_send_packet(&s->nc));
}
-int tap_has_ufo(VLANClientState *nc)
+int tap_has_ufo(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return s->has_ufo;
}
-int tap_has_vnet_hdr(VLANClientState *nc)
+int tap_has_vnet_hdr(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return !!s->host_vnet_hdr_len;
}
-int tap_has_vnet_hdr_len(VLANClientState *nc, int len)
+int tap_has_vnet_hdr_len(NetClientState *nc, int len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return tap_probe_vnet_hdr_len(s->fd, len);
}
-void tap_set_vnet_hdr_len(VLANClientState *nc, int len)
+void tap_set_vnet_hdr_len(NetClientState *nc, int len)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
assert(len == sizeof(struct virtio_net_hdr_mrg_rxbuf) ||
len == sizeof(struct virtio_net_hdr));
@@ -253,19 +253,19 @@ void tap_set_vnet_hdr_len(VLANClientState *nc, int len)
s->host_vnet_hdr_len = len;
}
-void tap_using_vnet_hdr(VLANClientState *nc, int using_vnet_hdr)
+void tap_using_vnet_hdr(NetClientState *nc, int using_vnet_hdr)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
using_vnet_hdr = using_vnet_hdr != 0;
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
assert(!!s->host_vnet_hdr_len == using_vnet_hdr);
s->using_vnet_hdr = using_vnet_hdr;
}
-void tap_set_offload(VLANClientState *nc, int csum, int tso4,
+void tap_set_offload(NetClientState *nc, int csum, int tso4,
int tso6, int ecn, int ufo)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -276,7 +276,7 @@ void tap_set_offload(VLANClientState *nc, int csum, int tso4,
tap_fd_set_offload(s->fd, csum, tso4, tso6, ecn, ufo);
}
-static void tap_cleanup(VLANClientState *nc)
+static void tap_cleanup(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
@@ -296,24 +296,24 @@ static void tap_cleanup(VLANClientState *nc)
s->fd = -1;
}
-static void tap_poll(VLANClientState *nc, bool enable)
+static void tap_poll(NetClientState *nc, bool enable)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
tap_read_poll(s, enable);
tap_write_poll(s, enable);
}
-int tap_get_fd(VLANClientState *nc)
+int tap_get_fd(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return s->fd;
}
/* fd support */
static NetClientInfo net_tap_info = {
- .type = NET_CLIENT_TYPE_TAP,
+ .type = NET_CLIENT_OPTIONS_KIND_TAP,
.size = sizeof(TAPState),
.receive = tap_receive,
.receive_raw = tap_receive_raw,
@@ -322,16 +322,16 @@ static NetClientInfo net_tap_info = {
.cleanup = tap_cleanup,
};
-static TAPState *net_tap_fd_init(VLANState *vlan,
+static TAPState *net_tap_fd_init(NetClientState *peer,
const char *model,
const char *name,
int fd,
int vnet_hdr)
{
- VLANClientState *nc;
+ NetClientState *nc;
TAPState *s;
- nc = qemu_new_net_client(&net_tap_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_tap_info, peer, model, name);
s = DO_UPCAST(TAPState, nc, nc);
@@ -513,20 +513,22 @@ static int net_bridge_run_helper(const char *helper, const char *bridge)
return -1;
}
-int net_init_bridge(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_bridge(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
+ const NetdevBridgeOptions *bridge;
+ const char *helper, *br;
+
TAPState *s;
int fd, vnet_hdr;
- if (!qemu_opt_get(opts, "br")) {
- qemu_opt_set(opts, "br", DEFAULT_BRIDGE_INTERFACE);
- }
- if (!qemu_opt_get(opts, "helper")) {
- qemu_opt_set(opts, "helper", DEFAULT_BRIDGE_HELPER);
- }
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_BRIDGE);
+ bridge = opts->bridge;
- fd = net_bridge_run_helper(qemu_opt_get(opts, "helper"),
- qemu_opt_get(opts, "br"));
+ helper = bridge->has_helper ? bridge->helper : DEFAULT_BRIDGE_HELPER;
+ br = bridge->has_br ? bridge->br : DEFAULT_BRIDGE_INTERFACE;
+
+ fd = net_bridge_run_helper(helper, br);
if (fd == -1) {
return -1;
}
@@ -535,41 +537,44 @@ int net_init_bridge(QemuOpts *opts, const char *name, VLANState *vlan)
vnet_hdr = tap_probe_vnet_hdr(fd);
- s = net_tap_fd_init(vlan, "bridge", name, fd, vnet_hdr);
+ s = net_tap_fd_init(peer, "bridge", name, fd, vnet_hdr);
if (!s) {
close(fd);
return -1;
}
- snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s,br=%s",
- qemu_opt_get(opts, "helper"), qemu_opt_get(opts, "br"));
+ snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s,br=%s", helper,
+ br);
return 0;
}
-static int net_tap_init(QemuOpts *opts, int *vnet_hdr)
+static int net_tap_init(const NetdevTapOptions *tap, int *vnet_hdr,
+ const char *setup_script, char *ifname,
+ size_t ifname_sz)
{
int fd, vnet_hdr_required;
- char ifname[128] = {0,};
- const char *setup_script;
- if (qemu_opt_get(opts, "ifname")) {
- pstrcpy(ifname, sizeof(ifname), qemu_opt_get(opts, "ifname"));
+ if (tap->has_ifname) {
+ pstrcpy(ifname, ifname_sz, tap->ifname);
+ } else {
+ assert(ifname_sz > 0);
+ ifname[0] = '\0';
}
- *vnet_hdr = qemu_opt_get_bool(opts, "vnet_hdr", 1);
- if (qemu_opt_get(opts, "vnet_hdr")) {
+ if (tap->has_vnet_hdr) {
+ *vnet_hdr = tap->vnet_hdr;
vnet_hdr_required = *vnet_hdr;
} else {
+ *vnet_hdr = 1;
vnet_hdr_required = 0;
}
- TFR(fd = tap_open(ifname, sizeof(ifname), vnet_hdr, vnet_hdr_required));
+ TFR(fd = tap_open(ifname, ifname_sz, vnet_hdr, vnet_hdr_required));
if (fd < 0) {
return -1;
}
- setup_script = qemu_opt_get(opts, "script");
if (setup_script &&
setup_script[0] != '\0' &&
strcmp(setup_script, "no") != 0 &&
@@ -578,29 +583,34 @@ static int net_tap_init(QemuOpts *opts, int *vnet_hdr)
return -1;
}
- qemu_opt_set(opts, "ifname", ifname);
-
return fd;
}
-int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_tap(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
- TAPState *s;
+ const NetdevTapOptions *tap;
+
int fd, vnet_hdr = 0;
const char *model;
+ TAPState *s;
- if (qemu_opt_get(opts, "fd")) {
- if (qemu_opt_get(opts, "ifname") ||
- qemu_opt_get(opts, "script") ||
- qemu_opt_get(opts, "downscript") ||
- qemu_opt_get(opts, "vnet_hdr") ||
- qemu_opt_get(opts, "helper")) {
+ /* for the no-fd, no-helper case */
+ const char *script = NULL; /* suppress wrong "uninit'd use" gcc warning */
+ char ifname[128];
+
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_TAP);
+ tap = opts->tap;
+
+ if (tap->has_fd) {
+ if (tap->has_ifname || tap->has_script || tap->has_downscript ||
+ tap->has_vnet_hdr || tap->has_helper) {
error_report("ifname=, script=, downscript=, vnet_hdr=, "
"and helper= are invalid with fd=");
return -1;
}
- fd = net_handle_fd_param(cur_mon, qemu_opt_get(opts, "fd"));
+ fd = net_handle_fd_param(cur_mon, tap->fd);
if (fd == -1) {
return -1;
}
@@ -611,18 +621,15 @@ int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
model = "tap";
- } else if (qemu_opt_get(opts, "helper")) {
- if (qemu_opt_get(opts, "ifname") ||
- qemu_opt_get(opts, "script") ||
- qemu_opt_get(opts, "downscript") ||
- qemu_opt_get(opts, "vnet_hdr")) {
+ } else if (tap->has_helper) {
+ if (tap->has_ifname || tap->has_script || tap->has_downscript ||
+ tap->has_vnet_hdr) {
error_report("ifname=, script=, downscript=, and vnet_hdr= "
"are invalid with helper=");
return -1;
}
- fd = net_bridge_run_helper(qemu_opt_get(opts, "helper"),
- DEFAULT_BRIDGE_INTERFACE);
+ fd = net_bridge_run_helper(tap->helper, DEFAULT_BRIDGE_INTERFACE);
if (fd == -1) {
return -1;
}
@@ -634,15 +641,8 @@ int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
model = "bridge";
} else {
- if (!qemu_opt_get(opts, "script")) {
- qemu_opt_set(opts, "script", DEFAULT_NETWORK_SCRIPT);
- }
-
- if (!qemu_opt_get(opts, "downscript")) {
- qemu_opt_set(opts, "downscript", DEFAULT_NETWORK_DOWN_SCRIPT);
- }
-
- fd = net_tap_init(opts, &vnet_hdr);
+ script = tap->has_script ? tap->script : DEFAULT_NETWORK_SCRIPT;
+ fd = net_tap_init(tap, &vnet_hdr, script, ifname, sizeof ifname);
if (fd == -1) {
return -1;
}
@@ -650,31 +650,30 @@ int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
model = "tap";
}
- s = net_tap_fd_init(vlan, model, name, fd, vnet_hdr);
+ s = net_tap_fd_init(peer, model, name, fd, vnet_hdr);
if (!s) {
close(fd);
return -1;
}
- if (tap_set_sndbuf(s->fd, opts) < 0) {
+ if (tap_set_sndbuf(s->fd, tap) < 0) {
return -1;
}
- if (qemu_opt_get(opts, "fd")) {
+ if (tap->has_fd) {
snprintf(s->nc.info_str, sizeof(s->nc.info_str), "fd=%d", fd);
- } else if (qemu_opt_get(opts, "helper")) {
- snprintf(s->nc.info_str, sizeof(s->nc.info_str),
- "helper=%s", qemu_opt_get(opts, "helper"));
+ } else if (tap->has_helper) {
+ snprintf(s->nc.info_str, sizeof(s->nc.info_str), "helper=%s",
+ tap->helper);
} else {
- const char *ifname, *script, *downscript;
+ const char *downscript;
- ifname = qemu_opt_get(opts, "ifname");
- script = qemu_opt_get(opts, "script");
- downscript = qemu_opt_get(opts, "downscript");
+ downscript = tap->has_downscript ? tap->downscript :
+ DEFAULT_NETWORK_DOWN_SCRIPT;
snprintf(s->nc.info_str, sizeof(s->nc.info_str),
- "ifname=%s,script=%s,downscript=%s",
- ifname, script, downscript);
+ "ifname=%s,script=%s,downscript=%s", ifname, script,
+ downscript);
if (strcmp(downscript, "no") != 0) {
snprintf(s->down_script, sizeof(s->down_script), "%s", downscript);
@@ -682,25 +681,26 @@ int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
}
}
- if (qemu_opt_get_bool(opts, "vhost", !!qemu_opt_get(opts, "vhostfd") ||
- qemu_opt_get_bool(opts, "vhostforce", false))) {
- int vhostfd, r;
- bool force = qemu_opt_get_bool(opts, "vhostforce", false);
- if (qemu_opt_get(opts, "vhostfd")) {
- r = net_handle_fd_param(cur_mon, qemu_opt_get(opts, "vhostfd"));
- if (r == -1) {
+ if (tap->has_vhost ? tap->vhost :
+ tap->has_vhostfd || (tap->has_vhostforce && tap->vhostforce)) {
+ int vhostfd;
+
+ if (tap->has_vhostfd) {
+ vhostfd = net_handle_fd_param(cur_mon, tap->vhostfd);
+ if (vhostfd == -1) {
return -1;
}
- vhostfd = r;
} else {
vhostfd = -1;
}
- s->vhost_net = vhost_net_init(&s->nc, vhostfd, force);
+
+ s->vhost_net = vhost_net_init(&s->nc, vhostfd,
+ tap->has_vhostforce && tap->vhostforce);
if (!s->vhost_net) {
error_report("vhost-net requested but could not be initialized");
return -1;
}
- } else if (qemu_opt_get(opts, "vhostfd")) {
+ } else if (tap->has_vhostfd) {
error_report("vhostfd= is not valid without vhost");
return -1;
}
@@ -708,9 +708,9 @@ int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan)
return 0;
}
-VHostNetState *tap_get_vhost_net(VLANClientState *nc)
+VHostNetState *tap_get_vhost_net(NetClientState *nc)
{
TAPState *s = DO_UPCAST(TAPState, nc, nc);
- assert(nc->info->type == NET_CLIENT_TYPE_TAP);
+ assert(nc->info->type == NET_CLIENT_OPTIONS_KIND_TAP);
return s->vhost_net;
}
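
For reference, the reworked init functions are no longer selected by QemuOpts key lookups; the caller dispatches on the discriminator of the generated NetClientOptions union, exactly as the assert()s above expect. A minimal sketch of that dispatch (the real table lives in net.c, outside this patch; the prototypes used are the ones declared in net/tap.h):

#include "net/tap.h"

/* Sketch only: dispatch on the union discriminator the way the real
 * net.c table does; net_init_tap() and net_init_bridge() are the
 * functions declared in net/tap.h. */
static int sketch_net_client_init(const NetClientOptions *opts,
                                  const char *name, NetClientState *peer)
{
    switch (opts->kind) {
    case NET_CLIENT_OPTIONS_KIND_TAP:
        return net_init_tap(opts, name, peer);
    case NET_CLIENT_OPTIONS_KIND_BRIDGE:
        return net_init_bridge(opts, name, peer);
    default:
        return -1;
    }
}
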
diff --git a/net/tap.h b/net/tap.h
index b2a9450aab..0fb018c4b7 100644
--- a/net/tap.h
+++ b/net/tap.h
@@ -27,36 +27,38 @@
#define QEMU_NET_TAP_H
#include "qemu-common.h"
-#include "qemu-option.h"
+#include "qapi-types.h"
#define DEFAULT_NETWORK_SCRIPT "/etc/qemu-ifup"
#define DEFAULT_NETWORK_DOWN_SCRIPT "/etc/qemu-ifdown"
-int net_init_tap(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_tap(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
int tap_open(char *ifname, int ifname_size, int *vnet_hdr, int vnet_hdr_required);
ssize_t tap_read_packet(int tapfd, uint8_t *buf, int maxlen);
-int tap_has_ufo(VLANClientState *vc);
-int tap_has_vnet_hdr(VLANClientState *vc);
-int tap_has_vnet_hdr_len(VLANClientState *vc, int len);
-void tap_using_vnet_hdr(VLANClientState *vc, int using_vnet_hdr);
-void tap_set_offload(VLANClientState *vc, int csum, int tso4, int tso6, int ecn, int ufo);
-void tap_set_vnet_hdr_len(VLANClientState *vc, int len);
+int tap_has_ufo(NetClientState *nc);
+int tap_has_vnet_hdr(NetClientState *nc);
+int tap_has_vnet_hdr_len(NetClientState *nc, int len);
+void tap_using_vnet_hdr(NetClientState *nc, int using_vnet_hdr);
+void tap_set_offload(NetClientState *nc, int csum, int tso4, int tso6, int ecn, int ufo);
+void tap_set_vnet_hdr_len(NetClientState *nc, int len);
-int tap_set_sndbuf(int fd, QemuOpts *opts);
+int tap_set_sndbuf(int fd, const NetdevTapOptions *tap);
int tap_probe_vnet_hdr(int fd);
int tap_probe_vnet_hdr_len(int fd, int len);
int tap_probe_has_ufo(int fd);
void tap_fd_set_offload(int fd, int csum, int tso4, int tso6, int ecn, int ufo);
void tap_fd_set_vnet_hdr_len(int fd, int len);
-int tap_get_fd(VLANClientState *vc);
+int tap_get_fd(NetClientState *nc);
struct vhost_net;
-struct vhost_net *tap_get_vhost_net(VLANClientState *vc);
+struct vhost_net *tap_get_vhost_net(NetClientState *nc);
-int net_init_bridge(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_bridge(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
#endif /* QEMU_NET_TAP_H */
diff --git a/net/vde.c b/net/vde.c
index 6b9d45294a..b91a6c799b 100644
--- a/net/vde.c
+++ b/net/vde.c
@@ -33,7 +33,7 @@
#include "qemu-option.h"
typedef struct VDEState {
- VLANClientState nc;
+ NetClientState nc;
VDECONN *vde;
} VDEState;
@@ -49,7 +49,7 @@ static void vde_to_qemu(void *opaque)
}
}
-static ssize_t vde_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
+static ssize_t vde_receive(NetClientState *nc, const uint8_t *buf, size_t size)
{
VDEState *s = DO_UPCAST(VDEState, nc, nc);
ssize_t ret;
@@ -61,7 +61,7 @@ static ssize_t vde_receive(VLANClientState *nc, const uint8_t *buf, size_t size)
return ret;
}
-static void vde_cleanup(VLANClientState *nc)
+static void vde_cleanup(NetClientState *nc)
{
VDEState *s = DO_UPCAST(VDEState, nc, nc);
qemu_set_fd_handler(vde_datafd(s->vde), NULL, NULL, NULL);
@@ -69,17 +69,17 @@ static void vde_cleanup(VLANClientState *nc)
}
static NetClientInfo net_vde_info = {
- .type = NET_CLIENT_TYPE_VDE,
+ .type = NET_CLIENT_OPTIONS_KIND_VDE,
.size = sizeof(VDEState),
.receive = vde_receive,
.cleanup = vde_cleanup,
};
-static int net_vde_init(VLANState *vlan, const char *model,
+static int net_vde_init(NetClientState *peer, const char *model,
const char *name, const char *sock,
int port, const char *group, int mode)
{
- VLANClientState *nc;
+ NetClientState *nc;
VDEState *s;
VDECONN *vde;
char *init_group = (char *)group;
@@ -96,7 +96,7 @@ static int net_vde_init(VLANState *vlan, const char *model,
return -1;
}
- nc = qemu_new_net_client(&net_vde_info, vlan, NULL, model, name);
+ nc = qemu_new_net_client(&net_vde_info, peer, model, name);
snprintf(nc->info_str, sizeof(nc->info_str), "sock=%s,fd=%d",
sock, vde_datafd(vde));
@@ -110,19 +110,17 @@ static int net_vde_init(VLANState *vlan, const char *model,
return 0;
}
-int net_init_vde(QemuOpts *opts, const char *name, VLANState *vlan)
+int net_init_vde(const NetClientOptions *opts, const char *name,
+ NetClientState *peer)
{
- const char *sock;
- const char *group;
- int port, mode;
+ const NetdevVdeOptions *vde;
- sock = qemu_opt_get(opts, "sock");
- group = qemu_opt_get(opts, "group");
+ assert(opts->kind == NET_CLIENT_OPTIONS_KIND_VDE);
+ vde = opts->vde;
- port = qemu_opt_get_number(opts, "port", 0);
- mode = qemu_opt_get_number(opts, "mode", 0700);
-
- if (net_vde_init(vlan, "vde", name, sock, port, group, mode) == -1) {
+ /* missing optional values have been initialized to "all bits zero" */
+ if (net_vde_init(peer, "vde", name, vde->sock, vde->port, vde->group,
+ vde->has_mode ? vde->mode : 0700) == -1) {
return -1;
}
diff --git a/net/vde.h b/net/vde.h
index 732e5756f6..6ce6698937 100644
--- a/net/vde.h
+++ b/net/vde.h
@@ -25,11 +25,12 @@
#define QEMU_NET_VDE_H
#include "qemu-common.h"
-#include "qemu-option.h"
+#include "qapi-types.h"
#ifdef CONFIG_VDE
-int net_init_vde(QemuOpts *opts, const char *name, VLANState *vlan);
+int net_init_vde(const NetClientOptions *opts, const char *name,
+ NetClientState *peer);
#endif /* CONFIG_VDE */
diff --git a/os-posix.c b/os-posix.c
index daf3d6f6f3..79fa2288e4 100644
--- a/os-posix.c
+++ b/os-posix.c
@@ -188,6 +188,11 @@ void os_parse_cmd_args(int index, const char *optarg)
case QEMU_OPTION_daemonize:
daemonize = 1;
break;
+#if defined(CONFIG_LINUX)
+ case QEMU_OPTION_enablefips:
+ fips_set_state(true);
+ break;
+#endif
}
return;
}
diff --git a/osdep.c b/osdep.c
index 03817f0f3a..c07faf546e 100644
--- a/osdep.c
+++ b/osdep.c
@@ -24,6 +24,7 @@
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
+#include <stdbool.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
@@ -48,6 +49,8 @@ extern int madvise(caddr_t, size_t, int);
#include "trace.h"
#include "qemu_socket.h"
+static bool fips_enabled = false;
+
static const char *qemu_version = QEMU_VERSION;
int socket_set_cork(int fd, int v)
@@ -253,3 +256,29 @@ const char *qemu_get_version(void)
{
return qemu_version;
}
+
+void fips_set_state(bool requested)
+{
+#ifdef __linux__
+ if (requested) {
+ FILE *fds = fopen("/proc/sys/crypto/fips_enabled", "r");
+ if (fds != NULL) {
+ fips_enabled = (fgetc(fds) == '1');
+ fclose(fds);
+ }
+ }
+#else
+ fips_enabled = false;
+#endif /* __linux__ */
+
+#ifdef _FIPS_DEBUG
+ fprintf(stderr, "FIPS mode %s (requested %s)\n",
+ (fips_enabled ? "enabled" : "disabled"),
+ (requested ? "enabled" : "disabled"));
+#endif
+}
+
+bool fips_get_state(void)
+{
+ return fips_enabled;
+}
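+
fips_get_state() is meant to let individual subsystems refuse algorithms that FIPS 140-2 forbids; the qemu-doc.texi hunk further down documents the VNC password/DES case. A hypothetical caller (not part of this patch) could gate the feature like this:

#include <stdio.h>
#include "osdep.h"

/* Hypothetical check, not part of this patch: DES-based VNC password
 * authentication must be refused when FIPS mode is in effect. */
static int sketch_check_vnc_password_auth(void)
{
    if (fips_get_state()) {
        fprintf(stderr,
                "VNC password auth is not available in FIPS mode\n");
        return -1;
    }
    return 0;
}
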
diff --git a/osdep.h b/osdep.h
index 3ea4af099b..d4b887d542 100644
--- a/osdep.h
+++ b/osdep.h
@@ -3,6 +3,7 @@
#include <stdarg.h>
#include <stddef.h>
+#include <stdbool.h>
#ifdef __OpenBSD__
#include <sys/types.h>
#include <sys/signal.h>
@@ -70,10 +71,12 @@ typedef signed int int_fast16_t;
#ifndef always_inline
#if !((__GNUC__ < 3) || defined(__APPLE__))
#ifdef __OPTIMIZE__
+#undef inline
#define inline __attribute__ (( always_inline )) __inline__
#endif
#endif
#else
+#undef inline
#define inline always_inline
#endif
@@ -152,4 +155,7 @@ void qemu_set_cloexec(int fd);
void qemu_set_version(const char *);
const char *qemu_get_version(void);
+void fips_set_state(bool requested);
+bool fips_get_state(void);
+
#endif
diff --git a/oslib-posix.c b/oslib-posix.c
index b6a3c7fc55..dbeb6272b8 100644
--- a/oslib-posix.c
+++ b/oslib-posix.c
@@ -41,6 +41,9 @@ extern int daemon(int, int);
therefore we need special code which handles running on Valgrind. */
# define QEMU_VMALLOC_ALIGN (512 * 4096)
# define CONFIG_VALGRIND
+#elif defined(__linux__) && defined(__s390x__)
+ /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. */
+# define QEMU_VMALLOC_ALIGN (256 * 4096)
#else
# define QEMU_VMALLOC_ALIGN getpagesize()
#endif
@@ -105,6 +108,8 @@ void *qemu_memalign(size_t alignment, size_t size)
return ptr;
}
+/* conflicts with qemu_vmalloc in bsd-user/mmap.c */
+#if !defined(CONFIG_BSD_USER)
/* alloc shared memory pages */
void *qemu_vmalloc(size_t size)
{
@@ -127,6 +132,7 @@ void *qemu_vmalloc(size_t size)
trace_qemu_vmalloc(size, ptr);
return ptr;
}
+#endif
void qemu_vfree(void *ptr)
{
diff --git a/pc-bios/keymaps/fi b/pc-bios/keymaps/fi
index 2a4e0f0454..4be75865a9 100644
--- a/pc-bios/keymaps/fi
+++ b/pc-bios/keymaps/fi
@@ -99,9 +99,7 @@ asterisk 0x2b shift
acute 0x2b altgr
multiply 0x2b shift altgr
guillemotleft 0x2c altgr
-less 0x2c shift altgr
guillemotright 0x2d altgr
-greater 0x2d shift altgr
copyright 0x2e altgr
leftdoublequotemark 0x2f altgr
grave 0x2f shift altgr
diff --git a/poison.h b/poison.h
index d396f20ca4..7d7b23b1fc 100644
--- a/poison.h
+++ b/poison.h
@@ -14,6 +14,7 @@
#pragma GCC poison TARGET_M68K
#pragma GCC poison TARGET_MIPS
#pragma GCC poison TARGET_MIPS64
+#pragma GCC poison TARGET_OPENRISC
#pragma GCC poison TARGET_PPC
#pragma GCC poison TARGET_PPCEMB
#pragma GCC poison TARGET_PPC64
diff --git a/posix-aio-compat.c b/posix-aio-compat.c
index 68361f555a..96e4daf505 100644
--- a/posix-aio-compat.c
+++ b/posix-aio-compat.c
@@ -29,6 +29,7 @@
#include "qemu-common.h"
#include "trace.h"
#include "block_int.h"
+#include "iov.h"
#include "block/raw-posix-aio.h"
@@ -351,11 +352,8 @@ static void *aio_thread(void *unused)
if (ret >= 0 && ret < aiocb->aio_nbytes && aiocb->common.bs->growable) {
/* A short read means that we have reached EOF. Pad the buffer
* with zeros for bytes after EOF. */
- QEMUIOVector qiov;
-
- qemu_iovec_init_external(&qiov, aiocb->aio_iov,
- aiocb->aio_niov);
- qemu_iovec_memset_skip(&qiov, 0, aiocb->aio_nbytes - ret, ret);
+ iov_memset(aiocb->aio_iov, aiocb->aio_niov, ret,
+ 0, aiocb->aio_nbytes - ret);
ret = aiocb->aio_nbytes;
}
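
The iov_memset() replacement keeps the semantics of the removed qemu_iovec_memset_skip() call: zero everything from the number of bytes actually read up to the requested length. Spelling out the argument order of the iov.h helper (a sketch, assuming only the prototype used above):

#include "iov.h"

/* Sketch only: pad a short read with zeros.  Arguments are
 * (vector, vector count, starting offset, fill byte, byte count). */
static void sketch_pad_short_read(struct iovec *iov, unsigned iov_cnt,
                                  size_t done, size_t expected)
{
    iov_memset(iov, iov_cnt, done, 0, expected - done);
}
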
diff --git a/qapi-schema-guest.json b/qapi-schema-guest.json
index d4055d262a..d955cf11fb 100644
--- a/qapi-schema-guest.json
+++ b/qapi-schema-guest.json
@@ -351,6 +351,26 @@
'returns': 'int' }
##
+# @guest-fstrim:
+#
+# Discard (or "trim") blocks which are not in use by the filesystem.
+#
+# @minimum:
+# Minimum contiguous free range to discard, in bytes. Free ranges
+# smaller than this may be ignored (this is a hint and the guest
+# may not respect it). By increasing this value, the fstrim
+# operation will complete more quickly for filesystems with badly
+# fragmented free space, although not all blocks will be discarded.
+# The default value is zero, meaning "discard every free block".
+#
+# Returns: Nothing.
+#
+# Since: 1.2
+##
+{ 'command': 'guest-fstrim',
+ 'data': { '*minimum': 'int' } }
+
+##
# @guest-suspend-disk
#
# Suspend guest to disk.
diff --git a/qapi-schema.json b/qapi-schema.json
index 3b6e3468b4..bd9c450029 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -260,10 +260,15 @@
#
# @total: total amount of bytes involved in the migration process
#
+# @total_time: total amount of milliseconds since migration started. If
+# migration has ended, it returns the total migration
+# time. (since 1.2)
+#
# Since: 0.14.0.
##
{ 'type': 'MigrationStats',
- 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' } }
+ 'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int',
+ 'total_time': 'int' } }
##
# @MigrationInfo
@@ -275,8 +280,9 @@
# 'cancelled'. If this field is not returned, no migration process
# has been initiated
#
-# @ram: #optional @MigrationStats containing detailed migration status,
-# only returned if status is 'active'
+# @ram: #optional @MigrationStats containing detailed migration
+# status, only returned if status is 'active' or
+# 'completed' ('completed' since 1.2)
#
# @disk: #optional @MigrationStats containing detailed disk migration
# status, only returned if status is 'active' and it is a block
@@ -337,7 +343,7 @@
# @CPU: the index of the virtual CPU
#
# @current: this only exists for backwards compatibility and should be ignored
-#
+#
# @halted: true if the virtual CPU is in the halt state. Halt usually refers
# to a processor specific low power mode.
#
@@ -392,6 +398,8 @@
#
# @backing_file: #optional the name of the backing file (for copy-on-write)
#
+# @backing_file_depth: number of files in the backing file chain (since: 1.2)
+#
# @encrypted: true if the backing device is encrypted
#
# @bps: total throughput limit in bytes per second is specified
@@ -412,9 +420,10 @@
##
{ 'type': 'BlockDeviceInfo',
'data': { 'file': 'str', 'ro': 'bool', 'drv': 'str',
- '*backing_file': 'str', 'encrypted': 'bool',
- 'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
- 'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int'} }
+ '*backing_file': 'str', 'backing_file_depth': 'int',
+ 'encrypted': 'bool', 'bps': 'int', 'bps_rd': 'int',
+ 'bps_wr': 'int', 'iops': 'int', 'iops_rd': 'int',
+ 'iops_wr': 'int'} }
##
# @BlockDeviceIoStatus:
@@ -680,7 +689,7 @@
# @SpiceInfo
#
# Information about the SPICE session.
-#
+#
# @enabled: true if the SPICE server is enabled, false otherwise
#
# @host: #optional The hostname the SPICE server is bound to. This depends on
@@ -1291,7 +1300,7 @@
##
{ 'command': 'human-monitor-command',
'data': {'command-line': 'str', '*cpu-index': 'int'},
- 'returns': 'str' }
+ 'returns': 'str' }
##
# @migrate_cancel
@@ -1452,7 +1461,7 @@
# @password: the new password
#
# @connected: #optional how to handle existing clients when changing the
-# password. If nothing is specified, defaults to `keep'
+# password. If nothing is specified, defaults to `keep'
# `fail' to fail the command if clients are connected
# `disconnect' to disconnect existing clients
# `keep' to maintain existing clients
@@ -1592,7 +1601,7 @@
# If the argument combination is invalid, InvalidParameterCombination
#
# Since: 1.1
-##
+##
{ 'command': 'block_set_io_throttle',
'data': { 'device': 'str', 'bps': 'int', 'bps_rd': 'int', 'bps_wr': 'int',
'iops': 'int', 'iops_rd': 'int', 'iops_wr': 'int' } }
@@ -1651,7 +1660,7 @@
# Returns: Nothing on success
# If the job type does not support throttling, NotSupported
# If the speed value is invalid, InvalidParameter
-# If streaming is not active on this device, DeviceNotActive
+# If no background operation is active on this device, DeviceNotActive
#
# Since: 1.1
##
@@ -1661,9 +1670,9 @@
##
# @block-job-cancel:
#
-# Stop an active block streaming operation.
+# Stop an active background block operation.
#
-# This command returns immediately after marking the active block streaming
+# This command returns immediately after marking the active background block
# operation for cancellation. It is an error to call this command if no
# operation is in progress.
#
@@ -1671,16 +1680,15 @@
# BLOCK_JOB_CANCELLED event. Before that happens the job is still visible when
# enumerated using query-block-jobs.
#
-# The image file retains its backing file unless the streaming operation happens
-# to complete just as it is being cancelled.
-#
-# A new block streaming operation can be started at a later time to finish
-# copying all data from the backing file.
+# For streaming, the image file retains its backing file unless the streaming
+# operation happens to complete just as it is being cancelled. A new streaming
+# operation can be started at a later time to finish copying all data from the
+# backing file.
#
# @device: the device name
#
# Returns: Nothing on success
-# If streaming is not active on this device, DeviceNotActive
+# If no background operation is active on this device, DeviceNotActive
# If cancellation already in progress, DeviceInUse
#
# Since: 1.1
@@ -1783,34 +1791,36 @@
#
# Dump guest's memory to vmcore. It is a synchronous operation that can take
# very long depending on the amount of guest memory. This command is only
-# supported only on i386 and x86_64
-#
-# @paging: if true, do paging to get guest's memory mapping. The @paging's
-# default value of @paging is false, If you want to use gdb to process the
-# core, please set @paging to true. The reason why the @paging's value is
-# false:
-# 1. guest machine in a catastrophic state can have corrupted memory,
-# which we cannot trust.
-# 2. The guest machine can be in read-mode even if paging is enabled.
-# For example: the guest machine uses ACPI to sleep, and ACPI sleep
-# state goes in real-mode
+# supported on i386 and x86_64.
+#
+# @paging: if true, do paging to get guest's memory mapping. This allows
+# using gdb to process the core file. However, setting @paging to false
+# may be desirable because of two reasons:
+#
+# 1. The guest may be in a catastrophic state or can have corrupted
+# memory, which cannot be trusted
+# 2. The guest can be in real-mode even if paging is enabled. For example,
+# the guest uses ACPI to sleep, and ACPI sleep state goes in real-mode
+#
# @protocol: the filename or file descriptor of the vmcore. The supported
-# protocol can be file or fd:
+# protocols are:
+#
# 1. file: the protocol starts with "file:", and the following string is
# the file's path.
# 2. fd: the protocol starts with "fd:", and the following string is the
# fd's name.
+#
# @begin: #optional if specified, the starting physical address.
+#
# @length: #optional if specified, the memory size, in bytes. If you don't
-# want to dump all guest's memory, please specify the start @begin and
-# @length
+# want to dump all of the guest's memory, please specify the start @begin and @length
#
# Returns: nothing on success
# If @begin contains an invalid address, InvalidParameter
# If only one of @begin and @length is specified, MissingParameter
# If @protocol starts with "fd:", and the fd cannot be found, FdNotFound
# If @protocol starts with "file:", and the file cannot be
-# opened, OpenFileFailed
+# opened, OpenFileFailed
# If @protocol does not start with "fd:" or "file:", InvalidParameter
# If an I/O error occurs while writing the file, IOError
# If the target does not support this command, Unsupported
@@ -1862,3 +1872,330 @@
# Since: 0.14.0
##
{ 'command': 'netdev_del', 'data': {'id': 'str'} }
+
+##
+# @NetdevNoneOptions
+#
+# Use it alone to have zero network devices.
+#
+# Since 1.2
+##
+{ 'type': 'NetdevNoneOptions',
+ 'data': { } }
+
+##
+# @NetLegacyNicOptions
+#
+# Create a new Network Interface Card.
+#
+# @netdev: #optional id of -netdev to connect to
+#
+# @macaddr: #optional MAC address
+#
+# @model: #optional device model (e1000, rtl8139, virtio etc.)
+#
+# @addr: #optional PCI device address
+#
+# @vectors: #optional number of MSI-x vectors, 0 to disable MSI-X
+#
+# Since 1.2
+##
+{ 'type': 'NetLegacyNicOptions',
+ 'data': {
+ '*netdev': 'str',
+ '*macaddr': 'str',
+ '*model': 'str',
+ '*addr': 'str',
+ '*vectors': 'uint32' } }
+
+##
+# @String
+#
+# A fat type wrapping 'str', to be embedded in lists.
+#
+# Since 1.2
+##
+{ 'type': 'String',
+ 'data': {
+ 'str': 'str' } }
+
+##
+# @NetdevUserOptions
+#
+# Use the user mode network stack which requires no administrator privilege to
+# run.
+#
+# @hostname: #optional client hostname reported by the builtin DHCP server
+#
+# @restrict: #optional isolate the guest from the host
+#
+# @ip: #optional legacy parameter, use net= instead
+#
+# @net: #optional IP address and optional netmask
+#
+# @host: #optional guest-visible address of the host
+#
+# @tftp: #optional root directory of the built-in TFTP server
+#
+# @bootfile: #optional BOOTP filename, for use with tftp=
+#
+# @dhcpstart: #optional the first of the 16 IPs the built-in DHCP server can
+# assign
+#
+# @dns: #optional guest-visible address of the virtual nameserver
+#
+# @smb: #optional root directory of the built-in SMB server
+#
+# @smbserver: #optional IP address of the built-in SMB server
+#
+# @hostfwd: #optional redirect incoming TCP or UDP host connections to guest
+# endpoints
+#
+# @guestfwd: #optional forward guest TCP connections
+#
+# Since 1.2
+##
+{ 'type': 'NetdevUserOptions',
+ 'data': {
+ '*hostname': 'str',
+ '*restrict': 'bool',
+ '*ip': 'str',
+ '*net': 'str',
+ '*host': 'str',
+ '*tftp': 'str',
+ '*bootfile': 'str',
+ '*dhcpstart': 'str',
+ '*dns': 'str',
+ '*smb': 'str',
+ '*smbserver': 'str',
+ '*hostfwd': ['String'],
+ '*guestfwd': ['String'] } }
+
+##
+# @NetdevTapOptions
+#
+# Connect the host TAP network interface to the VLAN.
+#
+# @ifname: #optional interface name
+#
+# @fd: #optional file descriptor of an already opened tap
+#
+# @script: #optional script to initialize the interface
+#
+# @downscript: #optional script to shut down the interface
+#
+# @helper: #optional command to execute to configure bridge
+#
+# @sndbuf: #optional send buffer limit. Understands [TGMKkb] suffixes.
+#
+# @vnet_hdr: #optional enable the IFF_VNET_HDR flag on the tap interface
+#
+# @vhost: #optional enable vhost-net network accelerator
+#
+# @vhostfd: #optional file descriptor of an already opened vhost net device
+#
+# @vhostforce: #optional vhost on for non-MSIX virtio guests
+#
+# Since 1.2
+##
+{ 'type': 'NetdevTapOptions',
+ 'data': {
+ '*ifname': 'str',
+ '*fd': 'str',
+ '*script': 'str',
+ '*downscript': 'str',
+ '*helper': 'str',
+ '*sndbuf': 'size',
+ '*vnet_hdr': 'bool',
+ '*vhost': 'bool',
+ '*vhostfd': 'str',
+ '*vhostforce': 'bool' } }
+
+##
+# @NetdevSocketOptions
+#
+# Connect the VLAN to a remote VLAN in another QEMU virtual machine using a TCP
+# socket connection.
+#
+# @fd: #optional file descriptor of an already opened socket
+#
+# @listen: #optional port number, and optional hostname, to listen on
+#
+# @connect: #optional port number, and optional hostname, to connect to
+#
+# @mcast: #optional UDP multicast address and port number
+#
+# @localaddr: #optional source address and port for multicast and udp packets
+#
+# @udp: #optional UDP unicast address and port number
+#
+# Since 1.2
+##
+{ 'type': 'NetdevSocketOptions',
+ 'data': {
+ '*fd': 'str',
+ '*listen': 'str',
+ '*connect': 'str',
+ '*mcast': 'str',
+ '*localaddr': 'str',
+ '*udp': 'str' } }
+
+##
+# @NetdevVdeOptions
+#
+# Connect the VLAN to a vde switch running on the host.
+#
+# @sock: #optional socket path
+#
+# @port: #optional port number
+#
+# @group: #optional group owner of socket
+#
+# @mode: #optional permissions for socket
+#
+# Since 1.2
+##
+{ 'type': 'NetdevVdeOptions',
+ 'data': {
+ '*sock': 'str',
+ '*port': 'uint16',
+ '*group': 'str',
+ '*mode': 'uint16' } }
+
+##
+# @NetdevDumpOptions
+#
+# Dump VLAN network traffic to a file.
+#
+# @len: #optional per-packet size limit (64k default). Understands [TGMKkb]
+# suffixes.
+#
+# @file: #optional dump file path (default is qemu-vlan0.pcap)
+#
+# Since 1.2
+##
+{ 'type': 'NetdevDumpOptions',
+ 'data': {
+ '*len': 'size',
+ '*file': 'str' } }
+
+##
+# @NetdevBridgeOptions
+#
+# Connect a host TAP network interface to a host bridge device.
+#
+# @br: #optional bridge name
+#
+# @helper: #optional command to execute to configure bridge
+#
+# Since 1.2
+##
+{ 'type': 'NetdevBridgeOptions',
+ 'data': {
+ '*br': 'str',
+ '*helper': 'str' } }
+
+##
+# @NetdevHubPortOptions
+#
+# Connect two or more net clients through a software hub.
+#
+# @hubid: hub identifier number
+#
+# Since 1.2
+##
+{ 'type': 'NetdevHubPortOptions',
+ 'data': {
+ 'hubid': 'int32' } }
+
+##
+# @NetClientOptions
+#
+# A discriminated record of network device traits.
+#
+# Since 1.2
+##
+{ 'union': 'NetClientOptions',
+ 'data': {
+ 'none': 'NetdevNoneOptions',
+ 'nic': 'NetLegacyNicOptions',
+ 'user': 'NetdevUserOptions',
+ 'tap': 'NetdevTapOptions',
+ 'socket': 'NetdevSocketOptions',
+ 'vde': 'NetdevVdeOptions',
+ 'dump': 'NetdevDumpOptions',
+ 'bridge': 'NetdevBridgeOptions',
+ 'hubport': 'NetdevHubPortOptions' } }
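
The tap, bridge and vde hunks above consume the C code generated from this union; roughly, the generator emits a kind discriminator plus a union of pointers into qapi-types.h. The layout below is a sketch of that generated code, not the authoritative definition:

/* Sketch of the qapi-generated type; the real definition is emitted
 * into qapi-types.h by the QAPI code generator. */
typedef enum NetClientOptionsKind {
    NET_CLIENT_OPTIONS_KIND_NONE,
    NET_CLIENT_OPTIONS_KIND_NIC,
    NET_CLIENT_OPTIONS_KIND_USER,
    NET_CLIENT_OPTIONS_KIND_TAP,
    NET_CLIENT_OPTIONS_KIND_SOCKET,
    NET_CLIENT_OPTIONS_KIND_VDE,
    NET_CLIENT_OPTIONS_KIND_DUMP,
    NET_CLIENT_OPTIONS_KIND_BRIDGE,
    NET_CLIENT_OPTIONS_KIND_HUBPORT,
    NET_CLIENT_OPTIONS_KIND_MAX
} NetClientOptionsKind;

struct NetClientOptions {
    NetClientOptionsKind kind;
    union {                              /* selected by 'kind' */
        void *data;
        NetdevNoneOptions *none;
        NetLegacyNicOptions *nic;
        NetdevUserOptions *user;
        NetdevTapOptions *tap;
        NetdevSocketOptions *socket;
        NetdevVdeOptions *vde;
        NetdevDumpOptions *dump;
        NetdevBridgeOptions *bridge;
        NetdevHubPortOptions *hubport;
    };
};
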
+
+##
+# @NetLegacy
+#
+# Captures the configuration of a network device; legacy.
+#
+# @vlan: #optional vlan number
+#
+# @id: #optional identifier for monitor commands
+#
+# @name: #optional identifier for monitor commands, ignored if @id is present
+#
+# @opts: device type specific properties (legacy)
+#
+# Since 1.2
+##
+{ 'type': 'NetLegacy',
+ 'data': {
+ '*vlan': 'int32',
+ '*id': 'str',
+ '*name': 'str',
+ 'opts': 'NetClientOptions' } }
+
+##
+# @Netdev
+#
+# Captures the configuration of a network device.
+#
+# @id: identifier for monitor commands.
+#
+# @opts: device type specific properties
+#
+# Since 1.2
+##
+{ 'type': 'Netdev',
+ 'data': {
+ 'id': 'str',
+ 'opts': 'NetClientOptions' } }
+
+##
+# @getfd:
+#
+# Receive a file descriptor via SCM rights and assign it a name
+#
+# @fdname: file descriptor name
+#
+# Returns: Nothing on success
+# If file descriptor was not received, FdNotSupplied
+# If @fdname is not valid, InvalidParameterType
+#
+# Since: 0.14.0
+#
+# Notes: If @fdname already exists, the file descriptor assigned to
+# it will be closed and replaced by the received file
+# descriptor.
+# The 'closefd' command can be used to explicitly close the
+# file descriptor when it is no longer needed.
+##
+{ 'command': 'getfd', 'data': {'fdname': 'str'} }
+
+##
+# @closefd:
+#
+# Close a file descriptor previously passed via SCM rights
+#
+# @fdname: file descriptor name
+#
+# Returns: Nothing on success
+# If @fdname is not found, FdNotFound
+#
+# Since: 0.14.0
+##
+{ 'command': 'closefd', 'data': {'fdname': 'str'} }
diff --git a/qapi/Makefile.objs b/qapi/Makefile.objs
index d0b0c16b90..5f5846e767 100644
--- a/qapi/Makefile.objs
+++ b/qapi/Makefile.objs
@@ -1,3 +1,3 @@
qapi-obj-y = qapi-visit-core.o qapi-dealloc-visitor.o qmp-input-visitor.o
qapi-obj-y += qmp-output-visitor.o qmp-registry.o qmp-dispatch.o
-qapi-obj-y += string-input-visitor.o string-output-visitor.o
+qapi-obj-y += string-input-visitor.o string-output-visitor.o opts-visitor.o
diff --git a/qapi/opts-visitor.c b/qapi/opts-visitor.c
new file mode 100644
index 0000000000..a59d306e46
--- /dev/null
+++ b/qapi/opts-visitor.c
@@ -0,0 +1,427 @@
+/*
+ * Options Visitor
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Laszlo Ersek <lersek@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include "opts-visitor.h"
+#include "qemu-queue.h"
+#include "qemu-option-internal.h"
+#include "qapi-visit-impl.h"
+
+
+struct OptsVisitor
+{
+ Visitor visitor;
+
+ /* Ownership remains with opts_visitor_new()'s caller. */
+ const QemuOpts *opts_root;
+
+ unsigned depth;
+
+ /* Non-null iff depth is positive. Each key is a QemuOpt name. Each value
+ * is a non-empty GQueue, enumerating all QemuOpt occurrences with that
+ * name. */
+ GHashTable *unprocessed_opts;
+
+ /* The list currently being traversed with opts_start_list() /
+ * opts_next_list(). The list must have a struct element type in the
+ * schema, with a single mandatory scalar member. */
+ GQueue *repeated_opts;
+ bool repeated_opts_first;
+
+ /* If "opts_root->id" is set, reinstantiate it as a fake QemuOpt for
+ * uniformity. Only its "name" and "str" fields are set. "fake_id_opt" does
+ * not survive or escape the OptsVisitor object.
+ */
+ QemuOpt *fake_id_opt;
+};
+
+
+static void
+destroy_list(gpointer list)
+{
+ g_queue_free(list);
+}
+
+
+static void
+opts_visitor_insert(GHashTable *unprocessed_opts, const QemuOpt *opt)
+{
+ GQueue *list;
+
+ list = g_hash_table_lookup(unprocessed_opts, opt->name);
+ if (list == NULL) {
+ list = g_queue_new();
+
+ /* GHashTable will never try to free the keys -- we supply NULL as
+ * "key_destroy_func" in opts_start_struct(). Thus cast away key
+ * const-ness in order to suppress gcc's warning.
+ */
+ g_hash_table_insert(unprocessed_opts, (gpointer)opt->name, list);
+ }
+
+ /* Similarly, destroy_list() doesn't call g_queue_free_full(). */
+ g_queue_push_tail(list, (gpointer)opt);
+}
+
+
+static void
+opts_start_struct(Visitor *v, void **obj, const char *kind,
+ const char *name, size_t size, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+
+ *obj = g_malloc0(size > 0 ? size : 1);
+ if (ov->depth++ > 0) {
+ return;
+ }
+
+ ov->unprocessed_opts = g_hash_table_new_full(&g_str_hash, &g_str_equal,
+ NULL, &destroy_list);
+ QTAILQ_FOREACH(opt, &ov->opts_root->head, next) {
+ /* ensured by qemu-option.c::opts_do_parse() */
+ assert(strcmp(opt->name, "id") != 0);
+
+ opts_visitor_insert(ov->unprocessed_opts, opt);
+ }
+
+ if (ov->opts_root->id != NULL) {
+ ov->fake_id_opt = g_malloc0(sizeof *ov->fake_id_opt);
+
+ ov->fake_id_opt->name = "id";
+ ov->fake_id_opt->str = ov->opts_root->id;
+ opts_visitor_insert(ov->unprocessed_opts, ov->fake_id_opt);
+ }
+}
+
+
+static gboolean
+ghr_true(gpointer ign_key, gpointer ign_value, gpointer ign_user_data)
+{
+ return TRUE;
+}
+
+
+static void
+opts_end_struct(Visitor *v, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ GQueue *any;
+
+ if (--ov->depth > 0) {
+ return;
+ }
+
+ /* we should have processed all (distinct) QemuOpt instances */
+ any = g_hash_table_find(ov->unprocessed_opts, &ghr_true, NULL);
+ if (any) {
+ const QemuOpt *first;
+
+ first = g_queue_peek_head(any);
+ error_set(errp, QERR_INVALID_PARAMETER, first->name);
+ }
+ g_hash_table_destroy(ov->unprocessed_opts);
+ ov->unprocessed_opts = NULL;
+ g_free(ov->fake_id_opt);
+ ov->fake_id_opt = NULL;
+}
+
+
+static GQueue *
+lookup_distinct(const OptsVisitor *ov, const char *name, Error **errp)
+{
+ GQueue *list;
+
+ list = g_hash_table_lookup(ov->unprocessed_opts, name);
+ if (!list) {
+ error_set(errp, QERR_MISSING_PARAMETER, name);
+ }
+ return list;
+}
+
+
+static void
+opts_start_list(Visitor *v, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+
+ /* we can't traverse a list in a list */
+ assert(ov->repeated_opts == NULL);
+ ov->repeated_opts = lookup_distinct(ov, name, errp);
+ ov->repeated_opts_first = (ov->repeated_opts != NULL);
+}
+
+
+static GenericList *
+opts_next_list(Visitor *v, GenericList **list, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ GenericList **link;
+
+ if (ov->repeated_opts_first) {
+ ov->repeated_opts_first = false;
+ link = list;
+ } else {
+ const QemuOpt *opt;
+
+ opt = g_queue_pop_head(ov->repeated_opts);
+ if (g_queue_is_empty(ov->repeated_opts)) {
+ g_hash_table_remove(ov->unprocessed_opts, opt->name);
+ return NULL;
+ }
+ link = &(*list)->next;
+ }
+
+ *link = g_malloc0(sizeof **link);
+ return *link;
+}
+
+
+static void
+opts_end_list(Visitor *v, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+
+ ov->repeated_opts = NULL;
+}
+
+
+static const QemuOpt *
+lookup_scalar(const OptsVisitor *ov, const char *name, Error **errp)
+{
+ if (ov->repeated_opts == NULL) {
+ GQueue *list;
+
+ /* the last occurrence of any QemuOpt takes effect when queried by name
+ */
+ list = lookup_distinct(ov, name, errp);
+ return list ? g_queue_peek_tail(list) : NULL;
+ }
+ return g_queue_peek_head(ov->repeated_opts);
+}
+
+
+static void
+processed(OptsVisitor *ov, const char *name)
+{
+ if (ov->repeated_opts == NULL) {
+ g_hash_table_remove(ov->unprocessed_opts, name);
+ }
+}
+
+
+static void
+opts_type_str(Visitor *v, char **obj, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+
+ opt = lookup_scalar(ov, name, errp);
+ if (!opt) {
+ return;
+ }
+ *obj = g_strdup(opt->str ? opt->str : "");
+ processed(ov, name);
+}
+
+
+/* mimics qemu-option.c::parse_option_bool() */
+static void
+opts_type_bool(Visitor *v, bool *obj, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+
+ opt = lookup_scalar(ov, name, errp);
+ if (!opt) {
+ return;
+ }
+
+ if (opt->str) {
+ if (strcmp(opt->str, "on") == 0 ||
+ strcmp(opt->str, "yes") == 0 ||
+ strcmp(opt->str, "y") == 0) {
+ *obj = true;
+ } else if (strcmp(opt->str, "off") == 0 ||
+ strcmp(opt->str, "no") == 0 ||
+ strcmp(opt->str, "n") == 0) {
+ *obj = false;
+ } else {
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
+ "on|yes|y|off|no|n");
+ return;
+ }
+ } else {
+ *obj = true;
+ }
+
+ processed(ov, name);
+}
+
+
+static void
+opts_type_int(Visitor *v, int64_t *obj, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+ const char *str;
+ long long val;
+ char *endptr;
+
+ opt = lookup_scalar(ov, name, errp);
+ if (!opt) {
+ return;
+ }
+ str = opt->str ? opt->str : "";
+
+ errno = 0;
+ val = strtoll(str, &endptr, 0);
+ if (*str != '\0' && *endptr == '\0' && errno == 0 && INT64_MIN <= val &&
+ val <= INT64_MAX) {
+ *obj = val;
+ processed(ov, name);
+ return;
+ }
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name, "an int64 value");
+}
+
+
+static void
+opts_type_uint64(Visitor *v, uint64_t *obj, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+ const char *str;
+
+ opt = lookup_scalar(ov, name, errp);
+ if (!opt) {
+ return;
+ }
+
+ str = opt->str;
+ if (str != NULL) {
+ while (isspace((unsigned char)*str)) {
+ ++str;
+ }
+
+ if (*str != '-' && *str != '\0') {
+ unsigned long long val;
+ char *endptr;
+
+ /* non-empty, non-negative subject sequence */
+ errno = 0;
+ val = strtoull(str, &endptr, 0);
+ if (*endptr == '\0' && errno == 0 && val <= UINT64_MAX) {
+ *obj = val;
+ processed(ov, name);
+ return;
+ }
+ }
+ }
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
+ "an uint64 value");
+}
+
+
+static void
+opts_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+ const QemuOpt *opt;
+ int64_t val;
+ char *endptr;
+
+ opt = lookup_scalar(ov, name, errp);
+ if (!opt) {
+ return;
+ }
+
+ val = strtosz_suffix(opt->str ? opt->str : "", &endptr,
+ STRTOSZ_DEFSUFFIX_B);
+ if (val != -1 && *endptr == '\0') {
+ *obj = val;
+ processed(ov, name);
+ return;
+ }
+ error_set(errp, QERR_INVALID_PARAMETER_VALUE, opt->name,
+ "a size value representible as a non-negative int64");
+}
+
+
+static void
+opts_start_optional(Visitor *v, bool *present, const char *name,
+ Error **errp)
+{
+ OptsVisitor *ov = DO_UPCAST(OptsVisitor, visitor, v);
+
+ /* we only support a single mandatory scalar field in a list node */
+ assert(ov->repeated_opts == NULL);
+ *present = (lookup_distinct(ov, name, NULL) != NULL);
+}
+
+
+OptsVisitor *
+opts_visitor_new(const QemuOpts *opts)
+{
+ OptsVisitor *ov;
+
+ ov = g_malloc0(sizeof *ov);
+
+ ov->visitor.start_struct = &opts_start_struct;
+ ov->visitor.end_struct = &opts_end_struct;
+
+ ov->visitor.start_list = &opts_start_list;
+ ov->visitor.next_list = &opts_next_list;
+ ov->visitor.end_list = &opts_end_list;
+
+ /* input_type_enum() covers both "normal" enums and union discriminators.
+ * The union discriminator field is always generated as "type"; it should
+ * match the "type" QemuOpt child of any QemuOpts.
+ *
+ * input_type_enum() will remove the looked-up key from the
+ * "unprocessed_opts" hash even if the lookup fails, because the removal is
+ * done earlier in opts_type_str(). This should be harmless.
+ */
+ ov->visitor.type_enum = &input_type_enum;
+
+ ov->visitor.type_int = &opts_type_int;
+ ov->visitor.type_uint64 = &opts_type_uint64;
+ ov->visitor.type_size = &opts_type_size;
+ ov->visitor.type_bool = &opts_type_bool;
+ ov->visitor.type_str = &opts_type_str;
+
+ /* type_number() is not filled in, but this is not the first visitor to
+ * skip some mandatory methods... */
+
+ ov->visitor.start_optional = &opts_start_optional;
+
+ ov->opts_root = opts;
+
+ return ov;
+}
+
+
+void
+opts_visitor_cleanup(OptsVisitor *ov)
+{
+ if (ov->unprocessed_opts != NULL) {
+ g_hash_table_destroy(ov->unprocessed_opts);
+ }
+ g_free(ov->fake_id_opt);
+ memset(ov, '\0', sizeof *ov);
+}
+
+
+Visitor *
+opts_get_visitor(OptsVisitor *ov)
+{
+ return &ov->visitor;
+}
diff --git a/qapi/opts-visitor.h b/qapi/opts-visitor.h
new file mode 100644
index 0000000000..ea1a395573
--- /dev/null
+++ b/qapi/opts-visitor.h
@@ -0,0 +1,31 @@
+/*
+ * Options Visitor
+ *
+ * Copyright Red Hat, Inc. 2012
+ *
+ * Author: Laszlo Ersek <lersek@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#ifndef OPTS_VISITOR_H
+#define OPTS_VISITOR_H
+
+#include "qapi-visit-core.h"
+#include "qemu-option.h"
+
+typedef struct OptsVisitor OptsVisitor;
+
+/* Unlike qemu-option.c::parse_option_number(), OptsVisitor's "int"
+ * parser relies on strtoll() instead of strtoull(). Consequences:
+ * - string representations of negative numbers yield negative values,
+ * - values below INT64_MIN or LLONG_MIN are rejected,
+ * - values above INT64_MAX or LLONG_MAX are rejected.
+ */
+OptsVisitor *opts_visitor_new(const QemuOpts *opts);
+void opts_visitor_cleanup(OptsVisitor *nv);
+Visitor *opts_get_visitor(OptsVisitor *nv);
+
+#endif
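
Typical use of the visitor (hypothetical, not part of this patch): build it over a parsed QemuOpts group, hand the Visitor to a qapi-generated visit_type_*() function, then clean up. Repeated keys such as hostfwd= end up in the per-name GQueue lists described in opts-visitor.c and come out as the ['String'] lists in the schema.

#include "opts-visitor.h"
#include "qapi-visit.h"      /* generated; declares visit_type_Netdev() */

/* Hypothetical helper, not part of this patch: convert a -netdev
 * QemuOpts group into the qapi-generated Netdev structure. */
static Netdev *sketch_opts_to_netdev(QemuOpts *opts, Error **errp)
{
    OptsVisitor *ov = opts_visitor_new(opts);
    Netdev *object = NULL;

    visit_type_Netdev(opts_get_visitor(ov), &object, NULL, errp);
    opts_visitor_cleanup(ov);
    return object;
}
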
diff --git a/qapi/qapi-visit-core.c b/qapi/qapi-visit-core.c
index ffffbf79aa..7a82b63766 100644
--- a/qapi/qapi-visit-core.c
+++ b/qapi/qapi-visit-core.c
@@ -39,9 +39,8 @@ void visit_start_struct(Visitor *v, void **obj, const char *kind,
void visit_end_struct(Visitor *v, Error **errp)
{
- if (!error_is_set(errp)) {
- v->end_struct(v, errp);
- }
+ assert(!error_is_set(errp));
+ v->end_struct(v, errp);
}
void visit_start_list(Visitor *v, const char *name, Error **errp)
@@ -62,9 +61,8 @@ GenericList *visit_next_list(Visitor *v, GenericList **list, Error **errp)
void visit_end_list(Visitor *v, Error **errp)
{
- if (!error_is_set(errp)) {
- v->end_list(v, errp);
- }
+ assert(!error_is_set(errp));
+ v->end_list(v, errp);
}
void visit_start_optional(Visitor *v, bool *present, const char *name,
@@ -236,6 +234,13 @@ void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp)
}
}
+void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp)
+{
+ if (!error_is_set(errp)) {
+ (v->type_size ? v->type_size : v->type_uint64)(v, obj, name, errp);
+ }
+}
+
void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp)
{
if (!error_is_set(errp)) {
@@ -298,7 +303,7 @@ void input_type_enum(Visitor *v, int *obj, const char *strings[],
}
if (strings[value] == NULL) {
- error_set(errp, QERR_INVALID_PARAMETER, name ? name : "null");
+ error_set(errp, QERR_INVALID_PARAMETER, enum_str);
g_free(enum_str);
return;
}
diff --git a/qapi/qapi-visit-core.h b/qapi/qapi-visit-core.h
index a19d70c104..60acedac77 100644
--- a/qapi/qapi-visit-core.h
+++ b/qapi/qapi-visit-core.h
@@ -60,6 +60,8 @@ struct Visitor
void (*type_int16)(Visitor *v, int16_t *obj, const char *name, Error **errp);
void (*type_int32)(Visitor *v, int32_t *obj, const char *name, Error **errp);
void (*type_int64)(Visitor *v, int64_t *obj, const char *name, Error **errp);
+ /* visit_type_size() falls back to (*type_uint64)() if type_size is unset */
+ void (*type_size)(Visitor *v, uint64_t *obj, const char *name, Error **errp);
};
void visit_start_handle(Visitor *v, void **obj, const char *kind,
@@ -85,6 +87,7 @@ void visit_type_int8(Visitor *v, int8_t *obj, const char *name, Error **errp);
void visit_type_int16(Visitor *v, int16_t *obj, const char *name, Error **errp);
void visit_type_int32(Visitor *v, int32_t *obj, const char *name, Error **errp);
void visit_type_int64(Visitor *v, int64_t *obj, const char *name, Error **errp);
+void visit_type_size(Visitor *v, uint64_t *obj, const char *name, Error **errp);
void visit_type_bool(Visitor *v, bool *obj, const char *name, Error **errp);
void visit_type_str(Visitor *v, char **obj, const char *name, Error **errp);
void visit_type_number(Visitor *v, double *obj, const char *name, Error **errp);
diff --git a/qemu-bridge-helper.c b/qemu-bridge-helper.c
index aec5008e22..652eec99fd 100644
--- a/qemu-bridge-helper.c
+++ b/qemu-bridge-helper.c
@@ -35,6 +35,10 @@
#include <linux/sockios.h>
+#ifndef SIOCBRADDIF
+#include <linux/if_bridge.h>
+#endif
+
#include "qemu-queue.h"
#include "net/tap-linux.h"
@@ -221,6 +225,10 @@ static int drop_privileges(void)
int main(int argc, char **argv)
{
struct ifreq ifr;
+#ifndef SIOCBRADDIF
+ unsigned long ifargs[4];
+#endif
+ int ifindex;
int fd, ctlfd, unixfd = -1;
int use_vnet = 0;
int mtu;
@@ -361,9 +369,19 @@ int main(int argc, char **argv)
/* add the interface to the bridge */
prep_ifreq(&ifr, bridge);
- ifr.ifr_ifindex = if_nametoindex(iface);
-
- if (ioctl(ctlfd, SIOCBRADDIF, &ifr) == -1) {
+ ifindex = if_nametoindex(iface);
+#ifndef SIOCBRADDIF
+ ifargs[0] = BRCTL_ADD_IF;
+ ifargs[1] = ifindex;
+ ifargs[2] = 0;
+ ifargs[3] = 0;
+ ifr.ifr_data = (void *)ifargs;
+ ret = ioctl(ctlfd, SIOCDEVPRIVATE, &ifr);
+#else
+ ifr.ifr_ifindex = ifindex;
+ ret = ioctl(ctlfd, SIOCBRADDIF, &ifr);
+#endif
+ if (ret == -1) {
fprintf(stderr, "failed to add interface `%s' to bridge `%s': %s\n",
iface, bridge, strerror(errno));
ret = EXIT_FAILURE;
diff --git a/qemu-common.h b/qemu-common.h
index 8f87e413a7..f16079f432 100644
--- a/qemu-common.h
+++ b/qemu-common.h
@@ -17,6 +17,7 @@ typedef struct DeviceState DeviceState;
struct Monitor;
typedef struct Monitor Monitor;
+typedef struct MigrationParams MigrationParams;
/* we put basic includes here to avoid repeating them in device drivers */
#include <stdlib.h>
@@ -135,8 +136,27 @@ int qemu_main(int argc, char **argv, char **envp);
void qemu_get_timedate(struct tm *tm, int offset);
int qemu_timedate_diff(struct tm *tm);
+/**
+ * is_help_option:
+ * @s: string to test
+ *
+ * Check whether @s is one of the standard strings which indicate
+ * that the user is asking for a list of the valid values for a
+ * command option like -cpu or -M. The current accepted strings
+ * are 'help' and '?'. '?' is deprecated (it is a shell wildcard
+ * which makes it annoying to use in a reliable way) but provided
+ * for backwards compatibility.
+ *
+ * Returns: true if @s is a request for a list.
+ */
+static inline bool is_help_option(const char *s)
+{
+ return !strcmp(s, "?") || !strcmp(s, "help");
+}
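+
A hypothetical option handler (not part of this patch) would use it where the old code compared against "?" directly:

#include <stdio.h>
#include <stdlib.h>
#include "qemu-common.h"

/* Hypothetical -cpu handler: print the model list instead of booting
 * when the user asks for help. */
static const char *sketch_parse_cpu_option(const char *optarg)
{
    if (is_help_option(optarg)) {
        printf("Available CPU models: ...\n");   /* list_cpus() in real code */
        exit(0);
    }
    return optarg;
}
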
+
/* cutils.c */
void pstrcpy(char *buf, int buf_size, const char *str);
+void strpadcpy(char *buf, int buf_size, const char *str, char pad);
char *pstrcat(char *buf, int buf_size, const char *s);
int strstart(const char *str, const char *val, const char **ptr);
int stristart(const char *str, const char *val, const char **ptr);
@@ -205,9 +225,6 @@ int qemu_pipe(int pipefd[2]);
#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags)
#endif
-int qemu_recvv(int sockfd, struct iovec *iov, int len, int iov_offset);
-int qemu_sendv(int sockfd, struct iovec *iov, int len, int iov_offset);
-
/* Error handling. */
void QEMU_NORETURN hw_error(const char *fmt, ...) GCC_FMT_ATTR(1, 2);
@@ -235,8 +252,7 @@ typedef struct TextConsole TextConsole;
typedef TextConsole QEMUConsole;
typedef struct CharDriverState CharDriverState;
typedef struct MACAddr MACAddr;
-typedef struct VLANState VLANState;
-typedef struct VLANClientState VLANClientState;
+typedef struct NetClientState NetClientState;
typedef struct i2c_bus i2c_bus;
typedef struct ISABus ISABus;
typedef struct ISADevice ISADevice;
@@ -275,6 +291,13 @@ typedef enum LostTickPolicy {
LOST_TICK_MAX
} LostTickPolicy;
+typedef struct PCIHostDeviceAddress {
+ unsigned int domain;
+ unsigned int bus;
+ unsigned int slot;
+ unsigned int function;
+} PCIHostDeviceAddress;
+
void tcg_exec_init(unsigned long tb_size);
bool tcg_enabled(void);
@@ -288,7 +311,6 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id);
void qemu_cpu_kick(void *env);
void qemu_cpu_kick_self(void);
int qemu_cpu_is_self(void *env);
-bool all_cpu_threads_idle(void);
/* work queue */
struct qemu_work_item {
@@ -304,32 +326,29 @@ struct qemu_work_item {
void qemu_init_vcpu(void *env);
#endif
-/**
- * Sends an iovec (or optionally a part of it) down a socket, yielding
- * when the socket is full.
- */
-int qemu_co_sendv(int sockfd, struct iovec *iov,
- int len, int iov_offset);
/**
- * Receives data into an iovec (or optionally into a part of it) from
- * a socket, yielding when there is no data in the socket.
+ * Sends a (part of) iovec down a socket, yielding when the socket is full, or
+ * Receives data into a (part of) iovec from a socket,
+ * yielding when there is no data in the socket.
+ * The same interface as qemu_sendv_recvv(), with added yielding.
+ * XXX should mark these as coroutine_fn
*/
-int qemu_co_recvv(int sockfd, struct iovec *iov,
- int len, int iov_offset);
-
+ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes, bool do_send);
+#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
+#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
+ qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)
/**
- * Sends a buffer down a socket, yielding when the socket is full.
+ * The same as above, but with just a single buffer
*/
-int qemu_co_send(int sockfd, void *buf, int len);
-
-/**
- * Receives data into a buffer from a socket, yielding when there
- * is no data in the socket.
- */
-int qemu_co_recv(int sockfd, void *buf, int len);
-
+ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
+#define qemu_co_recv(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, false)
+#define qemu_co_send(sockfd, buf, bytes) \
+ qemu_co_send_recv(sockfd, buf, bytes, true)
typedef struct QEMUIOVector {
struct iovec *iov;
@@ -341,16 +360,16 @@ typedef struct QEMUIOVector {
void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint);
void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov);
void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len);
-void qemu_iovec_copy(QEMUIOVector *dst, QEMUIOVector *src, uint64_t skip,
- size_t size);
-void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size);
+void qemu_iovec_concat(QEMUIOVector *dst,
+ QEMUIOVector *src, size_t soffset, size_t sbytes);
void qemu_iovec_destroy(QEMUIOVector *qiov);
void qemu_iovec_reset(QEMUIOVector *qiov);
-void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf);
-void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count);
-void qemu_iovec_memset(QEMUIOVector *qiov, int c, size_t count);
-void qemu_iovec_memset_skip(QEMUIOVector *qiov, int c, size_t count,
- size_t skip);
+size_t qemu_iovec_to_buf(QEMUIOVector *qiov, size_t offset,
+ void *buf, size_t bytes);
+size_t qemu_iovec_from_buf(QEMUIOVector *qiov, size_t offset,
+ const void *buf, size_t bytes);
+size_t qemu_iovec_memset(QEMUIOVector *qiov, size_t offset,
+ int fillc, size_t bytes);
bool buffer_is_zero(const void *buf, size_t len);
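
The skip/copy variants above are replaced by explicit offset and byte-count parameters. A minimal sketch of the new calling convention, assuming only the prototypes declared in this hunk:

#include <glib.h>
#include "qemu-common.h"

/* Sketch only: copy a whole QEMUIOVector into a bounce buffer, then
 * zero-fill the vector past the first 'filled' bytes. */
static void sketch_qiov_usage(QEMUIOVector *qiov, size_t filled)
{
    uint8_t *bounce = g_malloc(qiov->size);

    qemu_iovec_to_buf(qiov, 0, bounce, qiov->size);
    qemu_iovec_memset(qiov, filled, 0, qiov->size - filled);

    g_free(bounce);
}
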
diff --git a/qemu-config.h b/qemu-config.h
index e9f2ef4c7b..12ddf3ed97 100644
--- a/qemu-config.h
+++ b/qemu-config.h
@@ -19,7 +19,7 @@ int qemu_config_parse(FILE *fp, QemuOptsList **lists, const char *fname);
int qemu_read_config_file(const char *filename);
-/* Read default Qemu config files
+/* Read default QEMU config files
*/
int qemu_read_default_config_files(bool userconfig);
diff --git a/qemu-coroutine-io.c b/qemu-coroutine-io.c
index 40fd514395..5734965003 100644
--- a/qemu-coroutine-io.c
+++ b/qemu-coroutine-io.c
@@ -25,72 +25,41 @@
#include "qemu-common.h"
#include "qemu_socket.h"
#include "qemu-coroutine.h"
+#include "iov.h"
-int coroutine_fn qemu_co_recvv(int sockfd, struct iovec *iov,
- int len, int iov_offset)
+ssize_t coroutine_fn
+qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
+ size_t offset, size_t bytes, bool do_send)
{
- int total = 0;
- int ret;
- while (len) {
- ret = qemu_recvv(sockfd, iov, len, iov_offset + total);
- if (ret < 0) {
+ size_t done = 0;
+ ssize_t ret;
+ while (done < bytes) {
+ ret = iov_send_recv(sockfd, iov, iov_cnt,
+ offset + done, bytes - done, do_send);
+ if (ret > 0) {
+ done += ret;
+ } else if (ret < 0) {
if (errno == EAGAIN) {
qemu_coroutine_yield();
- continue;
- }
- if (total == 0) {
- total = -1;
- }
- break;
- }
- if (ret == 0) {
- break;
- }
- total += ret, len -= ret;
- }
-
- return total;
-}
-
-int coroutine_fn qemu_co_sendv(int sockfd, struct iovec *iov,
- int len, int iov_offset)
-{
- int total = 0;
- int ret;
- while (len) {
- ret = qemu_sendv(sockfd, iov, len, iov_offset + total);
- if (ret < 0) {
- if (errno == EAGAIN) {
- qemu_coroutine_yield();
- continue;
- }
- if (total == 0) {
- total = -1;
+ } else if (done == 0) {
+ return -1;
+ } else {
+ break;
}
+ } else if (ret == 0 && !do_send) {
+ /* write (send) should never return 0.
+ * read (recv) returns 0 at end-of-file (end of data).
+ * In both cases there's little point in retrying,
+ * but we retry the write anyway, just in case. */
break;
}
- total += ret, len -= ret;
}
-
- return total;
+ return done;
}
-int coroutine_fn qemu_co_recv(int sockfd, void *buf, int len)
+ssize_t coroutine_fn
+qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send)
{
- struct iovec iov;
-
- iov.iov_base = buf;
- iov.iov_len = len;
-
- return qemu_co_recvv(sockfd, &iov, len, 0);
-}
-
-int coroutine_fn qemu_co_send(int sockfd, void *buf, int len)
-{
- struct iovec iov;
-
- iov.iov_base = buf;
- iov.iov_len = len;
-
- return qemu_co_sendv(sockfd, &iov, len, 0);
+ struct iovec iov = { .iov_base = buf, .iov_len = bytes };
+ return qemu_co_sendv_recvv(sockfd, &iov, 1, 0, bytes, do_send);
}
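The new helpers are coroutine-only and yield internally whenever the socket would block. A minimal caller sketch (the function name, the -EIO convention, and the includes are assumptions, not part of this patch):

    #include <errno.h>
    #include "qemu-common.h"
    #include "qemu-coroutine.h"

    /* Sketch: send a fixed-size request and read back a fixed-size reply from a
     * non-blocking socket, letting qemu_co_send()/qemu_co_recv() yield as needed. */
    static int coroutine_fn example_transaction(int sockfd,
                                                void *req, size_t req_len,
                                                void *reply, size_t reply_len)
    {
        ssize_t ret;

        ret = qemu_co_send(sockfd, req, req_len);
        if (ret < (ssize_t)req_len) {
            return -EIO;          /* short write or error */
        }

        ret = qemu_co_recv(sockfd, reply, reply_len);
        if (ret < (ssize_t)reply_len) {
            return -EIO;          /* short read (EOF) or error */
        }
        return 0;
    }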
diff --git a/qemu-doc.texi b/qemu-doc.texi
index 0af0ff45c2..f32e9e2fb9 100644
--- a/qemu-doc.texi
+++ b/qemu-doc.texi
@@ -78,7 +78,7 @@ to ease cross-compilation and cross-debugging.
@end itemize
-QEMU can run without an host kernel driver and yet gives acceptable
+QEMU can run without a host kernel driver and yet gives acceptable
performance.
For system emulation, the following hardware targets are supported:
@@ -1124,9 +1124,11 @@ the protocol limits passwords to 8 characters it should not be considered
to provide high security. The password can be fairly easily brute-forced by
a client making repeat connections. For this reason, a VNC server using password
authentication should be restricted to only listen on the loopback interface
-or UNIX domain sockets. Password authentication is requested with the @code{password}
-option, and then once QEMU is running the password is set with the monitor. Until
-the monitor is used to set the password all clients will be rejected.
+or UNIX domain sockets. Password authentication is not supported when operating
+in FIPS 140-2 compliance mode as it requires the use of the DES cipher. Password
+authentication is requested with the @code{password} option, and then once QEMU
+is running the password is set with the monitor. Until the monitor is used to
+set the password all clients will be rejected.
@example
qemu-system-i386 [...OPTIONS...] -vnc :1,password -monitor stdio
@@ -2390,7 +2392,7 @@ Set the x86 elf interpreter prefix (default=/usr/local/qemu-i386)
@item -s size
Set the x86 stack size in bytes (default=524288)
@item -cpu model
-Select CPU model (-cpu ? for list and additional feature selection)
+Select CPU model (-cpu help for list and additional feature selection)
@item -ignore-environment
Start with an empty environment. Without this option,
the initial environment is a copy of the caller's environment.
diff --git a/qemu-ga.c b/qemu-ga.c
index 8199da789c..f1a39ec3a6 100644
--- a/qemu-ga.c
+++ b/qemu-ga.c
@@ -736,7 +736,7 @@ int main(int argc, char **argv)
break;
case 'b': {
char **list_head, **list;
- if (*optarg == '?') {
+ if (is_help_option(optarg)) {
list_head = list = qmp_get_command_list();
while (*list != NULL) {
printf("%s\n", *list);
diff --git a/qemu-img.c b/qemu-img.c
index 80cfb9b167..94a31ad9f0 100644
--- a/qemu-img.c
+++ b/qemu-img.c
@@ -350,7 +350,7 @@ static int img_create(int argc, char **argv)
img_size = (uint64_t)sval;
}
- if (options && !strcmp(options, "?")) {
+ if (options && is_help_option(options)) {
ret = print_block_option_help(filename, fmt);
goto out;
}
@@ -744,7 +744,7 @@ static int img_convert(int argc, char **argv)
/* Initialize before goto out */
qemu_progress_init(progress, 2.0);
- if (options && !strcmp(options, "?")) {
+ if (options && is_help_option(options)) {
ret = print_block_option_help(out_filename, out_fmt);
goto out;
}
@@ -1567,14 +1567,19 @@ static int img_resize(int argc, char **argv)
const char *filename, *fmt, *size;
int64_t n, total_size;
BlockDriverState *bs = NULL;
- QEMUOptionParameter *param;
- QEMUOptionParameter resize_options[] = {
- {
- .name = BLOCK_OPT_SIZE,
- .type = OPT_SIZE,
- .help = "Virtual disk size"
+ QemuOpts *param;
+ static QemuOptsList resize_options = {
+ .name = "resize_options",
+ .head = QTAILQ_HEAD_INITIALIZER(resize_options.head),
+ .desc = {
+ {
+ .name = BLOCK_OPT_SIZE,
+ .type = QEMU_OPT_SIZE,
+ .help = "Virtual disk size"
+ }, {
+ /* end of list */
+ }
},
- { NULL }
};
/* Remove size from argv manually so that negative numbers are not treated
@@ -1624,14 +1629,15 @@ static int img_resize(int argc, char **argv)
}
/* Parse size */
- param = parse_option_parameters("", resize_options, NULL);
- if (set_option_parameter(param, BLOCK_OPT_SIZE, size)) {
+ param = qemu_opts_create(&resize_options, NULL, 0, NULL);
+ if (qemu_opt_set(param, BLOCK_OPT_SIZE, size)) {
/* Error message already printed when size parsing fails */
ret = -1;
+ qemu_opts_del(param);
goto out;
}
- n = get_option_parameter(param, BLOCK_OPT_SIZE)->value.n;
- free_option_parameters(param);
+ n = qemu_opt_get_size(param, BLOCK_OPT_SIZE, 0);
+ qemu_opts_del(param);
bs = bdrv_new_open(filename, fmt, BDRV_O_FLAGS | BDRV_O_RDWR);
if (!bs) {
diff --git a/qemu-io.c b/qemu-io.c
index 5882067443..d0f4fb70c7 100644
--- a/qemu-io.c
+++ b/qemu-io.c
@@ -670,6 +670,7 @@ static int readv_f(int argc, char **argv)
print_report("read", &t2, offset, qiov.size, total, cnt, Cflag);
out:
+ qemu_iovec_destroy(&qiov);
qemu_io_free(buf);
return 0;
}
@@ -928,6 +929,7 @@ static int writev_f(int argc, char **argv)
t2 = tsub(t2, t1);
print_report("wrote", &t2, offset, qiov.size, total, cnt, Cflag);
out:
+ qemu_iovec_destroy(&qiov);
qemu_io_free(buf);
return 0;
}
@@ -1126,6 +1128,7 @@ static void aio_write_done(void *opaque, int ret)
ctx->qiov.size, 1, ctx->Cflag);
out:
qemu_io_free(ctx->buf);
+ qemu_iovec_destroy(&ctx->qiov);
g_free(ctx);
}
@@ -1166,6 +1169,7 @@ static void aio_read_done(void *opaque, int ret)
ctx->qiov.size, 1, ctx->Cflag);
out:
qemu_io_free(ctx->buf);
+ qemu_iovec_destroy(&ctx->qiov);
g_free(ctx);
}
@@ -1648,6 +1652,17 @@ static const cmdinfo_t map_cmd = {
.oneline = "prints the allocated areas of a file",
};
+static int abort_f(int argc, char **argv)
+{
+ abort();
+}
+
+static const cmdinfo_t abort_cmd = {
+ .name = "abort",
+ .cfunc = abort_f,
+ .flags = CMD_NOFILE_OK,
+ .oneline = "simulate a program crash using abort(3)",
+};
static int close_f(int argc, char **argv)
{
@@ -1901,6 +1916,7 @@ int main(int argc, char **argv)
add_command(&discard_cmd);
add_command(&alloc_cmd);
add_command(&map_cmd);
+ add_command(&abort_cmd);
add_args_command(init_args_command);
add_check_command(init_check_command);
diff --git a/qemu-log.c b/qemu-log.c
index 1ec70e7e83..396aafdf62 100644
--- a/qemu-log.c
+++ b/qemu-log.c
@@ -52,7 +52,7 @@ void qemu_log_mask(int mask, const char *fmt, ...)
}
/* enable or disable low levels log */
-void cpu_set_log(int log_flags)
+void qemu_set_log(int log_flags, bool use_own_buffers)
{
qemu_loglevel = log_flags;
if (qemu_loglevel && !qemu_logfile) {
@@ -61,19 +61,20 @@ void cpu_set_log(int log_flags)
perror(logfilename);
_exit(1);
}
-#if !defined(CONFIG_SOFTMMU)
/* must avoid mmap() usage of glibc by setting a buffer "by hand" */
- {
+ if (use_own_buffers) {
static char logfile_buf[4096];
+
setvbuf(qemu_logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
- }
-#elif defined(_WIN32)
- /* Win32 doesn't support line-buffering, so use unbuffered output. */
- setvbuf(qemu_logfile, NULL, _IONBF, 0);
+ } else {
+#if defined(_WIN32)
+ /* Win32 doesn't support line-buffering, so use unbuffered output. */
+ setvbuf(qemu_logfile, NULL, _IONBF, 0);
#else
- setvbuf(qemu_logfile, NULL, _IOLBF, 0);
+ setvbuf(qemu_logfile, NULL, _IOLBF, 0);
#endif
- log_append = 1;
+ log_append = 1;
+ }
}
if (!qemu_loglevel && qemu_logfile) {
fclose(qemu_logfile);
@@ -99,10 +100,7 @@ const CPULogItem cpu_log_items[] = {
{ CPU_LOG_TB_OP, "op",
"show micro ops for each compiled TB" },
{ CPU_LOG_TB_OP_OPT, "op_opt",
- "show micro ops "
-#ifdef TARGET_I386
- "before eflags optimization and "
-#endif
+ "show micro ops (x86 only: before eflags optimization) and\n"
"after liveness analysis" },
{ CPU_LOG_INT, "int",
"show interrupts/exceptions in short format" },
@@ -110,16 +108,12 @@ const CPULogItem cpu_log_items[] = {
"show trace before each executed TB (lots of logs)" },
{ CPU_LOG_TB_CPU, "cpu",
"show CPU state before block translation" },
-#ifdef TARGET_I386
{ CPU_LOG_PCALL, "pcall",
- "show protected mode far calls/returns/exceptions" },
+ "x86 only: show protected mode far calls/returns/exceptions" },
{ CPU_LOG_RESET, "cpu_reset",
- "show CPU state before CPU resets" },
-#endif
-#ifdef DEBUG_IOPORT
+ "x86 only: show CPU state before CPU resets" },
{ CPU_LOG_IOPORT, "ioport",
"show all i/o ports accesses" },
-#endif
{ LOG_UNIMP, "unimp",
"log unimplemented functionality" },
{ 0, NULL, NULL },
diff --git a/qemu-log.h b/qemu-log.h
index 4cdc7c7a47..5ccecf30af 100644
--- a/qemu-log.h
+++ b/qemu-log.h
@@ -142,7 +142,17 @@ typedef struct CPULogItem {
extern const CPULogItem cpu_log_items[];
-void cpu_set_log(int log_flags);
+void qemu_set_log(int log_flags, bool use_own_buffers);
+
+static inline void cpu_set_log(int log_flags)
+{
+#ifdef CONFIG_USER_ONLY
+ qemu_set_log(log_flags, true);
+#else
+ qemu_set_log(log_flags, false);
+#endif
+}
+
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);
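The cpu_set_log() wrapper above keeps existing callers working while qemu_set_log() gains the explicit buffering flag. A sketch of a typical caller, using only the declarations shown in this header (the function name is hypothetical):

    #include "qemu-log.h"

    /* Sketch: translate a "-d item1,item2" style string into log flags and
     * enable them; the buffering mode is chosen by the cpu_set_log() wrapper. */
    static void enable_debug_logging_sketch(const char *items)
    {
        int mask = cpu_str_to_log_mask(items);
        if (!mask) {
            /* unknown log item: a real caller would list cpu_log_items[] here */
            return;
        }
        cpu_set_log(mask);
    }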
diff --git a/qemu-nbd.c b/qemu-nbd.c
index 5a0300eb07..1c1cf6a463 100644
--- a/qemu-nbd.c
+++ b/qemu-nbd.c
@@ -33,7 +33,9 @@
#include <libgen.h>
#include <pthread.h>
-#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
+#define SOCKET_PATH "/var/lock/qemu-nbd-%s"
+#define QEMU_NBD_OPT_CACHE 1
+#define QEMU_NBD_OPT_AIO 2
static NBDExport *exp;
static int verbose;
@@ -46,28 +48,43 @@ static int nb_fds;
static void usage(const char *name)
{
- printf(
+ (printf) (
"Usage: %s [OPTIONS] FILE\n"
"QEMU Disk Network Block Device Server\n"
"\n"
+" -h, --help display this help and exit\n"
+" -V, --version output version information and exit\n"
+"\n"
+"Connection properties:\n"
" -p, --port=PORT port to listen on (default `%d')\n"
-" -o, --offset=OFFSET offset into the image\n"
" -b, --bind=IFACE interface to bind to (default `0.0.0.0')\n"
" -k, --socket=PATH path to the unix socket\n"
" (default '"SOCKET_PATH"')\n"
-" -r, --read-only export read-only\n"
-" -P, --partition=NUM only expose partition NUM\n"
-" -s, --snapshot use snapshot file\n"
-" -n, --nocache disable host cache\n"
-" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
-" -d, --disconnect disconnect the specified device\n"
" -e, --shared=NUM device can be shared by NUM clients (default '1')\n"
" -t, --persistent don't exit on the last connection\n"
" -v, --verbose display extra debugging information\n"
-" -h, --help display this help and exit\n"
-" -V, --version output version information and exit\n"
"\n"
-"Report bugs to <anthony@codemonkey.ws>\n"
+"Exposing part of the image:\n"
+" -o, --offset=OFFSET offset into the image\n"
+" -P, --partition=NUM only expose partition NUM\n"
+"\n"
+#ifdef __linux__
+"Kernel NBD client support:\n"
+" -c, --connect=DEV connect FILE to the local NBD device DEV\n"
+" -d, --disconnect disconnect the specified device\n"
+"\n"
+#endif
+"\n"
+"Block device options:\n"
+" -r, --read-only export read-only\n"
+" -s, --snapshot use snapshot file\n"
+" -n, --nocache disable host cache\n"
+" --cache=MODE set cache mode (none, writeback, ...)\n"
+#ifdef CONFIG_LINUX_AIO
+" --aio=MODE set AIO mode (native or threads)\n"
+#endif
+"\n"
+"Report bugs to <qemu-devel@nongnu.org>\n"
, name, NBD_DEFAULT_PORT, "DEVICE");
}
@@ -295,6 +312,10 @@ int main(int argc, char **argv)
{ "disconnect", 0, NULL, 'd' },
{ "snapshot", 0, NULL, 's' },
{ "nocache", 0, NULL, 'n' },
+ { "cache", 1, NULL, QEMU_NBD_OPT_CACHE },
+#ifdef CONFIG_LINUX_AIO
+ { "aio", 1, NULL, QEMU_NBD_OPT_AIO },
+#endif
{ "shared", 1, NULL, 'e' },
{ "persistent", 0, NULL, 't' },
{ "verbose", 0, NULL, 'v' },
@@ -309,6 +330,10 @@ int main(int argc, char **argv)
int ret;
int fd;
int persistent = 0;
+ bool seen_cache = false;
+#ifdef CONFIG_LINUX_AIO
+ bool seen_aio = false;
+#endif
pthread_t client_thread;
/* The client thread uses SIGTERM to interrupt the server. A signal
@@ -325,8 +350,32 @@ int main(int argc, char **argv)
flags |= BDRV_O_SNAPSHOT;
break;
case 'n':
- flags |= BDRV_O_NOCACHE | BDRV_O_CACHE_WB;
+ optarg = (char *) "none";
+ /* fallthrough */
+ case QEMU_NBD_OPT_CACHE:
+ if (seen_cache) {
+ errx(EXIT_FAILURE, "-n and --cache can only be specified once");
+ }
+ seen_cache = true;
+ if (bdrv_parse_cache_flags(optarg, &flags) == -1) {
+ errx(EXIT_FAILURE, "Invalid cache mode `%s'", optarg);
+ }
+ break;
+#ifdef CONFIG_LINUX_AIO
+ case QEMU_NBD_OPT_AIO:
+ if (seen_aio) {
+ errx(EXIT_FAILURE, "--aio can only be specified once");
+ }
+ seen_aio = true;
+ if (!strcmp(optarg, "native")) {
+ flags |= BDRV_O_NATIVE_AIO;
+ } else if (!strcmp(optarg, "threads")) {
+ /* this is the default */
+ } else {
+ errx(EXIT_FAILURE, "invalid aio mode `%s'", optarg);
+ }
break;
+#endif
case 'b':
bindto = optarg;
break;
diff --git a/ui/vnc-jobs-sync.c b/qemu-option-internal.h
index 49b77afcc9..19fdc1ca85 100644
--- a/ui/vnc-jobs-sync.c
+++ b/qemu-option-internal.h
@@ -1,10 +1,8 @@
/*
- * QEMU VNC display driver
+ * Commandline option parsing functions
*
- * Copyright (C) 2006 Anthony Liguori <anthony@codemonkey.ws>
- * Copyright (C) 2006 Fabrice Bellard
- * Copyright (C) 2009 Red Hat, Inc
- * Copyright (C) 2010 Corentin Chary <corentin.chary@gmail.com>
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ * Copyright (c) 2009 Kevin Wolf <kwolf@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
@@ -25,49 +23,31 @@
* THE SOFTWARE.
*/
-#include "vnc.h"
-#include "vnc-jobs.h"
+#ifndef QEMU_OPTIONS_INTERNAL_H
+#define QEMU_OPTIONS_INTERNAL_H
-void vnc_jobs_clear(VncState *vs)
-{
-}
+#include "qemu-option.h"
-void vnc_jobs_join(VncState *vs)
-{
-}
+struct QemuOpt {
+ const char *name;
+ const char *str;
-VncJob *vnc_job_new(VncState *vs)
-{
- vs->job.vs = vs;
- vs->job.rectangles = 0;
+ const QemuOptDesc *desc;
+ union {
+ bool boolean;
+ uint64_t uint;
+ } value;
- vnc_write_u8(vs, VNC_MSG_SERVER_FRAMEBUFFER_UPDATE);
- vnc_write_u8(vs, 0);
- vs->job.saved_offset = vs->output.offset;
- vnc_write_u16(vs, 0);
- return &vs->job;
-}
+ QemuOpts *opts;
+ QTAILQ_ENTRY(QemuOpt) next;
+};
-void vnc_job_push(VncJob *job)
-{
- VncState *vs = job->vs;
+struct QemuOpts {
+ char *id;
+ QemuOptsList *list;
+ Location loc;
+ QTAILQ_HEAD(QemuOptHead, QemuOpt) head;
+ QTAILQ_ENTRY(QemuOpts) next;
+};
- vs->output.buffer[job->saved_offset] = (job->rectangles >> 8) & 0xFF;
- vs->output.buffer[job->saved_offset + 1] = job->rectangles & 0xFF;
- vnc_flush(job->vs);
-}
-
-int vnc_job_add_rect(VncJob *job, int x, int y, int w, int h)
-{
- int n;
-
- n = vnc_send_framebuffer_update(job->vs, x, y, w, h);
- if (n >= 0)
- job->rectangles += n;
- return n;
-}
-
-bool vnc_has_job(VncState *vs)
-{
- return false;
-}
+#endif
diff --git a/qemu-option.c b/qemu-option.c
index bb3886c6b9..27891e74e7 100644
--- a/qemu-option.c
+++ b/qemu-option.c
@@ -29,9 +29,9 @@
#include "qemu-common.h"
#include "qemu-error.h"
#include "qemu-objects.h"
-#include "qemu-option.h"
#include "error.h"
#include "qerror.h"
+#include "qemu-option-internal.h"
/*
* Extracts the name of an option from the parameter string (p points at the
@@ -511,28 +511,6 @@ void print_option_help(QEMUOptionParameter *list)
/* ------------------------------------------------------------------ */
-struct QemuOpt {
- const char *name;
- const char *str;
-
- const QemuOptDesc *desc;
- union {
- bool boolean;
- uint64_t uint;
- } value;
-
- QemuOpts *opts;
- QTAILQ_ENTRY(QemuOpt) next;
-};
-
-struct QemuOpts {
- char *id;
- QemuOptsList *list;
- Location loc;
- QTAILQ_HEAD(QemuOptHead, QemuOpt) head;
- QTAILQ_ENTRY(QemuOpts) next;
-};
-
static QemuOpt *qemu_opt_find(QemuOpts *opts, const char *name)
{
QemuOpt *opt;
@@ -551,6 +529,18 @@ const char *qemu_opt_get(QemuOpts *opts, const char *name)
return opt ? opt->str : NULL;
}
+bool qemu_opt_has_help_opt(QemuOpts *opts)
+{
+ QemuOpt *opt;
+
+ QTAILQ_FOREACH_REVERSE(opt, &opts->head, QemuOptHead, next) {
+ if (is_help_option(opt->name)) {
+ return true;
+ }
+ }
+ return false;
+}
+
bool qemu_opt_get_bool(QemuOpts *opts, const char *name, bool defval)
{
QemuOpt *opt = qemu_opt_find(opts, name);
diff --git a/qemu-option.h b/qemu-option.h
index 951dec3cc4..ca729862d5 100644
--- a/qemu-option.h
+++ b/qemu-option.h
@@ -107,6 +107,18 @@ struct QemuOptsList {
};
const char *qemu_opt_get(QemuOpts *opts, const char *name);
+/**
+ * qemu_opt_has_help_opt:
+ * @opts: options to search for a help request
+ *
+ * Check whether the options specified by @opts include one of the
+ * standard strings which indicate that the user is asking for a
+ * list of the valid values for a command line option (as defined
+ * by is_help_option()).
+ *
+ * Returns: true if @opts includes 'help' or equivalent.
+ */
+bool qemu_opt_has_help_opt(QemuOpts *opts);
bool qemu_opt_get_bool(QemuOpts *opts, const char *name, bool defval);
uint64_t qemu_opt_get_number(QemuOpts *opts, const char *name, uint64_t defval);
uint64_t qemu_opt_get_size(QemuOpts *opts, const char *name, uint64_t defval);
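qemu_opt_has_help_opt() pairs with is_help_option() elsewhere in this series: the former scans an already-parsed QemuOpts, the latter checks a raw string. A usage sketch (the option-list name and the qemu_opts_parse() call are assumptions for illustration, not part of this patch):

    #include <stdlib.h>
    #include "qemu-option.h"

    /* Sketch: parse a "-foo key=value,..." argument and exit after printing help
     * when the user passed "help" (or the legacy "?") anywhere in the string. */
    static QemuOpts *parse_foo_opts_sketch(QemuOptsList *foo_opts, const char *arg)
    {
        QemuOpts *opts = qemu_opts_parse(foo_opts, arg, 0);

        if (opts && qemu_opt_has_help_opt(opts)) {
            /* a real caller would print the supported keys here */
            qemu_opts_del(opts);
            exit(0);
        }
        return opts;
    }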
diff --git a/qemu-options.hx b/qemu-options.hx
index 8b662648ae..5e7d0dc035 100644
--- a/qemu-options.hx
+++ b/qemu-options.hx
@@ -6,6 +6,10 @@ HXCOMM construct option structures, enums and help message for specified
HXCOMM architectures.
HXCOMM HXCOMM can be used for comments, discarded from both texi and C
+HXCOMM TODO: when we are able to change -help output without breaking
+HXCOMM libvirt, we should update the help options which refer to -cpu ?,
+HXCOMM -driver ?, etc. to use the preferred -cpu help form instead.
+
DEFHEADING(Standard options:)
STEXI
@table @option
@@ -1030,8 +1034,21 @@ is a TCP port number, not a display number.
@item password
Require that password based authentication is used for client connections.
-The password must be set separately using the @code{change} command in the
-@ref{pcsys_monitor}
+
+The password must be set separately using the @code{set_password} command in
+the @ref{pcsys_monitor}. The syntax to change your password is:
+@code{set_password <protocol> <password>} where <protocol> is either
+"vnc" or "spice".
+
+To set an expiration time for the <protocol> password, use
+@code{expire_password <protocol> <expiration-time>} where the expiration time
+can be one of the following: now, never, +seconds, or the UNIX time of
+expiration, e.g. +60 to make the password expire in 60 seconds, or 1335196800
+to make the password expire on "Mon Apr 23 12:00:00 EDT 2012" (the UNIX time
+for this date and time).
+
+You can also use the keywords "now" or "never" for the expiration time to
+make the <protocol> password expire immediately or never expire.
@item tls
@@ -1421,8 +1438,28 @@ Then when you use on the host @code{telnet localhost 5555}, you
connect to the guest telnet server.
@item guestfwd=[tcp]:@var{server}:@var{port}-@var{dev}
+@item guestfwd=[tcp]:@var{server}:@var{port}-@var{cmd:command}
Forward guest TCP connections to the IP address @var{server} on port @var{port}
-to the character device @var{dev}. This option can be given multiple times.
+to the character device @var{dev} or to a program executed by @var{cmd:command}
+which gets spawned for each connection. This option can be given multiple times.
+
+You can either use a chardev directly and have that one used throughout QEMU's
+lifetime, like in the following example:
+
+@example
+# open 10.10.1.1:4321 on bootup, connect 10.0.2.100:1234 to it whenever
+# the guest accesses it
+qemu -net user,guestfwd=tcp:10.0.2.100:1234-tcp:10.10.1.1:4321 [...]
+@end example
+
+Or you can execute a command on every TCP connection established by the guest,
+so that QEMU behaves similarly to an inetd process for that virtual server:
+
+@example
+# call "netcat 10.10.1.1 4321" on every TCP connection to 10.0.2.100:1234
+# and connect the TCP stream to its stdin/stdout
+qemu -net 'user,guestfwd=tcp:10.0.2.100:1234-cmd:netcat 10.10.1.1 4321'
+@end example
@end table
@@ -2621,7 +2658,10 @@ DEF("nodefaults", 0, QEMU_OPTION_nodefaults, \
STEXI
@item -nodefaults
@findex -nodefaults
-Don't create default devices.
+Don't create default devices. Normally, QEMU creates default devices such as a
+serial port, parallel port, virtual console, monitor device, VGA adapter, and
+floppy and CD-ROM drives. The @code{-nodefaults} option disables all of these
+default devices.
ETEXI
#ifndef _WIN32
@@ -2677,7 +2717,9 @@ DEF("readconfig", HAS_ARG, QEMU_OPTION_readconfig,
STEXI
@item -readconfig @var{file}
@findex -readconfig
-Read device configuration from @var{file}.
+Read device configuration from @var{file}. This approach is useful when you want to spawn
+a QEMU process with many command line options but you don't want to exceed the command
+line character limit.
ETEXI
DEF("writeconfig", HAS_ARG, QEMU_OPTION_writeconfig,
"-writeconfig <file>\n"
@@ -2685,7 +2727,9 @@ DEF("writeconfig", HAS_ARG, QEMU_OPTION_writeconfig,
STEXI
@item -writeconfig @var{file}
@findex -writeconfig
-Write device configuration to @var{file}.
+Write device configuration to @var{file}. The @var{file} can be either a filename to
+save the command line and device configuration into, or a dash (@code{-}) character to
+print the output to stdout. This can later be used as an input file for the
+@code{-readconfig} option.
ETEXI
DEF("nodefconfig", 0, QEMU_OPTION_nodefconfig,
"-nodefconfig\n"
@@ -2743,6 +2787,17 @@ DEF("qtest-log", HAS_ARG, QEMU_OPTION_qtest_log,
"-qtest-log LOG specify tracing options\n",
QEMU_ARCH_ALL)
+#ifdef __linux__
+DEF("enable-fips", 0, QEMU_OPTION_enablefips,
+ "-enable-fips enable FIPS 140-2 compliance\n",
+ QEMU_ARCH_ALL)
+#endif
+STEXI
+@item -enable-fips
+@findex -enable-fips
+Enable FIPS 140-2 compliance mode.
+ETEXI
+
HXCOMM This is the last statement. Insert new options before this line!
STEXI
@end table
diff --git a/qemu-sockets.c b/qemu-sockets.c
index 2ae715db76..beb2bb6f4a 100644
--- a/qemu-sockets.c
+++ b/qemu-sockets.c
@@ -11,6 +11,9 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
*/
#include <stdio.h>
#include <stdlib.h>
@@ -281,7 +284,6 @@ int inet_connect_opts(QemuOpts *opts, Error **errp)
inet_strfamily(e->ai_family),
e->ai_canonname, uaddr, uport, strerror(errno));
closesocket(sock);
- sock = -1;
continue;
}
freeaddrinfo(res);
diff --git a/qemu-tech.texi b/qemu-tech.texi
index b51a58abba..d73dda8e35 100644
--- a/qemu-tech.texi
+++ b/qemu-tech.texi
@@ -536,7 +536,7 @@ timers, especially together with the use of bottom halves (BHs).
@node Hardware interrupts
@section Hardware interrupts
-In order to be faster, QEMU does not check at every basic block if an
+In order to be faster, QEMU does not check at every basic block if a
hardware interrupt is pending. Instead, the user must asynchronously
call a specific function to tell that an interrupt is pending. This
function resets the chaining of the currently executing basic
diff --git a/qemu-thread-posix.c b/qemu-thread-posix.c
index 9e1b5fbdaa..8fbabdac36 100644
--- a/qemu-thread-posix.c
+++ b/qemu-thread-posix.c
@@ -151,7 +151,7 @@ void qemu_thread_get_self(QemuThread *thread)
thread->thread = pthread_self();
}
-int qemu_thread_is_self(QemuThread *thread)
+bool qemu_thread_is_self(QemuThread *thread)
{
return pthread_equal(pthread_self(), thread->thread);
}
diff --git a/qemu-thread-win32.c b/qemu-thread-win32.c
index 3524c8b785..177b398cc4 100644
--- a/qemu-thread-win32.c
+++ b/qemu-thread-win32.c
@@ -330,7 +330,7 @@ HANDLE qemu_thread_get_handle(QemuThread *thread)
return handle;
}
-int qemu_thread_is_self(QemuThread *thread)
+bool qemu_thread_is_self(QemuThread *thread)
{
return GetCurrentThreadId() == thread->tid;
}
diff --git a/qemu-thread.h b/qemu-thread.h
index a78a8f2524..05fdaaf50e 100644
--- a/qemu-thread.h
+++ b/qemu-thread.h
@@ -2,6 +2,7 @@
#define __QEMU_THREAD_H 1
#include <inttypes.h>
+#include <stdbool.h>
typedef struct QemuMutex QemuMutex;
typedef struct QemuCond QemuCond;
@@ -42,7 +43,7 @@ void qemu_thread_create(QemuThread *thread,
void *arg, int mode);
void *qemu_thread_join(QemuThread *thread);
void qemu_thread_get_self(QemuThread *thread);
-int qemu_thread_is_self(QemuThread *thread);
+bool qemu_thread_is_self(QemuThread *thread);
void qemu_thread_exit(void *retval);
#endif
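Switching qemu_thread_is_self() to bool makes it natural to use directly in assertions. A tiny sketch (the io_thread global and the function name are made-up examples, not part of this patch):

    #include <assert.h>
    #include "qemu-thread.h"

    extern QemuThread io_thread;   /* hypothetical thread handle */

    /* Sketch: assert that the caller is running in the expected thread. */
    static void assert_in_io_thread_sketch(void)
    {
        assert(qemu_thread_is_self(&io_thread));
    }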
diff --git a/qemu-timer.c b/qemu-timer.c
index de9897788d..5aea94e8e0 100644
--- a/qemu-timer.c
+++ b/qemu-timer.c
@@ -112,14 +112,10 @@ static int64_t qemu_next_alarm_deadline(void)
static void qemu_rearm_alarm_timer(struct qemu_alarm_timer *t)
{
- int64_t nearest_delta_ns;
- if (!rt_clock->active_timers &&
- !vm_clock->active_timers &&
- !host_clock->active_timers) {
- return;
+ int64_t nearest_delta_ns = qemu_next_alarm_deadline();
+ if (nearest_delta_ns < INT64_MAX) {
+ t->rearm(t, nearest_delta_ns);
}
- nearest_delta_ns = qemu_next_alarm_deadline();
- t->rearm(t, nearest_delta_ns);
}
/* TODO: MIN_TIMER_REARM_NS should be optimized */
@@ -183,7 +179,7 @@ void configure_alarms(char const *opt)
char *name;
struct qemu_alarm_timer tmp;
- if (!strcmp(opt, "?")) {
+ if (is_help_option(opt)) {
show_available_alarms();
exit(0);
}
diff --git a/qga/Makefile.objs b/qga/Makefile.objs
index 6a4d843436..cd3e13516c 100644
--- a/qga/Makefile.objs
+++ b/qga/Makefile.objs
@@ -1,3 +1,5 @@
qga-obj-y = commands.o guest-agent-command-state.o
qga-obj-$(CONFIG_POSIX) += commands-posix.o channel-posix.o
qga-obj-$(CONFIG_WIN32) += commands-win32.o channel-win32.o service-win32.o
+qga-obj-y += qapi-generated/qga-qapi-types.o qapi-generated/qga-qapi-visit.o
+qga-obj-y += qapi-generated/qga-qmp-marshal.o
diff --git a/qga/commands-posix.c b/qga/commands-posix.c
index 00d035da95..ce9042123c 100644
--- a/qga/commands-posix.c
+++ b/qga/commands-posix.c
@@ -38,9 +38,12 @@ extern char **environ;
#include <sys/socket.h>
#include <net/if.h>
-#if defined(__linux__) && defined(FIFREEZE)
+#ifdef FIFREEZE
#define CONFIG_FSFREEZE
#endif
+#ifdef FITRIM
+#define CONFIG_FSTRIM
+#endif
#endif
void qmp_guest_shutdown(bool has_mode, const char *mode, Error **err)
@@ -312,19 +315,18 @@ static void guest_file_init(void)
/* linux-specific implementations. avoid this if at all possible. */
#if defined(__linux__)
-#if defined(CONFIG_FSFREEZE)
-
-typedef struct GuestFsfreezeMount {
+#if defined(CONFIG_FSFREEZE) || defined(CONFIG_FSTRIM)
+typedef struct FsMount {
char *dirname;
char *devtype;
- QTAILQ_ENTRY(GuestFsfreezeMount) next;
-} GuestFsfreezeMount;
+ QTAILQ_ENTRY(FsMount) next;
+} FsMount;
-typedef QTAILQ_HEAD(, GuestFsfreezeMount) GuestFsfreezeMountList;
+typedef QTAILQ_HEAD(, FsMount) FsMountList;
-static void guest_fsfreeze_free_mount_list(GuestFsfreezeMountList *mounts)
+static void free_fs_mount_list(FsMountList *mounts)
{
- GuestFsfreezeMount *mount, *temp;
+ FsMount *mount, *temp;
if (!mounts) {
return;
@@ -341,10 +343,10 @@ static void guest_fsfreeze_free_mount_list(GuestFsfreezeMountList *mounts)
/*
* Walk the mount table and build a list of local file systems
*/
-static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts)
+static int build_fs_mount_list(FsMountList *mounts)
{
struct mntent *ment;
- GuestFsfreezeMount *mount;
+ FsMount *mount;
char const *mtab = "/proc/self/mounts";
FILE *fp;
@@ -367,7 +369,7 @@ static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts)
continue;
}
- mount = g_malloc0(sizeof(GuestFsfreezeMount));
+ mount = g_malloc0(sizeof(FsMount));
mount->dirname = g_strdup(ment->mnt_dir);
mount->devtype = g_strdup(ment->mnt_type);
@@ -378,6 +380,9 @@ static int guest_fsfreeze_build_mount_list(GuestFsfreezeMountList *mounts)
return 0;
}
+#endif
+
+#if defined(CONFIG_FSFREEZE)
/*
* Return status of freeze/thaw
@@ -398,15 +403,15 @@ GuestFsfreezeStatus qmp_guest_fsfreeze_status(Error **err)
int64_t qmp_guest_fsfreeze_freeze(Error **err)
{
int ret = 0, i = 0;
- GuestFsfreezeMountList mounts;
- struct GuestFsfreezeMount *mount;
+ FsMountList mounts;
+ struct FsMount *mount;
int fd;
char err_msg[512];
slog("guest-fsfreeze called");
QTAILQ_INIT(&mounts);
- ret = guest_fsfreeze_build_mount_list(&mounts);
+ ret = build_fs_mount_list(&mounts);
if (ret < 0) {
return ret;
}
@@ -447,11 +452,11 @@ int64_t qmp_guest_fsfreeze_freeze(Error **err)
close(fd);
}
- guest_fsfreeze_free_mount_list(&mounts);
+ free_fs_mount_list(&mounts);
return i;
error:
- guest_fsfreeze_free_mount_list(&mounts);
+ free_fs_mount_list(&mounts);
qmp_guest_fsfreeze_thaw(NULL);
return 0;
}
@@ -462,12 +467,12 @@ error:
int64_t qmp_guest_fsfreeze_thaw(Error **err)
{
int ret;
- GuestFsfreezeMountList mounts;
- GuestFsfreezeMount *mount;
+ FsMountList mounts;
+ FsMount *mount;
int fd, i = 0, logged;
QTAILQ_INIT(&mounts);
- ret = guest_fsfreeze_build_mount_list(&mounts);
+ ret = build_fs_mount_list(&mounts);
if (ret) {
error_set(err, QERR_QGA_COMMAND_FAILED,
"failed to enumerate filesystems");
@@ -507,7 +512,7 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err)
}
ga_unset_frozen(ga_state);
- guest_fsfreeze_free_mount_list(&mounts);
+ free_fs_mount_list(&mounts);
return i;
}
@@ -525,6 +530,65 @@ static void guest_fsfreeze_cleanup(void)
}
#endif /* CONFIG_FSFREEZE */
+#if defined(CONFIG_FSTRIM)
+/*
+ * Walk the list of mounted file systems in the guest, and trim them.
+ */
+void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err)
+{
+ int ret = 0;
+ FsMountList mounts;
+ struct FsMount *mount;
+ int fd;
+ char err_msg[512];
+ struct fstrim_range r = {
+ .start = 0,
+ .len = -1,
+ .minlen = has_minimum ? minimum : 0,
+ };
+
+ slog("guest-fstrim called");
+
+ QTAILQ_INIT(&mounts);
+ ret = build_fs_mount_list(&mounts);
+ if (ret < 0) {
+ return;
+ }
+
+ QTAILQ_FOREACH(mount, &mounts, next) {
+ fd = qemu_open(mount->dirname, O_RDONLY);
+ if (fd == -1) {
+ sprintf(err_msg, "failed to open %s, %s", mount->dirname,
+ strerror(errno));
+ error_set(err, QERR_QGA_COMMAND_FAILED, err_msg);
+ goto error;
+ }
+
+ /* We try to cull filesystems we know won't work in advance, but other
+ * filesystems may not implement fstrim for less obvious reasons. These
+ * will report EOPNOTSUPP; we simply ignore these errors. Any other
+ * error is unexpected, so return it in those cases. In
+ * some other cases ENOTTY will be reported (e.g. CD-ROMs).
+ */
+ ret = ioctl(fd, FITRIM, &r);
+ if (ret == -1) {
+ if (errno != ENOTTY && errno != EOPNOTSUPP) {
+ sprintf(err_msg, "failed to trim %s, %s",
+ mount->dirname, strerror(errno));
+ error_set(err, QERR_QGA_COMMAND_FAILED, err_msg);
+ close(fd);
+ goto error;
+ }
+ }
+ close(fd);
+ }
+
+error:
+ free_fs_mount_list(&mounts);
+}
+#endif /* CONFIG_FSTRIM */
+
+
#define LINUX_SYS_STATE_FILE "/sys/power/state"
#define SUSPEND_SUPPORTED 0
#define SUSPEND_NOT_SUPPORTED 1
@@ -918,7 +982,15 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err)
return 0;
}
+#endif /* CONFIG_FSFREEZE */
+#if !defined(CONFIG_FSTRIM)
+void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err)
+{
+ error_set(err, QERR_UNSUPPORTED);
+
+ return;
+}
#endif
/* register init/cleanup routines for stateful command groups */
diff --git a/qga/commands-win32.c b/qga/commands-win32.c
index eb8d1405d3..54bc5462e2 100644
--- a/qga/commands-win32.c
+++ b/qga/commands-win32.c
@@ -173,6 +173,17 @@ int64_t qmp_guest_fsfreeze_thaw(Error **err)
return 0;
}
+/*
+ * Walk the list of mounted file systems in the guest and discard unused
+ * areas.
+ */
+void qmp_guest_fstrim(bool has_minimum, int64_t minimum, Error **err)
+{
+ error_set(err, QERR_UNSUPPORTED);
+
+ return;
+}
+
typedef enum {
GUEST_SUSPEND_MODE_DISK,
GUEST_SUSPEND_MODE_RAM
diff --git a/qmp-commands.hx b/qmp-commands.hx
index 2e1a38e695..ac466382c0 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -873,8 +873,7 @@ EQMP
.args_type = "fdname:s",
.params = "getfd name",
.help = "receive a file descriptor via SCM rights and assign it a name",
- .user_print = monitor_user_noop,
- .mhandler.cmd_new = do_getfd,
+ .mhandler.cmd_new = qmp_marshal_input_getfd,
},
SQMP
@@ -892,6 +891,14 @@ Example:
-> { "execute": "getfd", "arguments": { "fdname": "fd1" } }
<- { "return": {} }
+Notes:
+
+(1) If the name specified by the "fdname" argument already exists,
+ the file descriptor assigned to it will be closed and replaced
+ by the received file descriptor.
+(2) The 'closefd' command can be used to explicitly close the file
+ descriptor when it is no longer needed.
+
EQMP
{
@@ -899,8 +906,7 @@ EQMP
.args_type = "fdname:s",
.params = "closefd name",
.help = "close a file descriptor previously passed via SCM rights",
- .user_print = monitor_user_noop,
- .mhandler.cmd_new = do_closefd,
+ .mhandler.cmd_new = qmp_marshal_input_closefd,
},
SQMP
@@ -1311,6 +1317,7 @@ Each json-object contain the following:
"nbd", "parallels", "qcow", "qcow2", "raw",
"tftp", "vdi", "vmdk", "vpc", "vvfat"
- "backing_file": backing file name (json-string, optional)
+ - "backing_file_depth": number of files in the backing file chain (json-int)
- "encrypted": true if encrypted, false otherwise (json-bool)
- "bps": limit total bytes per second (json-int)
- "bps_rd": limit read bytes per second (json-int)
@@ -1339,6 +1346,7 @@ Example:
"drv":"qcow2",
"encrypted":false,
"file":"disks/test.img",
+ "backing_file_depth":0,
"bps":1000000,
"bps_rd":0,
"bps_wr":0,
diff --git a/roms/Makefile b/roms/Makefile
index 0114e6f33f..feb9c2b145 100644
--- a/roms/Makefile
+++ b/roms/Makefile
@@ -1,10 +1,27 @@
+vgabios_variants := stdvga cirrus vmware qxl
+
default:
@echo "nothing is build by default"
@echo "available build targets:"
@echo " bios -- update bios.bin (seabios)"
+ @echo " seavgabios -- update vgabios binaries (seabios)"
+ @echo " lgplvgabios -- update vgabios binaries (lgpl)"
bios: config.seabios
sh configure-seabios.sh $<
make -C seabios out/bios.bin
cp seabios/out/bios.bin ../pc-bios/bios.bin
+
+seavgabios: $(patsubst %,seavgabios-%,$(vgabios_variants))
+
+seavgabios-%: config.vga.%
+ sh configure-seabios.sh $<
+ make -C seabios out/vgabios.bin
+ cp seabios/out/vgabios.bin ../pc-bios/vgabios-$*.bin
+
+lgplvgabios: $(patsubst %,lgplvgabios-%,$(vgabios_variants))
+
+lgplvgabios-%:
+ make -C vgabios vgabios-$*.bin
+ cp vgabios/VGABIOS-lgpl-latest.$*.bin ../pc-bios/vgabios-$*.bin
diff --git a/roms/config.vga.cirrus b/roms/config.vga.cirrus
new file mode 100644
index 0000000000..c8fe58239f
--- /dev/null
+++ b/roms/config.vga.cirrus
@@ -0,0 +1,3 @@
+CONFIG_BUILD_VGABIOS=y
+CONFIG_VGA_CIRRUS=y
+CONFIG_VGA_PCI=y
diff --git a/roms/config.vga.isavga b/roms/config.vga.isavga
new file mode 100644
index 0000000000..e55e294a0c
--- /dev/null
+++ b/roms/config.vga.isavga
@@ -0,0 +1,3 @@
+CONFIG_BUILD_VGABIOS=y
+CONFIG_VGA_BOCHS=y
+CONFIG_VGA_PCI=n
diff --git a/roms/config.vga.qxl b/roms/config.vga.qxl
new file mode 100644
index 0000000000..d393f0c34f
--- /dev/null
+++ b/roms/config.vga.qxl
@@ -0,0 +1,6 @@
+CONFIG_BUILD_VGABIOS=y
+CONFIG_VGA_BOCHS=y
+CONFIG_VGA_PCI=y
+CONFIG_OVERRIDE_PCI_ID=y
+CONFIG_VGA_VID=0x1b36
+CONFIG_VGA_DID=0x0100
diff --git a/roms/config.vga.stdvga b/roms/config.vga.stdvga
new file mode 100644
index 0000000000..7d063b787c
--- /dev/null
+++ b/roms/config.vga.stdvga
@@ -0,0 +1,3 @@
+CONFIG_BUILD_VGABIOS=y
+CONFIG_VGA_BOCHS=y
+CONFIG_VGA_PCI=y
diff --git a/roms/config.vga.vmware b/roms/config.vga.vmware
new file mode 100644
index 0000000000..eb10427afd
--- /dev/null
+++ b/roms/config.vga.vmware
@@ -0,0 +1,6 @@
+CONFIG_BUILD_VGABIOS=y
+CONFIG_VGA_BOCHS=y
+CONFIG_VGA_PCI=y
+CONFIG_OVERRIDE_PCI_ID=y
+CONFIG_VGA_VID=0x15ad
+CONFIG_VGA_DID=0x0405
diff --git a/rules.mak b/rules.mak
index 60f3e96541..a28494679a 100644
--- a/rules.mak
+++ b/rules.mak
@@ -94,7 +94,6 @@ define unnest-dir
$(foreach var,$(nested-vars),$(call push-var,$(var),$1/))
$(eval obj := $(obj)/$1)
$(eval include $(SRC_PATH)/$1/Makefile.objs)
-$(eval -include $(wildcard $1/*.d))
$(eval obj := $(patsubst %/$1,%,$(obj)))
$(foreach var,$(nested-vars),$(call pop-var,$(var),$1/))
endef
@@ -113,4 +112,6 @@ define unnest-vars
$(call unnest-vars-1)
$(foreach var,$(nested-vars),$(eval $(var) := $(filter-out %/, $($(var)))))
$(shell mkdir -p $(sort $(foreach var,$(nested-vars),$(dir $($(var))))))
+$(foreach var,$(nested-vars), $(eval \
+ -include $(addsuffix *.d, $(sort $(dir $($(var)))))))
endef
diff --git a/savevm.c b/savevm.c
index faa81457d5..6e82b2d3e3 100644
--- a/savevm.c
+++ b/savevm.c
@@ -85,6 +85,7 @@
#include "cpus.h"
#include "memory.h"
#include "qmp-commands.h"
+#include "trace.h"
#define SELF_ANNOUNCE_ROUNDS 5
@@ -1170,10 +1171,7 @@ typedef struct SaveStateEntry {
int alias_id;
int version_id;
int section_id;
- SaveSetParamsHandler *set_params;
- SaveLiveStateHandler *save_live_state;
- SaveStateHandler *save_state;
- LoadStateHandler *load_state;
+ SaveVMHandlers *ops;
const VMStateDescription *vmsd;
void *opaque;
CompatEntry *compat;
@@ -1225,10 +1223,7 @@ int register_savevm_live(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
- SaveSetParamsHandler *set_params,
- SaveLiveStateHandler *save_live_state,
- SaveStateHandler *save_state,
- LoadStateHandler *load_state,
+ SaveVMHandlers *ops,
void *opaque)
{
SaveStateEntry *se;
@@ -1236,15 +1231,12 @@ int register_savevm_live(DeviceState *dev,
se = g_malloc0(sizeof(SaveStateEntry));
se->version_id = version_id;
se->section_id = global_section_id++;
- se->set_params = set_params;
- se->save_live_state = save_live_state;
- se->save_state = save_state;
- se->load_state = load_state;
+ se->ops = ops;
se->opaque = opaque;
se->vmsd = NULL;
se->no_migrate = 0;
/* if this is a live_savem then set is_ram */
- if (save_live_state != NULL) {
+ if (ops->save_live_setup != NULL) {
se->is_ram = 1;
}
@@ -1283,8 +1275,11 @@ int register_savevm(DeviceState *dev,
LoadStateHandler *load_state,
void *opaque)
{
+ SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers));
+ ops->save_state = save_state;
+ ops->load_state = load_state;
return register_savevm_live(dev, idstr, instance_id, version_id,
- NULL, NULL, save_state, load_state, opaque);
+ ops, opaque);
}
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
@@ -1308,6 +1303,7 @@ void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
if (se->compat) {
g_free(se->compat);
}
+ g_free(se->ops);
g_free(se);
}
}
@@ -1326,9 +1322,6 @@ int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
se = g_malloc0(sizeof(SaveStateEntry));
se->version_id = vmsd->version_id;
se->section_id = global_section_id++;
- se->save_live_state = NULL;
- se->save_state = NULL;
- se->load_state = NULL;
se->opaque = opaque;
se->vmsd = vmsd;
se->alias_id = alias_id;
@@ -1523,7 +1516,7 @@ void vmstate_save_state(QEMUFile *f, const VMStateDescription *vmsd,
static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
{
if (!se->vmsd) { /* Old style */
- return se->load_state(f, se->opaque, version_id);
+ return se->ops->load_state(f, se->opaque, version_id);
}
return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
}
@@ -1531,7 +1524,7 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
{
if (!se->vmsd) { /* Old style */
- se->save_state(f, se->opaque);
+ se->ops->save_state(f, se->opaque);
return;
}
vmstate_save_state(f,se->vmsd, se->opaque);
@@ -1561,16 +1554,17 @@ bool qemu_savevm_state_blocked(Error **errp)
return false;
}
-int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared)
+int qemu_savevm_state_begin(QEMUFile *f,
+ const MigrationParams *params)
{
SaveStateEntry *se;
int ret;
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
- if(se->set_params == NULL) {
+ if (!se->ops || !se->ops->set_params) {
continue;
- }
- se->set_params(blk_enable, shared, se->opaque);
+ }
+ se->ops->set_params(params, se->opaque);
}
qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
@@ -1579,9 +1573,14 @@ int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared)
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
int len;
- if (se->save_live_state == NULL)
+ if (!se->ops || !se->ops->save_live_setup) {
continue;
-
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_START);
qemu_put_be32(f, se->section_id);
@@ -1594,7 +1593,7 @@ int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared)
qemu_put_be32(f, se->instance_id);
qemu_put_be32(f, se->version_id);
- ret = se->save_live_state(f, QEMU_VM_SECTION_START, se->opaque);
+ ret = se->ops->save_live_setup(f, se->opaque);
if (ret < 0) {
qemu_savevm_state_cancel(f);
return ret;
@@ -1621,14 +1620,25 @@ int qemu_savevm_state_iterate(QEMUFile *f)
int ret = 1;
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
- if (se->save_live_state == NULL)
+ if (!se->ops || !se->ops->save_live_iterate) {
continue;
-
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+ if (qemu_file_rate_limit(f)) {
+ return 0;
+ }
+ trace_savevm_section_start();
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_PART);
qemu_put_be32(f, se->section_id);
- ret = se->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
+ ret = se->ops->save_live_iterate(f, se->opaque);
+ trace_savevm_section_end(se->section_id);
+
if (ret <= 0) {
/* Do not proceed to the next vmstate before this one reported
completion of the current stage. This serializes the migration
@@ -1655,14 +1665,21 @@ int qemu_savevm_state_complete(QEMUFile *f)
cpu_synchronize_all_states();
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
- if (se->save_live_state == NULL)
+ if (!se->ops || !se->ops->save_live_complete) {
continue;
-
+ }
+ if (se->ops && se->ops->is_active) {
+ if (!se->ops->is_active(se->opaque)) {
+ continue;
+ }
+ }
+ trace_savevm_section_start();
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_END);
qemu_put_be32(f, se->section_id);
- ret = se->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
+ ret = se->ops->save_live_complete(f, se->opaque);
+ trace_savevm_section_end(se->section_id);
if (ret < 0) {
return ret;
}
@@ -1671,9 +1688,10 @@ int qemu_savevm_state_complete(QEMUFile *f)
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
int len;
- if (se->save_state == NULL && se->vmsd == NULL)
+ if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
continue;
-
+ }
+ trace_savevm_section_start();
/* Section type */
qemu_put_byte(f, QEMU_VM_SECTION_FULL);
qemu_put_be32(f, se->section_id);
@@ -1687,6 +1705,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
qemu_put_be32(f, se->version_id);
vmstate_save(f, se);
+ trace_savevm_section_end(se->section_id);
}
qemu_put_byte(f, QEMU_VM_EOF);
@@ -1699,8 +1718,8 @@ void qemu_savevm_state_cancel(QEMUFile *f)
SaveStateEntry *se;
QTAILQ_FOREACH(se, &savevm_handlers, entry) {
- if (se->save_live_state) {
- se->save_live_state(f, -1, se->opaque);
+ if (se->ops && se->ops->cancel) {
+ se->ops->cancel(se->opaque);
}
}
}
@@ -1708,13 +1727,17 @@ void qemu_savevm_state_cancel(QEMUFile *f)
static int qemu_savevm_state(QEMUFile *f)
{
int ret;
+ MigrationParams params = {
+ .blk = 0,
+ .shared = 0
+ };
if (qemu_savevm_state_blocked(NULL)) {
ret = -EINVAL;
goto out;
}
- ret = qemu_savevm_state_begin(f, 0, 0);
+ ret = qemu_savevm_state_begin(f, &params);
if (ret < 0)
goto out;
@@ -1749,7 +1772,7 @@ static int qemu_save_device_state(QEMUFile *f)
if (se->is_ram) {
continue;
}
- if (se->save_state == NULL && se->vmsd == NULL) {
+ if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
continue;
}
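With this patch, the live-migration callbacks are grouped into a single SaveVMHandlers structure passed to register_savevm_live(). A registration sketch using the callback names referenced above (the foo_* stubs, the opaque pointer, and the "vmstate.h" include are hypothetical, not part of this patch):

    #include "vmstate.h"   /* assumed location of SaveVMHandlers in this tree */

    /* Stub bodies for illustration only. */
    static int foo_save_setup(QEMUFile *f, void *opaque)    { return 0; }
    static int foo_save_iterate(QEMUFile *f, void *opaque)  { return 0; }
    static int foo_save_complete(QEMUFile *f, void *opaque) { return 0; }
    static void foo_save_cancel(void *opaque)               { }

    static SaveVMHandlers savevm_foo_handlers = {
        .save_live_setup    = foo_save_setup,
        .save_live_iterate  = foo_save_iterate,
        .save_live_complete = foo_save_complete,
        .cancel             = foo_save_cancel,
    };

    /* ... during device init (instance_id -1 lets savevm pick one): */
    /* register_savevm_live(NULL, "foo", -1, 1, &savevm_foo_handlers, NULL); */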
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 8850a5f436..b98dc6cad1 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2849,6 +2849,11 @@ sub process {
ERROR("lockdep_no_validate class is reserved for device->mutex.\n" . $herecurr);
}
}
+
+# QEMU specific tests
+ if ($rawline =~ /\b(?:Qemu|QEmu)\b/) {
+ WARN("use QEMU instead of Qemu or QEmu\n" . $herecurr);
+ }
}
# If we have no input at all, then there is nothing to report on
diff --git a/scripts/make-release b/scripts/make-release
new file mode 100755
index 0000000000..196c755f57
--- /dev/null
+++ b/scripts/make-release
@@ -0,0 +1,24 @@
+#!/bin/bash -e
+#
+# QEMU Release Script
+#
+# Copyright IBM, Corp. 2012
+#
+# Authors:
+# Anthony Liguori <aliguori@us.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPLv2 or later.
+# See the COPYING file in the top-level directory.
+
+src="$1"
+version="$2"
+destination=qemu-${version}
+
+git clone "${src}" ${destination}
+pushd ${destination}
+git checkout "v${version}"
+git submodule update --init
+rm -rf .git roms/*/.git
+popd
+tar cfj ${destination}.tar.bz2 ${destination}
+rm -rf ${destination}
diff --git a/scripts/qapi-visit.py b/scripts/qapi-visit.py
index 8d4e94a45f..04ef7c41ab 100644
--- a/scripts/qapi-visit.py
+++ b/scripts/qapi-visit.py
@@ -17,32 +17,49 @@ import os
import getopt
import errno
-def generate_visit_struct_body(field_prefix, members):
- ret = ""
+def generate_visit_struct_body(field_prefix, name, members):
+ ret = mcgen('''
+if (!error_is_set(errp)) {
+''')
+ push_indent()
+
if len(field_prefix):
field_prefix = field_prefix + "."
+ ret += mcgen('''
+Error **errp = &err; /* from outer scope */
+Error *err = NULL;
+visit_start_struct(m, NULL, "", "%(name)s", 0, &err);
+''',
+ name=name)
+ else:
+ ret += mcgen('''
+Error *err = NULL;
+visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
+''',
+ name=name)
+
+ ret += mcgen('''
+if (!err) {
+ if (!obj || *obj) {
+''')
+
+ push_indent()
+ push_indent()
for argname, argentry, optional, structured in parse_args(members):
if optional:
ret += mcgen('''
-visit_start_optional(m, (obj && *obj) ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", errp);
-if ((*obj)->%(prefix)shas_%(c_name)s) {
+visit_start_optional(m, obj ? &(*obj)->%(c_prefix)shas_%(c_name)s : NULL, "%(name)s", &err);
+if (obj && (*obj)->%(prefix)shas_%(c_name)s) {
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
c_name=c_var(argname), name=argname)
push_indent()
if structured:
- ret += mcgen('''
-visit_start_struct(m, NULL, "", "%(name)s", 0, errp);
-''',
- name=argname)
- ret += generate_visit_struct_body(field_prefix + argname, argentry)
- ret += mcgen('''
-visit_end_struct(m, errp);
-''')
+ ret += generate_visit_struct_body(field_prefix + argname, argname, argentry)
else:
ret += mcgen('''
-visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", errp);
+visit_type_%(type)s(m, obj ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "%(name)s", &err);
''',
c_prefix=c_var(field_prefix), prefix=field_prefix,
type=type_name(argentry), c_name=c_var(argname),
@@ -52,7 +69,25 @@ visit_type_%(type)s(m, (obj && *obj) ? &(*obj)->%(c_prefix)s%(c_name)s : NULL, "
pop_indent()
ret += mcgen('''
}
-visit_end_optional(m, errp);
+visit_end_optional(m, &err);
+''')
+
+ pop_indent()
+ ret += mcgen('''
+
+ error_propagate(errp, err);
+ err = NULL;
+}
+''')
+
+ pop_indent()
+ pop_indent()
+ ret += mcgen('''
+ /* Always call end_struct if start_struct succeeded. */
+ visit_end_struct(m, &err);
+ }
+ error_propagate(errp, err);
+}
''')
return ret
@@ -61,22 +96,14 @@ def generate_visit_struct(name, members):
void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **errp)
{
- if (error_is_set(errp)) {
- return;
- }
- visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), errp);
- if (obj && !*obj) {
- goto end;
- }
''',
name=name)
+
push_indent()
- ret += generate_visit_struct_body("", members)
+ ret += generate_visit_struct_body("", name, members)
pop_indent()
ret += mcgen('''
-end:
- visit_end_struct(m, errp);
}
''')
return ret
@@ -87,18 +114,23 @@ def generate_visit_list(name, members):
void visit_type_%(name)sList(Visitor *m, %(name)sList ** obj, const char *name, Error **errp)
{
GenericList *i, **prev = (GenericList **)obj;
+ Error *err = NULL;
- if (error_is_set(errp)) {
- return;
- }
- visit_start_list(m, name, errp);
-
- for (; (i = visit_next_list(m, prev, errp)) != NULL; prev = &i) {
- %(name)sList *native_i = (%(name)sList *)i;
- visit_type_%(name)s(m, &native_i->value, NULL, errp);
+ if (!error_is_set(errp)) {
+ visit_start_list(m, name, &err);
+ if (!err) {
+ for (; (i = visit_next_list(m, prev, &err)) != NULL; prev = &i) {
+ %(name)sList *native_i = (%(name)sList *)i;
+ visit_type_%(name)s(m, &native_i->value, NULL, &err);
+ }
+ error_propagate(errp, err);
+ err = NULL;
+
+ /* Always call end_list if start_list succeeded. */
+ visit_end_list(m, &err);
+ }
+ error_propagate(errp, err);
}
-
- visit_end_list(m, errp);
}
''',
name=name)
@@ -122,27 +154,23 @@ void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **
{
Error *err = NULL;
- if (error_is_set(errp)) {
- return;
- }
- visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
- if (obj && !*obj) {
- goto end;
- }
- visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err);
- if (err) {
- error_propagate(errp, err);
- goto end;
- }
- switch ((*obj)->kind) {
+ if (!error_is_set(errp)) {
+ visit_start_struct(m, (void **)obj, "%(name)s", name, sizeof(%(name)s), &err);
+ if (!err) {
+ if (!obj || *obj) {
+ visit_type_%(name)sKind(m, &(*obj)->kind, "type", &err);
+ if (!err) {
+ switch ((*obj)->kind) {
''',
name=name)
+ push_indent()
+ push_indent()
for key in members:
ret += mcgen('''
- case %(abbrev)s_KIND_%(enum)s:
- visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", errp);
- break;
+ case %(abbrev)s_KIND_%(enum)s:
+ visit_type_%(c_type)s(m, &(*obj)->%(c_name)s, "data", &err);
+ break;
''',
abbrev = de_camel_case(name).upper(),
enum = c_fun(de_camel_case(key)).upper(),
@@ -150,11 +178,25 @@ void visit_type_%(name)s(Visitor *m, %(name)s ** obj, const char *name, Error **
c_name=c_fun(key))
ret += mcgen('''
- default:
- abort();
+ default:
+ abort();
+ }
+ }
+ error_propagate(errp, err);
+ err = NULL;
+ }
+''')
+ pop_indent()
+ ret += mcgen('''
+ /* Always call end_struct if start_struct succeeded. */
+ visit_end_struct(m, &err);
}
-end:
- visit_end_struct(m, errp);
+ error_propagate(errp, err);
+}
+''')
+
+ pop_indent();
+ ret += mcgen('''
}
''')
diff --git a/scripts/qapi.py b/scripts/qapi.py
index e06233666b..122b4cb6d1 100644
--- a/scripts/qapi.py
+++ b/scripts/qapi.py
@@ -13,18 +13,29 @@ from ordereddict import OrderedDict
def tokenize(data):
while len(data):
- if data[0] in ['{', '}', ':', ',', '[', ']']:
- yield data[0]
- data = data[1:]
- elif data[0] in ' \n':
- data = data[1:]
- elif data[0] == "'":
- data = data[1:]
+ ch = data[0]
+ data = data[1:]
+ if ch in ['{', '}', ':', ',', '[', ']']:
+ yield ch
+ elif ch in ' \n':
+ None
+ elif ch == "'":
string = ''
- while data[0] != "'":
- string += data[0]
+ esc = False
+ while True:
+ if (data == ''):
+ raise Exception("Mismatched quotes")
+ ch = data[0]
data = data[1:]
- data = data[1:]
+ if esc:
+ string += ch
+ esc = False
+ elif ch == "\\":
+ esc = True
+ elif ch == "'":
+ break
+ else:
+ string += ch
yield string
def parse(tokens):
@@ -131,6 +142,22 @@ def camel_case(name):
return new_name
def c_var(name):
+ # ANSI X3J11/88-090, 3.1.1
+ c89_words = set(['auto', 'break', 'case', 'char', 'const', 'continue',
+ 'default', 'do', 'double', 'else', 'enum', 'extern', 'float',
+ 'for', 'goto', 'if', 'int', 'long', 'register', 'return',
+ 'short', 'signed', 'sizeof', 'static', 'struct', 'switch',
+ 'typedef', 'union', 'unsigned', 'void', 'volatile', 'while'])
+ # ISO/IEC 9899:1999, 6.4.1
+ c99_words = set(['inline', 'restrict', '_Bool', '_Complex', '_Imaginary'])
+ # ISO/IEC 9899:2011, 6.4.1
+ c11_words = set(['_Alignas', '_Alignof', '_Atomic', '_Generic', '_Noreturn',
+ '_Static_assert', '_Thread_local'])
+ # GCC http://gcc.gnu.org/onlinedocs/gcc-4.7.1/gcc/C-Extensions.html
+ # excluding _.*
+ gcc_words = set(['asm', 'typeof'])
+ if name in c89_words | c99_words | c11_words | gcc_words:
+ return "q_" + name
return name.replace('-', '_').lstrip("*")
def c_fun(name):
@@ -159,6 +186,12 @@ def c_type(name):
return 'char *'
elif name == 'int':
return 'int64_t'
+ elif (name == 'int8' or name == 'int16' or name == 'int32' or
+ name == 'int64' or name == 'uint8' or name == 'uint16' or
+ name == 'uint32' or name == 'uint64'):
+ return name + '_t'
+ elif name == 'size':
+ return 'uint64_t'
elif name == 'bool':
return 'bool'
elif name == 'number':
diff --git a/scripts/simpletrace.py b/scripts/simpletrace.py
index f55e5e63f9..9b4419f7c3 100755
--- a/scripts/simpletrace.py
+++ b/scripts/simpletrace.py
@@ -12,53 +12,69 @@
import struct
import re
import inspect
+from tracetool import _read_events, Event
+from tracetool.backend.simple import is_string
header_event_id = 0xffffffffffffffff
header_magic = 0xf2b177cb0aa429b4
-header_version = 0
dropped_event_id = 0xfffffffffffffffe
-trace_fmt = '=QQQQQQQQ'
-trace_len = struct.calcsize(trace_fmt)
-event_re = re.compile(r'(disable\s+)?([a-zA-Z0-9_]+)\(([^)]*)\).*')
+log_header_fmt = '=QQQ'
+rec_header_fmt = '=QQII'
-def parse_events(fobj):
- """Parse a trace-events file into {event_num: (name, arg1, ...)}."""
-
- def get_argnames(args):
- """Extract argument names from a parameter list."""
- return tuple(arg.split()[-1].lstrip('*') for arg in args.split(','))
-
- events = {dropped_event_id: ('dropped', 'count')}
- event_num = 0
- for line in fobj:
- m = event_re.match(line.strip())
- if m is None:
- continue
-
- disable, name, args = m.groups()
- events[event_num] = (name,) + get_argnames(args)
- event_num += 1
- return events
+def read_header(fobj, hfmt):
+ '''Read a trace record header'''
+ hlen = struct.calcsize(hfmt)
+ hdr = fobj.read(hlen)
+ if len(hdr) != hlen:
+ return None
+ return struct.unpack(hfmt, hdr)
-def read_record(fobj):
+def get_record(edict, rechdr, fobj):
"""Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
- s = fobj.read(trace_len)
- if len(s) != trace_len:
+ if rechdr is None:
return None
- return struct.unpack(trace_fmt, s)
+ rec = (rechdr[0], rechdr[1])
+ if rechdr[0] != dropped_event_id:
+ event_id = rechdr[0]
+ event = edict[event_id]
+ for type, name in event.args:
+ if is_string(type):
+ l = fobj.read(4)
+ (len,) = struct.unpack('=L', l)
+ s = fobj.read(len)
+ rec = rec + (s,)
+ else:
+ (value,) = struct.unpack('=Q', fobj.read(8))
+ rec = rec + (value,)
+ else:
+ (value,) = struct.unpack('=Q', fobj.read(8))
+ rec = rec + (value,)
+ return rec
+
+
+def read_record(edict, fobj):
+ """Deserialize a trace record from a file into a tuple (event_num, timestamp, arg1, ..., arg6)."""
+ rechdr = read_header(fobj, rec_header_fmt)
+ return get_record(edict, rechdr, fobj) # return tuple of record elements
-def read_trace_file(fobj):
+def read_trace_file(edict, fobj):
"""Deserialize trace records from a file, yielding record tuples (event_num, timestamp, arg1, ..., arg6)."""
- header = read_record(fobj)
+ header = read_header(fobj, log_header_fmt)
if header is None or \
header[0] != header_event_id or \
- header[1] != header_magic or \
- header[2] != header_version:
- raise ValueError('not a trace file or incompatible version')
+ header[1] != header_magic:
+ raise ValueError('Not a valid trace file!')
+ if header[2] != 0 and \
+ header[2] != 2:
+ raise ValueError('Unknown version of tracelog format!')
+
+ log_version = header[2]
+ if log_version == 0:
+ raise ValueError('Older log format, not supported with this QEMU release!')
while True:
- rec = read_record(fobj)
+ rec = read_record(edict, fobj)
if rec is None:
break
@@ -89,16 +105,29 @@ class Analyzer(object):
def process(events, log, analyzer):
"""Invoke an analyzer on each event in a log."""
if isinstance(events, str):
- events = parse_events(open(events, 'r'))
+ events = _read_events(open(events, 'r'))
if isinstance(log, str):
log = open(log, 'rb')
+ enabled_events = []
+ dropped_event = Event.build("Dropped_Event(uint64_t num_events_dropped)")
+ edict = {dropped_event_id: dropped_event}
+
+ for e in events:
+ if 'disable' not in e.properties:
+ enabled_events.append(e)
+ for num, event in enumerate(enabled_events):
+ edict[num] = event
+
def build_fn(analyzer, event):
- fn = getattr(analyzer, event[0], None)
+ if isinstance(event, str):
+ return analyzer.catchall
+
+ fn = getattr(analyzer, event.name, None)
if fn is None:
return analyzer.catchall
- event_argcount = len(event) - 1
+ event_argcount = len(event.args)
fn_argcount = len(inspect.getargspec(fn)[0]) - 1
if fn_argcount == event_argcount + 1:
# Include timestamp as first argument
@@ -109,9 +138,9 @@ def process(events, log, analyzer):
analyzer.begin()
fn_cache = {}
- for rec in read_trace_file(log):
+ for rec in read_trace_file(edict, log):
event_num = rec[0]
- event = events[event_num]
+ event = edict[event_num]
if event_num not in fn_cache:
fn_cache[event_num] = build_fn(analyzer, event)
fn_cache[event_num](event, rec)
@@ -128,7 +157,7 @@ def run(analyzer):
sys.stderr.write('usage: %s <trace-events> <trace-file>\n' % sys.argv[0])
sys.exit(1)
- events = parse_events(open(sys.argv[1], 'r'))
+ events = _read_events(open(sys.argv[1], 'r'))
process(events, sys.argv[2], analyzer)
if __name__ == '__main__':
@@ -137,15 +166,20 @@ if __name__ == '__main__':
self.last_timestamp = None
def catchall(self, event, rec):
+ i = 1
timestamp = rec[1]
if self.last_timestamp is None:
self.last_timestamp = timestamp
delta_ns = timestamp - self.last_timestamp
self.last_timestamp = timestamp
- fields = [event[0], '%0.3f' % (delta_ns / 1000.0)]
- for i in xrange(1, len(event)):
- fields.append('%s=0x%x' % (event[i], rec[i + 1]))
+ fields = [event.name, '%0.3f' % (delta_ns / 1000.0)]
+ for type, name in event.args:
+ if is_string(type):
+ fields.append('%s=%s' % (name, rec[i + 1]))
+ else:
+ fields.append('%s=0x%x' % (name, rec[i + 1]))
+ i += 1
print ' '.join(fields)
run(Formatter())
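
The rewritten simpletrace.py reads a versioned binary log: a file header unpacked with '=QQQ' followed, for each event, by a record header unpacked with '=QQII' and then the argument payload. A minimal C sketch of that on-disk layout, with field names inferred from how the script consumes them (the names themselves are assumptions, not taken from trace/simple.h):

    #include <stdint.h>

    /* File header: matches the Python '=QQQ' unpack. */
    struct log_header {
        uint64_t event_id;   /* always 0xffffffffffffffff for the header */
        uint64_t magic;      /* 0xf2b177cb0aa429b4 */
        uint64_t version;    /* 0 (old format) or 2 (this format) */
    };

    /* Per-record header: matches the Python '=QQII' unpack. */
    struct rec_header {
        uint64_t event_id;   /* index into the event dictionary */
        uint64_t timestamp;  /* nanoseconds */
        uint32_t length;     /* total record length in bytes (assumed) */
        uint32_t reserved;   /* assumed; the script ignores this field */
    };

    /* Arguments follow the record header in declaration order: 8 bytes
     * for each non-string argument, and a 4-byte length followed by the
     * raw bytes for each string argument. */
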
diff --git a/scripts/tracetool/backend/simple.py b/scripts/tracetool/backend/simple.py
index fbb5717c66..c7e47d6d72 100644
--- a/scripts/tracetool/backend/simple.py
+++ b/scripts/tracetool/backend/simple.py
@@ -15,9 +15,16 @@ __email__ = "stefanha@linux.vnet.ibm.com"
from tracetool import out
+def is_string(arg):
+ strtype = ('const char*', 'char*', 'const char *', 'char *')
+ if arg.lstrip().startswith(strtype):
+ return True
+ else:
+ return False
def c(events):
out('#include "trace.h"',
+ '#include "trace/simple.h"',
'',
'TraceEvent trace_list[] = {')
@@ -26,30 +33,75 @@ def c(events):
name = e.name,
)
- out('};')
+ out('};',
+ '')
+
+ for num, event in enumerate(events):
+ out('void trace_%(name)s(%(args)s)',
+ '{',
+ ' TraceBufferRecord rec;',
+ name = event.name,
+ args = event.args,
+ )
+ sizes = []
+ for type_, name in event.args:
+ if is_string(type_):
+ out(' size_t arg%(name)s_len = %(name)s ? MIN(strlen(%(name)s), MAX_TRACE_STRLEN) : 0;',
+ name = name,
+ )
+ strsizeinfo = "4 + arg%s_len" % name
+ sizes.append(strsizeinfo)
+ else:
+ sizes.append("8")
+ sizestr = " + ".join(sizes)
+ if len(event.args) == 0:
+ sizestr = '0'
+
+
+ out('',
+ ' if (!trace_list[%(event_id)s].state) {',
+ ' return;',
+ ' }',
+ '',
+ ' if (trace_record_start(&rec, %(event_id)s, %(size_str)s)) {',
+ ' return; /* Trace Buffer Full, Event Dropped ! */',
+ ' }',
+ event_id = num,
+ size_str = sizestr,
+ )
+
+ if len(event.args) > 0:
+ for type_, name in event.args:
+ # string
+ if is_string(type_):
+ out(' trace_record_write_str(&rec, %(name)s, arg%(name)s_len);',
+ name = name,
+ )
+ # pointer var (not string)
+ elif type_.endswith('*'):
+ out(' trace_record_write_u64(&rec, (uint64_t)(uint64_t *)%(name)s);',
+ name = name,
+ )
+ # primitive data type
+ else:
+ out(' trace_record_write_u64(&rec, (uint64_t)%(name)s);',
+ name = name,
+ )
+
+ out(' trace_record_finish(&rec);',
+ '}',
+ '')
+
def h(events):
out('#include "trace/simple.h"',
'')
- for num, e in enumerate(events):
- if len(e.args):
- argstr = e.args.names()
- arg_prefix = ', (uint64_t)(uintptr_t)'
- cast_args = arg_prefix + arg_prefix.join(argstr)
- simple_args = (str(num) + cast_args)
- else:
- simple_args = str(num)
-
- out('static inline void trace_%(name)s(%(args)s)',
- '{',
- ' trace%(argc)d(%(trace_args)s);',
- '}',
- name = e.name,
- args = e.args,
- argc = len(e.args),
- trace_args = simple_args,
+ for event in events:
+ out('void trace_%(name)s(%(args)s);',
+ name = event.name,
+ args = event.args,
)
-
+ out('')
out('#define NR_TRACE_EVENTS %d' % len(events))
out('extern TraceEvent trace_list[NR_TRACE_EVENTS];')
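
With these changes the per-event trace functions are emitted as real C bodies instead of inline wrappers in the header. For a hypothetical event declared as foo(uint64_t x, const char *name), the backend would generate roughly the following (assembled from the templates above; the event id 42 and the event itself are made up):

    void trace_foo(uint64_t x, const char *name)
    {
        TraceBufferRecord rec;
        size_t argname_len = name ? MIN(strlen(name), MAX_TRACE_STRLEN) : 0;

        if (!trace_list[42].state) {
            return;
        }

        if (trace_record_start(&rec, 42, 8 + 4 + argname_len)) {
            return; /* Trace Buffer Full, Event Dropped ! */
        }

        trace_record_write_u64(&rec, (uint64_t)x);
        trace_record_write_str(&rec, name, argname_len);
        trace_record_finish(&rec);
    }
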
diff --git a/slirp/if.c b/slirp/if.c
index 096cf6fd07..533295dd07 100644
--- a/slirp/if.c
+++ b/slirp/if.c
@@ -177,11 +177,6 @@ void if_start(Slirp *slirp)
}
while (ifm_next) {
- /* check if we can really output */
- if (!slirp_can_output(slirp->opaque)) {
- break;
- }
-
ifm = ifm_next;
from_batchq = next_from_batchq;
diff --git a/slirp/libslirp.h b/slirp/libslirp.h
index 77527ad922..9b471b5053 100644
--- a/slirp/libslirp.h
+++ b/slirp/libslirp.h
@@ -25,7 +25,6 @@ void slirp_select_poll(fd_set *readfds, fd_set *writefds, fd_set *xfds,
void slirp_input(Slirp *slirp, const uint8_t *pkt, int pkt_len);
/* you must provide the following functions: */
-int slirp_can_output(void *opaque);
void slirp_output(void *opaque, const uint8_t *pkt, int pkt_len);
int slirp_add_hostfwd(Slirp *slirp, int is_udp,
diff --git a/slirp/main.h b/slirp/main.h
index 028df4b361..1f3b84de92 100644
--- a/slirp/main.h
+++ b/slirp/main.h
@@ -31,6 +31,7 @@ extern char *exec_shell;
extern u_int curtime;
extern fd_set *global_readfds, *global_writefds, *global_xfds;
extern struct in_addr loopback_addr;
+extern unsigned long loopback_mask;
extern char *username;
extern char *socket_path;
extern int towrite_max;
diff --git a/slirp/slirp.c b/slirp/slirp.c
index 90473eb74a..38e0a2193a 100644
--- a/slirp/slirp.c
+++ b/slirp/slirp.c
@@ -29,6 +29,8 @@
/* host loopback address */
struct in_addr loopback_addr;
+/* host loopback network mask */
+unsigned long loopback_mask;
/* emulated hosts use the MAC addr 52:55:IP:IP:IP:IP */
static const uint8_t special_ethaddr[ETH_ALEN] = {
@@ -191,6 +193,7 @@ static void slirp_init_once(void)
#endif
loopback_addr.s_addr = htonl(INADDR_LOOPBACK);
+ loopback_mask = htonl(IN_CLASSA_NET);
}
static void slirp_state_save(QEMUFile *f, void *opaque);
diff --git a/slirp/tcp_subr.c b/slirp/tcp_subr.c
index 0a545c41e7..025b374367 100644
--- a/slirp/tcp_subr.c
+++ b/slirp/tcp_subr.c
@@ -435,8 +435,11 @@ tcp_connect(struct socket *inso)
so->so_fport = addr.sin_port;
so->so_faddr = addr.sin_addr;
/* Translate connections from localhost to the real hostname */
- if (so->so_faddr.s_addr == 0 || so->so_faddr.s_addr == loopback_addr.s_addr)
- so->so_faddr = slirp->vhost_addr;
+ if (so->so_faddr.s_addr == 0 ||
+ (so->so_faddr.s_addr & loopback_mask) ==
+ (loopback_addr.s_addr & loopback_mask)) {
+ so->so_faddr = slirp->vhost_addr;
+ }
/* Close the accept() socket, set right state */
if (inso->so_state & SS_FACCEPTONCE) {
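
With loopback_mask initialised to IN_CLASSA_NET, the rewritten test in tcp_connect() now redirects any destination inside 127.0.0.0/8 to the guest-visible host address, not only 127.0.0.1 itself. A small standalone check of the same comparison (the addresses are arbitrary examples):

    #include <arpa/inet.h>
    #include <netinet/in.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned long mask = htonl(IN_CLASSA_NET);   /* 255.0.0.0 */
        struct in_addr loopback, dest;

        inet_aton("127.0.0.1", &loopback);
        inet_aton("127.0.0.53", &dest);              /* any 127.x.y.z address */

        /* same test as the patched tcp_connect() */
        printf("%s\n", (dest.s_addr & mask) == (loopback.s_addr & mask)
               ? "redirect to vhost_addr" : "leave alone");
        return 0;
    }
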
diff --git a/sysemu.h b/sysemu.h
index bc2c788921..4669348a12 100644
--- a/sysemu.h
+++ b/sysemu.h
@@ -77,7 +77,8 @@ void do_info_snapshots(Monitor *mon);
void qemu_announce_self(void);
bool qemu_savevm_state_blocked(Error **errp);
-int qemu_savevm_state_begin(QEMUFile *f, int blk_enable, int shared);
+int qemu_savevm_state_begin(QEMUFile *f,
+ const MigrationParams *params);
int qemu_savevm_state_iterate(QEMUFile *f);
int qemu_savevm_state_complete(QEMUFile *f);
void qemu_savevm_state_cancel(QEMUFile *f);
@@ -133,9 +134,10 @@ extern uint8_t qemu_extra_params_fw[2];
extern QEMUClock *rtc_clock;
#define MAX_NODES 64
+#define MAX_CPUMASK_BITS 255
extern int nb_numa_nodes;
extern uint64_t node_mem[MAX_NODES];
-extern uint64_t node_cpumask[MAX_NODES];
+extern unsigned long *node_cpumask[MAX_NODES];
#define MAX_OPTION_ROMS 16
typedef struct QEMUOptionRom {
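
Turning node_cpumask into an array of unsigned long pointers makes each node's CPU set an arbitrary-length bitmap of up to MAX_CPUMASK_BITS bits instead of a single 64-bit word. A plain-C stand-in for the indexing that the bitops.h helpers perform (only the underlying arithmetic is shown, not the actual helper names):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_CPUMASK_BITS 255
    #define BITS_PER_LONG    (CHAR_BIT * sizeof(unsigned long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
        unsigned long *mask = calloc(BITS_TO_LONGS(MAX_CPUMASK_BITS),
                                     sizeof(unsigned long));
        int cpu = 130;                               /* > 63 now fits */

        mask[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
        printf("cpu %d set: %d\n", cpu,
               !!(mask[cpu / BITS_PER_LONG] & (1UL << (cpu % BITS_PER_LONG))));
        free(mask);
        return 0;
    }
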
diff --git a/target-alpha/cpu.h b/target-alpha/cpu.h
index 99f9ee168d..5689760cef 100644
--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -40,9 +40,20 @@
#define TARGET_PAGE_BITS 13
+#ifdef CONFIG_USER_ONLY
+/* ??? The kernel likes to give addresses in high memory. If the host has
+ more virtual address space than the guest, this can lead to impossible
+ allocations. Honor the long-standing assumption that only kernel addrs
+ are negative, but otherwise allow allocations anywhere. This could lead
+ to tricky emulation problems for programs doing tagged addressing, but
+ those are far fewer than the programs that encounter the impossible allocation problem. */
+#define TARGET_PHYS_ADDR_SPACE_BITS 63
+#define TARGET_VIRT_ADDR_SPACE_BITS 63
+#else
/* ??? EV4 has 34 phys addr bits, EV5 has 40, EV6 has 44. */
-#define TARGET_PHYS_ADDR_SPACE_BITS 44
-#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
+#define TARGET_PHYS_ADDR_SPACE_BITS 44
+#define TARGET_VIRT_ADDR_SPACE_BITS (30 + TARGET_PAGE_BITS)
+#endif
/* Alpha major type */
enum {
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index ae5795337f..b00f5fa547 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -129,7 +129,7 @@ static void arm_cpu_reset(CPUState *s)
static inline void set_feature(CPUARMState *env, int feature)
{
- env->features |= 1u << feature;
+ env->features |= 1ULL << feature;
}
static void arm_cpu_initfn(Object *obj)
@@ -192,6 +192,9 @@ void arm_cpu_realize(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_VFP3)) {
set_feature(env, ARM_FEATURE_VFP);
}
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ set_feature(env, ARM_FEATURE_PXN);
+ }
register_cp_regs_for_features(cpu);
}
@@ -532,6 +535,7 @@ static void cortex_a15_initfn(Object *obj)
set_feature(&cpu->env, ARM_FEATURE_V7MP);
set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS);
+ set_feature(&cpu->env, ARM_FEATURE_LPAE);
cpu->midr = 0x412fc0f1;
cpu->reset_fpsid = 0x410430f0;
cpu->mvfr0 = 0x10110222;
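
The set_feature()/arm_feature() switch from 1u to 1ULL is what makes the widened features field usable: once the arm_features enum grows past 32 entries (PXN and LPAE push it in that direction), a 32-bit shift would be undefined for feature numbers of 32 and above. A trivial standalone illustration (the feature number is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int feature = 33;                /* hypothetical flag number >= 32 */
        uint64_t features = 0;

        /* features |= 1u << feature;       undefined: 32-bit shift by 33 */
        features |= 1ULL << feature;     /* well-defined 64-bit shift */
        printf("0x%llx\n", (unsigned long long)features);   /* 0x200000000 */
        return 0;
    }
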
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 33afa185e9..191895cca8 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -113,7 +113,9 @@ typedef struct CPUARMState {
uint32_t c1_xscaleauxcr; /* XScale auxiliary control register. */
uint32_t c1_scr; /* secure config register. */
uint32_t c2_base0; /* MMU translation table base 0. */
- uint32_t c2_base1; /* MMU translation table base 1. */
+ uint32_t c2_base0_hi; /* MMU translation table base 0, high 32 bits */
+ uint32_t c2_base1; /* MMU translation table base 1. */
+ uint32_t c2_base1_hi; /* MMU translation table base 1, high 32 bits */
uint32_t c2_control; /* MMU translation table base control. */
uint32_t c2_mask; /* MMU translation table base selection mask. */
uint32_t c2_base_mask; /* MMU translation table base 0 mask. */
@@ -127,6 +129,7 @@ typedef struct CPUARMState {
uint32_t c6_insn; /* Fault address registers. */
uint32_t c6_data;
uint32_t c7_par; /* Translation result. */
+ uint32_t c7_par_hi; /* Translation result, high 32 bits */
uint32_t c9_insn; /* Cache lockdown registers. */
uint32_t c9_data;
uint32_t c9_pmcr; /* performance monitor control register */
@@ -221,7 +224,7 @@ typedef struct CPUARMState {
/* These fields after the common ones so they are preserved on reset. */
/* Internal CPU feature flags. */
- uint32_t features;
+ uint64_t features;
void *nvic;
const struct arm_boot_info *boot_info;
@@ -386,11 +389,13 @@ enum arm_features {
ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
+ ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
+ ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
};
static inline int arm_feature(CPUARMState *env, int feature)
{
- return (env->features & (1u << feature)) != 0;
+ return (env->features & (1ULL << feature)) != 0;
}
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
@@ -619,7 +624,7 @@ static inline bool cp_access_ok(CPUARMState *env,
#define TARGET_PAGE_BITS 10
#endif
-#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_PHYS_ADDR_SPACE_BITS 40
#define TARGET_VIRT_ADDR_SPACE_BITS 32
static inline CPUARMState *cpu_init(const char *cpu_model)
@@ -636,7 +641,7 @@ static inline CPUARMState *cpu_init(const char *cpu_model)
#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list
-#define CPU_SAVE_VERSION 7
+#define CPU_SAVE_VERSION 9
/* MMU modes definitions */
#define MMU_MODE0_SUFFIX _kernel
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 23099236ad..5727da296c 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -3,11 +3,12 @@
#include "helper.h"
#include "host-utils.h"
#include "sysemu.h"
+#include "bitops.h"
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot,
+ target_phys_addr_t *phys_ptr, int *prot,
target_ulong *page_size);
#endif
@@ -216,9 +217,9 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
.access = PL1_W, .type = ARM_CP_NOP },
{ .name = "ISB", .cp = 15, .crn = 7, .crm = 5, .opc1 = 0, .opc2 = 4,
.access = PL0_W, .type = ARM_CP_NOP },
- { .name = "ISB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
+ { .name = "DSB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 4,
.access = PL0_W, .type = ARM_CP_NOP },
- { .name = "ISB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
+ { .name = "DMB", .cp = 15, .crn = 7, .crm = 10, .opc1 = 0, .opc2 = 5,
.access = PL0_W, .type = ARM_CP_NOP },
{ .name = "IFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_insn),
@@ -346,7 +347,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
*/
{ .name = "DBGDRAR", .cp = 14, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
- { .name = "DBGDRAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ { .name = "DBGDSAR", .cp = 14, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL0_R, .type = ARM_CP_CONST, .resetvalue = 0 },
/* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
{ .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
@@ -491,7 +492,9 @@ static const ARMCPRegInfo generic_timer_cp_reginfo[] = {
static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
- if (arm_feature(env, ARM_FEATURE_V7)) {
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ env->cp15.c7_par = value;
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
env->cp15.c7_par = value & 0xfffff6ff;
} else {
env->cp15.c7_par = value & 0xfffff1ff;
@@ -501,9 +504,20 @@ static int par_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
#ifndef CONFIG_USER_ONLY
/* get_phys_addr() isn't present for user-mode-only targets */
+
+/* Return true if extended addresses are enabled, ie this is an
+ * LPAE implementation and we are using the long-descriptor translation
+ * table format because the TTBCR EAE bit is set.
+ */
+static inline bool extended_addresses_enabled(CPUARMState *env)
+{
+ return arm_feature(env, ARM_FEATURE_LPAE)
+ && (env->cp15.c2_control & (1 << 31));
+}
+
static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
- uint32_t phys_addr;
+ target_phys_addr_t phys_addr;
target_ulong page_size;
int prot;
int ret, is_user = ri->opc2 & 2;
@@ -515,18 +529,44 @@ static int ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
}
ret = get_phys_addr(env, value, access_type, is_user,
&phys_addr, &prot, &page_size);
- if (ret == 0) {
- /* We do not set any attribute bits in the PAR */
- if (page_size == (1 << 24)
- && arm_feature(env, ARM_FEATURE_V7)) {
- env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
+ if (extended_addresses_enabled(env)) {
+ /* ret is a DFSR/IFSR value for the long descriptor
+ * translation table format, but with WnR always clear.
+ * Convert it to a 64-bit PAR.
+ */
+ uint64_t par64 = (1 << 11); /* LPAE bit always set */
+ if (ret == 0) {
+ par64 |= phys_addr & ~0xfffULL;
+ /* We don't set the ATTR or SH fields in the PAR. */
} else {
- env->cp15.c7_par = phys_addr & 0xfffff000;
+ par64 |= 1; /* F */
+ par64 |= (ret & 0x3f) << 1; /* FS */
+ /* Note that S2WLK and FSTAGE are always zero, because we don't
+ * implement virtualization and therefore there can't be a stage 2
+ * fault.
+ */
}
+ env->cp15.c7_par = par64;
+ env->cp15.c7_par_hi = par64 >> 32;
} else {
- env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
- ((ret & (12 << 1)) >> 6) |
- ((ret & 0xf) << 1) | 1;
+ /* ret is a DFSR/IFSR value for the short descriptor
+ * translation table format (with WnR always clear).
+ * Convert it to a 32-bit PAR.
+ */
+ if (ret == 0) {
+ /* We do not set any attribute bits in the PAR */
+ if (page_size == (1 << 24)
+ && arm_feature(env, ARM_FEATURE_V7)) {
+ env->cp15.c7_par = (phys_addr & 0xff000000) | 1 << 1;
+ } else {
+ env->cp15.c7_par = phys_addr & 0xfffff000;
+ }
+ } else {
+ env->cp15.c7_par = ((ret & (10 << 1)) >> 5) |
+ ((ret & (12 << 1)) >> 6) |
+ ((ret & 0xf) << 1) | 1;
+ }
+ env->cp15.c7_par_hi = 0;
}
return 0;
}
@@ -653,7 +693,20 @@ static const ARMCPRegInfo pmsav5_cp_reginfo[] = {
static int vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- value &= 7;
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ value &= ~((7 << 19) | (3 << 14) | (0xf << 3));
+ /* With LPAE the TTBCR could result in a change of ASID
+ * via the TTBCR.A1 bit, so do a TLB flush.
+ */
+ tlb_flush(env, 1);
+ } else {
+ value &= 7;
+ }
+ /* Note that we always calculate c2_mask and c2_base_mask, but
+ * they are only used for short-descriptor tables (ie if EAE is 0);
+ * for long-descriptor tables the TTBCR fields are used differently
+ * and the c2_mask and c2_base_mask values are meaningless.
+ */
env->cp15.c2_control = value;
env->cp15.c2_mask = ~(((uint32_t)0xffffffffu) >> value);
env->cp15.c2_base_mask = ~((uint32_t)0x3fffu >> value);
@@ -679,7 +732,7 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
.fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
{ .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
+ .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
{ .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .writefn = vmsa_ttbcr_write,
.resetfn = vmsa_ttbcr_reset,
@@ -871,6 +924,96 @@ static const ARMCPRegInfo mpidr_cp_reginfo[] = {
REGINFO_SENTINEL
};
+static int par64_read(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t *value)
+{
+ *value = ((uint64_t)env->cp15.c7_par_hi << 32) | env->cp15.c7_par;
+ return 0;
+}
+
+static int par64_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
+{
+ env->cp15.c7_par_hi = value >> 32;
+ env->cp15.c7_par = value;
+ return 0;
+}
+
+static void par64_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ env->cp15.c7_par_hi = 0;
+ env->cp15.c7_par = 0;
+}
+
+static int ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t *value)
+{
+ *value = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
+ return 0;
+}
+
+static int ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c2_base0_hi = value >> 32;
+ env->cp15.c2_base0 = value;
+ /* Writes to the 64 bit format TTBRs may change the ASID */
+ tlb_flush(env, 1);
+ return 0;
+}
+
+static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ env->cp15.c2_base0_hi = 0;
+ env->cp15.c2_base0 = 0;
+}
+
+static int ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t *value)
+{
+ *value = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
+ return 0;
+}
+
+static int ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ env->cp15.c2_base1_hi = value >> 32;
+ env->cp15.c2_base1 = value;
+ return 0;
+}
+
+static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ env->cp15.c2_base1_hi = 0;
+ env->cp15.c2_base1 = 0;
+}
+
+static const ARMCPRegInfo lpae_cp_reginfo[] = {
+ /* NOP AMAIR0/1: the override is because these clash with the rather
+ * broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
+ */
+ { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
+ .resetvalue = 0 },
+ { .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
+ .resetvalue = 0 },
+ /* 64 bit access versions of the (dummy) debug registers */
+ { .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "DBGDSAR", .cp = 14, .crm = 2, .opc1 = 0,
+ .access = PL0_R, .type = ARM_CP_CONST|ARM_CP_64BIT, .resetvalue = 0 },
+ { .name = "PAR", .cp = 15, .crm = 7, .opc1 = 0,
+ .access = PL1_RW, .type = ARM_CP_64BIT,
+ .readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
+ { .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
+ .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
+ .writefn = ttbr064_write, .resetfn = ttbr064_reset },
+ { .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
+ .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
+ .writefn = ttbr164_write, .resetfn = ttbr164_reset },
+ REGINFO_SENTINEL
+};
+
static int sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
env->cp15.c1_sys = value;
@@ -1016,6 +1159,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
if (arm_feature(env, ARM_FEATURE_MPIDR)) {
define_arm_cp_regs(cpu, mpidr_cp_reginfo);
}
+ if (arm_feature(env, ARM_FEATURE_LPAE)) {
+ define_arm_cp_regs(cpu, lpae_cp_reginfo);
+ }
/* Slightly awkwardly, the OMAP and StrongARM cores need all of
* cp15 crn=0 to be writes-ignored, whereas for other cores they should
* be read-only (ie write causes UNDEF exception).
@@ -1833,8 +1979,8 @@ static uint32_t get_level1_table_address(CPUARMState *env, uint32_t address)
}
static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot,
- target_ulong *page_size)
+ int is_user, target_phys_addr_t *phys_ptr,
+ int *prot, target_ulong *page_size)
{
int code;
uint32_t table;
@@ -1843,7 +1989,7 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
int ap;
int domain;
int domain_prot;
- uint32_t phys_addr;
+ target_phys_addr_t phys_addr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
@@ -1928,45 +2074,46 @@ do_fault:
}
static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot,
- target_ulong *page_size)
+ int is_user, target_phys_addr_t *phys_ptr,
+ int *prot, target_ulong *page_size)
{
int code;
uint32_t table;
uint32_t desc;
uint32_t xn;
+ uint32_t pxn = 0;
int type;
int ap;
- int domain;
+ int domain = 0;
int domain_prot;
- uint32_t phys_addr;
+ target_phys_addr_t phys_addr;
/* Pagetable walk. */
/* Lookup l1 descriptor. */
table = get_level1_table_address(env, address);
desc = ldl_phys(table);
type = (desc & 3);
- if (type == 0) {
- /* Section translation fault. */
+ if (type == 0 || (type == 3 && !arm_feature(env, ARM_FEATURE_PXN))) {
+ /* Section translation fault, or attempt to use the encoding
+ * which is Reserved on implementations without PXN.
+ */
code = 5;
- domain = 0;
goto do_fault;
- } else if (type == 2 && (desc & (1 << 18))) {
- /* Supersection. */
- domain = 0;
- } else {
- /* Section or page. */
+ }
+ if ((type == 1) || !(desc & (1 << 18))) {
+ /* Page or Section. */
domain = (desc >> 5) & 0x0f;
}
domain_prot = (env->cp15.c3 >> (domain * 2)) & 3;
if (domain_prot == 0 || domain_prot == 2) {
- if (type == 2)
+ if (type != 1) {
code = 9; /* Section domain fault. */
- else
+ } else {
code = 11; /* Page domain fault. */
+ }
goto do_fault;
}
- if (type == 2) {
+ if (type != 1) {
if (desc & (1 << 18)) {
/* Supersection. */
phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
@@ -1978,8 +2125,12 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
}
ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
xn = desc & (1 << 4);
+ pxn = desc & 1;
code = 13;
} else {
+ if (arm_feature(env, ARM_FEATURE_PXN)) {
+ pxn = (desc >> 2) & 1;
+ }
/* Lookup l2 entry. */
table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
desc = ldl_phys(table);
@@ -2007,6 +2158,9 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
if (domain_prot == 3) {
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
} else {
+ if (pxn && !is_user) {
+ xn = 1;
+ }
if (xn && access_type == 2)
goto do_fault;
@@ -2031,8 +2185,187 @@ do_fault:
return code | (domain << 4);
}
-static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+/* Fault type for long-descriptor MMU fault reporting; this corresponds
+ * to bits [5..2] in the STATUS field in long-format DFSR/IFSR.
+ */
+typedef enum {
+ translation_fault = 1,
+ access_fault = 2,
+ permission_fault = 3,
+} MMUFaultType;
+
+static int get_phys_addr_lpae(CPUARMState *env, uint32_t address,
+ int access_type, int is_user,
+ target_phys_addr_t *phys_ptr, int *prot,
+ target_ulong *page_size_ptr)
+{
+ /* Read an LPAE long-descriptor translation table. */
+ MMUFaultType fault_type = translation_fault;
+ uint32_t level = 1;
+ uint32_t epd;
+ uint32_t tsz;
+ uint64_t ttbr;
+ int ttbr_select;
+ int n;
+ target_phys_addr_t descaddr;
+ uint32_t tableattrs;
+ target_ulong page_size;
+ uint32_t attrs;
+
+ /* Determine whether this address is in the region controlled by
+ * TTBR0 or TTBR1 (or if it is in neither region and should fault).
+ * This is a Non-secure PL0/1 stage 1 translation, so controlled by
+ * TTBCR/TTBR0/TTBR1 in accordance with ARM ARM DDI0406C table B-32:
+ */
+ uint32_t t0sz = extract32(env->cp15.c2_control, 0, 3);
+ uint32_t t1sz = extract32(env->cp15.c2_control, 16, 3);
+ if (t0sz && !extract32(address, 32 - t0sz, t0sz)) {
+ /* there is a ttbr0 region and we are in it (high bits all zero) */
+ ttbr_select = 0;
+ } else if (t1sz && !extract32(~address, 32 - t1sz, t1sz)) {
+ /* there is a ttbr1 region and we are in it (high bits all one) */
+ ttbr_select = 1;
+ } else if (!t0sz) {
+ /* ttbr0 region is "everything not in the ttbr1 region" */
+ ttbr_select = 0;
+ } else if (!t1sz) {
+ /* ttbr1 region is "everything not in the ttbr0 region" */
+ ttbr_select = 1;
+ } else {
+ /* in the gap between the two regions, this is a Translation fault */
+ fault_type = translation_fault;
+ goto do_fault;
+ }
+
+ /* Note that QEMU ignores shareability and cacheability attributes,
+ * so we don't need to do anything with the SH, ORGN, IRGN fields
+ * in the TTBCR. Similarly, TTBCR:A1 selects whether we get the
+ * ASID from TTBR0 or TTBR1, but QEMU's TLB doesn't currently
+ * implement any ASID-like capability so we can ignore it (instead
+ * we will always flush the TLB any time the ASID is changed).
+ */
+ if (ttbr_select == 0) {
+ ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
+ epd = extract32(env->cp15.c2_control, 7, 1);
+ tsz = t0sz;
+ } else {
+ ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
+ epd = extract32(env->cp15.c2_control, 23, 1);
+ tsz = t1sz;
+ }
+
+ if (epd) {
+ /* Translation table walk disabled => Translation fault on TLB miss */
+ goto do_fault;
+ }
+
+ /* If the region is small enough we will skip straight to a 2nd level
+ * lookup. This affects the number of bits of the address used in
+ * combination with the TTBR to find the first descriptor. ('n' here
+ * matches the usage in the ARM ARM sB3.6.6, where bits [39..n] are
+ * from the TTBR, [n-1..3] from the vaddr, and [2..0] always zero).
+ */
+ if (tsz > 1) {
+ level = 2;
+ n = 14 - tsz;
+ } else {
+ n = 5 - tsz;
+ }
+
+ /* Clear the vaddr bits which aren't part of the within-region address,
+ * so that we don't have to special case things when calculating the
+ * first descriptor address.
+ */
+ address &= (0xffffffffU >> tsz);
+
+ /* Now we can extract the actual base address from the TTBR */
+ descaddr = extract64(ttbr, 0, 40);
+ descaddr &= ~((1ULL << n) - 1);
+
+ tableattrs = 0;
+ for (;;) {
+ uint64_t descriptor;
+
+ descaddr |= ((address >> (9 * (4 - level))) & 0xff8);
+ descriptor = ldq_phys(descaddr);
+ if (!(descriptor & 1) ||
+ (!(descriptor & 2) && (level == 3))) {
+ /* Invalid, or the Reserved level 3 encoding */
+ goto do_fault;
+ }
+ descaddr = descriptor & 0xfffffff000ULL;
+
+ if ((descriptor & 2) && (level < 3)) {
+ /* Table entry. The top five bits are attributes which may
+ * propagate down through lower levels of the table (and
+ * which are all arranged so that 0 means "no effect", so
+ * we can gather them up by ORing in the bits at each level).
+ */
+ tableattrs |= extract64(descriptor, 59, 5);
+ level++;
+ continue;
+ }
+ /* Block entry at level 1 or 2, or page entry at level 3.
+ * These are basically the same thing, although the number
+ * of bits we pull in from the vaddr varies.
+ */
+ page_size = (1 << (39 - (9 * level)));
+ descaddr |= (address & (page_size - 1));
+ /* Extract attributes from the descriptor and merge with table attrs */
+ attrs = extract64(descriptor, 2, 10)
+ | (extract64(descriptor, 52, 12) << 10);
+ attrs |= extract32(tableattrs, 0, 2) << 11; /* XN, PXN */
+ attrs |= extract32(tableattrs, 3, 1) << 5; /* APTable[1] => AP[2] */
+ /* The sense of AP[1] vs APTable[0] is reversed, as APTable[0] == 1
+ * means "force PL1 access only", which means forcing AP[1] to 0.
+ */
+ if (extract32(tableattrs, 2, 1)) {
+ attrs &= ~(1 << 4);
+ }
+ /* Since we're always in the Non-secure state, NSTable is ignored. */
+ break;
+ }
+ /* Here descaddr is the final physical address, and attributes
+ * are all in attrs.
+ */
+ fault_type = access_fault;
+ if ((attrs & (1 << 8)) == 0) {
+ /* Access flag */
+ goto do_fault;
+ }
+ fault_type = permission_fault;
+ if (is_user && !(attrs & (1 << 4))) {
+ /* Unprivileged access not enabled */
+ goto do_fault;
+ }
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ if (attrs & (1 << 12) || (!is_user && (attrs & (1 << 11)))) {
+ /* XN or PXN */
+ if (access_type == 2) {
+ goto do_fault;
+ }
+ *prot &= ~PAGE_EXEC;
+ }
+ if (attrs & (1 << 5)) {
+ /* Write access forbidden */
+ if (access_type == 1) {
+ goto do_fault;
+ }
+ *prot &= ~PAGE_WRITE;
+ }
+
+ *phys_ptr = descaddr;
+ *page_size_ptr = page_size;
+ return 0;
+
+do_fault:
+ /* Long-descriptor format IFSR/DFSR value */
+ return (1 << 9) | (fault_type << 2) | level;
+}
+
+static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
+ int access_type, int is_user,
+ target_phys_addr_t *phys_ptr, int *prot)
{
int n;
uint32_t mask;
@@ -2091,9 +2424,32 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address, int access_type
return 0;
}
+/* get_phys_addr - get the physical address for this virtual address
+ *
+ * Find the physical address corresponding to the given virtual address,
+ * by doing a translation table walk on MMU based systems or using the
+ * MPU state on MPU based systems.
+ *
+ * Returns 0 if the translation was successful. Otherwise, phys_ptr,
+ * prot and page_size are not filled in, and the return value provides
+ * information on why the translation aborted, in the format of a
+ * DFSR/IFSR fault register, with the following caveats:
+ * * we honour the short vs long DFSR format differences.
+ * * the WnR bit is never set (the caller must do this).
+ * * for MPU based systems we don't bother to return a full FSR format
+ * value.
+ *
+ * @env: CPUARMState
+ * @address: virtual address to get physical address for
+ * @access_type: 0 for read, 1 for write, 2 for execute
+ * @is_user: 0 for privileged access, 1 for user
+ * @phys_ptr: set to the physical address corresponding to the virtual address
+ * @prot: set to the permissions for the page containing phys_ptr
+ * @page_size: set to the size of the page containing phys_ptr
+ */
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot,
+ target_phys_addr_t *phys_ptr, int *prot,
target_ulong *page_size)
{
/* Fast Context Switch Extension. */
@@ -2110,6 +2466,9 @@ static inline int get_phys_addr(CPUARMState *env, uint32_t address,
*page_size = TARGET_PAGE_SIZE;
return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
prot);
+ } else if (extended_addresses_enabled(env)) {
+ return get_phys_addr_lpae(env, address, access_type, is_user, phys_ptr,
+ prot, page_size);
} else if (env->cp15.c1_sys & (1 << 23)) {
return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
prot, page_size);
@@ -2122,7 +2481,7 @@ static inline int get_phys_addr(CPUARMState *env, uint32_t address,
int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
int access_type, int mmu_idx)
{
- uint32_t phys_addr;
+ target_phys_addr_t phys_addr;
target_ulong page_size;
int prot;
int ret, is_user;
@@ -2132,7 +2491,7 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
&page_size);
if (ret == 0) {
/* Map a single [sub]page. */
- phys_addr &= ~(uint32_t)0x3ff;
+ phys_addr &= ~(target_phys_addr_t)0x3ff;
address &= ~(uint32_t)0x3ff;
tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
return 0;
@@ -2154,7 +2513,7 @@ int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
target_phys_addr_t cpu_get_phys_page_debug(CPUARMState *env, target_ulong addr)
{
- uint32_t phys_addr;
+ target_phys_addr_t phys_addr;
target_ulong page_size;
int prot;
int ret;
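
get_phys_addr_lpae() walks up to three levels of 8-byte descriptors, consuming 9 virtual-address bits per level; the 0xff8 mask keeps a 9-bit descriptor index already scaled to a byte offset, and a block or page mapped at level 1, 2 or 3 covers 1 GB, 2 MB or 4 KB. A standalone sketch of just that index arithmetic, for the common t0sz == 0 case where the walk starts at level 1 (the virtual address is made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t address = 0x87654321;

        for (int level = 1; level <= 3; level++) {
            /* descriptors are 8 bytes, so the 9-bit index is kept as a
             * byte offset and masked with 0xff8, exactly as in the patch;
             * the level-1 offset is ORed into the (aligned) TTBR base */
            uint32_t offset = (address >> (9 * (4 - level))) & 0xff8;
            unsigned long long blocksize = 1ULL << (39 - 9 * level);
            printf("level %d: table offset 0x%03x, block/page size 0x%llx\n",
                   level, offset, blocksize);
        }
        return 0;
    }
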
diff --git a/target-arm/machine.c b/target-arm/machine.c
index a2a75fbd19..68dca7ffb2 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -27,7 +27,9 @@ void cpu_save(QEMUFile *f, void *opaque)
qemu_put_be32(f, env->cp15.c1_xscaleauxcr);
qemu_put_be32(f, env->cp15.c1_scr);
qemu_put_be32(f, env->cp15.c2_base0);
+ qemu_put_be32(f, env->cp15.c2_base0_hi);
qemu_put_be32(f, env->cp15.c2_base1);
+ qemu_put_be32(f, env->cp15.c2_base1_hi);
qemu_put_be32(f, env->cp15.c2_control);
qemu_put_be32(f, env->cp15.c2_mask);
qemu_put_be32(f, env->cp15.c2_base_mask);
@@ -42,6 +44,7 @@ void cpu_save(QEMUFile *f, void *opaque)
qemu_put_be32(f, env->cp15.c6_insn);
qemu_put_be32(f, env->cp15.c6_data);
qemu_put_be32(f, env->cp15.c7_par);
+ qemu_put_be32(f, env->cp15.c7_par_hi);
qemu_put_be32(f, env->cp15.c9_insn);
qemu_put_be32(f, env->cp15.c9_data);
qemu_put_be32(f, env->cp15.c9_pmcr);
@@ -60,7 +63,7 @@ void cpu_save(QEMUFile *f, void *opaque)
qemu_put_be32(f, env->cp15.c15_diagnostic);
qemu_put_be32(f, env->cp15.c15_power_diagnostic);
- qemu_put_be32(f, env->features);
+ qemu_put_be64(f, env->features);
if (arm_feature(env, ARM_FEATURE_VFP)) {
for (i = 0; i < 16; i++) {
@@ -144,7 +147,9 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
env->cp15.c1_xscaleauxcr = qemu_get_be32(f);
env->cp15.c1_scr = qemu_get_be32(f);
env->cp15.c2_base0 = qemu_get_be32(f);
+ env->cp15.c2_base0_hi = qemu_get_be32(f);
env->cp15.c2_base1 = qemu_get_be32(f);
+ env->cp15.c2_base1_hi = qemu_get_be32(f);
env->cp15.c2_control = qemu_get_be32(f);
env->cp15.c2_mask = qemu_get_be32(f);
env->cp15.c2_base_mask = qemu_get_be32(f);
@@ -159,6 +164,7 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
env->cp15.c6_insn = qemu_get_be32(f);
env->cp15.c6_data = qemu_get_be32(f);
env->cp15.c7_par = qemu_get_be32(f);
+ env->cp15.c7_par_hi = qemu_get_be32(f);
env->cp15.c9_insn = qemu_get_be32(f);
env->cp15.c9_data = qemu_get_be32(f);
env->cp15.c9_pmcr = qemu_get_be32(f);
@@ -177,7 +183,7 @@ int cpu_load(QEMUFile *f, void *opaque, int version_id)
env->cp15.c15_diagnostic = qemu_get_be32(f);
env->cp15.c15_power_diagnostic = qemu_get_be32(f);
- env->features = qemu_get_be32(f);
+ env->features = qemu_get_be64(f);
if (arm_feature(env, ARM_FEATURE_VFP)) {
for (i = 0; i < 16; i++) {
diff --git a/target-arm/translate.c b/target-arm/translate.c
index a2a0ecddad..29008a4b34 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -6236,7 +6236,7 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
}
gen_set_pc_im(s->pc);
s->is_jmp = DISAS_WFI;
- break;
+ return 0;
default:
break;
}
@@ -6263,7 +6263,9 @@ static int disas_coproc_insn(CPUARMState * env, DisasContext *s, uint32_t insn)
tcg_gen_trunc_i64_i32(tmp, tmp64);
store_reg(s, rt, tmp);
tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
store_reg(s, rt2, tmp);
} else {
TCGv tmp;
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index f91375578c..683fd59af9 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -1,7 +1,16 @@
-obj-y += translate.o op_helper.o helper.o cpu.o
+obj-y += translate.o helper.o cpu.o
+obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
+obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o
obj-$(CONFIG_KVM) += kvm.o hyperv.o
obj-$(CONFIG_LINUX_USER) += ioport-user.o
obj-$(CONFIG_BSD_USER) += ioport-user.o
-$(obj)/op_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/fpu_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/cc_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/int_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/svm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/smm_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/misc_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/mem_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
+$(obj)/seg_helper.o: QEMU_CFLAGS += $(HELPER_CFLAGS)
diff --git a/target-i386/cc_helper.c b/target-i386/cc_helper.c
new file mode 100644
index 0000000000..ff654bc5ed
--- /dev/null
+++ b/target-i386/cc_helper.c
@@ -0,0 +1,387 @@
+/*
+ * x86 condition code helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+const uint8_t parity_table[256] = {
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
+ 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
+};
+
+#define SHIFT 0
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+
+#define SHIFT 3
+#include "cc_helper_template.h"
+#undef SHIFT
+
+#endif
+
+static int compute_all_eflags(void)
+{
+ return CC_SRC;
+}
+
+static int compute_c_eflags(void)
+{
+ return CC_SRC & CC_C;
+}
+
+uint32_t helper_cc_compute_all(int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ return 0;
+
+ case CC_OP_EFLAGS:
+ return compute_all_eflags();
+
+ case CC_OP_MULB:
+ return compute_all_mulb();
+ case CC_OP_MULW:
+ return compute_all_mulw();
+ case CC_OP_MULL:
+ return compute_all_mull();
+
+ case CC_OP_ADDB:
+ return compute_all_addb();
+ case CC_OP_ADDW:
+ return compute_all_addw();
+ case CC_OP_ADDL:
+ return compute_all_addl();
+
+ case CC_OP_ADCB:
+ return compute_all_adcb();
+ case CC_OP_ADCW:
+ return compute_all_adcw();
+ case CC_OP_ADCL:
+ return compute_all_adcl();
+
+ case CC_OP_SUBB:
+ return compute_all_subb();
+ case CC_OP_SUBW:
+ return compute_all_subw();
+ case CC_OP_SUBL:
+ return compute_all_subl();
+
+ case CC_OP_SBBB:
+ return compute_all_sbbb();
+ case CC_OP_SBBW:
+ return compute_all_sbbw();
+ case CC_OP_SBBL:
+ return compute_all_sbbl();
+
+ case CC_OP_LOGICB:
+ return compute_all_logicb();
+ case CC_OP_LOGICW:
+ return compute_all_logicw();
+ case CC_OP_LOGICL:
+ return compute_all_logicl();
+
+ case CC_OP_INCB:
+ return compute_all_incb();
+ case CC_OP_INCW:
+ return compute_all_incw();
+ case CC_OP_INCL:
+ return compute_all_incl();
+
+ case CC_OP_DECB:
+ return compute_all_decb();
+ case CC_OP_DECW:
+ return compute_all_decw();
+ case CC_OP_DECL:
+ return compute_all_decl();
+
+ case CC_OP_SHLB:
+ return compute_all_shlb();
+ case CC_OP_SHLW:
+ return compute_all_shlw();
+ case CC_OP_SHLL:
+ return compute_all_shll();
+
+ case CC_OP_SARB:
+ return compute_all_sarb();
+ case CC_OP_SARW:
+ return compute_all_sarw();
+ case CC_OP_SARL:
+ return compute_all_sarl();
+
+#ifdef TARGET_X86_64
+ case CC_OP_MULQ:
+ return compute_all_mulq();
+
+ case CC_OP_ADDQ:
+ return compute_all_addq();
+
+ case CC_OP_ADCQ:
+ return compute_all_adcq();
+
+ case CC_OP_SUBQ:
+ return compute_all_subq();
+
+ case CC_OP_SBBQ:
+ return compute_all_sbbq();
+
+ case CC_OP_LOGICQ:
+ return compute_all_logicq();
+
+ case CC_OP_INCQ:
+ return compute_all_incq();
+
+ case CC_OP_DECQ:
+ return compute_all_decq();
+
+ case CC_OP_SHLQ:
+ return compute_all_shlq();
+
+ case CC_OP_SARQ:
+ return compute_all_sarq();
+#endif
+ }
+}
+
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
+{
+ CPUX86State *saved_env;
+ uint32_t ret;
+
+ saved_env = env;
+ env = env1;
+ ret = helper_cc_compute_all(op);
+ env = saved_env;
+ return ret;
+}
+
+uint32_t helper_cc_compute_c(int op)
+{
+ switch (op) {
+ default: /* should never happen */
+ return 0;
+
+ case CC_OP_EFLAGS:
+ return compute_c_eflags();
+
+ case CC_OP_MULB:
+ return compute_c_mull();
+ case CC_OP_MULW:
+ return compute_c_mull();
+ case CC_OP_MULL:
+ return compute_c_mull();
+
+ case CC_OP_ADDB:
+ return compute_c_addb();
+ case CC_OP_ADDW:
+ return compute_c_addw();
+ case CC_OP_ADDL:
+ return compute_c_addl();
+
+ case CC_OP_ADCB:
+ return compute_c_adcb();
+ case CC_OP_ADCW:
+ return compute_c_adcw();
+ case CC_OP_ADCL:
+ return compute_c_adcl();
+
+ case CC_OP_SUBB:
+ return compute_c_subb();
+ case CC_OP_SUBW:
+ return compute_c_subw();
+ case CC_OP_SUBL:
+ return compute_c_subl();
+
+ case CC_OP_SBBB:
+ return compute_c_sbbb();
+ case CC_OP_SBBW:
+ return compute_c_sbbw();
+ case CC_OP_SBBL:
+ return compute_c_sbbl();
+
+ case CC_OP_LOGICB:
+ return compute_c_logicb();
+ case CC_OP_LOGICW:
+ return compute_c_logicw();
+ case CC_OP_LOGICL:
+ return compute_c_logicl();
+
+ case CC_OP_INCB:
+ return compute_c_incl();
+ case CC_OP_INCW:
+ return compute_c_incl();
+ case CC_OP_INCL:
+ return compute_c_incl();
+
+ case CC_OP_DECB:
+ return compute_c_incl();
+ case CC_OP_DECW:
+ return compute_c_incl();
+ case CC_OP_DECL:
+ return compute_c_incl();
+
+ case CC_OP_SHLB:
+ return compute_c_shlb();
+ case CC_OP_SHLW:
+ return compute_c_shlw();
+ case CC_OP_SHLL:
+ return compute_c_shll();
+
+ case CC_OP_SARB:
+ return compute_c_sarl();
+ case CC_OP_SARW:
+ return compute_c_sarl();
+ case CC_OP_SARL:
+ return compute_c_sarl();
+
+#ifdef TARGET_X86_64
+ case CC_OP_MULQ:
+ return compute_c_mull();
+
+ case CC_OP_ADDQ:
+ return compute_c_addq();
+
+ case CC_OP_ADCQ:
+ return compute_c_adcq();
+
+ case CC_OP_SUBQ:
+ return compute_c_subq();
+
+ case CC_OP_SBBQ:
+ return compute_c_sbbq();
+
+ case CC_OP_LOGICQ:
+ return compute_c_logicq();
+
+ case CC_OP_INCQ:
+ return compute_c_incl();
+
+ case CC_OP_DECQ:
+ return compute_c_incl();
+
+ case CC_OP_SHLQ:
+ return compute_c_shlq();
+
+ case CC_OP_SARQ:
+ return compute_c_sarl();
+#endif
+ }
+}
+
+void helper_write_eflags(target_ulong t0, uint32_t update_mask)
+{
+ cpu_load_eflags(env, t0, update_mask);
+}
+
+target_ulong helper_read_eflags(void)
+{
+ uint32_t eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ eflags |= (DF & DF_MASK);
+ eflags |= env->eflags & ~(VM_MASK | RF_MASK);
+ return eflags;
+}
+
+void helper_clts(void)
+{
+ env->cr[0] &= ~CR0_TS_MASK;
+ env->hflags &= ~HF_TS_MASK;
+}
+
+void helper_reset_rf(void)
+{
+ env->eflags &= ~RF_MASK;
+}
+
+void helper_cli(void)
+{
+ env->eflags &= ~IF_MASK;
+}
+
+void helper_sti(void)
+{
+ env->eflags |= IF_MASK;
+}
+
+#if 0
+/* vm86plus instructions */
+void helper_cli_vm(void)
+{
+ env->eflags &= ~VIF_MASK;
+}
+
+void helper_sti_vm(void)
+{
+ env->eflags |= VIF_MASK;
+ if (env->eflags & VIP_MASK) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+}
+#endif
+
+void helper_set_inhibit_irq(void)
+{
+ env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void helper_reset_inhibit_irq(void)
+{
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
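
The helpers above reconstruct EFLAGS lazily: after an arithmetic op the translator records only CC_OP plus CC_SRC/CC_DST, and the individual flag bits are recomputed on demand. A standalone sketch of that recovery for an 8-bit add, mirroring compute_c_addb()/compute_all_addb() from the template (operand values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t src1 = 0xf0, src2 = 0x20;
        uint8_t cc_src = src1;                       /* what CC_SRC would hold */
        uint8_t cc_dst = (uint8_t)(src1 + src2);     /* 0x10: the wrapped result */

        int cf = cc_dst < cc_src;                    /* 1: the add carried out */
        int zf = (cc_dst == 0) << 6;                 /* ZF lives at bit 6 */
        printf("CF=%d ZF=0x%x\n", cf, zf);
        return 0;
    }
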
diff --git a/target-i386/helper_template.h b/target-i386/cc_helper_template.h
index afc41fb980..ff22830f6b 100644
--- a/target-i386/helper_template.h
+++ b/target-i386/cc_helper_template.h
@@ -1,5 +1,5 @@
/*
- * i386 helpers
+ * x86 condition code helpers
*
* Copyright (c) 2008 Fabrice Bellard
*
@@ -16,34 +16,25 @@
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+
#define DATA_BITS (1 << (3 + SHIFT))
-#define SHIFT_MASK (DATA_BITS - 1)
#define SIGN_MASK (((target_ulong)1) << (DATA_BITS - 1))
-#if DATA_BITS <= 32
-#define SHIFT1_MASK 0x1f
-#else
-#define SHIFT1_MASK 0x3f
-#endif
#if DATA_BITS == 8
#define SUFFIX b
#define DATA_TYPE uint8_t
-#define DATA_STYPE int8_t
#define DATA_MASK 0xff
#elif DATA_BITS == 16
#define SUFFIX w
#define DATA_TYPE uint16_t
-#define DATA_STYPE int16_t
#define DATA_MASK 0xffff
#elif DATA_BITS == 32
#define SUFFIX l
#define DATA_TYPE uint32_t
-#define DATA_STYPE int32_t
#define DATA_MASK 0xffffffff
#elif DATA_BITS == 64
#define SUFFIX q
#define DATA_TYPE uint64_t
-#define DATA_STYPE int64_t
#define DATA_MASK 0xffffffffffffffffULL
#else
#error unhandled operand size
@@ -55,6 +46,7 @@ static int glue(compute_all_add, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_SRC;
src2 = CC_DST - CC_SRC;
cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
@@ -70,6 +62,7 @@ static int glue(compute_c_add, SUFFIX)(void)
{
int cf;
target_long src1;
+
src1 = CC_SRC;
cf = (DATA_TYPE)CC_DST < (DATA_TYPE)src1;
return cf;
@@ -79,6 +72,7 @@ static int glue(compute_all_adc, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_SRC;
src2 = CC_DST - CC_SRC - 1;
cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
@@ -94,6 +88,7 @@ static int glue(compute_c_adc, SUFFIX)(void)
{
int cf;
target_long src1;
+
src1 = CC_SRC;
cf = (DATA_TYPE)CC_DST <= (DATA_TYPE)src1;
return cf;
@@ -103,6 +98,7 @@ static int glue(compute_all_sub, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
@@ -118,6 +114,7 @@ static int glue(compute_c_sub, SUFFIX)(void)
{
int cf;
target_long src1, src2;
+
src1 = CC_DST + CC_SRC;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 < (DATA_TYPE)src2;
@@ -128,6 +125,7 @@ static int glue(compute_all_sbb, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_DST + CC_SRC + 1;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
@@ -143,6 +141,7 @@ static int glue(compute_c_sbb, SUFFIX)(void)
{
int cf;
target_long src1, src2;
+
src1 = CC_DST + CC_SRC + 1;
src2 = CC_SRC;
cf = (DATA_TYPE)src1 <= (DATA_TYPE)src2;
@@ -152,6 +151,7 @@ static int glue(compute_c_sbb, SUFFIX)(void)
static int glue(compute_all_logic, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
+
cf = 0;
pf = parity_table[(uint8_t)CC_DST];
af = 0;
@@ -170,6 +170,7 @@ static int glue(compute_all_inc, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_DST - 1;
src2 = 1;
cf = CC_SRC;
@@ -192,6 +193,7 @@ static int glue(compute_all_dec, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
target_long src1, src2;
+
src1 = CC_DST + 1;
src2 = 1;
cf = CC_SRC;
@@ -206,6 +208,7 @@ static int glue(compute_all_dec, SUFFIX)(void)
static int glue(compute_all_shl, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
+
cf = (CC_SRC >> (DATA_BITS - 1)) & CC_C;
pf = parity_table[(uint8_t)CC_DST];
af = 0; /* undefined */
@@ -231,6 +234,7 @@ static int glue(compute_c_sar, SUFFIX)(void)
static int glue(compute_all_sar, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
+
cf = CC_SRC & 1;
pf = parity_table[(uint8_t)CC_DST];
af = 0; /* undefined */
@@ -245,6 +249,7 @@ static int glue(compute_all_sar, SUFFIX)(void)
static int glue(compute_c_mul, SUFFIX)(void)
{
int cf;
+
cf = (CC_SRC != 0);
return cf;
}
@@ -255,6 +260,7 @@ static int glue(compute_c_mul, SUFFIX)(void)
static int glue(compute_all_mul, SUFFIX)(void)
{
int cf, pf, af, zf, sf, of;
+
cf = (CC_SRC != 0);
pf = parity_table[(uint8_t)CC_DST];
af = 0; /* undefined */
@@ -264,71 +270,8 @@ static int glue(compute_all_mul, SUFFIX)(void)
return cf | pf | af | zf | sf | of;
}
-/* shifts */
-
-target_ulong glue(helper_rcl, SUFFIX)(target_ulong t0, target_ulong t1)
-{
- int count, eflags;
- target_ulong src;
- target_long res;
-
- count = t1 & SHIFT1_MASK;
-#if DATA_BITS == 16
- count = rclw_table[count];
-#elif DATA_BITS == 8
- count = rclb_table[count];
-#endif
- if (count) {
- eflags = helper_cc_compute_all(CC_OP);
- t0 &= DATA_MASK;
- src = t0;
- res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
- if (count > 1)
- res |= t0 >> (DATA_BITS + 1 - count);
- t0 = res;
- env->cc_tmp = (eflags & ~(CC_C | CC_O)) |
- (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
- ((src >> (DATA_BITS - count)) & CC_C);
- } else {
- env->cc_tmp = -1;
- }
- return t0;
-}
-
-target_ulong glue(helper_rcr, SUFFIX)(target_ulong t0, target_ulong t1)
-{
- int count, eflags;
- target_ulong src;
- target_long res;
-
- count = t1 & SHIFT1_MASK;
-#if DATA_BITS == 16
- count = rclw_table[count];
-#elif DATA_BITS == 8
- count = rclb_table[count];
-#endif
- if (count) {
- eflags = helper_cc_compute_all(CC_OP);
- t0 &= DATA_MASK;
- src = t0;
- res = (t0 >> count) | ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
- if (count > 1)
- res |= t0 << (DATA_BITS + 1 - count);
- t0 = res;
- env->cc_tmp = (eflags & ~(CC_C | CC_O)) |
- (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
- ((src >> (count - 1)) & CC_C);
- } else {
- env->cc_tmp = -1;
- }
- return t0;
-}
-
#undef DATA_BITS
-#undef SHIFT_MASK
-#undef SHIFT1_MASK
#undef SIGN_MASK
#undef DATA_TYPE
-#undef DATA_STYPE
#undef DATA_MASK
#undef SUFFIX
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 445274c97d..880cfea3f8 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -31,6 +31,8 @@
#include "hyperv.h"
+#include "hw/hw.h"
+
/* feature flags taken from "Intel Processor Identification and the CPUID
* Instruction" and AMD's "CPUID Specification". In cases of disagreement
* between feature naming conventions, aliases may be added.
@@ -50,7 +52,7 @@ static const char *ext_feature_name[] = {
"ds_cpl", "vmx", "smx", "est",
"tm2", "ssse3", "cid", NULL,
"fma", "cx16", "xtpr", "pdcm",
- NULL, NULL, "dca", "sse4.1|sse4_1",
+ NULL, "pcid", "dca", "sse4.1|sse4_1",
"sse4.2|sse4_2", "x2apic", "movbe", "popcnt",
"tsc-deadline", "aes", "xsave", "osxsave",
"avx", NULL, NULL, "hypervisor",
@@ -77,7 +79,7 @@ static const char *ext3_feature_name[] = {
};
static const char *kvm_feature_name[] = {
- "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, NULL, NULL,
+ "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
@@ -1303,7 +1305,7 @@ void x86_cpudef_setup(void)
builtin_x86_defs[i].flags = 1;
/* Look for specific "cpudef" models that */
- /* have the QEmu version in .model_id */
+ /* have the QEMU version in .model_id */
for (j = 0; j < ARRAY_SIZE(model_with_versions); j++) {
if (strcmp(model_with_versions[j], builtin_x86_defs[i].name) == 0) {
pstrcpy(builtin_x86_defs[i].model_id, sizeof(builtin_x86_defs[i].model_id), "QEMU Virtual CPU version ");
@@ -1686,8 +1688,31 @@ static void x86_cpu_reset(CPUState *s)
env->dr[7] = DR7_FIXED_1;
cpu_breakpoint_remove_all(env, BP_CPU);
cpu_watchpoint_remove_all(env, BP_CPU);
+
+#if !defined(CONFIG_USER_ONLY)
+ /* We hard-wire the BSP to the first CPU. */
+ if (env->cpu_index == 0) {
+ apic_designate_bsp(env->apic_state);
+ }
+
+ env->halted = !cpu_is_bsp(cpu);
+#endif
}
+#ifndef CONFIG_USER_ONLY
+bool cpu_is_bsp(X86CPU *cpu)
+{
+ return cpu_get_apic_base(cpu->env.apic_state) & MSR_IA32_APICBASE_BSP;
+}
+
+/* TODO: remove me, when reset over QOM tree is implemented */
+static void x86_cpu_machine_reset_cb(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ cpu_reset(CPU(cpu));
+}
+#endif
+
static void mce_init(X86CPU *cpu)
{
CPUX86State *cenv = &cpu->env;
@@ -1708,8 +1733,13 @@ void x86_cpu_realize(Object *obj, Error **errp)
{
X86CPU *cpu = X86_CPU(obj);
+#ifndef CONFIG_USER_ONLY
+ qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+#endif
+
mce_init(cpu);
qemu_init_vcpu(&cpu->env);
+ cpu_reset(CPU(cpu));
}
static void x86_cpu_initfn(Object *obj)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 80dcb49391..60f9e972bd 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -400,6 +400,7 @@
#define CPUID_EXT_X2APIC (1 << 21)
#define CPUID_EXT_MOVBE (1 << 22)
#define CPUID_EXT_POPCNT (1 << 23)
+#define CPUID_EXT_TSC_DEADLINE_TIMER (1 << 24)
#define CPUID_EXT_XSAVE (1 << 26)
#define CPUID_EXT_OSXSAVE (1 << 27)
#define CPUID_EXT_HYPERVISOR (1 << 31)
@@ -477,6 +478,7 @@
for syscall instruction */
/* i386-specific interrupt pending bits. */
+#define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3
#define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4
@@ -1011,6 +1013,16 @@ static inline int cpu_mmu_index (CPUX86State *env)
#define CC_DST (env->cc_dst)
#define CC_OP (env->cc_op)
+/* n must be a constant to be efficient */
+static inline target_long lshift(target_long x, int n)
+{
+ if (n >= 0) {
+ return x << n;
+ } else {
+ return x >> (-n);
+ }
+}
+
/* float macros */
#define FT0 (env->ft0)
#define ST0 (env->fpregs[env->fpstt].d)
@@ -1038,7 +1050,8 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
static inline bool cpu_has_work(CPUX86State *env)
{
- return ((env->interrupt_request & CPU_INTERRUPT_HARD) &&
+ return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_POLL)) &&
(env->eflags & IF_MASK)) ||
(env->interrupt_request & (CPU_INTERRUPT_NMI |
CPU_INTERRUPT_INIT |
@@ -1072,19 +1085,57 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
uint64_t status, uint64_t mcg_status, uint64_t addr,
uint64_t misc, int flags);
+/* excp_helper.c */
+void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index);
+void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code);
+void QEMU_NORETURN raise_interrupt(CPUX86State *nenv, int intno, int is_int,
+ int error_code, int next_eip_addend);
+
+/* cc_helper.c */
+extern const uint8_t parity_table[256];
+uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
+
+static inline uint32_t cpu_compute_eflags(CPUX86State *env)
+{
+ return env->eflags | cpu_cc_compute_all(env, CC_OP) | (DF & DF_MASK);
+}
+
+/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
+static inline void cpu_load_eflags(CPUX86State *env, int eflags,
+ int update_mask)
+{
+ CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
+ DF = 1 - (2 * ((eflags >> 10) & 1));
+ env->eflags = (env->eflags & ~update_mask) |
+ (eflags & update_mask) | 0x2;
+}
+
+/* load efer and update the corresponding hflags. XXX: do consistency
+ checks with cpuid bits? */
+static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
+{
+ env->efer = val;
+ env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
+ if (env->efer & MSR_EFER_LMA) {
+ env->hflags |= HF_LMA_MASK;
+ }
+ if (env->efer & MSR_EFER_SVME) {
+ env->hflags |= HF_SVME_MASK;
+ }
+}
+
+/* svm_helper.c */
+void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param);
+void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1);
+
/* op_helper.c */
void do_interrupt(CPUX86State *env);
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw);
-void QEMU_NORETURN raise_exception_env(int exception_index, CPUX86State *nenv);
-void QEMU_NORETURN raise_exception_err_env(CPUX86State *nenv, int exception_index,
- int error_code);
void do_smm_enter(CPUX86State *env1);
-void svm_check_intercept(CPUX86State *env1, uint32_t type);
-
-uint32_t cpu_cc_compute_all(CPUX86State *env1, int op);
-
void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
#endif /* CPU_I386_H */
diff --git a/target-i386/excp_helper.c b/target-i386/excp_helper.c
new file mode 100644
index 0000000000..aaa5ca2090
--- /dev/null
+++ b/target-i386/excp_helper.c
@@ -0,0 +1,129 @@
+/*
+ * x86 exception helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "qemu-log.h"
+#include "sysemu.h"
+#include "helper.h"
+
+#if 0
+#define raise_exception_err(env, a, b) \
+ do { \
+ qemu_log("raise_exception line=%d\n", __LINE__); \
+ (raise_exception_err)(env, a, b); \
+ } while (0)
+#endif
+
+void helper_raise_interrupt(CPUX86State *env, int intno, int next_eip_addend)
+{
+ raise_interrupt(env, intno, 1, 0, next_eip_addend);
+}
+
+void helper_raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_exception(env, exception_index);
+}
+
+/*
+ * Check nested exceptions and change to double or triple fault if
+ * needed. It should only be called if this is not an interrupt.
+ * Returns the new exception number.
+ */
+static int check_exception(CPUX86State *env, int intno, int *error_code)
+{
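+ /* vectors 0 (#DE) and 10-13 (#TS, #NP, #SS, #GP) are the "contributory"
+ exception classes tested below */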
+ int first_contributory = env->old_exception == 0 ||
+ (env->old_exception >= 10 &&
+ env->old_exception <= 13);
+ int second_contributory = intno == 0 ||
+ (intno >= 10 && intno <= 13);
+
+ qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
+ env->old_exception, intno);
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->old_exception == EXCP08_DBLE) {
+ if (env->hflags & HF_SVMI_MASK) {
+ cpu_vmexit(env, SVM_EXIT_SHUTDOWN, 0); /* does not return */
+ }
+
+ qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
+
+ qemu_system_reset_request();
+ return EXCP_HLT;
+ }
+#endif
+
+ if ((first_contributory && second_contributory)
+ || (env->old_exception == EXCP0E_PAGE &&
+ (second_contributory || (intno == EXCP0E_PAGE)))) {
+ intno = EXCP08_DBLE;
+ *error_code = 0;
+ }
+
+ if (second_contributory || (intno == EXCP0E_PAGE) ||
+ (intno == EXCP08_DBLE)) {
+ env->old_exception = intno;
+ }
+
+ return intno;
+}
+
+/*
+ * Signal an interrupt. It is executed in the main CPU loop.
+ * is_int is TRUE if coming from the int instruction. next_eip_addend is
+ * added to EIP to give the address AFTER the interrupt instruction. It is
+ * only relevant if is_int is TRUE.
+ */
+static void QEMU_NORETURN raise_interrupt2(CPUX86State *env, int intno,
+ int is_int, int error_code,
+ int next_eip_addend)
+{
+ if (!is_int) {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_EXCP_BASE + intno,
+ error_code);
+ intno = check_exception(env, intno, &error_code);
+ } else {
+ cpu_svm_check_intercept_param(env, SVM_EXIT_SWINT, 0);
+ }
+
+ env->exception_index = intno;
+ env->error_code = error_code;
+ env->exception_is_int = is_int;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit(env);
+}
+
+/* shortcuts to generate exceptions */
+
+void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
+ int error_code, int next_eip_addend)
+{
+ raise_interrupt2(env, intno, is_int, error_code, next_eip_addend);
+}
+
+void raise_exception_err(CPUX86State *env, int exception_index,
+ int error_code)
+{
+ raise_interrupt2(env, exception_index, 0, error_code, 0);
+}
+
+void raise_exception(CPUX86State *env, int exception_index)
+{
+ raise_interrupt2(env, exception_index, 0, 0, 0);
+}
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
new file mode 100644
index 0000000000..6065c2e72d
--- /dev/null
+++ b/target-i386/fpu_helper.c
@@ -0,0 +1,1304 @@
+/*
+ * x86 FPU, MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4/PNI helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <math.h>
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+#define FPU_RC_MASK 0xc00
+#define FPU_RC_NEAR 0x000
+#define FPU_RC_DOWN 0x400
+#define FPU_RC_UP 0x800
+#define FPU_RC_CHOP 0xc00
+
+#define MAXTAN 9223372036854775808.0
+
+/* the following deal with x86 long double-precision numbers */
+#define MAXEXPD 0x7fff
+#define EXPBIAS 16383
+#define EXPD(fp) (fp.l.upper & 0x7fff)
+#define SIGND(fp) ((fp.l.upper) & 0x8000)
+#define MANTD(fp) (fp.l.lower)
+#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
+
+#define FPUS_IE (1 << 0)
+#define FPUS_DE (1 << 1)
+#define FPUS_ZE (1 << 2)
+#define FPUS_OE (1 << 3)
+#define FPUS_UE (1 << 4)
+#define FPUS_PE (1 << 5)
+#define FPUS_SF (1 << 6)
+#define FPUS_SE (1 << 7)
+#define FPUS_B (1 << 15)
+
+#define FPUC_EM 0x3f
+
+#define floatx80_lg2 make_floatx80(0x3ffd, 0x9a209a84fbcff799LL)
+#define floatx80_l2e make_floatx80(0x3fff, 0xb8aa3b295c17f0bcLL)
+#define floatx80_l2t make_floatx80(0x4000, 0xd49a784bcd1b8afeLL)
+
+static inline void fpush(void)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fptags[env->fpstt] = 0; /* validate stack entry */
+}
+
+static inline void fpop(void)
+{
+ env->fptags[env->fpstt] = 1; /* invalidate stack entry */
+ env->fpstt = (env->fpstt + 1) & 7;
+}
+
+static inline floatx80 helper_fldt(target_ulong ptr)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.lower = ldq(ptr);
+ temp.l.upper = lduw(ptr + 8);
+ return temp.d;
+}
+
+static inline void helper_fstt(floatx80 f, target_ulong ptr)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ stq(ptr, temp.l.lower);
+ stw(ptr + 8, temp.l.upper);
+}
+
+/* x87 FPU helpers */
+
+static inline double floatx80_to_double(floatx80 a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.f64 = floatx80_to_float64(a, &env->fp_status);
+ return u.d;
+}
+
+static inline floatx80 double_to_floatx80(double a)
+{
+ union {
+ float64 f64;
+ double d;
+ } u;
+
+ u.d = a;
+ return float64_to_floatx80(u.f64, &env->fp_status);
+}
+
+static void fpu_set_exception(int mask)
+{
+ env->fpus |= mask;
+ if (env->fpus & (~env->fpuc & FPUC_EM)) {
+ env->fpus |= FPUS_SE | FPUS_B;
+ }
+}
+
+static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
+{
+ if (floatx80_is_zero(b)) {
+ fpu_set_exception(FPUS_ZE);
+ }
+ return floatx80_div(a, b, &env->fp_status);
+}
+
+static void fpu_raise_exception(void)
+{
+ if (env->cr[0] & CR0_NE_MASK) {
+ raise_exception(env, EXCP10_COPR);
+ }
+#if !defined(CONFIG_USER_ONLY)
+ else {
+ cpu_set_ferr(env);
+ }
+#endif
+}
+
+void helper_flds_FT0(uint32_t val)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float32_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fldl_FT0(uint64_t val)
+{
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.i = val;
+ FT0 = float64_to_floatx80(u.f, &env->fp_status);
+}
+
+void helper_fildl_FT0(int32_t val)
+{
+ FT0 = int32_to_floatx80(val, &env->fp_status);
+}
+
+void helper_flds_ST0(uint32_t val)
+{
+ int new_fpstt;
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fldl_ST0(uint64_t val)
+{
+ int new_fpstt;
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ u.i = val;
+ env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildl_ST0(int32_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fildll_ST0(int64_t val)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+uint32_t helper_fsts_ST0(void)
+{
+ union {
+ float32 f;
+ uint32_t i;
+ } u;
+
+ u.f = floatx80_to_float32(ST0, &env->fp_status);
+ return u.i;
+}
+
+uint64_t helper_fstl_ST0(void)
+{
+ union {
+ float64 f;
+ uint64_t i;
+ } u;
+
+ u.f = floatx80_to_float64(ST0, &env->fp_status);
+ return u.i;
+}
+
+int32_t helper_fist_ST0(void)
+{
+ int32_t val;
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ val = -32768;
+ }
+ return val;
+}
+
+int32_t helper_fistl_ST0(void)
+{
+ int32_t val;
+
+ val = floatx80_to_int32(ST0, &env->fp_status);
+ return val;
+}
+
+int64_t helper_fistll_ST0(void)
+{
+ int64_t val;
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ return val;
+}
+
+int32_t helper_fistt_ST0(void)
+{
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ if (val != (int16_t)val) {
+ val = -32768;
+ }
+ return val;
+}
+
+int32_t helper_fisttl_ST0(void)
+{
+ int32_t val;
+
+ val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
+ return val;
+}
+
+int64_t helper_fisttll_ST0(void)
+{
+ int64_t val;
+
+ val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
+ return val;
+}
+
+void helper_fldt_ST0(target_ulong ptr)
+{
+ int new_fpstt;
+
+ new_fpstt = (env->fpstt - 1) & 7;
+ env->fpregs[new_fpstt].d = helper_fldt(ptr);
+ env->fpstt = new_fpstt;
+ env->fptags[new_fpstt] = 0; /* validate stack entry */
+}
+
+void helper_fstt_ST0(target_ulong ptr)
+{
+ helper_fstt(ST0, ptr);
+}
+
+void helper_fpush(void)
+{
+ fpush();
+}
+
+void helper_fpop(void)
+{
+ fpop();
+}
+
+void helper_fdecstp(void)
+{
+ env->fpstt = (env->fpstt - 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+void helper_fincstp(void)
+{
+ env->fpstt = (env->fpstt + 1) & 7;
+ env->fpus &= ~0x4700;
+}
+
+/* FPU move */
+
+void helper_ffree_STN(int st_index)
+{
+ env->fptags[(env->fpstt + st_index) & 7] = 1;
+}
+
+void helper_fmov_ST0_FT0(void)
+{
+ ST0 = FT0;
+}
+
+void helper_fmov_FT0_STN(int st_index)
+{
+ FT0 = ST(st_index);
+}
+
+void helper_fmov_ST0_STN(int st_index)
+{
+ ST0 = ST(st_index);
+}
+
+void helper_fmov_STN_ST0(int st_index)
+{
+ ST(st_index) = ST0;
+}
+
+void helper_fxchg_ST0_STN(int st_index)
+{
+ floatx80 tmp;
+
+ tmp = ST(st_index);
+ ST(st_index) = ST0;
+ ST0 = tmp;
+}
+
+/* FPU operations */
+
+static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
+
+void helper_fcom_ST0_FT0(void)
+{
+ int ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
+void helper_fucom_ST0_FT0(void)
+{
+ int ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
+}
+
+static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
+
+void helper_fcomi_ST0_FT0(void)
+{
+ int eflags;
+ int ret;
+
+ ret = floatx80_compare(ST0, FT0, &env->fp_status);
+ eflags = helper_cc_compute_all(CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+}
+
+void helper_fucomi_ST0_FT0(void)
+{
+ int eflags;
+ int ret;
+
+ ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
+ eflags = helper_cc_compute_all(CC_OP);
+ eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
+ CC_SRC = eflags;
+}
+
+void helper_fadd_ST0_FT0(void)
+{
+ ST0 = floatx80_add(ST0, FT0, &env->fp_status);
+}
+
+void helper_fmul_ST0_FT0(void)
+{
+ ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsub_ST0_FT0(void)
+{
+ ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
+}
+
+void helper_fsubr_ST0_FT0(void)
+{
+ ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
+}
+
+void helper_fdiv_ST0_FT0(void)
+{
+ ST0 = helper_fdiv(ST0, FT0);
+}
+
+void helper_fdivr_ST0_FT0(void)
+{
+ ST0 = helper_fdiv(FT0, ST0);
+}
+
+/* fp operations between STN and ST0 */
+
+void helper_fadd_STN_ST0(int st_index)
+{
+ ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fmul_STN_ST0(int st_index)
+{
+ ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsub_STN_ST0(int st_index)
+{
+ ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
+}
+
+void helper_fsubr_STN_ST0(int st_index)
+{
+ ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
+}
+
+void helper_fdiv_STN_ST0(int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(*p, ST0);
+}
+
+void helper_fdivr_STN_ST0(int st_index)
+{
+ floatx80 *p;
+
+ p = &ST(st_index);
+ *p = helper_fdiv(ST0, *p);
+}
+
+/* misc FPU operations */
+void helper_fchs_ST0(void)
+{
+ ST0 = floatx80_chs(ST0);
+}
+
+void helper_fabs_ST0(void)
+{
+ ST0 = floatx80_abs(ST0);
+}
+
+void helper_fld1_ST0(void)
+{
+ ST0 = floatx80_one;
+}
+
+void helper_fldl2t_ST0(void)
+{
+ ST0 = floatx80_l2t;
+}
+
+void helper_fldl2e_ST0(void)
+{
+ ST0 = floatx80_l2e;
+}
+
+void helper_fldpi_ST0(void)
+{
+ ST0 = floatx80_pi;
+}
+
+void helper_fldlg2_ST0(void)
+{
+ ST0 = floatx80_lg2;
+}
+
+void helper_fldln2_ST0(void)
+{
+ ST0 = floatx80_ln2;
+}
+
+void helper_fldz_ST0(void)
+{
+ ST0 = floatx80_zero;
+}
+
+void helper_fldz_FT0(void)
+{
+ FT0 = floatx80_zero;
+}
+
+uint32_t helper_fnstsw(void)
+{
+ return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+}
+
+uint32_t helper_fnstcw(void)
+{
+ return env->fpuc;
+}
+
+static void update_fp_status(void)
+{
+ int rnd_type;
+
+ /* set rounding mode */
+ switch (env->fpuc & FPU_RC_MASK) {
+ default:
+ case FPU_RC_NEAR:
+ rnd_type = float_round_nearest_even;
+ break;
+ case FPU_RC_DOWN:
+ rnd_type = float_round_down;
+ break;
+ case FPU_RC_UP:
+ rnd_type = float_round_up;
+ break;
+ case FPU_RC_CHOP:
+ rnd_type = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->fp_status);
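+ /* bits 8-9 of the FPU control word select the x87 rounding precision
+ (single/double/extended significand) */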
+ switch ((env->fpuc >> 8) & 3) {
+ case 0:
+ rnd_type = 32;
+ break;
+ case 2:
+ rnd_type = 64;
+ break;
+ case 3:
+ default:
+ rnd_type = 80;
+ break;
+ }
+ set_floatx80_rounding_precision(rnd_type, &env->fp_status);
+}
+
+void helper_fldcw(uint32_t val)
+{
+ env->fpuc = val;
+ update_fp_status();
+}
+
+void helper_fclex(void)
+{
+ env->fpus &= 0x7f00;
+}
+
+void helper_fwait(void)
+{
+ if (env->fpus & FPUS_SE) {
+ fpu_raise_exception();
+ }
+}
+
+void helper_fninit(void)
+{
+ env->fpus = 0;
+ env->fpstt = 0;
+ env->fpuc = 0x37f;
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+/* BCD ops */
+
+void helper_fbld_ST0(target_ulong ptr)
+{
+ floatx80 tmp;
+ uint64_t val;
+ unsigned int v;
+ int i;
+
+ val = 0;
+ for (i = 8; i >= 0; i--) {
+ v = ldub(ptr + i);
+ val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
+ }
+ tmp = int64_to_floatx80(val, &env->fp_status);
+ if (ldub(ptr + 9) & 0x80) {
+ tmp = floatx80_chs(tmp);
+ }
+ fpush();
+ ST0 = tmp;
+}
+
+void helper_fbst_ST0(target_ulong ptr)
+{
+ int v;
+ target_ulong mem_ref, mem_end;
+ int64_t val;
+
+ val = floatx80_to_int64(ST0, &env->fp_status);
+ mem_ref = ptr;
+ mem_end = mem_ref + 9;
+ if (val < 0) {
+ stb(mem_end, 0x80);
+ val = -val;
+ } else {
+ stb(mem_end, 0x00);
+ }
+ while (mem_ref < mem_end) {
+ if (val == 0) {
+ break;
+ }
+ v = val % 100;
+ val = val / 100;
+ v = ((v / 10) << 4) | (v % 10);
+ stb(mem_ref++, v);
+ }
+ while (mem_ref < mem_end) {
+ stb(mem_ref++, 0);
+ }
+}
+
+void helper_f2xm1(void)
+{
+ double val = floatx80_to_double(ST0);
+
+ val = pow(2.0, val) - 1.0;
+ ST0 = double_to_floatx80(val);
+}
+
+void helper_fyl2x(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if (fptemp > 0.0) {
+ fptemp = log(fptemp) / log(2.0); /* log2(ST) */
+ fptemp *= floatx80_to_double(ST1);
+ ST1 = double_to_floatx80(fptemp);
+ fpop();
+ } else {
+ env->fpus &= ~0x4700;
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fptan(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ fptemp = tan(fptemp);
+ ST0 = double_to_floatx80(fptemp);
+ fpush();
+ ST0 = floatx80_one;
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**52 only */
+ }
+}
+
+void helper_fpatan(void)
+{
+ double fptemp, fpsrcop;
+
+ fpsrcop = floatx80_to_double(ST1);
+ fptemp = floatx80_to_double(ST0);
+ ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
+ fpop();
+}
+
+void helper_fxtract(void)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = ST0;
+
+ if (floatx80_is_zero(ST0)) {
+ /* Easy way to generate -inf and raise a division-by-zero exception */
+ ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero,
+ &env->fp_status);
+ fpush();
+ ST0 = temp.d;
+ } else {
+ int expdif;
+
+ expdif = EXPD(temp) - EXPBIAS;
+ /* DP exponent bias */
+ ST0 = int32_to_floatx80(expdif, &env->fp_status);
+ fpush();
+ BIASEXPONENT(temp);
+ ST0 = temp.d;
+ }
+}
+
+void helper_fprem1(void)
+{
+ double st0, st1, dblq, fpsrcop, fptemp;
+ CPU_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ signed long long int q;
+
+ st0 = floatx80_to_double(ST0);
+ st1 = floatx80_to_double(ST1);
+
+ if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+ ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ return;
+ }
+
+ fpsrcop = st0;
+ fptemp = st1;
+ fpsrcop1.d = ST0;
+ fptemp1.d = ST1;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+ if (expdif < 0) {
+ /* optimisation? taken from the AMD docs */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* ST0 is unchanged */
+ return;
+ }
+
+ if (expdif < 53) {
+ dblq = fpsrcop / fptemp;
+ /* round dblq towards nearest integer */
+ dblq = rint(dblq);
+ st0 = fpsrcop - fptemp * dblq;
+
+ /* convert dblq to q by truncating towards zero */
+ if (dblq < 0.0) {
+ q = (signed long long int)(-dblq);
+ } else {
+ q = (signed long long int)dblq;
+ }
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C3,C1) <-- (q2,q1,q0) */
+ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
+ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
+ } else {
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, expdif - 50);
+ fpsrcop = (st0 / st1) / fptemp;
+ /* fpsrcop = integer obtained by chopping */
+ fpsrcop = (fpsrcop < 0.0) ?
+ -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+ st0 -= (st1 * fpsrcop * fptemp);
+ }
+ ST0 = double_to_floatx80(st0);
+}
+
+void helper_fprem(void)
+{
+ double st0, st1, dblq, fpsrcop, fptemp;
+ CPU_LDoubleU fpsrcop1, fptemp1;
+ int expdif;
+ signed long long int q;
+
+ st0 = floatx80_to_double(ST0);
+ st1 = floatx80_to_double(ST1);
+
+ if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
+ ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ return;
+ }
+
+ fpsrcop = st0;
+ fptemp = st1;
+ fpsrcop1.d = ST0;
+ fptemp1.d = ST1;
+ expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
+
+ if (expdif < 0) {
+ /* optimisation? taken from the AMD docs */
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* ST0 is unchanged */
+ return;
+ }
+
+ if (expdif < 53) {
+ dblq = fpsrcop / fptemp; /* ST0 / ST1 */
+ /* round dblq towards zero */
+ dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
+ st0 = fpsrcop - fptemp * dblq; /* fpsrcop is ST0 */
+
+ /* convert dblq to q by truncating towards zero */
+ if (dblq < 0.0) {
+ q = (signed long long int)(-dblq);
+ } else {
+ q = (signed long long int)dblq;
+ }
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ /* (C0,C3,C1) <-- (q2,q1,q0) */
+ env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
+ env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
+ env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
+ } else {
+ int N = 32 + (expdif % 32); /* as per AMD docs */
+
+ env->fpus |= 0x400; /* C2 <-- 1 */
+ fptemp = pow(2.0, (double)(expdif - N));
+ fpsrcop = (st0 / st1) / fptemp;
+ /* fpsrcop = integer obtained by chopping */
+ fpsrcop = (fpsrcop < 0.0) ?
+ -(floor(fabs(fpsrcop))) : floor(fpsrcop);
+ st0 -= (st1 * fpsrcop * fptemp);
+ }
+ ST0 = double_to_floatx80(st0);
+}
+
+void helper_fyl2xp1(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if ((fptemp + 1.0) > 0.0) {
+ fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
+ fptemp *= floatx80_to_double(ST1);
+ ST1 = double_to_floatx80(fptemp);
+ fpop();
+ } else {
+ env->fpus &= ~0x4700;
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fsqrt(void)
+{
+ if (floatx80_is_neg(ST0)) {
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ env->fpus |= 0x400;
+ }
+ ST0 = floatx80_sqrt(ST0, &env->fp_status);
+}
+
+void helper_fsincos(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(sin(fptemp));
+ fpush();
+ ST0 = double_to_floatx80(cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_frndint(void)
+{
+ ST0 = floatx80_round_to_int(ST0, &env->fp_status);
+}
+
+void helper_fscale(void)
+{
+ if (floatx80_is_any_nan(ST1)) {
+ ST0 = ST1;
+ } else {
+ int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
+ ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
+ }
+}
+
+void helper_fsin(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(sin(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**53 only */
+ }
+}
+
+void helper_fcos(void)
+{
+ double fptemp = floatx80_to_double(ST0);
+
+ if ((fptemp > MAXTAN) || (fptemp < -MAXTAN)) {
+ env->fpus |= 0x400;
+ } else {
+ ST0 = double_to_floatx80(cos(fptemp));
+ env->fpus &= ~0x400; /* C2 <-- 0 */
+ /* the above code is for |arg| < 2**63 only */
+ }
+}
+
+void helper_fxam_ST0(void)
+{
+ CPU_LDoubleU temp;
+ int expdif;
+
+ temp.d = ST0;
+
+ env->fpus &= ~0x4700; /* (C3,C2,C1,C0) <-- 0000 */
+ if (SIGND(temp)) {
+ env->fpus |= 0x200; /* C1 <-- 1 */
+ }
+
+ /* XXX: test fptags too */
+ expdif = EXPD(temp);
+ if (expdif == MAXEXPD) {
+ if (MANTD(temp) == 0x8000000000000000ULL) {
+ env->fpus |= 0x500; /* Infinity */
+ } else {
+ env->fpus |= 0x100; /* NaN */
+ }
+ } else if (expdif == 0) {
+ if (MANTD(temp) == 0) {
+ env->fpus |= 0x4000; /* Zero */
+ } else {
+ env->fpus |= 0x4400; /* Denormal */
+ }
+ } else {
+ env->fpus |= 0x400;
+ }
+}
+
+void helper_fstenv(target_ulong ptr, int data32)
+{
+ int fpus, fptag, exp, i;
+ uint64_t mant;
+ CPU_LDoubleU tmp;
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i = 7; i >= 0; i--) {
+ fptag <<= 2;
+ if (env->fptags[i]) {
+ fptag |= 3;
+ } else {
+ tmp.d = env->fpregs[i].d;
+ exp = EXPD(tmp);
+ mant = MANTD(tmp);
+ if (exp == 0 && mant == 0) {
+ /* zero */
+ fptag |= 1;
+ } else if (exp == 0 || exp == MAXEXPD
+ || (mant & (1LL << 63)) == 0) {
+ /* NaNs, infinity, denormal */
+ fptag |= 2;
+ }
+ }
+ }
+ if (data32) {
+ /* 32 bit */
+ stl(ptr, env->fpuc);
+ stl(ptr + 4, fpus);
+ stl(ptr + 8, fptag);
+ stl(ptr + 12, 0); /* fpip */
+ stl(ptr + 16, 0); /* fpcs */
+ stl(ptr + 20, 0); /* fpoo */
+ stl(ptr + 24, 0); /* fpos */
+ } else {
+ /* 16 bit */
+ stw(ptr, env->fpuc);
+ stw(ptr + 2, fpus);
+ stw(ptr + 4, fptag);
+ stw(ptr + 6, 0);
+ stw(ptr + 8, 0);
+ stw(ptr + 10, 0);
+ stw(ptr + 12, 0);
+ }
+}
+
+void helper_fldenv(target_ulong ptr, int data32)
+{
+ int i, fpus, fptag;
+
+ if (data32) {
+ env->fpuc = lduw(ptr);
+ fpus = lduw(ptr + 4);
+ fptag = lduw(ptr + 8);
+ } else {
+ env->fpuc = lduw(ptr);
+ fpus = lduw(ptr + 2);
+ fptag = lduw(ptr + 4);
+ }
+ env->fpstt = (fpus >> 11) & 7;
+ env->fpus = fpus & ~0x3800;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag & 3) == 3);
+ fptag >>= 2;
+ }
+}
+
+void helper_fsave(target_ulong ptr, int data32)
+{
+ floatx80 tmp;
+ int i;
+
+ helper_fstenv(ptr, data32);
+
+ ptr += (14 << data32);
+ for (i = 0; i < 8; i++) {
+ tmp = ST(i);
+ helper_fstt(tmp, ptr);
+ ptr += 10;
+ }
+
+ /* fninit */
+ env->fpus = 0;
+ env->fpstt = 0;
+ env->fpuc = 0x37f;
+ env->fptags[0] = 1;
+ env->fptags[1] = 1;
+ env->fptags[2] = 1;
+ env->fptags[3] = 1;
+ env->fptags[4] = 1;
+ env->fptags[5] = 1;
+ env->fptags[6] = 1;
+ env->fptags[7] = 1;
+}
+
+void helper_frstor(target_ulong ptr, int data32)
+{
+ floatx80 tmp;
+ int i;
+
+ helper_fldenv(ptr, data32);
+ ptr += (14 << data32);
+
+ for (i = 0; i < 8; i++) {
+ tmp = helper_fldt(ptr);
+ ST(i) = tmp;
+ ptr += 10;
+ }
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = s;
+
+ helper_fsave(ptr, data32);
+
+ env = saved_env;
+}
+
+void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = s;
+
+ helper_frstor(ptr, data32);
+
+ env = saved_env;
+}
+#endif
+
+void helper_fxsave(target_ulong ptr, int data64)
+{
+ int fpus, fptag, i, nb_xmm_regs;
+ floatx80 tmp;
+ target_ulong addr;
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+
+ fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
+ fptag = 0;
+ for (i = 0; i < 8; i++) {
+ fptag |= (env->fptags[i] << i);
+ }
+ stw(ptr, env->fpuc);
+ stw(ptr + 2, fpus);
+ stw(ptr + 4, fptag ^ 0xff);
+#ifdef TARGET_X86_64
+ if (data64) {
+ stq(ptr + 0x08, 0); /* rip */
+ stq(ptr + 0x10, 0); /* rdp */
+ } else
+#endif
+ {
+ stl(ptr + 0x08, 0); /* eip */
+ stl(ptr + 0x0c, 0); /* sel */
+ stl(ptr + 0x10, 0); /* dp */
+ stl(ptr + 0x14, 0); /* sel */
+ }
+
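+ /* the x87 registers start at offset 0x20 of the 512-byte FXSAVE area */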
+ addr = ptr + 0x20;
+ for (i = 0; i < 8; i++) {
+ tmp = ST(i);
+ helper_fstt(tmp, addr);
+ addr += 16;
+ }
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ /* XXX: finish it */
+ stl(ptr + 0x18, env->mxcsr); /* mxcsr */
+ stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+ addr = ptr + 0xa0;
+ /* Fast FXSAVE leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ for (i = 0; i < nb_xmm_regs; i++) {
+ stq(addr, env->xmm_regs[i].XMM_Q(0));
+ stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
+ addr += 16;
+ }
+ }
+ }
+}
+
+void helper_fxrstor(target_ulong ptr, int data64)
+{
+ int i, fpus, fptag, nb_xmm_regs;
+ floatx80 tmp;
+ target_ulong addr;
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+
+ env->fpuc = lduw(ptr);
+ fpus = lduw(ptr + 2);
+ fptag = lduw(ptr + 4);
+ env->fpstt = (fpus >> 11) & 7;
+ env->fpus = fpus & ~0x3800;
+ fptag ^= 0xff;
+ for (i = 0; i < 8; i++) {
+ env->fptags[i] = ((fptag >> i) & 1);
+ }
+
+ addr = ptr + 0x20;
+ for (i = 0; i < 8; i++) {
+ tmp = helper_fldt(addr);
+ ST(i) = tmp;
+ addr += 16;
+ }
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ /* XXX: finish it */
+ env->mxcsr = ldl(ptr + 0x18);
+ /* ldl(ptr + 0x1c); */
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+ addr = ptr + 0xa0;
+ /* Fast FXRSTOR leaves out the XMM registers */
+ if (!(env->efer & MSR_EFER_FFXSR)
+ || (env->hflags & HF_CPL_MASK)
+ || !(env->hflags & HF_LMA_MASK)) {
+ for (i = 0; i < nb_xmm_regs; i++) {
+ env->xmm_regs[i].XMM_Q(0) = ldq(addr);
+ env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
+ addr += 16;
+ }
+ }
+ }
+}
+
+void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
+{
+ CPU_LDoubleU temp;
+
+ temp.d = f;
+ *pmant = temp.l.lower;
+ *pexp = temp.l.upper;
+}
+
+floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
+{
+ CPU_LDoubleU temp;
+
+ temp.l.upper = upper;
+ temp.l.lower = mant;
+ return temp.d;
+}
+
+/* MMX/SSE */
+/* XXX: optimize by storing fptt and fptags in the static cpu state */
+
+#define SSE_DAZ 0x0040
+#define SSE_RC_MASK 0x6000
+#define SSE_RC_NEAR 0x0000
+#define SSE_RC_DOWN 0x2000
+#define SSE_RC_UP 0x4000
+#define SSE_RC_CHOP 0x6000
+#define SSE_FZ 0x8000
+
+static void update_sse_status(void)
+{
+ int rnd_type;
+
+ /* set rounding mode */
+ switch (env->mxcsr & SSE_RC_MASK) {
+ default:
+ case SSE_RC_NEAR:
+ rnd_type = float_round_nearest_even;
+ break;
+ case SSE_RC_DOWN:
+ rnd_type = float_round_down;
+ break;
+ case SSE_RC_UP:
+ rnd_type = float_round_up;
+ break;
+ case SSE_RC_CHOP:
+ rnd_type = float_round_to_zero;
+ break;
+ }
+ set_float_rounding_mode(rnd_type, &env->sse_status);
+
+ /* set denormals are zero */
+ set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
+
+ /* set flush to zero */
+ set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->sse_status);
+}
+
+void helper_ldmxcsr(uint32_t val)
+{
+ env->mxcsr = val;
+ update_sse_status();
+}
+
+void helper_enter_mmx(void)
+{
+ env->fpstt = 0;
+ *(uint32_t *)(env->fptags) = 0;
+ *(uint32_t *)(env->fptags + 4) = 0;
+}
+
+void helper_emms(void)
+{
+ /* set to empty state */
+ *(uint32_t *)(env->fptags) = 0x01010101;
+ *(uint32_t *)(env->fptags + 4) = 0x01010101;
+}
+
+/* XXX: suppress */
+void helper_movq(void *d, void *s)
+{
+ *(uint64_t *)d = *(uint64_t *)s;
+}
+
+#define SHIFT 0
+#include "ops_sse.h"
+
+#define SHIFT 1
+#include "ops_sse.h"
diff --git a/target-i386/helper.c b/target-i386/helper.c
index c52ec130e5..8a5da3d7c0 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -949,7 +949,7 @@ void breakpoint_handler(CPUX86State *env)
if (env->watchpoint_hit->flags & BP_CPU) {
env->watchpoint_hit = NULL;
if (check_hw_breakpoints(env, 0))
- raise_exception_env(EXCP01_DB, env);
+ raise_exception(env, EXCP01_DB);
else
cpu_resume_from_signal(env, NULL);
}
@@ -958,7 +958,7 @@ void breakpoint_handler(CPUX86State *env)
if (bp->pc == env->eip) {
if (bp->flags & BP_CPU) {
check_hw_breakpoints(env, 1);
- raise_exception_env(EXCP01_DB, env);
+ raise_exception(env, EXCP01_DB);
}
break;
}
@@ -1177,7 +1177,6 @@ void do_cpu_init(X86CPU *cpu)
env->interrupt_request = sipi;
env->pat = pat;
apic_init_reset(env->apic_state);
- env->halted = !cpu_is_bsp(env);
}
void do_cpu_sipi(X86CPU *cpu)
diff --git a/target-i386/helper.h b/target-i386/helper.h
index 761954e925..99ca18396d 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -63,8 +63,8 @@ DEF_HELPER_1(monitor, void, tl)
DEF_HELPER_1(mwait, void, int)
DEF_HELPER_0(debug, void)
DEF_HELPER_0(reset_rf, void)
-DEF_HELPER_2(raise_interrupt, void, int, int)
-DEF_HELPER_1(raise_exception, void, int)
+DEF_HELPER_3(raise_interrupt, void, env, int, int)
+DEF_HELPER_2(raise_exception, void, env, int)
DEF_HELPER_0(cli, void)
DEF_HELPER_0(sti, void)
DEF_HELPER_0(set_inhibit_irq, void)
diff --git a/target-i386/int_helper.c b/target-i386/int_helper.c
new file mode 100644
index 0000000000..e1f66f5ad1
--- /dev/null
+++ b/target-i386/int_helper.c
@@ -0,0 +1,500 @@
+/*
+ * x86 integer helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "host-utils.h"
+#include "helper.h"
+
+//#define DEBUG_MULDIV
+
+/* modulo 9 table */
+static const uint8_t rclb_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 0, 1, 2, 3, 4, 5,
+ 6, 7, 8, 0, 1, 2, 3, 4,
+};
+
+/* modulo 17 table */
+static const uint8_t rclw_table[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 0, 1, 2, 3, 4, 5, 6,
+ 7, 8, 9, 10, 11, 12, 13, 14,
+};
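+
+/* RCL rotates through CF, so the effective count is reduced modulo 9
+ (8-bit operands) or 17 (16-bit operands) via the tables above */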
+
+/* division, flags are undefined */
+
+void helper_divb_AL(target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (EAX & 0xffff);
+ den = (t0 & 0xff);
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ if (q > 0xff) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ EAX = (EAX & ~0xffff) | (r << 8) | q;
+}
+
+void helper_idivb_AL(target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (int16_t)EAX;
+ den = (int8_t)t0;
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ if (q != (int8_t)q) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q &= 0xff;
+ r = (num % den) & 0xff;
+ EAX = (EAX & ~0xffff) | (r << 8) | q;
+}
+
+void helper_divw_AX(target_ulong t0)
+{
+ unsigned int num, den, q, r;
+
+ num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
+ den = (t0 & 0xffff);
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ if (q > 0xffff) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ EAX = (EAX & ~0xffff) | q;
+ EDX = (EDX & ~0xffff) | r;
+}
+
+void helper_idivw_AX(target_ulong t0)
+{
+ int num, den, q, r;
+
+ num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
+ den = (int16_t)t0;
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ if (q != (int16_t)q) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q &= 0xffff;
+ r = (num % den) & 0xffff;
+ EAX = (EAX & ~0xffff) | q;
+ EDX = (EDX & ~0xffff) | r;
+}
+
+void helper_divl_EAX(target_ulong t0)
+{
+ unsigned int den, r;
+ uint64_t num, q;
+
+ num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q > 0xffffffff) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ EAX = (uint32_t)q;
+ EDX = (uint32_t)r;
+}
+
+void helper_idivl_EAX(target_ulong t0)
+{
+ int den, r;
+ int64_t num, q;
+
+ num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
+ den = t0;
+ if (den == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ q = (num / den);
+ r = (num % den);
+ if (q != (int32_t)q) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ EAX = (uint32_t)q;
+ EDX = (uint32_t)r;
+}
+
+/* bcd */
+
+/* XXX: exception */
+void helper_aam(int base)
+{
+ int al, ah;
+
+ al = EAX & 0xff;
+ ah = al / base;
+ al = al % base;
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_DST = al;
+}
+
+void helper_aad(int base)
+{
+ int al, ah;
+
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+ al = ((ah * base) + al) & 0xff;
+ EAX = (EAX & ~0xffff) | al;
+ CC_DST = al;
+}
+
+void helper_aaa(void)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+
+ icarry = (al > 0xf9);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0x0f;
+ ah = (ah + 1 + icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void helper_aas(void)
+{
+ int icarry;
+ int al, ah, af;
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+ ah = (EAX >> 8) & 0xff;
+
+ icarry = (al < 6);
+ if (((al & 0x0f) > 9) || af) {
+ al = (al - 6) & 0x0f;
+ ah = (ah - 1 - icarry) & 0xff;
+ eflags |= CC_C | CC_A;
+ } else {
+ eflags &= ~(CC_C | CC_A);
+ al &= 0x0f;
+ }
+ EAX = (EAX & ~0xffff) | al | (ah << 8);
+ CC_SRC = eflags;
+}
+
+void helper_daa(void)
+{
+ int old_al, al, af, cf;
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ old_al = al = EAX & 0xff;
+
+ eflags = 0;
+ if (((al & 0x0f) > 9) || af) {
+ al = (al + 6) & 0xff;
+ eflags |= CC_A;
+ }
+ if ((old_al > 0x99) || cf) {
+ al = (al + 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ EAX = (EAX & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+void helper_das(void)
+{
+ int al, al1, af, cf;
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ cf = eflags & CC_C;
+ af = eflags & CC_A;
+ al = EAX & 0xff;
+
+ eflags = 0;
+ al1 = al;
+ if (((al & 0x0f) > 9) || af) {
+ eflags |= CC_A;
+ if (al < 6 || cf) {
+ eflags |= CC_C;
+ }
+ al = (al - 6) & 0xff;
+ }
+ if ((al1 > 0x99) || cf) {
+ al = (al - 0x60) & 0xff;
+ eflags |= CC_C;
+ }
+ EAX = (EAX & ~0xff) | al;
+ /* well, speed is not an issue here, so we compute the flags by hand */
+ eflags |= (al == 0) << 6; /* zf */
+ eflags |= parity_table[al]; /* pf */
+ eflags |= (al & 0x80); /* sf */
+ CC_SRC = eflags;
+}
+
+#ifdef TARGET_X86_64
+static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
+{
+ *plow += a;
+ /* carry test */
+ if (*plow < a) {
+ (*phigh)++;
+ }
+ *phigh += b;
+}
+
+static void neg128(uint64_t *plow, uint64_t *phigh)
+{
+ *plow = ~*plow;
+ *phigh = ~*phigh;
+ add128(plow, phigh, 1, 0);
+}
+
+/* return TRUE if overflow */
+static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
+{
+ uint64_t q, r, a1, a0;
+ int i, qb, ab;
+
+ a0 = *plow;
+ a1 = *phigh;
+ if (a1 == 0) {
+ q = a0 / b;
+ r = a0 % b;
+ *plow = q;
+ *phigh = r;
+ } else {
+ if (a1 >= b) {
+ return 1;
+ }
+ /* XXX: use a better algorithm */
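+ /* shift-and-subtract (restoring) division, one quotient bit per
+ iteration */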
+ for (i = 0; i < 64; i++) {
+ ab = a1 >> 63;
+ a1 = (a1 << 1) | (a0 >> 63);
+ if (ab || a1 >= b) {
+ a1 -= b;
+ qb = 1;
+ } else {
+ qb = 0;
+ }
+ a0 = (a0 << 1) | qb;
+ }
+#if defined(DEBUG_MULDIV)
+ printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64
+ ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
+ *phigh, *plow, b, a0, a1);
+#endif
+ *plow = a0;
+ *phigh = a1;
+ }
+ return 0;
+}
+
+/* return TRUE if overflow */
+static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
+{
+ int sa, sb;
+
+ sa = ((int64_t)*phigh < 0);
+ if (sa) {
+ neg128(plow, phigh);
+ }
+ sb = (b < 0);
+ if (sb) {
+ b = -b;
+ }
+ if (div64(plow, phigh, b) != 0) {
+ return 1;
+ }
+ if (sa ^ sb) {
+ if (*plow > (1ULL << 63)) {
+ return 1;
+ }
+ *plow = -*plow;
+ } else {
+ if (*plow >= (1ULL << 63)) {
+ return 1;
+ }
+ }
+ if (sa) {
+ *phigh = -*phigh;
+ }
+ return 0;
+}
+
+void helper_mulq_EAX_T0(target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ mulu64(&r0, &r1, EAX, t0);
+ EAX = r0;
+ EDX = r1;
+ CC_DST = r0;
+ CC_SRC = r1;
+}
+
+void helper_imulq_EAX_T0(target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ muls64(&r0, &r1, EAX, t0);
+ EAX = r0;
+ EDX = r1;
+ CC_DST = r0;
+ CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
+}
+
+target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
+{
+ uint64_t r0, r1;
+
+ muls64(&r0, &r1, t0, t1);
+ CC_DST = r0;
+ CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
+ return r0;
+}
+
+void helper_divq_EAX(target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ r0 = EAX;
+ r1 = EDX;
+ if (div64(&r0, &r1, t0)) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ EAX = r0;
+ EDX = r1;
+}
+
+void helper_idivq_EAX(target_ulong t0)
+{
+ uint64_t r0, r1;
+
+ if (t0 == 0) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ r0 = EAX;
+ r1 = EDX;
+ if (idiv64(&r0, &r1, t0)) {
+ raise_exception(env, EXCP00_DIVZ);
+ }
+ EAX = r0;
+ EDX = r1;
+}
+#endif
+
+/* bit operations */
+target_ulong helper_bsf(target_ulong t0)
+{
+ int count;
+ target_ulong res;
+
+ res = t0;
+ count = 0;
+ while ((res & 1) == 0) {
+ count++;
+ res >>= 1;
+ }
+ return count;
+}
+
+target_ulong helper_lzcnt(target_ulong t0, int wordsize)
+{
+ int count;
+ target_ulong res, mask;
+
+ if (wordsize > 0 && t0 == 0) {
+ return wordsize;
+ }
+ res = t0;
+ count = TARGET_LONG_BITS - 1;
+ mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
+ while ((res & mask) == 0) {
+ count--;
+ res <<= 1;
+ }
+ if (wordsize > 0) {
+ return wordsize - 1 - count;
+ }
+ return count;
+}
+
+target_ulong helper_bsr(target_ulong t0)
+{
+ return helper_lzcnt(t0, 0);
+}
+
+#define SHIFT 0
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 1
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#define SHIFT 2
+#include "shift_helper_template.h"
+#undef SHIFT
+
+#ifdef TARGET_X86_64
+#define SHIFT 3
+#include "shift_helper_template.h"
+#undef SHIFT
+#endif
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 0d0d8f69d3..4cfb3faf01 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -361,8 +361,13 @@ int kvm_arch_init_vcpu(CPUX86State *env)
env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
+ j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
env->cpuid_ext_features |= i;
+ if (j && kvm_irqchip_in_kernel() &&
+ kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+ env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
+ }
env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
0, R_EDX);
@@ -579,11 +584,13 @@ int kvm_arch_init_vcpu(CPUX86State *env)
void kvm_arch_reset_vcpu(CPUX86State *env)
{
+ X86CPU *cpu = x86_env_get_cpu(env);
+
env->exception_injected = -1;
env->interrupt_injected = -1;
env->xcr0 = 1;
if (kvm_irqchip_in_kernel()) {
- env->mp_state = cpu_is_bsp(env) ? KVM_MP_STATE_RUNNABLE :
+ env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE :
KVM_MP_STATE_UNINITIALIZED;
} else {
env->mp_state = KVM_MP_STATE_RUNNABLE;
@@ -1727,6 +1734,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
return 0;
}
+ if (env->interrupt_request & CPU_INTERRUPT_POLL) {
+ env->interrupt_request &= ~CPU_INTERRUPT_POLL;
+ apic_poll_irq(env->apic_state);
+ }
if (((env->interrupt_request & CPU_INTERRUPT_HARD) &&
(env->eflags & IF_MASK)) ||
(env->interrupt_request & CPU_INTERRUPT_NMI)) {
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
new file mode 100644
index 0000000000..91353c0788
--- /dev/null
+++ b/target-i386/mem_helper.c
@@ -0,0 +1,161 @@
+/*
+ * x86 memory access helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+/* broken thread support */
+
+static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
+
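+/* helper_lock/unlock implement the LOCK prefix with a single global
+ spinlock */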
+void helper_lock(void)
+{
+ spin_lock(&global_cpu_lock);
+}
+
+void helper_unlock(void)
+{
+ spin_unlock(&global_cpu_lock);
+}
+
+void helper_cmpxchg8b(target_ulong a0)
+{
+ uint64_t d;
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ d = ldq(a0);
+ if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
+ stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
+ eflags |= CC_Z;
+ } else {
+ /* always do the store */
+ stq(a0, d);
+ EDX = (uint32_t)(d >> 32);
+ EAX = (uint32_t)d;
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+
+#ifdef TARGET_X86_64
+void helper_cmpxchg16b(target_ulong a0)
+{
+ uint64_t d0, d1;
+ int eflags;
+
+ if ((a0 & 0xf) != 0) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+ eflags = helper_cc_compute_all(CC_OP);
+ d0 = ldq(a0);
+ d1 = ldq(a0 + 8);
+ if (d0 == EAX && d1 == EDX) {
+ stq(a0, EBX);
+ stq(a0 + 8, ECX);
+ eflags |= CC_Z;
+ } else {
+ /* always do the store */
+ stq(a0, d0);
+ stq(a0 + 8, d1);
+ EDX = d1;
+ EAX = d0;
+ eflags &= ~CC_Z;
+ }
+ CC_SRC = eflags;
+}
+#endif
+
+void helper_boundw(target_ulong a0, int v)
+{
+ int low, high;
+
+ low = ldsw(a0);
+ high = ldsw(a0 + 2);
+ v = (int16_t)v;
+ if (v < low || v > high) {
+ raise_exception(env, EXCP05_BOUND);
+ }
+}
+
+void helper_boundl(target_ulong a0, int v)
+{
+ int low, high;
+
+ low = ldl(a0);
+ high = ldl(a0 + 4);
+ if (v < low || v > high) {
+ raise_exception(env, EXCP05_BOUND);
+ }
+}
+
+#if !defined(CONFIG_USER_ONLY)
+
+#define MMUSUFFIX _mmu
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+#endif
+
+#if !defined(CONFIG_USER_ONLY)
+/* Try to fill the TLB and raise an exception on error. If retaddr is
+ NULL, the function was called from C code (i.e. not from generated
+ code or from helper.c). */
+/* XXX: fix it to restore all registers */
+void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
+ uintptr_t retaddr)
+{
+ TranslationBlock *tb;
+ int ret;
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = env1;
+
+ ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
+ if (ret) {
+ if (retaddr) {
+ /* now we have a real cpu fault */
+ tb = tb_find_pc(retaddr);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we have
+ a virtual CPU fault */
+ cpu_restore_state(tb, env, retaddr);
+ }
+ }
+ raise_exception_err(env, env->exception_index, env->error_code);
+ }
+ env = saved_env;
+}
+#endif
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
new file mode 100644
index 0000000000..ce675b7218
--- /dev/null
+++ b/target-i386/misc_helper.c
@@ -0,0 +1,603 @@
+/*
+ * x86 misc helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "ioport.h"
+#include "helper.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+/* check if Port I/O is allowed in TSS */
+static inline void check_io(int addr, int size)
+{
+ int io_offset, val, mask;
+
+ /* TSS must be a valid 32 bit one */
+ if (!(env->tr.flags & DESC_P_MASK) ||
+ ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
+ env->tr.limit < 103) {
+ goto fail;
+ }
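+ /* the I/O permission bitmap base is stored at offset 0x66 of the TSS */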
+ io_offset = lduw_kernel(env->tr.base + 0x66);
+ io_offset += (addr >> 3);
+ /* Note: the check needs two bytes */
+ if ((io_offset + 1) > env->tr.limit) {
+ goto fail;
+ }
+ val = lduw_kernel(env->tr.base + io_offset);
+ val >>= (addr & 7);
+ mask = (1 << size) - 1;
+ /* all bits must be zero to allow the I/O */
+ if ((val & mask) != 0) {
+ fail:
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+}
+
+void helper_check_iob(uint32_t t0)
+{
+ check_io(t0, 1);
+}
+
+void helper_check_iow(uint32_t t0)
+{
+ check_io(t0, 2);
+}
+
+void helper_check_iol(uint32_t t0)
+{
+ check_io(t0, 4);
+}
+
+void helper_outb(uint32_t port, uint32_t data)
+{
+ cpu_outb(port, data & 0xff);
+}
+
+target_ulong helper_inb(uint32_t port)
+{
+ return cpu_inb(port);
+}
+
+void helper_outw(uint32_t port, uint32_t data)
+{
+ cpu_outw(port, data & 0xffff);
+}
+
+target_ulong helper_inw(uint32_t port)
+{
+ return cpu_inw(port);
+}
+
+void helper_outl(uint32_t port, uint32_t data)
+{
+ cpu_outl(port, data);
+}
+
+target_ulong helper_inl(uint32_t port)
+{
+ return cpu_inl(port);
+}
+
+void helper_into(int next_eip_addend)
+{
+ int eflags;
+
+ eflags = helper_cc_compute_all(CC_OP);
+ if (eflags & CC_O) {
+ raise_interrupt(env, EXCP04_INTO, 1, 0, next_eip_addend);
+ }
+}
+
+void helper_single_step(void)
+{
+#ifndef CONFIG_USER_ONLY
+ check_hw_breakpoints(env, 1);
+ env->dr[6] |= DR6_BS;
+#endif
+ raise_exception(env, EXCP01_DB);
+}
+
+void helper_cpuid(void)
+{
+ uint32_t eax, ebx, ecx, edx;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_CPUID, 0);
+
+ cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
+ EAX = eax;
+ EBX = ebx;
+ ECX = ecx;
+ EDX = edx;
+}
+
+#if defined(CONFIG_USER_ONLY)
+target_ulong helper_read_crN(int reg)
+{
+ return 0;
+}
+
+void helper_write_crN(int reg, target_ulong t0)
+{
+}
+
+void helper_movl_drN_T0(int reg, target_ulong t0)
+{
+}
+#else
+target_ulong helper_read_crN(int reg)
+{
+ target_ulong val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_READ_CR0 + reg, 0);
+ switch (reg) {
+ default:
+ val = env->cr[reg];
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ val = cpu_get_apic_tpr(env->apic_state);
+ } else {
+ val = env->v_tpr;
+ }
+ break;
+ }
+ return val;
+}
+
+void helper_write_crN(int reg, target_ulong t0)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_WRITE_CR0 + reg, 0);
+ switch (reg) {
+ case 0:
+ cpu_x86_update_cr0(env, t0);
+ break;
+ case 3:
+ cpu_x86_update_cr3(env, t0);
+ break;
+ case 4:
+ cpu_x86_update_cr4(env, t0);
+ break;
+ case 8:
+ if (!(env->hflags2 & HF2_VINTR_MASK)) {
+ cpu_set_apic_tpr(env->apic_state, t0);
+ }
+ env->v_tpr = t0 & 0x0f;
+ break;
+ default:
+ env->cr[reg] = t0;
+ break;
+ }
+}
+
+void helper_movl_drN_T0(int reg, target_ulong t0)
+{
+ int i;
+
+ if (reg < 4) {
+ hw_breakpoint_remove(env, reg);
+ env->dr[reg] = t0;
+ hw_breakpoint_insert(env, reg);
+ } else if (reg == 7) {
+ for (i = 0; i < 4; i++) {
+ hw_breakpoint_remove(env, i);
+ }
+ env->dr[7] = t0;
+ for (i = 0; i < 4; i++) {
+ hw_breakpoint_insert(env, i);
+ }
+ } else {
+ env->dr[reg] = t0;
+ }
+}
+#endif
+
+void helper_lmsw(target_ulong t0)
+{
+ /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
+ if already set to one. */
+ t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
+ helper_write_crN(0, t0);
+}
+
+void helper_invlpg(target_ulong addr)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPG, 0);
+ tlb_flush_page(env, addr);
+}
+
+void helper_rdtsc(void)
+{
+ uint64_t val;
+
+ if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDTSC, 0);
+
+ val = cpu_get_tsc(env) + env->tsc_offset;
+ EAX = (uint32_t)(val);
+ EDX = (uint32_t)(val >> 32);
+}
+
+void helper_rdtscp(void)
+{
+ helper_rdtsc();
+ ECX = (uint32_t)(env->tsc_aux);
+}
+
+void helper_rdpmc(void)
+{
+ if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_RDPMC, 0);
+
+ /* currently unimplemented */
+ qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
+ raise_exception_err(env, EXCP06_ILLOP, 0);
+}
+
+#if defined(CONFIG_USER_ONLY)
+void helper_wrmsr(void)
+{
+}
+
+void helper_rdmsr(void)
+{
+}
+#else
+void helper_wrmsr(void)
+{
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 1);
+
+ val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
+
+ switch ((uint32_t)ECX) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = val & 0xffff;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = val;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = val;
+ break;
+ case MSR_IA32_APICBASE:
+ cpu_set_apic_base(env->apic_state, val);
+ break;
+ case MSR_EFER:
+ {
+ uint64_t update_mask;
+
+ update_mask = 0;
+ if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL) {
+ update_mask |= MSR_EFER_SCE;
+ }
+ if (env->cpuid_ext2_features & CPUID_EXT2_LM) {
+ update_mask |= MSR_EFER_LME;
+ }
+ if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ if (env->cpuid_ext2_features & CPUID_EXT2_NX) {
+ update_mask |= MSR_EFER_NXE;
+ }
+ if (env->cpuid_ext3_features & CPUID_EXT3_SVM) {
+ update_mask |= MSR_EFER_SVME;
+ }
+ if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR) {
+ update_mask |= MSR_EFER_FFXSR;
+ }
+ cpu_load_efer(env, (env->efer & ~update_mask) |
+ (val & update_mask));
+ }
+ break;
+ case MSR_STAR:
+ env->star = val;
+ break;
+ case MSR_PAT:
+ env->pat = val;
+ break;
+ case MSR_VM_HSAVE_PA:
+ env->vm_hsave = val;
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ env->lstar = val;
+ break;
+ case MSR_CSTAR:
+ env->cstar = val;
+ break;
+ case MSR_FMASK:
+ env->fmask = val;
+ break;
+ case MSR_FSBASE:
+ env->segs[R_FS].base = val;
+ break;
+ case MSR_GSBASE:
+ env->segs[R_GS].base = val;
+ break;
+ case MSR_KERNELGSBASE:
+ env->kernelgsbase = val;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
+ break;
+ case MSR_MTRRfix64K_00000:
+ env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
+ break;
+ case MSR_MTRRdefType:
+ env->mtrr_deftype = val;
+ break;
+ case MSR_MCG_STATUS:
+ env->mcg_status = val;
+ break;
+ case MSR_MCG_CTL:
+ if ((env->mcg_cap & MCG_CTL_P)
+ && (val == 0 || val == ~(uint64_t)0)) {
+ env->mcg_ctl = val;
+ }
+ break;
+ case MSR_TSC_AUX:
+ env->tsc_aux = val;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ env->msr_ia32_misc_enable = val;
+ break;
+ default:
+ if ((uint32_t)ECX >= MSR_MC0_CTL
+ && (uint32_t)ECX < MSR_MC0_CTL + (4 * (env->mcg_cap & 0xff))) {
+ uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
+ if ((offset & 0x3) != 0
+ || (val == 0 || val == ~(uint64_t)0)) {
+ env->mce_banks[offset] = val;
+ }
+ break;
+ }
+ /* XXX: exception? */
+ break;
+ }
+}
+
+void helper_rdmsr(void)
+{
+ uint64_t val;
+
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MSR, 0);
+
+ switch ((uint32_t)ECX) {
+ case MSR_IA32_SYSENTER_CS:
+ val = env->sysenter_cs;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ val = env->sysenter_esp;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ val = env->sysenter_eip;
+ break;
+ case MSR_IA32_APICBASE:
+ val = cpu_get_apic_base(env->apic_state);
+ break;
+ case MSR_EFER:
+ val = env->efer;
+ break;
+ case MSR_STAR:
+ val = env->star;
+ break;
+ case MSR_PAT:
+ val = env->pat;
+ break;
+ case MSR_VM_HSAVE_PA:
+ val = env->vm_hsave;
+ break;
+ case MSR_IA32_PERF_STATUS:
+ /* tsc_increment_by_tick */
+ val = 1000ULL;
+ /* CPU multiplier */
+ val |= (((uint64_t)4ULL) << 40);
+ break;
+#ifdef TARGET_X86_64
+ case MSR_LSTAR:
+ val = env->lstar;
+ break;
+ case MSR_CSTAR:
+ val = env->cstar;
+ break;
+ case MSR_FMASK:
+ val = env->fmask;
+ break;
+ case MSR_FSBASE:
+ val = env->segs[R_FS].base;
+ break;
+ case MSR_GSBASE:
+ val = env->segs[R_GS].base;
+ break;
+ case MSR_KERNELGSBASE:
+ val = env->kernelgsbase;
+ break;
+ case MSR_TSC_AUX:
+ val = env->tsc_aux;
+ break;
+#endif
+ case MSR_MTRRphysBase(0):
+ case MSR_MTRRphysBase(1):
+ case MSR_MTRRphysBase(2):
+ case MSR_MTRRphysBase(3):
+ case MSR_MTRRphysBase(4):
+ case MSR_MTRRphysBase(5):
+ case MSR_MTRRphysBase(6):
+ case MSR_MTRRphysBase(7):
+ val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
+ break;
+ case MSR_MTRRphysMask(0):
+ case MSR_MTRRphysMask(1):
+ case MSR_MTRRphysMask(2):
+ case MSR_MTRRphysMask(3):
+ case MSR_MTRRphysMask(4):
+ case MSR_MTRRphysMask(5):
+ case MSR_MTRRphysMask(6):
+ case MSR_MTRRphysMask(7):
+ val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
+ break;
+ case MSR_MTRRfix64K_00000:
+ val = env->mtrr_fixed[0];
+ break;
+ case MSR_MTRRfix16K_80000:
+ case MSR_MTRRfix16K_A0000:
+ val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
+ break;
+ case MSR_MTRRfix4K_C0000:
+ case MSR_MTRRfix4K_C8000:
+ case MSR_MTRRfix4K_D0000:
+ case MSR_MTRRfix4K_D8000:
+ case MSR_MTRRfix4K_E0000:
+ case MSR_MTRRfix4K_E8000:
+ case MSR_MTRRfix4K_F0000:
+ case MSR_MTRRfix4K_F8000:
+ val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
+ break;
+ case MSR_MTRRdefType:
+ val = env->mtrr_deftype;
+ break;
+ case MSR_MTRRcap:
+ if (env->cpuid_features & CPUID_MTRR) {
+ val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT |
+ MSR_MTRRcap_WC_SUPPORTED;
+ } else {
+ /* XXX: exception? */
+ val = 0;
+ }
+ break;
+ case MSR_MCG_CAP:
+ val = env->mcg_cap;
+ break;
+ case MSR_MCG_CTL:
+ if (env->mcg_cap & MCG_CTL_P) {
+ val = env->mcg_ctl;
+ } else {
+ val = 0;
+ }
+ break;
+ case MSR_MCG_STATUS:
+ val = env->mcg_status;
+ break;
+ case MSR_IA32_MISC_ENABLE:
+ val = env->msr_ia32_misc_enable;
+ break;
+ default:
+ if ((uint32_t)ECX >= MSR_MC0_CTL
+ && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
+ uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
+ val = env->mce_banks[offset];
+ break;
+ }
+ /* XXX: exception? */
+ val = 0;
+ break;
+ }
+ EAX = (uint32_t)(val);
+ EDX = (uint32_t)(val >> 32);
+}
+#endif
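helper_rdmsr() above finishes by splitting the 64-bit MSR value across EDX:EAX, the architectural return convention for RDMSR; WRMSR takes the inverse combination. A trivial sketch of both directions (illustrative helpers only):

    #include <stdint.h>

    /* Illustrative only: RDMSR result split and WRMSR operand recombination. */
    static void msr_split(uint64_t val, uint32_t *eax, uint32_t *edx)
    {
        *eax = (uint32_t)val;
        *edx = (uint32_t)(val >> 32);
    }

    static uint64_t msr_join(uint32_t eax, uint32_t edx)
    {
        return ((uint64_t)edx << 32) | eax;
    }
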
+
+static void do_hlt(void)
+{
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
+ env->halted = 1;
+ env->exception_index = EXCP_HLT;
+ cpu_loop_exit(env);
+}
+
+void helper_hlt(int next_eip_addend)
+{
+ cpu_svm_check_intercept_param(env, SVM_EXIT_HLT, 0);
+ EIP += next_eip_addend;
+
+ do_hlt();
+}
+
+void helper_monitor(target_ulong ptr)
+{
+ if ((uint32_t)ECX != 0) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+ /* XXX: store address? */
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MONITOR, 0);
+}
+
+void helper_mwait(int next_eip_addend)
+{
+ if ((uint32_t)ECX != 0) {
+ raise_exception(env, EXCP0D_GPF);
+ }
+ cpu_svm_check_intercept_param(env, SVM_EXIT_MWAIT, 0);
+ EIP += next_eip_addend;
+
+ /* XXX: not complete but not completely erroneous */
+ if (env->cpu_index != 0 || env->next_cpu != NULL) {
+ /* more than one CPU: do not sleep because another CPU may
+ wake this one */
+ } else {
+ do_hlt();
+ }
+}
+
+void helper_debug(void)
+{
+ env->exception_index = EXCP_DEBUG;
+ cpu_loop_exit(env);
+}
diff --git a/target-i386/op_helper.c b/target-i386/op_helper.c
deleted file mode 100644
index 2862ea4a92..0000000000
--- a/target-i386/op_helper.c
+++ /dev/null
@@ -1,5923 +0,0 @@
-/*
- * i386 helpers
- *
- * Copyright (c) 2003 Fabrice Bellard
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <math.h>
-#include "cpu.h"
-#include "dyngen-exec.h"
-#include "host-utils.h"
-#include "ioport.h"
-#include "qemu-log.h"
-#include "cpu-defs.h"
-#include "helper.h"
-
-#if !defined(CONFIG_USER_ONLY)
-#include "softmmu_exec.h"
-#endif /* !defined(CONFIG_USER_ONLY) */
-
-//#define DEBUG_PCALL
-
-#ifdef DEBUG_PCALL
-# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
-# define LOG_PCALL_STATE(env) \
- log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
-#else
-# define LOG_PCALL(...) do { } while (0)
-# define LOG_PCALL_STATE(env) do { } while (0)
-#endif
-
-/* n must be a constant to be efficient */
-static inline target_long lshift(target_long x, int n)
-{
- if (n >= 0) {
- return x << n;
- } else {
- return x >> (-n);
- }
-}
-
-#define FPU_RC_MASK 0xc00
-#define FPU_RC_NEAR 0x000
-#define FPU_RC_DOWN 0x400
-#define FPU_RC_UP 0x800
-#define FPU_RC_CHOP 0xc00
-
-#define MAXTAN 9223372036854775808.0
-
-/* the following deal with x86 long double-precision numbers */
-#define MAXEXPD 0x7fff
-#define EXPBIAS 16383
-#define EXPD(fp) (fp.l.upper & 0x7fff)
-#define SIGND(fp) ((fp.l.upper) & 0x8000)
-#define MANTD(fp) (fp.l.lower)
-#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
-
-static inline void fpush(void)
-{
- env->fpstt = (env->fpstt - 1) & 7;
- env->fptags[env->fpstt] = 0; /* validate stack entry */
-}
-
-static inline void fpop(void)
-{
- env->fptags[env->fpstt] = 1; /* invalidate stack entry */
- env->fpstt = (env->fpstt + 1) & 7;
-}
-
-static inline floatx80 helper_fldt(target_ulong ptr)
-{
- CPU_LDoubleU temp;
-
- temp.l.lower = ldq(ptr);
- temp.l.upper = lduw(ptr + 8);
- return temp.d;
-}
-
-static inline void helper_fstt(floatx80 f, target_ulong ptr)
-{
- CPU_LDoubleU temp;
-
- temp.d = f;
- stq(ptr, temp.l.lower);
- stw(ptr + 8, temp.l.upper);
-}
-
-#define FPUS_IE (1 << 0)
-#define FPUS_DE (1 << 1)
-#define FPUS_ZE (1 << 2)
-#define FPUS_OE (1 << 3)
-#define FPUS_UE (1 << 4)
-#define FPUS_PE (1 << 5)
-#define FPUS_SF (1 << 6)
-#define FPUS_SE (1 << 7)
-#define FPUS_B (1 << 15)
-
-#define FPUC_EM 0x3f
-
-static inline uint32_t compute_eflags(void)
-{
- return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
-}
-
-/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
-static inline void load_eflags(int eflags, int update_mask)
-{
- CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
- DF = 1 - (2 * ((eflags >> 10) & 1));
- env->eflags = (env->eflags & ~update_mask) |
- (eflags & update_mask) | 0x2;
-}
-
-/* load efer and update the corresponding hflags. XXX: do consistency
- checks with cpuid bits ? */
-static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
-{
- env->efer = val;
- env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
- if (env->efer & MSR_EFER_LMA) {
- env->hflags |= HF_LMA_MASK;
- }
- if (env->efer & MSR_EFER_SVME) {
- env->hflags |= HF_SVME_MASK;
- }
-}
-
-#if 0
-#define raise_exception_err(a, b)\
-do {\
- qemu_log("raise_exception line=%d\n", __LINE__);\
- (raise_exception_err)(a, b);\
-} while (0)
-#endif
-
-static void QEMU_NORETURN raise_exception_err(int exception_index,
- int error_code);
-
-static const uint8_t parity_table[256] = {
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
- 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
-};
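parity_table[] encodes the x86 parity flag: PF is set when the low byte of a result contains an even number of 1 bits. A small sketch of how such a table entry could be computed (hypothetical generator; CC_P matches the PF bit of EFLAGS):

    #include <stdint.h>

    #define CC_P 0x0004   /* PF occupies bit 2 of EFLAGS */

    /* Hypothetical generator: return CC_P when the byte has even parity. */
    static uint8_t parity_of(uint8_t v)
    {
        int ones = 0;

        while (v) {
            ones += v & 1;
            v >>= 1;
        }
        return (ones & 1) ? 0 : CC_P;
    }
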
-
-/* modulo 17 table */
-static const uint8_t rclw_table[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9,10,11,12,13,14,15,
- 16, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 9,10,11,12,13,14,
-};
-
-/* modulo 9 table */
-static const uint8_t rclb_table[32] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 0, 1, 2, 3, 4, 5, 6,
- 7, 8, 0, 1, 2, 3, 4, 5,
- 6, 7, 8, 0, 1, 2, 3, 4,
-};
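The two tables reflect how RCL/RCR rotate through the carry flag: a 16-bit rotate effectively spans 17 bits and an 8-bit rotate spans 9, so the count is reduced modulo 17 or 9 respectively. A one-line sketch of the same reduction (illustrative, parameterised by operand width):

    /* Illustrative only: effective count for rotate-through-carry. */
    static int rcl_effective_count(int count, int width_bits)
    {
        return count % (width_bits + 1);   /* 8-bit -> mod 9, 16-bit -> mod 17 */
    }
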
-
-#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
-#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
-#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
-
-/* broken thread support */
-
-static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
-
-void helper_lock(void)
-{
- spin_lock(&global_cpu_lock);
-}
-
-void helper_unlock(void)
-{
- spin_unlock(&global_cpu_lock);
-}
-
-void helper_write_eflags(target_ulong t0, uint32_t update_mask)
-{
- load_eflags(t0, update_mask);
-}
-
-target_ulong helper_read_eflags(void)
-{
- uint32_t eflags;
- eflags = helper_cc_compute_all(CC_OP);
- eflags |= (DF & DF_MASK);
- eflags |= env->eflags & ~(VM_MASK | RF_MASK);
- return eflags;
-}
-
-/* return non-zero on error */
-static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
- int selector)
-{
- SegmentCache *dt;
- int index;
- target_ulong ptr;
-
- if (selector & 0x4)
- dt = &env->ldt;
- else
- dt = &env->gdt;
- index = selector & ~7;
- if ((index + 7) > dt->limit)
- return -1;
- ptr = dt->base + index;
- *e1_ptr = ldl_kernel(ptr);
- *e2_ptr = ldl_kernel(ptr + 4);
- return 0;
-}
-
-static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
-{
- unsigned int limit;
- limit = (e1 & 0xffff) | (e2 & 0x000f0000);
- if (e2 & DESC_G_MASK)
- limit = (limit << 12) | 0xfff;
- return limit;
-}
-
-static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
-{
- return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
-}
-
-static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
-{
- sc->base = get_seg_base(e1, e2);
- sc->limit = get_seg_limit(e1, e2);
- sc->flags = e2;
-}
-
-/* init the segment cache in vm86 mode. */
-static inline void load_seg_vm(int seg, int selector)
-{
- selector &= 0xffff;
- cpu_x86_load_seg_cache(env, seg, selector,
- (selector << 4), 0xffff, 0);
-}
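load_seg_vm() relies on the real-mode/vm86 addressing rule that a segment's base is simply the selector shifted left by four. For illustration, a hypothetical helper resolving a far pointer under that rule:

    #include <stdint.h>

    /* Hypothetical: linear address of a real-mode/vm86 far pointer. */
    static uint32_t real_mode_linear(uint16_t selector, uint16_t offset)
    {
        return ((uint32_t)selector << 4) + offset;
    }
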
-
-static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
- uint32_t *esp_ptr, int dpl)
-{
- int type, index, shift;
-
-#if 0
- {
- int i;
- printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
- for(i=0;i<env->tr.limit;i++) {
- printf("%02x ", env->tr.base[i]);
- if ((i & 7) == 7) printf("\n");
- }
- printf("\n");
- }
-#endif
-
- if (!(env->tr.flags & DESC_P_MASK))
- cpu_abort(env, "invalid tss");
- type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
- if ((type & 7) != 1)
- cpu_abort(env, "invalid tss type");
- shift = type >> 3;
- index = (dpl * 4 + 2) << shift;
- if (index + (4 << shift) - 1 > env->tr.limit)
- raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
- if (shift == 0) {
- *esp_ptr = lduw_kernel(env->tr.base + index);
- *ss_ptr = lduw_kernel(env->tr.base + index + 2);
- } else {
- *esp_ptr = ldl_kernel(env->tr.base + index);
- *ss_ptr = lduw_kernel(env->tr.base + index + 4);
- }
-}
-
-/* XXX: merge with load_seg() */
-static void tss_load_seg(int seg_reg, int selector)
-{
- uint32_t e1, e2;
- int rpl, dpl, cpl;
-
- if ((selector & 0xfffc) != 0) {
- if (load_segment(&e1, &e2, selector) != 0)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- if (!(e2 & DESC_S_MASK))
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- if (seg_reg == R_CS) {
- if (!(e2 & DESC_CS_MASK))
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- /* XXX: is it correct ? */
- if (dpl != rpl)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- if ((e2 & DESC_C_MASK) && dpl > rpl)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- } else if (seg_reg == R_SS) {
- /* SS must be writable data */
- if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- if (dpl != cpl || dpl != rpl)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- } else {
- /* not readable code */
- if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- /* if data or non-conforming code, check the rights */
- if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
- if (dpl < cpl || dpl < rpl)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- }
- }
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- cpu_x86_load_seg_cache(env, seg_reg, selector,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- } else {
- if (seg_reg == R_SS || seg_reg == R_CS)
- raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
- }
-}
-
-#define SWITCH_TSS_JMP 0
-#define SWITCH_TSS_IRET 1
-#define SWITCH_TSS_CALL 2
-
-/* XXX: restore CPU state in registers (PowerPC case) */
-static void switch_tss(int tss_selector,
- uint32_t e1, uint32_t e2, int source,
- uint32_t next_eip)
-{
- int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
- target_ulong tss_base;
- uint32_t new_regs[8], new_segs[6];
- uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
- uint32_t old_eflags, eflags_mask;
- SegmentCache *dt;
- int index;
- target_ulong ptr;
-
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
-
- /* if task gate, we read the TSS segment and we load it */
- if (type == 5) {
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
- tss_selector = e1 >> 16;
- if (tss_selector & 4)
- raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- if (load_segment(&e1, &e2, tss_selector) != 0)
- raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
- if (e2 & DESC_S_MASK)
- raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- if ((type & 7) != 1)
- raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
- }
-
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
-
- if (type & 8)
- tss_limit_max = 103;
- else
- tss_limit_max = 43;
- tss_limit = get_seg_limit(e1, e2);
- tss_base = get_seg_base(e1, e2);
- if ((tss_selector & 4) != 0 ||
- tss_limit < tss_limit_max)
- raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
- if (old_type & 8)
- old_tss_limit_max = 103;
- else
- old_tss_limit_max = 43;
-
- /* read all the registers from the new TSS */
- if (type & 8) {
- /* 32 bit */
- new_cr3 = ldl_kernel(tss_base + 0x1c);
- new_eip = ldl_kernel(tss_base + 0x20);
- new_eflags = ldl_kernel(tss_base + 0x24);
- for(i = 0; i < 8; i++)
- new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
- for(i = 0; i < 6; i++)
- new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
- new_ldt = lduw_kernel(tss_base + 0x60);
- new_trap = ldl_kernel(tss_base + 0x64);
- } else {
- /* 16 bit */
- new_cr3 = 0;
- new_eip = lduw_kernel(tss_base + 0x0e);
- new_eflags = lduw_kernel(tss_base + 0x10);
- for(i = 0; i < 8; i++)
- new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
- for(i = 0; i < 4; i++)
- new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
- new_ldt = lduw_kernel(tss_base + 0x2a);
- new_segs[R_FS] = 0;
- new_segs[R_GS] = 0;
- new_trap = 0;
- }
- /* XXX: avoid a compiler warning, see
- http://support.amd.com/us/Processor_TechDocs/24593.pdf
- chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
- (void)new_trap;
-
- /* NOTE: we must avoid memory exceptions during the task switch,
- so we make dummy accesses before */
- /* XXX: it can still fail in some cases, so a bigger hack is
- necessary to validate the TLB after having done the accesses */
-
- v1 = ldub_kernel(env->tr.base);
- v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
- stb_kernel(env->tr.base, v1);
- stb_kernel(env->tr.base + old_tss_limit_max, v2);
-
- /* clear busy bit (it is restartable) */
- if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
- target_ulong ptr;
- uint32_t e2;
- ptr = env->gdt.base + (env->tr.selector & ~7);
- e2 = ldl_kernel(ptr + 4);
- e2 &= ~DESC_TSS_BUSY_MASK;
- stl_kernel(ptr + 4, e2);
- }
- old_eflags = compute_eflags();
- if (source == SWITCH_TSS_IRET)
- old_eflags &= ~NT_MASK;
-
- /* save the current state in the old TSS */
- if (type & 8) {
- /* 32 bit */
- stl_kernel(env->tr.base + 0x20, next_eip);
- stl_kernel(env->tr.base + 0x24, old_eflags);
- stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
- stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
- stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
- stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
- stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
- stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
- stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
- stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
- for(i = 0; i < 6; i++)
- stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
- } else {
- /* 16 bit */
- stw_kernel(env->tr.base + 0x0e, next_eip);
- stw_kernel(env->tr.base + 0x10, old_eflags);
- stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
- stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
- stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
- stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
- stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
- stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
- stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
- stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
- for(i = 0; i < 4; i++)
- stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
- }
-
- /* now if an exception occurs, it will occur in the next task
- context */
-
- if (source == SWITCH_TSS_CALL) {
- stw_kernel(tss_base, env->tr.selector);
- new_eflags |= NT_MASK;
- }
-
- /* set busy bit */
- if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
- target_ulong ptr;
- uint32_t e2;
- ptr = env->gdt.base + (tss_selector & ~7);
- e2 = ldl_kernel(ptr + 4);
- e2 |= DESC_TSS_BUSY_MASK;
- stl_kernel(ptr + 4, e2);
- }
-
- /* set the new CPU state */
- /* from this point, any exception which occurs can give problems */
- env->cr[0] |= CR0_TS_MASK;
- env->hflags |= HF_TS_MASK;
- env->tr.selector = tss_selector;
- env->tr.base = tss_base;
- env->tr.limit = tss_limit;
- env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
-
- if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
- cpu_x86_update_cr3(env, new_cr3);
- }
-
- /* load all registers without an exception, then reload them with
- possible exception */
- env->eip = new_eip;
- eflags_mask = TF_MASK | AC_MASK | ID_MASK |
- IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
- if (!(type & 8))
- eflags_mask &= 0xffff;
- load_eflags(new_eflags, eflags_mask);
- /* XXX: what to do in 16 bit case ? */
- EAX = new_regs[0];
- ECX = new_regs[1];
- EDX = new_regs[2];
- EBX = new_regs[3];
- ESP = new_regs[4];
- EBP = new_regs[5];
- ESI = new_regs[6];
- EDI = new_regs[7];
- if (new_eflags & VM_MASK) {
- for(i = 0; i < 6; i++)
- load_seg_vm(i, new_segs[i]);
- /* in vm86, CPL is always 3 */
- cpu_x86_set_cpl(env, 3);
- } else {
- /* CPL is set to the RPL of CS */
- cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
- /* load just the selectors first, as the rest may trigger exceptions */
- for(i = 0; i < 6; i++)
- cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
- }
-
- env->ldt.selector = new_ldt & ~4;
- env->ldt.base = 0;
- env->ldt.limit = 0;
- env->ldt.flags = 0;
-
- /* load the LDT */
- if (new_ldt & 4)
- raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
-
- if ((new_ldt & 0xfffc) != 0) {
- dt = &env->gdt;
- index = new_ldt & ~7;
- if ((index + 7) > dt->limit)
- raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
- ptr = dt->base + index;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
- if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
- raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
- load_seg_cache_raw_dt(&env->ldt, e1, e2);
- }
-
- /* load the segments */
- if (!(new_eflags & VM_MASK)) {
- tss_load_seg(R_CS, new_segs[R_CS]);
- tss_load_seg(R_SS, new_segs[R_SS]);
- tss_load_seg(R_ES, new_segs[R_ES]);
- tss_load_seg(R_DS, new_segs[R_DS]);
- tss_load_seg(R_FS, new_segs[R_FS]);
- tss_load_seg(R_GS, new_segs[R_GS]);
- }
-
- /* check that EIP is in the CS segment limits */
- if (new_eip > env->segs[R_CS].limit) {
- /* XXX: different exception if CALL ? */
- raise_exception_err(EXCP0D_GPF, 0);
- }
-
-#ifndef CONFIG_USER_ONLY
- /* reset local breakpoints */
- if (env->dr[7] & 0x55) {
- for (i = 0; i < 4; i++) {
- if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
- hw_breakpoint_remove(env, i);
- }
- env->dr[7] &= ~0x55;
- }
-#endif
-}
-
-/* check if Port I/O is allowed in TSS */
-static inline void check_io(int addr, int size)
-{
- int io_offset, val, mask;
-
- /* TSS must be a valid 32 bit one */
- if (!(env->tr.flags & DESC_P_MASK) ||
- ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
- env->tr.limit < 103)
- goto fail;
- io_offset = lduw_kernel(env->tr.base + 0x66);
- io_offset += (addr >> 3);
- /* Note: the check needs two bytes */
- if ((io_offset + 1) > env->tr.limit)
- goto fail;
- val = lduw_kernel(env->tr.base + io_offset);
- val >>= (addr & 7);
- mask = (1 << size) - 1;
- /* all bits must be zero to allow the I/O */
- if ((val & mask) != 0) {
- fail:
- raise_exception_err(EXCP0D_GPF, 0);
- }
-}
-
-void helper_check_iob(uint32_t t0)
-{
- check_io(t0, 1);
-}
-
-void helper_check_iow(uint32_t t0)
-{
- check_io(t0, 2);
-}
-
-void helper_check_iol(uint32_t t0)
-{
- check_io(t0, 4);
-}
-
-void helper_outb(uint32_t port, uint32_t data)
-{
- cpu_outb(port, data & 0xff);
-}
-
-target_ulong helper_inb(uint32_t port)
-{
- return cpu_inb(port);
-}
-
-void helper_outw(uint32_t port, uint32_t data)
-{
- cpu_outw(port, data & 0xffff);
-}
-
-target_ulong helper_inw(uint32_t port)
-{
- return cpu_inw(port);
-}
-
-void helper_outl(uint32_t port, uint32_t data)
-{
- cpu_outl(port, data);
-}
-
-target_ulong helper_inl(uint32_t port)
-{
- return cpu_inl(port);
-}
-
-static inline unsigned int get_sp_mask(unsigned int e2)
-{
- if (e2 & DESC_B_MASK)
- return 0xffffffff;
- else
- return 0xffff;
-}
-
-static int exeption_has_error_code(int intno)
-{
- switch(intno) {
- case 8:
- case 10:
- case 11:
- case 12:
- case 13:
- case 14:
- case 17:
- return 1;
- }
- return 0;
-}
-
-#ifdef TARGET_X86_64
-#define SET_ESP(val, sp_mask)\
-do {\
- if ((sp_mask) == 0xffff)\
- ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
- else if ((sp_mask) == 0xffffffffLL)\
- ESP = (uint32_t)(val);\
- else\
- ESP = (val);\
-} while (0)
-#else
-#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
-#endif
-
-/* on 64-bit machines this addition can overflow, so this segment addition
- * macro can be used to trim the value to 32 bits whenever needed */
-#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
-
-/* XXX: add an is_user flag to have proper security support */
-#define PUSHW(ssp, sp, sp_mask, val)\
-{\
- sp -= 2;\
- stw_kernel((ssp) + (sp & (sp_mask)), (val));\
-}
-
-#define PUSHL(ssp, sp, sp_mask, val)\
-{\
- sp -= 4;\
- stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
-}
-
-#define POPW(ssp, sp, sp_mask, val)\
-{\
- val = lduw_kernel((ssp) + (sp & (sp_mask)));\
- sp += 2;\
-}
-
-#define POPL(ssp, sp, sp_mask, val)\
-{\
- val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
- sp += 4;\
-}
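The PUSH/POP macros mask the stack offset with sp_mask so a 16-bit stack (B bit clear) wraps within 64 KiB while a 32-bit stack uses the full offset. A standalone sketch of that address computation (hypothetical, reduced to the arithmetic only):

    #include <stdint.h>

    /* Hypothetical: linear address targeted by a 2-byte push, with the
     * stack offset wrapped to the segment's address-size mask. */
    static uint64_t push2_address(uint64_t ss_base, uint32_t sp, uint32_t sp_mask)
    {
        sp -= 2;                           /* pre-decrement, as PUSHW does */
        return ss_base + (sp & sp_mask);   /* mask is 0xffff or 0xffffffff */
    }
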
-
-/* protected mode interrupt */
-static void do_interrupt_protected(int intno, int is_int, int error_code,
- unsigned int next_eip, int is_hw)
-{
- SegmentCache *dt;
- target_ulong ptr, ssp;
- int type, dpl, selector, ss_dpl, cpl;
- int has_error_code, new_stack, shift;
- uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
- uint32_t old_eip, sp_mask;
-
- has_error_code = 0;
- if (!is_int && !is_hw)
- has_error_code = exeption_has_error_code(intno);
- if (is_int)
- old_eip = next_eip;
- else
- old_eip = env->eip;
-
- dt = &env->idt;
- if (intno * 8 + 7 > dt->limit)
- raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
- ptr = dt->base + intno * 8;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
- /* check gate type */
- type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
- switch(type) {
- case 5: /* task gate */
- /* must do that check here to return the correct error code */
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
- switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
- if (has_error_code) {
- int type;
- uint32_t mask;
- /* push the error code */
- type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
- shift = type >> 3;
- if (env->segs[R_SS].flags & DESC_B_MASK)
- mask = 0xffffffff;
- else
- mask = 0xffff;
- esp = (ESP - (2 << shift)) & mask;
- ssp = env->segs[R_SS].base + esp;
- if (shift)
- stl_kernel(ssp, error_code);
- else
- stw_kernel(ssp, error_code);
- SET_ESP(esp, mask);
- }
- return;
- case 6: /* 286 interrupt gate */
- case 7: /* 286 trap gate */
- case 14: /* 386 interrupt gate */
- case 15: /* 386 trap gate */
- break;
- default:
- raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
- break;
- }
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- /* check privilege if software int */
- if (is_int && dpl < cpl)
- raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
- /* check valid bit */
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
- selector = e1 >> 16;
- offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
- if ((selector & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, 0);
-
- if (load_segment(&e1, &e2, selector) != 0)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl > cpl)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- if (!(e2 & DESC_C_MASK) && dpl < cpl) {
- /* to inner privilege */
- get_ss_esp_from_tss(&ss, &esp, dpl);
- if ((ss & 0xfffc) == 0)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if ((ss & 3) != dpl)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (load_segment(&ss_e1, &ss_e2, ss) != 0)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
- if (ss_dpl != dpl)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (!(ss_e2 & DESC_S_MASK) ||
- (ss_e2 & DESC_CS_MASK) ||
- !(ss_e2 & DESC_W_MASK))
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (!(ss_e2 & DESC_P_MASK))
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- new_stack = 1;
- sp_mask = get_sp_mask(ss_e2);
- ssp = get_seg_base(ss_e1, ss_e2);
- } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
- /* to same privilege */
- if (env->eflags & VM_MASK)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- new_stack = 0;
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- esp = ESP;
- dpl = cpl;
- } else {
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- new_stack = 0; /* avoid warning */
- sp_mask = 0; /* avoid warning */
- ssp = 0; /* avoid warning */
- esp = 0; /* avoid warning */
- }
-
- shift = type >> 3;
-
-#if 0
- /* XXX: check that enough room is available */
- push_size = 6 + (new_stack << 2) + (has_error_code << 1);
- if (env->eflags & VM_MASK)
- push_size += 8;
- push_size <<= shift;
-#endif
- if (shift == 1) {
- if (new_stack) {
- if (env->eflags & VM_MASK) {
- PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
- PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
- }
- PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, esp, sp_mask, ESP);
- }
- PUSHL(ssp, esp, sp_mask, compute_eflags());
- PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, esp, sp_mask, old_eip);
- if (has_error_code) {
- PUSHL(ssp, esp, sp_mask, error_code);
- }
- } else {
- if (new_stack) {
- if (env->eflags & VM_MASK) {
- PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
- PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
- }
- PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, esp, sp_mask, ESP);
- }
- PUSHW(ssp, esp, sp_mask, compute_eflags());
- PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, esp, sp_mask, old_eip);
- if (has_error_code) {
- PUSHW(ssp, esp, sp_mask, error_code);
- }
- }
-
- if (new_stack) {
- if (env->eflags & VM_MASK) {
- cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
- cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
- cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
- cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
- }
- ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
- ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
- }
- SET_ESP(esp, sp_mask);
-
- selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- cpu_x86_set_cpl(env, dpl);
- env->eip = offset;
-
- /* interrupt gates clear the IF mask */
- if ((type & 1) == 0) {
- env->eflags &= ~IF_MASK;
- }
- env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
-}
-
-#ifdef TARGET_X86_64
-
-#define PUSHQ(sp, val)\
-{\
- sp -= 8;\
- stq_kernel(sp, (val));\
-}
-
-#define POPQ(sp, val)\
-{\
- val = ldq_kernel(sp);\
- sp += 8;\
-}
-
-static inline target_ulong get_rsp_from_tss(int level)
-{
- int index;
-
-#if 0
- printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
- env->tr.base, env->tr.limit);
-#endif
-
- if (!(env->tr.flags & DESC_P_MASK))
- cpu_abort(env, "invalid tss");
- index = 8 * level + 4;
- if ((index + 7) > env->tr.limit)
- raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
- return ldq_kernel(env->tr.base + index);
-}
-
-/* 64 bit interrupt */
-static void do_interrupt64(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
-{
- SegmentCache *dt;
- target_ulong ptr;
- int type, dpl, selector, cpl, ist;
- int has_error_code, new_stack;
- uint32_t e1, e2, e3, ss;
- target_ulong old_eip, esp, offset;
-
- has_error_code = 0;
- if (!is_int && !is_hw)
- has_error_code = exeption_has_error_code(intno);
- if (is_int)
- old_eip = next_eip;
- else
- old_eip = env->eip;
-
- dt = &env->idt;
- if (intno * 16 + 15 > dt->limit)
- raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
- ptr = dt->base + intno * 16;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
- e3 = ldl_kernel(ptr + 8);
- /* check gate type */
- type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
- switch(type) {
- case 14: /* 386 interrupt gate */
- case 15: /* 386 trap gate */
- break;
- default:
- raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
- break;
- }
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- /* check privilege if software int */
- if (is_int && dpl < cpl)
- raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
- /* check valid bit */
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
- selector = e1 >> 16;
- offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
- ist = e2 & 7;
- if ((selector & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, 0);
-
- if (load_segment(&e1, &e2, selector) != 0)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl > cpl)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
- /* to inner privilege */
- if (ist != 0)
- esp = get_rsp_from_tss(ist + 3);
- else
- esp = get_rsp_from_tss(dpl);
- esp &= ~0xfLL; /* align stack */
- ss = 0;
- new_stack = 1;
- } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
- /* to same privilege */
- if (env->eflags & VM_MASK)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- new_stack = 0;
- if (ist != 0)
- esp = get_rsp_from_tss(ist + 3);
- else
- esp = ESP;
- esp &= ~0xfLL; /* align stack */
- dpl = cpl;
- } else {
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- new_stack = 0; /* avoid warning */
- esp = 0; /* avoid warning */
- }
-
- PUSHQ(esp, env->segs[R_SS].selector);
- PUSHQ(esp, ESP);
- PUSHQ(esp, compute_eflags());
- PUSHQ(esp, env->segs[R_CS].selector);
- PUSHQ(esp, old_eip);
- if (has_error_code) {
- PUSHQ(esp, error_code);
- }
-
- if (new_stack) {
- ss = 0 | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
- }
- ESP = esp;
-
- selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- cpu_x86_set_cpl(env, dpl);
- env->eip = offset;
-
- /* interrupt gates clear the IF mask */
- if ((type & 1) == 0) {
- env->eflags &= ~IF_MASK;
- }
- env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
-}
-#endif
-
-#ifdef TARGET_X86_64
-#if defined(CONFIG_USER_ONLY)
-void helper_syscall(int next_eip_addend)
-{
- env->exception_index = EXCP_SYSCALL;
- env->exception_next_eip = env->eip + next_eip_addend;
- cpu_loop_exit(env);
-}
-#else
-void helper_syscall(int next_eip_addend)
-{
- int selector;
-
- if (!(env->efer & MSR_EFER_SCE)) {
- raise_exception_err(EXCP06_ILLOP, 0);
- }
- selector = (env->star >> 32) & 0xffff;
- if (env->hflags & HF_LMA_MASK) {
- int code64;
-
- ECX = env->eip + next_eip_addend;
- env->regs[11] = compute_eflags();
-
- code64 = env->hflags & HF_CS64_MASK;
-
- cpu_x86_set_cpl(env, 0);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_W_MASK | DESC_A_MASK);
- env->eflags &= ~env->fmask;
- load_eflags(env->eflags, 0);
- if (code64)
- env->eip = env->lstar;
- else
- env->eip = env->cstar;
- } else {
- ECX = (uint32_t)(env->eip + next_eip_addend);
-
- cpu_x86_set_cpl(env, 0);
- cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_W_MASK | DESC_A_MASK);
- env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
- env->eip = (uint32_t)env->star;
- }
-}
-#endif
-#endif
-
-#ifdef TARGET_X86_64
-void helper_sysret(int dflag)
-{
- int cpl, selector;
-
- if (!(env->efer & MSR_EFER_SCE)) {
- raise_exception_err(EXCP06_ILLOP, 0);
- }
- cpl = env->hflags & HF_CPL_MASK;
- if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
- raise_exception_err(EXCP0D_GPF, 0);
- }
- selector = (env->star >> 48) & 0xffff;
- if (env->hflags & HF_LMA_MASK) {
- if (dflag == 2) {
- cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
- DESC_L_MASK);
- env->eip = ECX;
- } else {
- cpu_x86_load_seg_cache(env, R_CS, selector | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- env->eip = (uint32_t)ECX;
- }
- cpu_x86_load_seg_cache(env, R_SS, selector + 8,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_W_MASK | DESC_A_MASK);
- load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
- IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
- cpu_x86_set_cpl(env, 3);
- } else {
- cpu_x86_load_seg_cache(env, R_CS, selector | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- env->eip = (uint32_t)ECX;
- cpu_x86_load_seg_cache(env, R_SS, selector + 8,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_W_MASK | DESC_A_MASK);
- env->eflags |= IF_MASK;
- cpu_x86_set_cpl(env, 3);
- }
-}
-#endif
-
-/* real mode interrupt */
-static void do_interrupt_real(int intno, int is_int, int error_code,
- unsigned int next_eip)
-{
- SegmentCache *dt;
- target_ulong ptr, ssp;
- int selector;
- uint32_t offset, esp;
- uint32_t old_cs, old_eip;
-
- /* real mode (simpler !) */
- dt = &env->idt;
- if (intno * 4 + 3 > dt->limit)
- raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
- ptr = dt->base + intno * 4;
- offset = lduw_kernel(ptr);
- selector = lduw_kernel(ptr + 2);
- esp = ESP;
- ssp = env->segs[R_SS].base;
- if (is_int)
- old_eip = next_eip;
- else
- old_eip = env->eip;
- old_cs = env->segs[R_CS].selector;
- /* XXX: use SS segment size ? */
- PUSHW(ssp, esp, 0xffff, compute_eflags());
- PUSHW(ssp, esp, 0xffff, old_cs);
- PUSHW(ssp, esp, 0xffff, old_eip);
-
- /* update processor state */
- ESP = (ESP & ~0xffff) | (esp & 0xffff);
- env->eip = offset;
- env->segs[R_CS].selector = selector;
- env->segs[R_CS].base = (selector << 4);
- env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
-}
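do_interrupt_real() fetches the handler from the real-mode IVT, where each vector is four bytes: a 16-bit offset followed by a 16-bit segment. A hypothetical decode of one entry from a raw little-endian byte view of the table:

    #include <stdint.h>

    struct ivt_entry { uint16_t offset; uint16_t segment; };

    /* Hypothetical: decode IVT entry 'intno' from a byte view of the table. */
    static struct ivt_entry decode_ivt_entry(const uint8_t *ivt, int intno)
    {
        const uint8_t *p = ivt + intno * 4;
        struct ivt_entry e;

        e.offset  = (uint16_t)(p[0] | (p[1] << 8));
        e.segment = (uint16_t)(p[2] | (p[3] << 8));
        return e;
    }
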
-
-#if defined(CONFIG_USER_ONLY)
-/* fake user mode interrupt */
-static void do_interrupt_user(int intno, int is_int, int error_code,
- target_ulong next_eip)
-{
- SegmentCache *dt;
- target_ulong ptr;
- int dpl, cpl, shift;
- uint32_t e2;
-
- dt = &env->idt;
- if (env->hflags & HF_LMA_MASK) {
- shift = 4;
- } else {
- shift = 3;
- }
- ptr = dt->base + (intno << shift);
- e2 = ldl_kernel(ptr + 4);
-
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- /* check privilege if software int */
- if (is_int && dpl < cpl)
- raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
-
- /* Since we emulate only user space, we cannot do more than
- exit the emulation with the suitable exception and error
- code */
- if (is_int)
- EIP = next_eip;
-}
-
-#else
-
-static void handle_even_inj(int intno, int is_int, int error_code,
- int is_hw, int rm)
-{
- uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
- if (!(event_inj & SVM_EVTINJ_VALID)) {
- int type;
- if (is_int)
- type = SVM_EVTINJ_TYPE_SOFT;
- else
- type = SVM_EVTINJ_TYPE_EXEPT;
- event_inj = intno | type | SVM_EVTINJ_VALID;
- if (!rm && exeption_has_error_code(intno)) {
- event_inj |= SVM_EVTINJ_VALID_ERR;
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
- }
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
- }
-}
-#endif
-
-/*
- * Begin execution of an interrupt. is_int is TRUE if coming from
- * the int instruction. next_eip is the EIP value AFTER the interrupt
- * instruction. It is only relevant if is_int is TRUE.
- */
-static void do_interrupt_all(int intno, int is_int, int error_code,
- target_ulong next_eip, int is_hw)
-{
- if (qemu_loglevel_mask(CPU_LOG_INT)) {
- if ((env->cr[0] & CR0_PE_MASK)) {
- static int count;
- qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
- count, intno, error_code, is_int,
- env->hflags & HF_CPL_MASK,
- env->segs[R_CS].selector, EIP,
- (int)env->segs[R_CS].base + EIP,
- env->segs[R_SS].selector, ESP);
- if (intno == 0x0e) {
- qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
- } else {
- qemu_log(" EAX=" TARGET_FMT_lx, EAX);
- }
- qemu_log("\n");
- log_cpu_state(env, X86_DUMP_CCOP);
-#if 0
- {
- int i;
- target_ulong ptr;
- qemu_log(" code=");
- ptr = env->segs[R_CS].base + env->eip;
- for(i = 0; i < 16; i++) {
- qemu_log(" %02x", ldub(ptr + i));
- }
- qemu_log("\n");
- }
-#endif
- count++;
- }
- }
- if (env->cr[0] & CR0_PE_MASK) {
-#if !defined(CONFIG_USER_ONLY)
- if (env->hflags & HF_SVMI_MASK)
- handle_even_inj(intno, is_int, error_code, is_hw, 0);
-#endif
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
- } else
-#endif
- {
- do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
- }
- } else {
-#if !defined(CONFIG_USER_ONLY)
- if (env->hflags & HF_SVMI_MASK)
- handle_even_inj(intno, is_int, error_code, is_hw, 1);
-#endif
- do_interrupt_real(intno, is_int, error_code, next_eip);
- }
-
-#if !defined(CONFIG_USER_ONLY)
- if (env->hflags & HF_SVMI_MASK) {
- uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
- }
-#endif
-}
-
-void do_interrupt(CPUX86State *env1)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
-#if defined(CONFIG_USER_ONLY)
- /* if user mode only, we simulate a fake exception
- which will be handled outside the cpu execution
- loop */
- do_interrupt_user(env->exception_index,
- env->exception_is_int,
- env->error_code,
- env->exception_next_eip);
- /* successfully delivered */
- env->old_exception = -1;
-#else
- /* simulate a real cpu exception. On i386, it can
- trigger new exceptions, but we do not handle
- double or triple faults yet. */
- do_interrupt_all(env->exception_index,
- env->exception_is_int,
- env->error_code,
- env->exception_next_eip, 0);
- /* successfully delivered */
- env->old_exception = -1;
-#endif
- env = saved_env;
-}
-
-void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
- do_interrupt_all(intno, 0, 0, 0, is_hw);
- env = saved_env;
-}
-
-/* This should come from sysemu.h - if we could include it here... */
-void qemu_system_reset_request(void);
-
-/*
- * Check nested exceptions and change to double or triple fault if
- * needed. It should only be called if this is not an interrupt.
- * Returns the new exception number.
- */
-static int check_exception(int intno, int *error_code)
-{
- int first_contributory = env->old_exception == 0 ||
- (env->old_exception >= 10 &&
- env->old_exception <= 13);
- int second_contributory = intno == 0 ||
- (intno >= 10 && intno <= 13);
-
- qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
- env->old_exception, intno);
-
-#if !defined(CONFIG_USER_ONLY)
- if (env->old_exception == EXCP08_DBLE) {
- if (env->hflags & HF_SVMI_MASK)
- helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */
-
- qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");
-
- qemu_system_reset_request();
- return EXCP_HLT;
- }
-#endif
-
- if ((first_contributory && second_contributory)
- || (env->old_exception == EXCP0E_PAGE &&
- (second_contributory || (intno == EXCP0E_PAGE)))) {
- intno = EXCP08_DBLE;
- *error_code = 0;
- }
-
- if (second_contributory || (intno == EXCP0E_PAGE) ||
- (intno == EXCP08_DBLE))
- env->old_exception = intno;
-
- return intno;
-}
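check_exception() implements the usual #DF promotion rules: two contributory faults (vector 0 or 10-13) back to back, or a page fault raised while delivering a page fault or contributory fault, become a double fault. A compact restatement of just that decision (hypothetical names, vectors hard-coded):

    /* Hypothetical restatement of the double-fault promotion rule. */
    static int is_contributory(int vec)
    {
        return vec == 0 || (vec >= 10 && vec <= 13);
    }

    static int promotes_to_double_fault(int old_vec, int new_vec)
    {
        if (is_contributory(old_vec) && is_contributory(new_vec)) {
            return 1;
        }
        if (old_vec == 14 && (is_contributory(new_vec) || new_vec == 14)) {
            return 1;   /* fault raised during page-fault delivery */
        }
        return 0;
    }
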
-
-/*
- * Signal an interrupt. It is executed in the main CPU loop.
- * is_int is TRUE if coming from the int instruction. next_eip is the
- * EIP value AFTER the interrupt instruction. It is only relevant if
- * is_int is TRUE.
- */
-static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
- int next_eip_addend)
-{
- if (!is_int) {
- helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
- intno = check_exception(intno, &error_code);
- } else {
- helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
- }
-
- env->exception_index = intno;
- env->error_code = error_code;
- env->exception_is_int = is_int;
- env->exception_next_eip = env->eip + next_eip_addend;
- cpu_loop_exit(env);
-}
-
-/* shortcuts to generate exceptions */
-
-static void QEMU_NORETURN raise_exception_err(int exception_index,
- int error_code)
-{
- raise_interrupt(exception_index, 0, error_code, 0);
-}
-
-void raise_exception_err_env(CPUX86State *nenv, int exception_index,
- int error_code)
-{
- env = nenv;
- raise_interrupt(exception_index, 0, error_code, 0);
-}
-
-static void QEMU_NORETURN raise_exception(int exception_index)
-{
- raise_interrupt(exception_index, 0, 0, 0);
-}
-
-void raise_exception_env(int exception_index, CPUX86State *nenv)
-{
- env = nenv;
- raise_exception(exception_index);
-}
-/* SMM support */
-
-#if defined(CONFIG_USER_ONLY)
-
-void do_smm_enter(CPUX86State *env1)
-{
-}
-
-void helper_rsm(void)
-{
-}
-
-#else
-
-#ifdef TARGET_X86_64
-#define SMM_REVISION_ID 0x00020064
-#else
-#define SMM_REVISION_ID 0x00020000
-#endif
-
-void do_smm_enter(CPUX86State *env1)
-{
- target_ulong sm_state;
- SegmentCache *dt;
- int i, offset;
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
-
- qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
-
- env->hflags |= HF_SMM_MASK;
- cpu_smm_update(env);
-
- sm_state = env->smbase + 0x8000;
-
-#ifdef TARGET_X86_64
- for(i = 0; i < 6; i++) {
- dt = &env->segs[i];
- offset = 0x7e00 + i * 16;
- stw_phys(sm_state + offset, dt->selector);
- stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
- stl_phys(sm_state + offset + 4, dt->limit);
- stq_phys(sm_state + offset + 8, dt->base);
- }
-
- stq_phys(sm_state + 0x7e68, env->gdt.base);
- stl_phys(sm_state + 0x7e64, env->gdt.limit);
-
- stw_phys(sm_state + 0x7e70, env->ldt.selector);
- stq_phys(sm_state + 0x7e78, env->ldt.base);
- stl_phys(sm_state + 0x7e74, env->ldt.limit);
- stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
-
- stq_phys(sm_state + 0x7e88, env->idt.base);
- stl_phys(sm_state + 0x7e84, env->idt.limit);
-
- stw_phys(sm_state + 0x7e90, env->tr.selector);
- stq_phys(sm_state + 0x7e98, env->tr.base);
- stl_phys(sm_state + 0x7e94, env->tr.limit);
- stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
-
- stq_phys(sm_state + 0x7ed0, env->efer);
-
- stq_phys(sm_state + 0x7ff8, EAX);
- stq_phys(sm_state + 0x7ff0, ECX);
- stq_phys(sm_state + 0x7fe8, EDX);
- stq_phys(sm_state + 0x7fe0, EBX);
- stq_phys(sm_state + 0x7fd8, ESP);
- stq_phys(sm_state + 0x7fd0, EBP);
- stq_phys(sm_state + 0x7fc8, ESI);
- stq_phys(sm_state + 0x7fc0, EDI);
- for(i = 8; i < 16; i++)
- stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
- stq_phys(sm_state + 0x7f78, env->eip);
- stl_phys(sm_state + 0x7f70, compute_eflags());
- stl_phys(sm_state + 0x7f68, env->dr[6]);
- stl_phys(sm_state + 0x7f60, env->dr[7]);
-
- stl_phys(sm_state + 0x7f48, env->cr[4]);
- stl_phys(sm_state + 0x7f50, env->cr[3]);
- stl_phys(sm_state + 0x7f58, env->cr[0]);
-
- stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
- stl_phys(sm_state + 0x7f00, env->smbase);
-#else
- stl_phys(sm_state + 0x7ffc, env->cr[0]);
- stl_phys(sm_state + 0x7ff8, env->cr[3]);
- stl_phys(sm_state + 0x7ff4, compute_eflags());
- stl_phys(sm_state + 0x7ff0, env->eip);
- stl_phys(sm_state + 0x7fec, EDI);
- stl_phys(sm_state + 0x7fe8, ESI);
- stl_phys(sm_state + 0x7fe4, EBP);
- stl_phys(sm_state + 0x7fe0, ESP);
- stl_phys(sm_state + 0x7fdc, EBX);
- stl_phys(sm_state + 0x7fd8, EDX);
- stl_phys(sm_state + 0x7fd4, ECX);
- stl_phys(sm_state + 0x7fd0, EAX);
- stl_phys(sm_state + 0x7fcc, env->dr[6]);
- stl_phys(sm_state + 0x7fc8, env->dr[7]);
-
- stl_phys(sm_state + 0x7fc4, env->tr.selector);
- stl_phys(sm_state + 0x7f64, env->tr.base);
- stl_phys(sm_state + 0x7f60, env->tr.limit);
- stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
-
- stl_phys(sm_state + 0x7fc0, env->ldt.selector);
- stl_phys(sm_state + 0x7f80, env->ldt.base);
- stl_phys(sm_state + 0x7f7c, env->ldt.limit);
- stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
-
- stl_phys(sm_state + 0x7f74, env->gdt.base);
- stl_phys(sm_state + 0x7f70, env->gdt.limit);
-
- stl_phys(sm_state + 0x7f58, env->idt.base);
- stl_phys(sm_state + 0x7f54, env->idt.limit);
-
- for(i = 0; i < 6; i++) {
- dt = &env->segs[i];
- if (i < 3)
- offset = 0x7f84 + i * 12;
- else
- offset = 0x7f2c + (i - 3) * 12;
- stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
- stl_phys(sm_state + offset + 8, dt->base);
- stl_phys(sm_state + offset + 4, dt->limit);
- stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
- }
- stl_phys(sm_state + 0x7f14, env->cr[4]);
-
- stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
- stl_phys(sm_state + 0x7ef8, env->smbase);
-#endif
- /* init SMM cpu state */
-
-#ifdef TARGET_X86_64
- cpu_load_efer(env, 0);
-#endif
- load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->eip = 0x00008000;
- cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
- 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
- cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
-
- cpu_x86_update_cr0(env,
- env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
- cpu_x86_update_cr4(env, 0);
- env->dr[7] = 0x00000400;
- CC_OP = CC_OP_EFLAGS;
- env = saved_env;
-}
-
-void helper_rsm(void)
-{
- target_ulong sm_state;
- int i, offset;
- uint32_t val;
-
- sm_state = env->smbase + 0x8000;
-#ifdef TARGET_X86_64
- cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
-
- for(i = 0; i < 6; i++) {
- offset = 0x7e00 + i * 16;
- cpu_x86_load_seg_cache(env, i,
- lduw_phys(sm_state + offset),
- ldq_phys(sm_state + offset + 8),
- ldl_phys(sm_state + offset + 4),
- (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
- }
-
- env->gdt.base = ldq_phys(sm_state + 0x7e68);
- env->gdt.limit = ldl_phys(sm_state + 0x7e64);
-
- env->ldt.selector = lduw_phys(sm_state + 0x7e70);
- env->ldt.base = ldq_phys(sm_state + 0x7e78);
- env->ldt.limit = ldl_phys(sm_state + 0x7e74);
- env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
-
- env->idt.base = ldq_phys(sm_state + 0x7e88);
- env->idt.limit = ldl_phys(sm_state + 0x7e84);
-
- env->tr.selector = lduw_phys(sm_state + 0x7e90);
- env->tr.base = ldq_phys(sm_state + 0x7e98);
- env->tr.limit = ldl_phys(sm_state + 0x7e94);
- env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
-
- EAX = ldq_phys(sm_state + 0x7ff8);
- ECX = ldq_phys(sm_state + 0x7ff0);
- EDX = ldq_phys(sm_state + 0x7fe8);
- EBX = ldq_phys(sm_state + 0x7fe0);
- ESP = ldq_phys(sm_state + 0x7fd8);
- EBP = ldq_phys(sm_state + 0x7fd0);
- ESI = ldq_phys(sm_state + 0x7fc8);
- EDI = ldq_phys(sm_state + 0x7fc0);
- for(i = 8; i < 16; i++)
- env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
- env->eip = ldq_phys(sm_state + 0x7f78);
- load_eflags(ldl_phys(sm_state + 0x7f70),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->dr[6] = ldl_phys(sm_state + 0x7f68);
- env->dr[7] = ldl_phys(sm_state + 0x7f60);
-
- cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
- cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
- cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
-
- val = ldl_phys(sm_state + 0x7efc); /* revision ID */
- if (val & 0x20000) {
- env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
- }
-#else
- cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
- cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
- load_eflags(ldl_phys(sm_state + 0x7ff4),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- env->eip = ldl_phys(sm_state + 0x7ff0);
- EDI = ldl_phys(sm_state + 0x7fec);
- ESI = ldl_phys(sm_state + 0x7fe8);
- EBP = ldl_phys(sm_state + 0x7fe4);
- ESP = ldl_phys(sm_state + 0x7fe0);
- EBX = ldl_phys(sm_state + 0x7fdc);
- EDX = ldl_phys(sm_state + 0x7fd8);
- ECX = ldl_phys(sm_state + 0x7fd4);
- EAX = ldl_phys(sm_state + 0x7fd0);
- env->dr[6] = ldl_phys(sm_state + 0x7fcc);
- env->dr[7] = ldl_phys(sm_state + 0x7fc8);
-
- env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
- env->tr.base = ldl_phys(sm_state + 0x7f64);
- env->tr.limit = ldl_phys(sm_state + 0x7f60);
- env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
-
- env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
- env->ldt.base = ldl_phys(sm_state + 0x7f80);
- env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
- env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
-
- env->gdt.base = ldl_phys(sm_state + 0x7f74);
- env->gdt.limit = ldl_phys(sm_state + 0x7f70);
-
- env->idt.base = ldl_phys(sm_state + 0x7f58);
- env->idt.limit = ldl_phys(sm_state + 0x7f54);
-
- for(i = 0; i < 6; i++) {
- if (i < 3)
- offset = 0x7f84 + i * 12;
- else
- offset = 0x7f2c + (i - 3) * 12;
- cpu_x86_load_seg_cache(env, i,
- ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
- ldl_phys(sm_state + offset + 8),
- ldl_phys(sm_state + offset + 4),
- (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
- }
- cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
-
- val = ldl_phys(sm_state + 0x7efc); /* revision ID */
- if (val & 0x20000) {
- env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
- }
-#endif
- CC_OP = CC_OP_EFLAGS;
- env->hflags &= ~HF_SMM_MASK;
- cpu_smm_update(env);
-
- qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
- log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
-}
-
-#endif /* !CONFIG_USER_ONLY */
-
-
-/* division, flags are undefined */
-
-void helper_divb_AL(target_ulong t0)
-{
- unsigned int num, den, q, r;
-
- num = (EAX & 0xffff);
- den = (t0 & 0xff);
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- if (q > 0xff)
- raise_exception(EXCP00_DIVZ);
- q &= 0xff;
- r = (num % den) & 0xff;
- EAX = (EAX & ~0xffff) | (r << 8) | q;
-}
-
-void helper_idivb_AL(target_ulong t0)
-{
- int num, den, q, r;
-
- num = (int16_t)EAX;
- den = (int8_t)t0;
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- if (q != (int8_t)q)
- raise_exception(EXCP00_DIVZ);
- q &= 0xff;
- r = (num % den) & 0xff;
- EAX = (EAX & ~0xffff) | (r << 8) | q;
-}
-
-void helper_divw_AX(target_ulong t0)
-{
- unsigned int num, den, q, r;
-
- num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
- den = (t0 & 0xffff);
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- if (q > 0xffff)
- raise_exception(EXCP00_DIVZ);
- q &= 0xffff;
- r = (num % den) & 0xffff;
- EAX = (EAX & ~0xffff) | q;
- EDX = (EDX & ~0xffff) | r;
-}
-
-void helper_idivw_AX(target_ulong t0)
-{
- int num, den, q, r;
-
- num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
- den = (int16_t)t0;
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- if (q != (int16_t)q)
- raise_exception(EXCP00_DIVZ);
- q &= 0xffff;
- r = (num % den) & 0xffff;
- EAX = (EAX & ~0xffff) | q;
- EDX = (EDX & ~0xffff) | r;
-}
-
-void helper_divl_EAX(target_ulong t0)
-{
- unsigned int den, r;
- uint64_t num, q;
-
- num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
- den = t0;
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- r = (num % den);
- if (q > 0xffffffff)
- raise_exception(EXCP00_DIVZ);
- EAX = (uint32_t)q;
- EDX = (uint32_t)r;
-}
-
-void helper_idivl_EAX(target_ulong t0)
-{
- int den, r;
- int64_t num, q;
-
- num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
- den = t0;
- if (den == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- q = (num / den);
- r = (num % den);
- if (q != (int32_t)q)
- raise_exception(EXCP00_DIVZ);
- EAX = (uint32_t)q;
- EDX = (uint32_t)r;
-}
-
-/* bcd */
-
-/* XXX: exception */
-void helper_aam(int base)
-{
- int al, ah;
- al = EAX & 0xff;
- ah = al / base;
- al = al % base;
- EAX = (EAX & ~0xffff) | al | (ah << 8);
- CC_DST = al;
-}
-
-void helper_aad(int base)
-{
- int al, ah;
- al = EAX & 0xff;
- ah = (EAX >> 8) & 0xff;
- al = ((ah * base) + al) & 0xff;
- EAX = (EAX & ~0xffff) | al;
- CC_DST = al;
-}
-
-void helper_aaa(void)
-{
- int icarry;
- int al, ah, af;
- int eflags;
-
- eflags = helper_cc_compute_all(CC_OP);
- af = eflags & CC_A;
- al = EAX & 0xff;
- ah = (EAX >> 8) & 0xff;
-
- icarry = (al > 0xf9);
- if (((al & 0x0f) > 9 ) || af) {
- al = (al + 6) & 0x0f;
- ah = (ah + 1 + icarry) & 0xff;
- eflags |= CC_C | CC_A;
- } else {
- eflags &= ~(CC_C | CC_A);
- al &= 0x0f;
- }
- EAX = (EAX & ~0xffff) | al | (ah << 8);
- CC_SRC = eflags;
-}
-
-void helper_aas(void)
-{
- int icarry;
- int al, ah, af;
- int eflags;
-
- eflags = helper_cc_compute_all(CC_OP);
- af = eflags & CC_A;
- al = EAX & 0xff;
- ah = (EAX >> 8) & 0xff;
-
- icarry = (al < 6);
- if (((al & 0x0f) > 9) || af) {
- al = (al - 6) & 0x0f;
- ah = (ah - 1 - icarry) & 0xff;
- eflags |= CC_C | CC_A;
- } else {
- eflags &= ~(CC_C | CC_A);
- al &= 0x0f;
- }
- EAX = (EAX & ~0xffff) | al | (ah << 8);
- CC_SRC = eflags;
-}
-
-void helper_daa(void)
-{
- int old_al, al, af, cf;
- int eflags;
-
- eflags = helper_cc_compute_all(CC_OP);
- cf = eflags & CC_C;
- af = eflags & CC_A;
- old_al = al = EAX & 0xff;
-
- eflags = 0;
- if (((al & 0x0f) > 9) || af) {
- al = (al + 6) & 0xff;
- eflags |= CC_A;
- }
- if ((old_al > 0x99) || cf) {
- al = (al + 0x60) & 0xff;
- eflags |= CC_C;
- }
- EAX = (EAX & ~0xff) | al;
- /* well, speed is not an issue here, so we compute the flags by hand */
- eflags |= (al == 0) << 6; /* zf */
- eflags |= parity_table[al]; /* pf */
- eflags |= (al & 0x80); /* sf */
- CC_SRC = eflags;
-}
-
-void helper_das(void)
-{
- int al, al1, af, cf;
- int eflags;
-
- eflags = helper_cc_compute_all(CC_OP);
- cf = eflags & CC_C;
- af = eflags & CC_A;
- al = EAX & 0xff;
-
- eflags = 0;
- al1 = al;
- if (((al & 0x0f) > 9) || af) {
- eflags |= CC_A;
- if (al < 6 || cf)
- eflags |= CC_C;
- al = (al - 6) & 0xff;
- }
- if ((al1 > 0x99) || cf) {
- al = (al - 0x60) & 0xff;
- eflags |= CC_C;
- }
- EAX = (EAX & ~0xff) | al;
- /* well, speed is not an issue here, so we compute the flags by hand */
- eflags |= (al == 0) << 6; /* zf */
- eflags |= parity_table[al]; /* pf */
- eflags |= (al & 0x80); /* sf */
- CC_SRC = eflags;
-}
-
-void helper_into(int next_eip_addend)
-{
- int eflags;
- eflags = helper_cc_compute_all(CC_OP);
- if (eflags & CC_O) {
- raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
- }
-}
-
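-/* CMPXCHG8B: compare EDX:EAX with the 64-bit memory operand; on a match,
- store ECX:EBX and set ZF, otherwise load the memory value into EDX:EAX and
- clear ZF. The store is always performed, as on real hardware. */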
-void helper_cmpxchg8b(target_ulong a0)
-{
- uint64_t d;
- int eflags;
-
- eflags = helper_cc_compute_all(CC_OP);
- d = ldq(a0);
- if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
- stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
- eflags |= CC_Z;
- } else {
- /* always do the store */
- stq(a0, d);
- EDX = (uint32_t)(d >> 32);
- EAX = (uint32_t)d;
- eflags &= ~CC_Z;
- }
- CC_SRC = eflags;
-}
-
-#ifdef TARGET_X86_64
-void helper_cmpxchg16b(target_ulong a0)
-{
- uint64_t d0, d1;
- int eflags;
-
- if ((a0 & 0xf) != 0)
- raise_exception(EXCP0D_GPF);
- eflags = helper_cc_compute_all(CC_OP);
- d0 = ldq(a0);
- d1 = ldq(a0 + 8);
- if (d0 == EAX && d1 == EDX) {
- stq(a0, EBX);
- stq(a0 + 8, ECX);
- eflags |= CC_Z;
- } else {
- /* always do the store */
- stq(a0, d0);
- stq(a0 + 8, d1);
- EDX = d1;
- EAX = d0;
- eflags &= ~CC_Z;
- }
- CC_SRC = eflags;
-}
-#endif
-
-void helper_single_step(void)
-{
-#ifndef CONFIG_USER_ONLY
- check_hw_breakpoints(env, 1);
- env->dr[6] |= DR6_BS;
-#endif
- raise_exception(EXCP01_DB);
-}
-
-void helper_cpuid(void)
-{
- uint32_t eax, ebx, ecx, edx;
-
- helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);
-
- cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
- EAX = eax;
- EBX = ebx;
- ECX = ecx;
- EDX = edx;
-}
-
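-/* ENTER with a non-zero nesting level: copy level-1 frame pointers from the
- old frame (addressed through EBP) onto the new stack, then push the frame
- temporary t1. ESP and EBP themselves are left for the caller to update. */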
-void helper_enter_level(int level, int data32, target_ulong t1)
-{
- target_ulong ssp;
- uint32_t esp_mask, esp, ebp;
-
- esp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- ebp = EBP;
- esp = ESP;
- if (data32) {
- /* 32 bit */
- esp -= 4;
- while (--level) {
- esp -= 4;
- ebp -= 4;
- stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
- }
- esp -= 4;
- stl(ssp + (esp & esp_mask), t1);
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
- }
- esp -= 2;
- stw(ssp + (esp & esp_mask), t1);
- }
-}
-
-#ifdef TARGET_X86_64
-void helper_enter64_level(int level, int data64, target_ulong t1)
-{
- target_ulong esp, ebp;
- ebp = EBP;
- esp = ESP;
-
- if (data64) {
- /* 64 bit */
- esp -= 8;
- while (--level) {
- esp -= 8;
- ebp -= 8;
- stq(esp, ldq(ebp));
- }
- esp -= 8;
- stq(esp, t1);
- } else {
- /* 16 bit */
- esp -= 2;
- while (--level) {
- esp -= 2;
- ebp -= 2;
- stw(esp, lduw(ebp));
- }
- esp -= 2;
- stw(esp, t1);
- }
-}
-#endif
-
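-/* LLDT: load the LDT register from a GDT descriptor. A null selector leaves
- an empty LDT; a selector with TI set or a non-LDT descriptor raises #GP, a
- not-present descriptor raises #NP. In long mode the descriptor is 16 bytes,
- so the upper half of the base comes from the third word. */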
-void helper_lldt(int selector)
-{
- SegmentCache *dt;
- uint32_t e1, e2;
- int index, entry_limit;
- target_ulong ptr;
-
- selector &= 0xffff;
- if ((selector & 0xfffc) == 0) {
- /* XXX: NULL selector case: invalid LDT */
- env->ldt.base = 0;
- env->ldt.limit = 0;
- } else {
- if (selector & 0x4)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- dt = &env->gdt;
- index = selector & ~7;
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK)
- entry_limit = 15;
- else
-#endif
- entry_limit = 7;
- if ((index + entry_limit) > dt->limit)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- ptr = dt->base + index;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
- if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- uint32_t e3;
- e3 = ldl_kernel(ptr + 8);
- load_seg_cache_raw_dt(&env->ldt, e1, e2);
- env->ldt.base |= (target_ulong)e3 << 32;
- } else
-#endif
- {
- load_seg_cache_raw_dt(&env->ldt, e1, e2);
- }
- }
- env->ldt.selector = selector;
-}
-
-void helper_ltr(int selector)
-{
- SegmentCache *dt;
- uint32_t e1, e2;
- int index, type, entry_limit;
- target_ulong ptr;
-
- selector &= 0xffff;
- if ((selector & 0xfffc) == 0) {
- /* NULL selector case: invalid TR */
- env->tr.base = 0;
- env->tr.limit = 0;
- env->tr.flags = 0;
- } else {
- if (selector & 0x4)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- dt = &env->gdt;
- index = selector & ~7;
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK)
- entry_limit = 15;
- else
-#endif
- entry_limit = 7;
- if ((index + entry_limit) > dt->limit)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- ptr = dt->base + index;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- if ((e2 & DESC_S_MASK) ||
- (type != 1 && type != 9))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- uint32_t e3, e4;
- e3 = ldl_kernel(ptr + 8);
- e4 = ldl_kernel(ptr + 12);
- if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- load_seg_cache_raw_dt(&env->tr, e1, e2);
- env->tr.base |= (target_ulong)e3 << 32;
- } else
-#endif
- {
- load_seg_cache_raw_dt(&env->tr, e1, e2);
- }
- e2 |= DESC_TSS_BUSY_MASK;
- stl_kernel(ptr + 4, e2);
- }
- env->tr.selector = selector;
-}
-
-/* only works if protected mode and not VM86. seg_reg must be != R_CS */
-void helper_load_seg(int seg_reg, int selector)
-{
- uint32_t e1, e2;
- int cpl, dpl, rpl;
- SegmentCache *dt;
- int index;
- target_ulong ptr;
-
- selector &= 0xffff;
- cpl = env->hflags & HF_CPL_MASK;
- if ((selector & 0xfffc) == 0) {
- /* null selector case */
- if (seg_reg == R_SS
-#ifdef TARGET_X86_64
- && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
-#endif
- )
- raise_exception_err(EXCP0D_GPF, 0);
- cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
- } else {
-
- if (selector & 0x4)
- dt = &env->ldt;
- else
- dt = &env->gdt;
- index = selector & ~7;
- if ((index + 7) > dt->limit)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- ptr = dt->base + index;
- e1 = ldl_kernel(ptr);
- e2 = ldl_kernel(ptr + 4);
-
- if (!(e2 & DESC_S_MASK))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (seg_reg == R_SS) {
- /* must be writable segment */
- if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (rpl != cpl || dpl != cpl)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- } else {
- /* must be readable segment */
- if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
-
- if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
- /* if not conforming code, test rights */
- if (dpl < cpl || dpl < rpl)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- }
- }
-
- if (!(e2 & DESC_P_MASK)) {
- if (seg_reg == R_SS)
- raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
- else
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
- }
-
- /* set the access bit if not already set */
- if (!(e2 & DESC_A_MASK)) {
- e2 |= DESC_A_MASK;
- stl_kernel(ptr + 4, e2);
- }
-
- cpu_x86_load_seg_cache(env, seg_reg, selector,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
-#if 0
- qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
- selector, (unsigned long)sc->base, sc->limit, sc->flags);
-#endif
- }
-}
-
-/* protected mode jump */
-void helper_ljmp_protected(int new_cs, target_ulong new_eip,
- int next_eip_addend)
-{
- int gate_cs, type;
- uint32_t e1, e2, cpl, dpl, rpl, limit;
- target_ulong next_eip;
-
- if ((new_cs & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, 0);
- if (load_segment(&e1, &e2, new_cs) != 0)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->hflags & HF_CPL_MASK;
- if (e2 & DESC_S_MASK) {
- if (!(e2 & DESC_CS_MASK))
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (e2 & DESC_C_MASK) {
- /* conforming code segment */
- if (dpl > cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- } else {
- /* non conforming code segment */
- rpl = new_cs & 3;
- if (rpl > cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- if (dpl != cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- }
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
- limit = get_seg_limit(e1, e2);
- if (new_eip > limit &&
- !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
- get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
- } else {
- /* jump to call or task gate */
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- rpl = new_cs & 3;
- cpl = env->hflags & HF_CPL_MASK;
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- switch(type) {
- case 1: /* 286 TSS */
- case 9: /* 386 TSS */
- case 5: /* task gate */
- if (dpl < cpl || dpl < rpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- next_eip = env->eip + next_eip_addend;
- switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
- CC_OP = CC_OP_EFLAGS;
- break;
- case 4: /* 286 call gate */
- case 12: /* 386 call gate */
- if ((dpl < cpl) || (dpl < rpl))
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
- gate_cs = e1 >> 16;
- new_eip = (e1 & 0xffff);
- if (type == 12)
- new_eip |= (e2 & 0xffff0000);
- if (load_segment(&e1, &e2, gate_cs) != 0)
- raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- /* must be code segment */
- if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
- (DESC_S_MASK | DESC_CS_MASK)))
- raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
- if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
- (!(e2 & DESC_C_MASK) && (dpl != cpl)))
- raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
- limit = get_seg_limit(e1, e2);
- if (new_eip > limit)
- raise_exception_err(EXCP0D_GPF, 0);
- cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
- get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
- break;
- default:
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- break;
- }
- }
-}
-
-/* real mode call */
-void helper_lcall_real(int new_cs, target_ulong new_eip1,
- int shift, int next_eip)
-{
- int new_eip;
- uint32_t esp, esp_mask;
- target_ulong ssp;
-
- new_eip = new_eip1;
- esp = ESP;
- esp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- if (shift) {
- PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, esp, esp_mask, next_eip);
- } else {
- PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, esp, esp_mask, next_eip);
- }
-
- SET_ESP(esp, esp_mask);
- env->eip = new_eip;
- env->segs[R_CS].selector = new_cs;
- env->segs[R_CS].base = (new_cs << 4);
-}
-
-/* protected mode call */
-void helper_lcall_protected(int new_cs, target_ulong new_eip,
- int shift, int next_eip_addend)
-{
- int new_stack, i;
- uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
- uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
- uint32_t val, limit, old_sp_mask;
- target_ulong ssp, old_ssp, next_eip;
-
- next_eip = env->eip + next_eip_addend;
- LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
- LOG_PCALL_STATE(env);
- if ((new_cs & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, 0);
- if (load_segment(&e1, &e2, new_cs) != 0)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->hflags & HF_CPL_MASK;
- LOG_PCALL("desc=%08x:%08x\n", e1, e2);
- if (e2 & DESC_S_MASK) {
- if (!(e2 & DESC_CS_MASK))
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (e2 & DESC_C_MASK) {
- /* conforming code segment */
- if (dpl > cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- } else {
- /* non conforming code segment */
- rpl = new_cs & 3;
- if (rpl > cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- if (dpl != cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- }
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-
-#ifdef TARGET_X86_64
- /* XXX: check 16/32 bit cases in long mode */
- if (shift == 2) {
- target_ulong rsp;
- /* 64 bit case */
- rsp = ESP;
- PUSHQ(rsp, env->segs[R_CS].selector);
- PUSHQ(rsp, next_eip);
- /* from this point, not restartable */
- ESP = rsp;
- cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2), e2);
- EIP = new_eip;
- } else
-#endif
- {
- sp = ESP;
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, sp, sp_mask, next_eip);
- } else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, sp, sp_mask, next_eip);
- }
-
- limit = get_seg_limit(e1, e2);
- if (new_eip > limit)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- /* from this point, not restartable */
- SET_ESP(sp, sp_mask);
- cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
- get_seg_base(e1, e2), limit, e2);
- EIP = new_eip;
- }
- } else {
- /* check gate type */
- type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- rpl = new_cs & 3;
- switch(type) {
- case 1: /* available 286 TSS */
- case 9: /* available 386 TSS */
- case 5: /* task gate */
- if (dpl < cpl || dpl < rpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
- CC_OP = CC_OP_EFLAGS;
- return;
- case 4: /* 286 call gate */
- case 12: /* 386 call gate */
- break;
- default:
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- break;
- }
- shift = type >> 3;
-
- if (dpl < cpl || dpl < rpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- /* check valid bit */
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
- selector = e1 >> 16;
- offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
- param_count = e2 & 0x1f;
- if ((selector & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, 0);
-
- if (load_segment(&e1, &e2, selector) != 0)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl > cpl)
- raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-
- if (!(e2 & DESC_C_MASK) && dpl < cpl) {
- /* to inner privilege */
- get_ss_esp_from_tss(&ss, &sp, dpl);
- LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
- ss, sp, param_count, ESP);
- if ((ss & 0xfffc) == 0)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if ((ss & 3) != dpl)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (load_segment(&ss_e1, &ss_e2, ss) != 0)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
- if (ss_dpl != dpl)
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (!(ss_e2 & DESC_S_MASK) ||
- (ss_e2 & DESC_CS_MASK) ||
- !(ss_e2 & DESC_W_MASK))
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
- if (!(ss_e2 & DESC_P_MASK))
- raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
-
- // push_size = ((param_count * 2) + 8) << shift;
-
- old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
- old_ssp = env->segs[R_SS].base;
-
- sp_mask = get_sp_mask(ss_e2);
- ssp = get_seg_base(ss_e1, ss_e2);
- if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHL(ssp, sp, sp_mask, ESP);
- for(i = param_count - 1; i >= 0; i--) {
- val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
- PUSHL(ssp, sp, sp_mask, val);
- }
- } else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
- PUSHW(ssp, sp, sp_mask, ESP);
- for(i = param_count - 1; i >= 0; i--) {
- val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
- PUSHW(ssp, sp, sp_mask, val);
- }
- }
- new_stack = 1;
- } else {
- /* to same privilege */
- sp = ESP;
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- ssp = env->segs[R_SS].base;
- // push_size = (4 << shift);
- new_stack = 0;
- }
-
- if (shift) {
- PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHL(ssp, sp, sp_mask, next_eip);
- } else {
- PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
- PUSHW(ssp, sp, sp_mask, next_eip);
- }
-
- /* from this point, not restartable */
-
- if (new_stack) {
- ss = (ss & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_SS, ss,
- ssp,
- get_seg_limit(ss_e1, ss_e2),
- ss_e2);
- }
-
- selector = (selector & ~3) | dpl;
- cpu_x86_load_seg_cache(env, R_CS, selector,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- cpu_x86_set_cpl(env, dpl);
- SET_ESP(sp, sp_mask);
- EIP = offset;
- }
-}
-
-/* real and vm86 mode iret */
-void helper_iret_real(int shift)
-{
- uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
- target_ulong ssp;
- int eflags_mask;
-
- sp_mask = 0xffff; /* XXXX: use SS segment size ? */
- sp = ESP;
- ssp = env->segs[R_SS].base;
- if (shift == 1) {
- /* 32 bits */
- POPL(ssp, sp, sp_mask, new_eip);
- POPL(ssp, sp, sp_mask, new_cs);
- new_cs &= 0xffff;
- POPL(ssp, sp, sp_mask, new_eflags);
- } else {
- /* 16 bits */
- POPW(ssp, sp, sp_mask, new_eip);
- POPW(ssp, sp, sp_mask, new_cs);
- POPW(ssp, sp, sp_mask, new_eflags);
- }
- ESP = (ESP & ~sp_mask) | (sp & sp_mask);
- env->segs[R_CS].selector = new_cs;
- env->segs[R_CS].base = (new_cs << 4);
- env->eip = new_eip;
- if (env->eflags & VM_MASK)
- eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
- else
- eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
- if (shift == 0)
- eflags_mask &= 0xffff;
- load_eflags(new_eflags, eflags_mask);
- env->hflags2 &= ~HF2_NMI_MASK;
-}
-
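-/* Called when returning to an outer privilege level: data and non-conforming
- code segment registers whose DPL is below the new CPL are cleared; FS/GS
- holding a null selector are left untouched (see the note inside). */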
-static inline void validate_seg(int seg_reg, int cpl)
-{
- int dpl;
- uint32_t e2;
-
- /* XXX: on x86_64, we do not want to nullify FS and GS because
- they may still contain a valid base. I would be interested to
- know how a real x86_64 CPU behaves */
- if ((seg_reg == R_FS || seg_reg == R_GS) &&
- (env->segs[seg_reg].selector & 0xfffc) == 0)
- return;
-
- e2 = env->segs[seg_reg].flags;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
- /* data or non conforming code segment */
- if (dpl < cpl) {
- cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
- }
- }
-}
-
-/* protected mode return: common code for far RET and IRET (is_iret selects IRET semantics) */
-static inline void helper_ret_protected(int shift, int is_iret, int addend)
-{
- uint32_t new_cs, new_eflags, new_ss;
- uint32_t new_es, new_ds, new_fs, new_gs;
- uint32_t e1, e2, ss_e1, ss_e2;
- int cpl, dpl, rpl, eflags_mask, iopl;
- target_ulong ssp, sp, new_eip, new_esp, sp_mask;
-
-#ifdef TARGET_X86_64
- if (shift == 2)
- sp_mask = -1;
- else
-#endif
- sp_mask = get_sp_mask(env->segs[R_SS].flags);
- sp = ESP;
- ssp = env->segs[R_SS].base;
- new_eflags = 0; /* avoid warning */
-#ifdef TARGET_X86_64
- if (shift == 2) {
- POPQ(sp, new_eip);
- POPQ(sp, new_cs);
- new_cs &= 0xffff;
- if (is_iret) {
- POPQ(sp, new_eflags);
- }
- } else
-#endif
- if (shift == 1) {
- /* 32 bits */
- POPL(ssp, sp, sp_mask, new_eip);
- POPL(ssp, sp, sp_mask, new_cs);
- new_cs &= 0xffff;
- if (is_iret) {
- POPL(ssp, sp, sp_mask, new_eflags);
- if (new_eflags & VM_MASK)
- goto return_to_vm86;
- }
- } else {
- /* 16 bits */
- POPW(ssp, sp, sp_mask, new_eip);
- POPW(ssp, sp, sp_mask, new_cs);
- if (is_iret)
- POPW(ssp, sp, sp_mask, new_eflags);
- }
- LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
- new_cs, new_eip, shift, addend);
- LOG_PCALL_STATE(env);
- if ((new_cs & 0xfffc) == 0)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- if (load_segment(&e1, &e2, new_cs) != 0)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- if (!(e2 & DESC_S_MASK) ||
- !(e2 & DESC_CS_MASK))
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- cpl = env->hflags & HF_CPL_MASK;
- rpl = new_cs & 3;
- if (rpl < cpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- if (e2 & DESC_C_MASK) {
- if (dpl > rpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- } else {
- if (dpl != rpl)
- raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
- }
- if (!(e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
-
- sp += addend;
- if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
- ((env->hflags & HF_CS64_MASK) && !is_iret))) {
- /* return to same privilege level */
- cpu_x86_load_seg_cache(env, R_CS, new_cs,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- } else {
- /* return to different privilege level */
-#ifdef TARGET_X86_64
- if (shift == 2) {
- POPQ(sp, new_esp);
- POPQ(sp, new_ss);
- new_ss &= 0xffff;
- } else
-#endif
- if (shift == 1) {
- /* 32 bits */
- POPL(ssp, sp, sp_mask, new_esp);
- POPL(ssp, sp, sp_mask, new_ss);
- new_ss &= 0xffff;
- } else {
- /* 16 bits */
- POPW(ssp, sp, sp_mask, new_esp);
- POPW(ssp, sp, sp_mask, new_ss);
- }
- LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
- new_ss, new_esp);
- if ((new_ss & 0xfffc) == 0) {
-#ifdef TARGET_X86_64
- /* NULL ss is allowed in long mode if cpl != 3*/
- /* XXX: test CS64 ? */
- if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
- cpu_x86_load_seg_cache(env, R_SS, new_ss,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
- DESC_W_MASK | DESC_A_MASK);
- ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
- } else
-#endif
- {
- raise_exception_err(EXCP0D_GPF, 0);
- }
- } else {
- if ((new_ss & 3) != rpl)
- raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
- if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
- raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
- if (!(ss_e2 & DESC_S_MASK) ||
- (ss_e2 & DESC_CS_MASK) ||
- !(ss_e2 & DESC_W_MASK))
- raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
- dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
- if (dpl != rpl)
- raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
- if (!(ss_e2 & DESC_P_MASK))
- raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
- cpu_x86_load_seg_cache(env, R_SS, new_ss,
- get_seg_base(ss_e1, ss_e2),
- get_seg_limit(ss_e1, ss_e2),
- ss_e2);
- }
-
- cpu_x86_load_seg_cache(env, R_CS, new_cs,
- get_seg_base(e1, e2),
- get_seg_limit(e1, e2),
- e2);
- cpu_x86_set_cpl(env, rpl);
- sp = new_esp;
-#ifdef TARGET_X86_64
- if (env->hflags & HF_CS64_MASK)
- sp_mask = -1;
- else
-#endif
- sp_mask = get_sp_mask(ss_e2);
-
- /* validate data segments */
- validate_seg(R_ES, rpl);
- validate_seg(R_DS, rpl);
- validate_seg(R_FS, rpl);
- validate_seg(R_GS, rpl);
-
- sp += addend;
- }
- SET_ESP(sp, sp_mask);
- env->eip = new_eip;
- if (is_iret) {
- /* NOTE: 'cpl' is the _old_ CPL */
- eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
- if (cpl == 0)
- eflags_mask |= IOPL_MASK;
- iopl = (env->eflags >> IOPL_SHIFT) & 3;
- if (cpl <= iopl)
- eflags_mask |= IF_MASK;
- if (shift == 0)
- eflags_mask &= 0xffff;
- load_eflags(new_eflags, eflags_mask);
- }
- return;
-
- return_to_vm86:
- POPL(ssp, sp, sp_mask, new_esp);
- POPL(ssp, sp, sp_mask, new_ss);
- POPL(ssp, sp, sp_mask, new_es);
- POPL(ssp, sp, sp_mask, new_ds);
- POPL(ssp, sp, sp_mask, new_fs);
- POPL(ssp, sp, sp_mask, new_gs);
-
- /* modify processor state */
- load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
- IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
- load_seg_vm(R_CS, new_cs & 0xffff);
- cpu_x86_set_cpl(env, 3);
- load_seg_vm(R_SS, new_ss & 0xffff);
- load_seg_vm(R_ES, new_es & 0xffff);
- load_seg_vm(R_DS, new_ds & 0xffff);
- load_seg_vm(R_FS, new_fs & 0xffff);
- load_seg_vm(R_GS, new_gs & 0xffff);
-
- env->eip = new_eip & 0xffff;
- ESP = new_esp;
-}
-
-void helper_iret_protected(int shift, int next_eip)
-{
- int tss_selector, type;
- uint32_t e1, e2;
-
- /* specific case for TSS */
- if (env->eflags & NT_MASK) {
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK)
- raise_exception_err(EXCP0D_GPF, 0);
-#endif
- tss_selector = lduw_kernel(env->tr.base + 0);
- if (tss_selector & 4)
- raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- if (load_segment(&e1, &e2, tss_selector) != 0)
- raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
- /* NOTE: we check both segment and busy TSS */
- if (type != 3)
- raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
- switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
- } else {
- helper_ret_protected(shift, 1, 0);
- }
- env->hflags2 &= ~HF2_NMI_MASK;
-}
-
-void helper_lret_protected(int shift, int addend)
-{
- helper_ret_protected(shift, 0, addend);
-}
-
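-/* SYSENTER/SYSEXIT use flat segments derived from MSR_IA32_SYSENTER_CS:
- SYSENTER loads CS from it and SS from it + 8, then jumps to
- SYSENTER_EIP/ESP at CPL 0; SYSEXIT returns to CPL 3 with CS/SS taken from
- +16/+24 (legacy) or +32/+40 (64-bit) and the new EIP/ESP from EDX/ECX. */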
-void helper_sysenter(void)
-{
- if (env->sysenter_cs == 0) {
- raise_exception_err(EXCP0D_GPF, 0);
- }
- env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
- cpu_x86_set_cpl(env, 0);
-
-#ifdef TARGET_X86_64
- if (env->hflags & HF_LMA_MASK) {
- cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
- } else
-#endif
- {
- cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- }
- cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK |
- DESC_W_MASK | DESC_A_MASK);
- ESP = env->sysenter_esp;
- EIP = env->sysenter_eip;
-}
-
-void helper_sysexit(int dflag)
-{
- int cpl;
-
- cpl = env->hflags & HF_CPL_MASK;
- if (env->sysenter_cs == 0 || cpl != 0) {
- raise_exception_err(EXCP0D_GPF, 0);
- }
- cpu_x86_set_cpl(env, 3);
-#ifdef TARGET_X86_64
- if (dflag == 2) {
- cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
- cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_W_MASK | DESC_A_MASK);
- } else
-#endif
- {
- cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
- cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
- 0, 0xffffffff,
- DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
- DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
- DESC_W_MASK | DESC_A_MASK);
- }
- ESP = ECX;
- EIP = EDX;
-}
-
-#if defined(CONFIG_USER_ONLY)
-target_ulong helper_read_crN(int reg)
-{
- return 0;
-}
-
-void helper_write_crN(int reg, target_ulong t0)
-{
-}
-
-void helper_movl_drN_T0(int reg, target_ulong t0)
-{
-}
-#else
-target_ulong helper_read_crN(int reg)
-{
- target_ulong val;
-
- helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
- switch(reg) {
- default:
- val = env->cr[reg];
- break;
- case 8:
- if (!(env->hflags2 & HF2_VINTR_MASK)) {
- val = cpu_get_apic_tpr(env->apic_state);
- } else {
- val = env->v_tpr;
- }
- break;
- }
- return val;
-}
-
-void helper_write_crN(int reg, target_ulong t0)
-{
- helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
- switch(reg) {
- case 0:
- cpu_x86_update_cr0(env, t0);
- break;
- case 3:
- cpu_x86_update_cr3(env, t0);
- break;
- case 4:
- cpu_x86_update_cr4(env, t0);
- break;
- case 8:
- if (!(env->hflags2 & HF2_VINTR_MASK)) {
- cpu_set_apic_tpr(env->apic_state, t0);
- }
- env->v_tpr = t0 & 0x0f;
- break;
- default:
- env->cr[reg] = t0;
- break;
- }
-}
-
-void helper_movl_drN_T0(int reg, target_ulong t0)
-{
- int i;
-
- if (reg < 4) {
- hw_breakpoint_remove(env, reg);
- env->dr[reg] = t0;
- hw_breakpoint_insert(env, reg);
- } else if (reg == 7) {
- for (i = 0; i < 4; i++)
- hw_breakpoint_remove(env, i);
- env->dr[7] = t0;
- for (i = 0; i < 4; i++)
- hw_breakpoint_insert(env, i);
- } else
- env->dr[reg] = t0;
-}
-#endif
-
-void helper_lmsw(target_ulong t0)
-{
- /* Only the 4 lower bits of CR0 are modified; PE cannot be cleared
- once it has been set. */
- t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
- helper_write_crN(0, t0);
-}
-
-void helper_clts(void)
-{
- env->cr[0] &= ~CR0_TS_MASK;
- env->hflags &= ~HF_TS_MASK;
-}
-
-void helper_invlpg(target_ulong addr)
-{
- helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
- tlb_flush_page(env, addr);
-}
-
-void helper_rdtsc(void)
-{
- uint64_t val;
-
- if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
- raise_exception(EXCP0D_GPF);
- }
- helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);
-
- val = cpu_get_tsc(env) + env->tsc_offset;
- EAX = (uint32_t)(val);
- EDX = (uint32_t)(val >> 32);
-}
-
-void helper_rdtscp(void)
-{
- helper_rdtsc();
- ECX = (uint32_t)(env->tsc_aux);
-}
-
-void helper_rdpmc(void)
-{
- if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
- raise_exception(EXCP0D_GPF);
- }
- helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
-
- /* currently unimplemented */
- qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
- raise_exception_err(EXCP06_ILLOP, 0);
-}
-
-#if defined(CONFIG_USER_ONLY)
-void helper_wrmsr(void)
-{
-}
-
-void helper_rdmsr(void)
-{
-}
-#else
-void helper_wrmsr(void)
-{
- uint64_t val;
-
- helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
-
- val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
-
- switch((uint32_t)ECX) {
- case MSR_IA32_SYSENTER_CS:
- env->sysenter_cs = val & 0xffff;
- break;
- case MSR_IA32_SYSENTER_ESP:
- env->sysenter_esp = val;
- break;
- case MSR_IA32_SYSENTER_EIP:
- env->sysenter_eip = val;
- break;
- case MSR_IA32_APICBASE:
- cpu_set_apic_base(env->apic_state, val);
- break;
- case MSR_EFER:
- {
- uint64_t update_mask;
- update_mask = 0;
- if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
- update_mask |= MSR_EFER_SCE;
- if (env->cpuid_ext2_features & CPUID_EXT2_LM)
- update_mask |= MSR_EFER_LME;
- if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
- update_mask |= MSR_EFER_FFXSR;
- if (env->cpuid_ext2_features & CPUID_EXT2_NX)
- update_mask |= MSR_EFER_NXE;
- if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
- update_mask |= MSR_EFER_SVME;
- if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
- update_mask |= MSR_EFER_FFXSR;
- cpu_load_efer(env, (env->efer & ~update_mask) |
- (val & update_mask));
- }
- break;
- case MSR_STAR:
- env->star = val;
- break;
- case MSR_PAT:
- env->pat = val;
- break;
- case MSR_VM_HSAVE_PA:
- env->vm_hsave = val;
- break;
-#ifdef TARGET_X86_64
- case MSR_LSTAR:
- env->lstar = val;
- break;
- case MSR_CSTAR:
- env->cstar = val;
- break;
- case MSR_FMASK:
- env->fmask = val;
- break;
- case MSR_FSBASE:
- env->segs[R_FS].base = val;
- break;
- case MSR_GSBASE:
- env->segs[R_GS].base = val;
- break;
- case MSR_KERNELGSBASE:
- env->kernelgsbase = val;
- break;
-#endif
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
- break;
- case MSR_MTRRfix64K_00000:
- env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
- break;
- case MSR_MTRRdefType:
- env->mtrr_deftype = val;
- break;
- case MSR_MCG_STATUS:
- env->mcg_status = val;
- break;
- case MSR_MCG_CTL:
- if ((env->mcg_cap & MCG_CTL_P)
- && (val == 0 || val == ~(uint64_t)0))
- env->mcg_ctl = val;
- break;
- case MSR_TSC_AUX:
- env->tsc_aux = val;
- break;
- case MSR_IA32_MISC_ENABLE:
- env->msr_ia32_misc_enable = val;
- break;
- default:
- if ((uint32_t)ECX >= MSR_MC0_CTL
- && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
- uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
- if ((offset & 0x3) != 0
- || (val == 0 || val == ~(uint64_t)0))
- env->mce_banks[offset] = val;
- break;
- }
- /* XXX: exception ? */
- break;
- }
-}
-
-void helper_rdmsr(void)
-{
- uint64_t val;
-
- helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);
-
- switch((uint32_t)ECX) {
- case MSR_IA32_SYSENTER_CS:
- val = env->sysenter_cs;
- break;
- case MSR_IA32_SYSENTER_ESP:
- val = env->sysenter_esp;
- break;
- case MSR_IA32_SYSENTER_EIP:
- val = env->sysenter_eip;
- break;
- case MSR_IA32_APICBASE:
- val = cpu_get_apic_base(env->apic_state);
- break;
- case MSR_EFER:
- val = env->efer;
- break;
- case MSR_STAR:
- val = env->star;
- break;
- case MSR_PAT:
- val = env->pat;
- break;
- case MSR_VM_HSAVE_PA:
- val = env->vm_hsave;
- break;
- case MSR_IA32_PERF_STATUS:
- /* tsc_increment_by_tick */
- val = 1000ULL;
- /* CPU multiplier */
- val |= (((uint64_t)4ULL) << 40);
- break;
-#ifdef TARGET_X86_64
- case MSR_LSTAR:
- val = env->lstar;
- break;
- case MSR_CSTAR:
- val = env->cstar;
- break;
- case MSR_FMASK:
- val = env->fmask;
- break;
- case MSR_FSBASE:
- val = env->segs[R_FS].base;
- break;
- case MSR_GSBASE:
- val = env->segs[R_GS].base;
- break;
- case MSR_KERNELGSBASE:
- val = env->kernelgsbase;
- break;
- case MSR_TSC_AUX:
- val = env->tsc_aux;
- break;
-#endif
- case MSR_MTRRphysBase(0):
- case MSR_MTRRphysBase(1):
- case MSR_MTRRphysBase(2):
- case MSR_MTRRphysBase(3):
- case MSR_MTRRphysBase(4):
- case MSR_MTRRphysBase(5):
- case MSR_MTRRphysBase(6):
- case MSR_MTRRphysBase(7):
- val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
- break;
- case MSR_MTRRphysMask(0):
- case MSR_MTRRphysMask(1):
- case MSR_MTRRphysMask(2):
- case MSR_MTRRphysMask(3):
- case MSR_MTRRphysMask(4):
- case MSR_MTRRphysMask(5):
- case MSR_MTRRphysMask(6):
- case MSR_MTRRphysMask(7):
- val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
- break;
- case MSR_MTRRfix64K_00000:
- val = env->mtrr_fixed[0];
- break;
- case MSR_MTRRfix16K_80000:
- case MSR_MTRRfix16K_A0000:
- val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
- break;
- case MSR_MTRRfix4K_C0000:
- case MSR_MTRRfix4K_C8000:
- case MSR_MTRRfix4K_D0000:
- case MSR_MTRRfix4K_D8000:
- case MSR_MTRRfix4K_E0000:
- case MSR_MTRRfix4K_E8000:
- case MSR_MTRRfix4K_F0000:
- case MSR_MTRRfix4K_F8000:
- val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
- break;
- case MSR_MTRRdefType:
- val = env->mtrr_deftype;
- break;
- case MSR_MTRRcap:
- if (env->cpuid_features & CPUID_MTRR)
- val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
- else
- /* XXX: exception ? */
- val = 0;
- break;
- case MSR_MCG_CAP:
- val = env->mcg_cap;
- break;
- case MSR_MCG_CTL:
- if (env->mcg_cap & MCG_CTL_P)
- val = env->mcg_ctl;
- else
- val = 0;
- break;
- case MSR_MCG_STATUS:
- val = env->mcg_status;
- break;
- case MSR_IA32_MISC_ENABLE:
- val = env->msr_ia32_misc_enable;
- break;
- default:
- if ((uint32_t)ECX >= MSR_MC0_CTL
- && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
- uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
- val = env->mce_banks[offset];
- break;
- }
- /* XXX: exception ? */
- val = 0;
- break;
- }
- EAX = (uint32_t)(val);
- EDX = (uint32_t)(val >> 32);
-}
-#endif
-
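-/* LSL and LAR: if the selector passes the visibility and type checks, return
- the segment limit (LSL) or the masked access-rights bytes of the descriptor
- (LAR) and set ZF; otherwise clear ZF and return 0. */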
-target_ulong helper_lsl(target_ulong selector1)
-{
- unsigned int limit;
- uint32_t e1, e2, eflags, selector;
- int rpl, dpl, cpl, type;
-
- selector = selector1 & 0xffff;
- eflags = helper_cc_compute_all(CC_OP);
- if ((selector & 0xfffc) == 0)
- goto fail;
- if (load_segment(&e1, &e2, selector) != 0)
- goto fail;
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- if (e2 & DESC_S_MASK) {
- if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
- /* conforming */
- } else {
- if (dpl < cpl || dpl < rpl)
- goto fail;
- }
- } else {
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- switch(type) {
- case 1:
- case 2:
- case 3:
- case 9:
- case 11:
- break;
- default:
- goto fail;
- }
- if (dpl < cpl || dpl < rpl) {
- fail:
- CC_SRC = eflags & ~CC_Z;
- return 0;
- }
- }
- limit = get_seg_limit(e1, e2);
- CC_SRC = eflags | CC_Z;
- return limit;
-}
-
-target_ulong helper_lar(target_ulong selector1)
-{
- uint32_t e1, e2, eflags, selector;
- int rpl, dpl, cpl, type;
-
- selector = selector1 & 0xffff;
- eflags = helper_cc_compute_all(CC_OP);
- if ((selector & 0xfffc) == 0)
- goto fail;
- if (load_segment(&e1, &e2, selector) != 0)
- goto fail;
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- if (e2 & DESC_S_MASK) {
- if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
- /* conforming */
- } else {
- if (dpl < cpl || dpl < rpl)
- goto fail;
- }
- } else {
- type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
- switch(type) {
- case 1:
- case 2:
- case 3:
- case 4:
- case 5:
- case 9:
- case 11:
- case 12:
- break;
- default:
- goto fail;
- }
- if (dpl < cpl || dpl < rpl) {
- fail:
- CC_SRC = eflags & ~CC_Z;
- return 0;
- }
- }
- CC_SRC = eflags | CC_Z;
- return e2 & 0x00f0ff00;
-}
-
-void helper_verr(target_ulong selector1)
-{
- uint32_t e1, e2, eflags, selector;
- int rpl, dpl, cpl;
-
- selector = selector1 & 0xffff;
- eflags = helper_cc_compute_all(CC_OP);
- if ((selector & 0xfffc) == 0)
- goto fail;
- if (load_segment(&e1, &e2, selector) != 0)
- goto fail;
- if (!(e2 & DESC_S_MASK))
- goto fail;
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- if (e2 & DESC_CS_MASK) {
- if (!(e2 & DESC_R_MASK))
- goto fail;
- if (!(e2 & DESC_C_MASK)) {
- if (dpl < cpl || dpl < rpl)
- goto fail;
- }
- } else {
- if (dpl < cpl || dpl < rpl) {
- fail:
- CC_SRC = eflags & ~CC_Z;
- return;
- }
- }
- CC_SRC = eflags | CC_Z;
-}
-
-void helper_verw(target_ulong selector1)
-{
- uint32_t e1, e2, eflags, selector;
- int rpl, dpl, cpl;
-
- selector = selector1 & 0xffff;
- eflags = helper_cc_compute_all(CC_OP);
- if ((selector & 0xfffc) == 0)
- goto fail;
- if (load_segment(&e1, &e2, selector) != 0)
- goto fail;
- if (!(e2 & DESC_S_MASK))
- goto fail;
- rpl = selector & 3;
- dpl = (e2 >> DESC_DPL_SHIFT) & 3;
- cpl = env->hflags & HF_CPL_MASK;
- if (e2 & DESC_CS_MASK) {
- goto fail;
- } else {
- if (dpl < cpl || dpl < rpl)
- goto fail;
- if (!(e2 & DESC_W_MASK)) {
- fail:
- CC_SRC = eflags & ~CC_Z;
- return;
- }
- }
- CC_SRC = eflags | CC_Z;
-}
-
-/* x87 FPU helpers */
-
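-/* Several transcendental helpers below go through the host 'double' type,
- so they provide at most 53 bits of precision rather than the full 64-bit
- extended-precision significand. */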
-static inline double floatx80_to_double(floatx80 a)
-{
- union {
- float64 f64;
- double d;
- } u;
-
- u.f64 = floatx80_to_float64(a, &env->fp_status);
- return u.d;
-}
-
-static inline floatx80 double_to_floatx80(double a)
-{
- union {
- float64 f64;
- double d;
- } u;
-
- u.d = a;
- return float64_to_floatx80(u.f64, &env->fp_status);
-}
-
-static void fpu_set_exception(int mask)
-{
- env->fpus |= mask;
- if (env->fpus & (~env->fpuc & FPUC_EM))
- env->fpus |= FPUS_SE | FPUS_B;
-}
-
-static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
-{
- if (floatx80_is_zero(b)) {
- fpu_set_exception(FPUS_ZE);
- }
- return floatx80_div(a, b, &env->fp_status);
-}
-
-static void fpu_raise_exception(void)
-{
- if (env->cr[0] & CR0_NE_MASK) {
- raise_exception(EXCP10_COPR);
- }
-#if !defined(CONFIG_USER_ONLY)
- else {
- cpu_set_ferr(env);
- }
-#endif
-}
-
-void helper_flds_FT0(uint32_t val)
-{
- union {
- float32 f;
- uint32_t i;
- } u;
- u.i = val;
- FT0 = float32_to_floatx80(u.f, &env->fp_status);
-}
-
-void helper_fldl_FT0(uint64_t val)
-{
- union {
- float64 f;
- uint64_t i;
- } u;
- u.i = val;
- FT0 = float64_to_floatx80(u.f, &env->fp_status);
-}
-
-void helper_fildl_FT0(int32_t val)
-{
- FT0 = int32_to_floatx80(val, &env->fp_status);
-}
-
-void helper_flds_ST0(uint32_t val)
-{
- int new_fpstt;
- union {
- float32 f;
- uint32_t i;
- } u;
- new_fpstt = (env->fpstt - 1) & 7;
- u.i = val;
- env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
- env->fpstt = new_fpstt;
- env->fptags[new_fpstt] = 0; /* validate stack entry */
-}
-
-void helper_fldl_ST0(uint64_t val)
-{
- int new_fpstt;
- union {
- float64 f;
- uint64_t i;
- } u;
- new_fpstt = (env->fpstt - 1) & 7;
- u.i = val;
- env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
- env->fpstt = new_fpstt;
- env->fptags[new_fpstt] = 0; /* validate stack entry */
-}
-
-void helper_fildl_ST0(int32_t val)
-{
- int new_fpstt;
- new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
- env->fpstt = new_fpstt;
- env->fptags[new_fpstt] = 0; /* validate stack entry */
-}
-
-void helper_fildll_ST0(int64_t val)
-{
- int new_fpstt;
- new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
- env->fpstt = new_fpstt;
- env->fptags[new_fpstt] = 0; /* validate stack entry */
-}
-
-uint32_t helper_fsts_ST0(void)
-{
- union {
- float32 f;
- uint32_t i;
- } u;
- u.f = floatx80_to_float32(ST0, &env->fp_status);
- return u.i;
-}
-
-uint64_t helper_fstl_ST0(void)
-{
- union {
- float64 f;
- uint64_t i;
- } u;
- u.f = floatx80_to_float64(ST0, &env->fp_status);
- return u.i;
-}
-
-int32_t helper_fist_ST0(void)
-{
- int32_t val;
- val = floatx80_to_int32(ST0, &env->fp_status);
- if (val != (int16_t)val)
- val = -32768;
- return val;
-}
-
-int32_t helper_fistl_ST0(void)
-{
- int32_t val;
- val = floatx80_to_int32(ST0, &env->fp_status);
- return val;
-}
-
-int64_t helper_fistll_ST0(void)
-{
- int64_t val;
- val = floatx80_to_int64(ST0, &env->fp_status);
- return val;
-}
-
-int32_t helper_fistt_ST0(void)
-{
- int32_t val;
- val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
- if (val != (int16_t)val)
- val = -32768;
- return val;
-}
-
-int32_t helper_fisttl_ST0(void)
-{
- int32_t val;
- val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
- return val;
-}
-
-int64_t helper_fisttll_ST0(void)
-{
- int64_t val;
- val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
- return val;
-}
-
-void helper_fldt_ST0(target_ulong ptr)
-{
- int new_fpstt;
- new_fpstt = (env->fpstt - 1) & 7;
- env->fpregs[new_fpstt].d = helper_fldt(ptr);
- env->fpstt = new_fpstt;
- env->fptags[new_fpstt] = 0; /* validate stack entry */
-}
-
-void helper_fstt_ST0(target_ulong ptr)
-{
- helper_fstt(ST0, ptr);
-}
-
-void helper_fpush(void)
-{
- fpush();
-}
-
-void helper_fpop(void)
-{
- fpop();
-}
-
-void helper_fdecstp(void)
-{
- env->fpstt = (env->fpstt - 1) & 7;
- env->fpus &= (~0x4700);
-}
-
-void helper_fincstp(void)
-{
- env->fpstt = (env->fpstt + 1) & 7;
- env->fpus &= (~0x4700);
-}
-
-/* FPU move */
-
-void helper_ffree_STN(int st_index)
-{
- env->fptags[(env->fpstt + st_index) & 7] = 1;
-}
-
-void helper_fmov_ST0_FT0(void)
-{
- ST0 = FT0;
-}
-
-void helper_fmov_FT0_STN(int st_index)
-{
- FT0 = ST(st_index);
-}
-
-void helper_fmov_ST0_STN(int st_index)
-{
- ST0 = ST(st_index);
-}
-
-void helper_fmov_STN_ST0(int st_index)
-{
- ST(st_index) = ST0;
-}
-
-void helper_fxchg_ST0_STN(int st_index)
-{
- floatx80 tmp;
- tmp = ST(st_index);
- ST(st_index) = ST0;
- ST0 = tmp;
-}
-
-/* FPU operations */
-
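-/* Map the floatx80_compare() result (less/equal/greater/unordered, i.e.
- -1/0/1/2, hence the 'ret + 1' index) to the FPU C0/C2/C3 condition bits. */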
-static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
-
-void helper_fcom_ST0_FT0(void)
-{
- int ret;
-
- ret = floatx80_compare(ST0, FT0, &env->fp_status);
- env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
-}
-
-void helper_fucom_ST0_FT0(void)
-{
- int ret;
-
- ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
- env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
-}
-
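-/* Same mapping for FCOMI/FUCOMI, which report the result in EFLAGS instead:
- less -> CF, equal -> ZF, greater -> none, unordered -> ZF|PF|CF. */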
-static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
-
-void helper_fcomi_ST0_FT0(void)
-{
- int eflags;
- int ret;
-
- ret = floatx80_compare(ST0, FT0, &env->fp_status);
- eflags = helper_cc_compute_all(CC_OP);
- eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
- CC_SRC = eflags;
-}
-
-void helper_fucomi_ST0_FT0(void)
-{
- int eflags;
- int ret;
-
- ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
- eflags = helper_cc_compute_all(CC_OP);
- eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
- CC_SRC = eflags;
-}
-
-void helper_fadd_ST0_FT0(void)
-{
- ST0 = floatx80_add(ST0, FT0, &env->fp_status);
-}
-
-void helper_fmul_ST0_FT0(void)
-{
- ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
-}
-
-void helper_fsub_ST0_FT0(void)
-{
- ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
-}
-
-void helper_fsubr_ST0_FT0(void)
-{
- ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
-}
-
-void helper_fdiv_ST0_FT0(void)
-{
- ST0 = helper_fdiv(ST0, FT0);
-}
-
-void helper_fdivr_ST0_FT0(void)
-{
- ST0 = helper_fdiv(FT0, ST0);
-}
-
-/* fp operations between STN and ST0 */
-
-void helper_fadd_STN_ST0(int st_index)
-{
- ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
-}
-
-void helper_fmul_STN_ST0(int st_index)
-{
- ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
-}
-
-void helper_fsub_STN_ST0(int st_index)
-{
- ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
-}
-
-void helper_fsubr_STN_ST0(int st_index)
-{
- ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
-}
-
-void helper_fdiv_STN_ST0(int st_index)
-{
- floatx80 *p;
- p = &ST(st_index);
- *p = helper_fdiv(*p, ST0);
-}
-
-void helper_fdivr_STN_ST0(int st_index)
-{
- floatx80 *p;
- p = &ST(st_index);
- *p = helper_fdiv(ST0, *p);
-}
-
-/* misc FPU operations */
-void helper_fchs_ST0(void)
-{
- ST0 = floatx80_chs(ST0);
-}
-
-void helper_fabs_ST0(void)
-{
- ST0 = floatx80_abs(ST0);
-}
-
-void helper_fld1_ST0(void)
-{
- ST0 = floatx80_one;
-}
-
-void helper_fldl2t_ST0(void)
-{
- ST0 = floatx80_l2t;
-}
-
-void helper_fldl2e_ST0(void)
-{
- ST0 = floatx80_l2e;
-}
-
-void helper_fldpi_ST0(void)
-{
- ST0 = floatx80_pi;
-}
-
-void helper_fldlg2_ST0(void)
-{
- ST0 = floatx80_lg2;
-}
-
-void helper_fldln2_ST0(void)
-{
- ST0 = floatx80_ln2;
-}
-
-void helper_fldz_ST0(void)
-{
- ST0 = floatx80_zero;
-}
-
-void helper_fldz_FT0(void)
-{
- FT0 = floatx80_zero;
-}
-
-uint32_t helper_fnstsw(void)
-{
- return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
-}
-
-uint32_t helper_fnstcw(void)
-{
- return env->fpuc;
-}
-
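-/* Propagate the FPU control word into the softfloat status: the rounding
- control field selects the rounding mode and the precision control field
- selects 32-, 64- or 80-bit rounding precision. */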
-static void update_fp_status(void)
-{
- int rnd_type;
-
- /* set rounding mode */
- switch(env->fpuc & FPU_RC_MASK) {
- default:
- case FPU_RC_NEAR:
- rnd_type = float_round_nearest_even;
- break;
- case FPU_RC_DOWN:
- rnd_type = float_round_down;
- break;
- case FPU_RC_UP:
- rnd_type = float_round_up;
- break;
- case FPU_RC_CHOP:
- rnd_type = float_round_to_zero;
- break;
- }
- set_float_rounding_mode(rnd_type, &env->fp_status);
- switch((env->fpuc >> 8) & 3) {
- case 0:
- rnd_type = 32;
- break;
- case 2:
- rnd_type = 64;
- break;
- case 3:
- default:
- rnd_type = 80;
- break;
- }
- set_floatx80_rounding_precision(rnd_type, &env->fp_status);
-}
-
-void helper_fldcw(uint32_t val)
-{
- env->fpuc = val;
- update_fp_status();
-}
-
-void helper_fclex(void)
-{
- env->fpus &= 0x7f00;
-}
-
-void helper_fwait(void)
-{
- if (env->fpus & FPUS_SE)
- fpu_raise_exception();
-}
-
-void helper_fninit(void)
-{
- env->fpus = 0;
- env->fpstt = 0;
- env->fpuc = 0x37f;
- env->fptags[0] = 1;
- env->fptags[1] = 1;
- env->fptags[2] = 1;
- env->fptags[3] = 1;
- env->fptags[4] = 1;
- env->fptags[5] = 1;
- env->fptags[6] = 1;
- env->fptags[7] = 1;
-}
-
-/* BCD ops */
-
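-/* FBLD/FBSTP use the x87 packed-BCD format: 9 bytes of BCD digit pairs,
- least-significant byte first, followed by a sign byte whose top bit set
- means negative. */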
-void helper_fbld_ST0(target_ulong ptr)
-{
- floatx80 tmp;
- uint64_t val;
- unsigned int v;
- int i;
-
- val = 0;
- for(i = 8; i >= 0; i--) {
- v = ldub(ptr + i);
- val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
- }
- tmp = int64_to_floatx80(val, &env->fp_status);
- if (ldub(ptr + 9) & 0x80) {
- tmp = floatx80_chs(tmp);
- }
- fpush();
- ST0 = tmp;
-}
-
-void helper_fbst_ST0(target_ulong ptr)
-{
- int v;
- target_ulong mem_ref, mem_end;
- int64_t val;
-
- val = floatx80_to_int64(ST0, &env->fp_status);
- mem_ref = ptr;
- mem_end = mem_ref + 9;
- if (val < 0) {
- stb(mem_end, 0x80);
- val = -val;
- } else {
- stb(mem_end, 0x00);
- }
- while (mem_ref < mem_end) {
- if (val == 0)
- break;
- v = val % 100;
- val = val / 100;
- v = ((v / 10) << 4) | (v % 10);
- stb(mem_ref++, v);
- }
- while (mem_ref < mem_end) {
- stb(mem_ref++, 0);
- }
-}
-
-void helper_f2xm1(void)
-{
- double val = floatx80_to_double(ST0);
- val = pow(2.0, val) - 1.0;
- ST0 = double_to_floatx80(val);
-}
-
-void helper_fyl2x(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if (fptemp > 0.0) {
- fptemp = log(fptemp) / log(2.0); /* log2(ST) */
- fptemp *= floatx80_to_double(ST1);
- ST1 = double_to_floatx80(fptemp);
- fpop();
- } else {
- env->fpus &= (~0x4700);
- env->fpus |= 0x400;
- }
-}
-
-void helper_fptan(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
- env->fpus |= 0x400;
- } else {
- fptemp = tan(fptemp);
- ST0 = double_to_floatx80(fptemp);
- fpush();
- ST0 = floatx80_one;
- env->fpus &= (~0x400); /* C2 <-- 0 */
- /* the above code is for |arg| < 2**52 only */
- }
-}
-
-void helper_fpatan(void)
-{
- double fptemp, fpsrcop;
-
- fpsrcop = floatx80_to_double(ST1);
- fptemp = floatx80_to_double(ST0);
- ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
- fpop();
-}
-
-void helper_fxtract(void)
-{
- CPU_LDoubleU temp;
-
- temp.d = ST0;
-
- if (floatx80_is_zero(ST0)) {
- /* Easy way to generate -inf and raise the divide-by-zero exception */
- ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
- fpush();
- ST0 = temp.d;
- } else {
- int expdif;
-
- expdif = EXPD(temp) - EXPBIAS;
- /*DP exponent bias*/
- ST0 = int32_to_floatx80(expdif, &env->fp_status);
- fpush();
- BIASEXPONENT(temp);
- ST0 = temp.d;
- }
-}
-
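-/* FPREM1 and FPREM compute a partial remainder using host doubles: FPREM1
- rounds the quotient to nearest, FPREM truncates it toward zero. The low
- three quotient bits are reported in C0/C3/C1; C2 set means the reduction
- is incomplete and the instruction should be executed again. */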
-void helper_fprem1(void)
-{
- double st0, st1, dblq, fpsrcop, fptemp;
- CPU_LDoubleU fpsrcop1, fptemp1;
- int expdif;
- signed long long int q;
-
- st0 = floatx80_to_double(ST0);
- st1 = floatx80_to_double(ST1);
-
- if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
- ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- return;
- }
-
- fpsrcop = st0;
- fptemp = st1;
- fpsrcop1.d = ST0;
- fptemp1.d = ST1;
- expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
-
- if (expdif < 0) {
- /* optimisation? taken from the AMD docs */
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- /* ST0 is unchanged */
- return;
- }
-
- if (expdif < 53) {
- dblq = fpsrcop / fptemp;
- /* round dblq towards nearest integer */
- dblq = rint(dblq);
- st0 = fpsrcop - fptemp * dblq;
-
- /* convert dblq to q by truncating towards zero */
- if (dblq < 0.0)
- q = (signed long long int)(-dblq);
- else
- q = (signed long long int)dblq;
-
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- /* (C0,C3,C1) <-- (q2,q1,q0) */
- env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
- env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
- env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
- } else {
- env->fpus |= 0x400; /* C2 <-- 1 */
- fptemp = pow(2.0, expdif - 50);
- fpsrcop = (st0 / st1) / fptemp;
- /* fpsrcop = integer obtained by chopping */
- fpsrcop = (fpsrcop < 0.0) ?
- -(floor(fabs(fpsrcop))) : floor(fpsrcop);
- st0 -= (st1 * fpsrcop * fptemp);
- }
- ST0 = double_to_floatx80(st0);
-}
-
-void helper_fprem(void)
-{
- double st0, st1, dblq, fpsrcop, fptemp;
- CPU_LDoubleU fpsrcop1, fptemp1;
- int expdif;
- signed long long int q;
-
- st0 = floatx80_to_double(ST0);
- st1 = floatx80_to_double(ST1);
-
- if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
- ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- return;
- }
-
- fpsrcop = st0;
- fptemp = st1;
- fpsrcop1.d = ST0;
- fptemp1.d = ST1;
- expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
-
- if (expdif < 0) {
- /* optimisation? taken from the AMD docs */
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- /* ST0 is unchanged */
- return;
- }
-
- if ( expdif < 53 ) {
- dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
- /* round dblq towards zero */
- dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
- st0 = fpsrcop/*ST0*/ - fptemp * dblq;
-
- /* convert dblq to q by truncating towards zero */
- if (dblq < 0.0)
- q = (signed long long int)(-dblq);
- else
- q = (signed long long int)dblq;
-
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- /* (C0,C3,C1) <-- (q2,q1,q0) */
- env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
- env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
- env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
- } else {
- int N = 32 + (expdif % 32); /* as per AMD docs */
- env->fpus |= 0x400; /* C2 <-- 1 */
- fptemp = pow(2.0, (double)(expdif - N));
- fpsrcop = (st0 / st1) / fptemp;
- /* fpsrcop = integer obtained by chopping */
- fpsrcop = (fpsrcop < 0.0) ?
- -(floor(fabs(fpsrcop))) : floor(fpsrcop);
- st0 -= (st1 * fpsrcop * fptemp);
- }
- ST0 = double_to_floatx80(st0);
-}
-
-void helper_fyl2xp1(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if ((fptemp + 1.0) > 0.0) {
- fptemp = log(fptemp + 1.0) / log(2.0); /* log2(ST + 1.0) */
- fptemp *= floatx80_to_double(ST1);
- ST1 = double_to_floatx80(fptemp);
- fpop();
- } else {
- env->fpus &= (~0x4700);
- env->fpus |= 0x400;
- }
-}
-
-void helper_fsqrt(void)
-{
- if (floatx80_is_neg(ST0)) {
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- env->fpus |= 0x400;
- }
- ST0 = floatx80_sqrt(ST0, &env->fp_status);
-}
-
-void helper_fsincos(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
- env->fpus |= 0x400;
- } else {
- ST0 = double_to_floatx80(sin(fptemp));
- fpush();
- ST0 = double_to_floatx80(cos(fptemp));
- env->fpus &= (~0x400); /* C2 <-- 0 */
- /* the above code is for |arg| < 2**63 only */
- }
-}
-
-void helper_frndint(void)
-{
- ST0 = floatx80_round_to_int(ST0, &env->fp_status);
-}
-
-void helper_fscale(void)
-{
- if (floatx80_is_any_nan(ST1)) {
- ST0 = ST1;
- } else {
- int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
- ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
- }
-}
-
-void helper_fsin(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
- env->fpus |= 0x400;
- } else {
- ST0 = double_to_floatx80(sin(fptemp));
- env->fpus &= (~0x400); /* C2 <-- 0 */
- /* the above code is for |arg| < 2**53 only */
- }
-}
-
-void helper_fcos(void)
-{
- double fptemp = floatx80_to_double(ST0);
-
- if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
- env->fpus |= 0x400;
- } else {
- ST0 = double_to_floatx80(cos(fptemp));
- env->fpus &= (~0x400); /* C2 <-- 0 */
- /* the above code is for |arg| < 2**63 only */
- }
-}
-
-void helper_fxam_ST0(void)
-{
- CPU_LDoubleU temp;
- int expdif;
-
- temp.d = ST0;
-
- env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
- if (SIGND(temp))
- env->fpus |= 0x200; /* C1 <-- 1 */
-
- /* XXX: test fptags too */
- expdif = EXPD(temp);
- if (expdif == MAXEXPD) {
- if (MANTD(temp) == 0x8000000000000000ULL)
- env->fpus |= 0x500 /*Infinity*/;
- else
- env->fpus |= 0x100 /*NaN*/;
- } else if (expdif == 0) {
- if (MANTD(temp) == 0)
- env->fpus |= 0x4000 /*Zero*/;
- else
- env->fpus |= 0x4400 /*Denormal*/;
- } else {
- env->fpus |= 0x400;
- }
-}
-
-void helper_fstenv(target_ulong ptr, int data32)
-{
- int fpus, fptag, exp, i;
- uint64_t mant;
- CPU_LDoubleU tmp;
-
- fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
- fptag = 0;
- for (i=7; i>=0; i--) {
- fptag <<= 2;
- if (env->fptags[i]) {
- fptag |= 3;
- } else {
- tmp.d = env->fpregs[i].d;
- exp = EXPD(tmp);
- mant = MANTD(tmp);
- if (exp == 0 && mant == 0) {
- /* zero */
- fptag |= 1;
- } else if (exp == 0 || exp == MAXEXPD
- || (mant & (1LL << 63)) == 0
- ) {
- /* NaNs, infinity, denormal */
- fptag |= 2;
- }
- }
- }
- if (data32) {
- /* 32 bit */
- stl(ptr, env->fpuc);
- stl(ptr + 4, fpus);
- stl(ptr + 8, fptag);
- stl(ptr + 12, 0); /* fpip */
- stl(ptr + 16, 0); /* fpcs */
- stl(ptr + 20, 0); /* fpoo */
- stl(ptr + 24, 0); /* fpos */
- } else {
- /* 16 bit */
- stw(ptr, env->fpuc);
- stw(ptr + 2, fpus);
- stw(ptr + 4, fptag);
- stw(ptr + 6, 0);
- stw(ptr + 8, 0);
- stw(ptr + 10, 0);
- stw(ptr + 12, 0);
- }
-}
-
-void helper_fldenv(target_ulong ptr, int data32)
-{
- int i, fpus, fptag;
-
- if (data32) {
- env->fpuc = lduw(ptr);
- fpus = lduw(ptr + 4);
- fptag = lduw(ptr + 8);
- }
- else {
- env->fpuc = lduw(ptr);
- fpus = lduw(ptr + 2);
- fptag = lduw(ptr + 4);
- }
- env->fpstt = (fpus >> 11) & 7;
- env->fpus = fpus & ~0x3800;
- for(i = 0;i < 8; i++) {
- env->fptags[i] = ((fptag & 3) == 3);
- fptag >>= 2;
- }
-}
-
-void helper_fsave(target_ulong ptr, int data32)
-{
- floatx80 tmp;
- int i;
-
- helper_fstenv(ptr, data32);
-
- ptr += (14 << data32);
- for(i = 0;i < 8; i++) {
- tmp = ST(i);
- helper_fstt(tmp, ptr);
- ptr += 10;
- }
-
- /* fninit */
- env->fpus = 0;
- env->fpstt = 0;
- env->fpuc = 0x37f;
- env->fptags[0] = 1;
- env->fptags[1] = 1;
- env->fptags[2] = 1;
- env->fptags[3] = 1;
- env->fptags[4] = 1;
- env->fptags[5] = 1;
- env->fptags[6] = 1;
- env->fptags[7] = 1;
-}
-
-void helper_frstor(target_ulong ptr, int data32)
-{
- floatx80 tmp;
- int i;
-
- helper_fldenv(ptr, data32);
- ptr += (14 << data32);
-
- for(i = 0;i < 8; i++) {
- tmp = helper_fldt(ptr);
- ST(i) = tmp;
- ptr += 10;
- }
-}
-
-
-#if defined(CONFIG_USER_ONLY)
-void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = s;
- if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
- selector &= 0xffff;
- cpu_x86_load_seg_cache(env, seg_reg, selector,
- (selector << 4), 0xffff, 0);
- } else {
- helper_load_seg(seg_reg, selector);
- }
- env = saved_env;
-}
-
-void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = s;
-
- helper_fsave(ptr, data32);
-
- env = saved_env;
-}
-
-void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = s;
-
- helper_frstor(ptr, data32);
-
- env = saved_env;
-}
-#endif
-
-void helper_fxsave(target_ulong ptr, int data64)
-{
- int fpus, fptag, i, nb_xmm_regs;
- floatx80 tmp;
- target_ulong addr;
-
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception(EXCP0D_GPF);
- }
-
- fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
- fptag = 0;
- for(i = 0; i < 8; i++) {
- fptag |= (env->fptags[i] << i);
- }
- stw(ptr, env->fpuc);
- stw(ptr + 2, fpus);
- stw(ptr + 4, fptag ^ 0xff);
-#ifdef TARGET_X86_64
- if (data64) {
- stq(ptr + 0x08, 0); /* rip */
- stq(ptr + 0x10, 0); /* rdp */
- } else
-#endif
- {
- stl(ptr + 0x08, 0); /* eip */
- stl(ptr + 0x0c, 0); /* sel */
- stl(ptr + 0x10, 0); /* dp */
- stl(ptr + 0x14, 0); /* sel */
- }
-
- addr = ptr + 0x20;
- for(i = 0;i < 8; i++) {
- tmp = ST(i);
- helper_fstt(tmp, addr);
- addr += 16;
- }
-
- if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- stl(ptr + 0x18, env->mxcsr); /* mxcsr */
- stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
- if (env->hflags & HF_CS64_MASK)
- nb_xmm_regs = 16;
- else
- nb_xmm_regs = 8;
- addr = ptr + 0xa0;
- /* Fast FXSAVE leaves out the XMM registers */
- if (!(env->efer & MSR_EFER_FFXSR)
- || (env->hflags & HF_CPL_MASK)
- || !(env->hflags & HF_LMA_MASK)) {
- for(i = 0; i < nb_xmm_regs; i++) {
- stq(addr, env->xmm_regs[i].XMM_Q(0));
- stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
- addr += 16;
- }
- }
- }
-}
-
-void helper_fxrstor(target_ulong ptr, int data64)
-{
- int i, fpus, fptag, nb_xmm_regs;
- floatx80 tmp;
- target_ulong addr;
-
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception(EXCP0D_GPF);
- }
-
- env->fpuc = lduw(ptr);
- fpus = lduw(ptr + 2);
- fptag = lduw(ptr + 4);
- env->fpstt = (fpus >> 11) & 7;
- env->fpus = fpus & ~0x3800;
- fptag ^= 0xff;
- for(i = 0;i < 8; i++) {
- env->fptags[i] = ((fptag >> i) & 1);
- }
-
- addr = ptr + 0x20;
- for(i = 0;i < 8; i++) {
- tmp = helper_fldt(addr);
- ST(i) = tmp;
- addr += 16;
- }
-
- if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- env->mxcsr = ldl(ptr + 0x18);
- //ldl(ptr + 0x1c);
- if (env->hflags & HF_CS64_MASK)
- nb_xmm_regs = 16;
- else
- nb_xmm_regs = 8;
- addr = ptr + 0xa0;
- /* Fast FXRESTORE leaves out the XMM registers */
- if (!(env->efer & MSR_EFER_FFXSR)
- || (env->hflags & HF_CPL_MASK)
- || !(env->hflags & HF_LMA_MASK)) {
- for(i = 0; i < nb_xmm_regs; i++) {
- env->xmm_regs[i].XMM_Q(0) = ldq(addr);
- env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
- addr += 16;
- }
- }
- }
-}
-
-void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
-{
- CPU_LDoubleU temp;
-
- temp.d = f;
- *pmant = temp.l.lower;
- *pexp = temp.l.upper;
-}
-
-floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
-{
- CPU_LDoubleU temp;
-
- temp.l.upper = upper;
- temp.l.lower = mant;
- return temp.d;
-}
-
-#ifdef TARGET_X86_64
-
-//#define DEBUG_MULDIV
-
-static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
-{
- *plow += a;
- /* carry test */
- if (*plow < a)
- (*phigh)++;
- *phigh += b;
-}
-
-static void neg128(uint64_t *plow, uint64_t *phigh)
-{
- *plow = ~ *plow;
- *phigh = ~ *phigh;
- add128(plow, phigh, 1, 0);
-}
-
-/* return TRUE if overflow */
-static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
-{
- uint64_t q, r, a1, a0;
- int i, qb, ab;
-
- a0 = *plow;
- a1 = *phigh;
- if (a1 == 0) {
- q = a0 / b;
- r = a0 % b;
- *plow = q;
- *phigh = r;
- } else {
- if (a1 >= b)
- return 1;
- /* XXX: use a better algorithm */
- for(i = 0; i < 64; i++) {
- ab = a1 >> 63;
- a1 = (a1 << 1) | (a0 >> 63);
- if (ab || a1 >= b) {
- a1 -= b;
- qb = 1;
- } else {
- qb = 0;
- }
- a0 = (a0 << 1) | qb;
- }
-#if defined(DEBUG_MULDIV)
- printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
- *phigh, *plow, b, a0, a1);
-#endif
- *plow = a0;
- *phigh = a1;
- }
- return 0;
-}
-
-/* return TRUE if overflow */
-static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
-{
- int sa, sb;
- sa = ((int64_t)*phigh < 0);
- if (sa)
- neg128(plow, phigh);
- sb = (b < 0);
- if (sb)
- b = -b;
- if (div64(plow, phigh, b) != 0)
- return 1;
- if (sa ^ sb) {
- if (*plow > (1ULL << 63))
- return 1;
- *plow = - *plow;
- } else {
- if (*plow >= (1ULL << 63))
- return 1;
- }
- if (sa)
- *phigh = - *phigh;
- return 0;
-}
-
-void helper_mulq_EAX_T0(target_ulong t0)
-{
- uint64_t r0, r1;
-
- mulu64(&r0, &r1, EAX, t0);
- EAX = r0;
- EDX = r1;
- CC_DST = r0;
- CC_SRC = r1;
-}
-
-void helper_imulq_EAX_T0(target_ulong t0)
-{
- uint64_t r0, r1;
-
- muls64(&r0, &r1, EAX, t0);
- EAX = r0;
- EDX = r1;
- CC_DST = r0;
- CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
-}
-
-target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
-{
- uint64_t r0, r1;
-
- muls64(&r0, &r1, t0, t1);
- CC_DST = r0;
- CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
- return r0;
-}
-
-void helper_divq_EAX(target_ulong t0)
-{
- uint64_t r0, r1;
- if (t0 == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- r0 = EAX;
- r1 = EDX;
- if (div64(&r0, &r1, t0))
- raise_exception(EXCP00_DIVZ);
- EAX = r0;
- EDX = r1;
-}
-
-void helper_idivq_EAX(target_ulong t0)
-{
- uint64_t r0, r1;
- if (t0 == 0) {
- raise_exception(EXCP00_DIVZ);
- }
- r0 = EAX;
- r1 = EDX;
- if (idiv64(&r0, &r1, t0))
- raise_exception(EXCP00_DIVZ);
- EAX = r0;
- EDX = r1;
-}
-#endif
-
-static void do_hlt(void)
-{
- env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
- env->halted = 1;
- env->exception_index = EXCP_HLT;
- cpu_loop_exit(env);
-}
-
-void helper_hlt(int next_eip_addend)
-{
- helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
- EIP += next_eip_addend;
-
- do_hlt();
-}
-
-void helper_monitor(target_ulong ptr)
-{
- if ((uint32_t)ECX != 0)
- raise_exception(EXCP0D_GPF);
- /* XXX: store address ? */
- helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
-}
-
-void helper_mwait(int next_eip_addend)
-{
- if ((uint32_t)ECX != 0)
- raise_exception(EXCP0D_GPF);
- helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
- EIP += next_eip_addend;
-
- /* XXX: not complete but not completely erroneous */
- if (env->cpu_index != 0 || env->next_cpu != NULL) {
- /* more than one CPU: do not sleep because another CPU may
- wake this one */
- } else {
- do_hlt();
- }
-}
-
-void helper_debug(void)
-{
- env->exception_index = EXCP_DEBUG;
- cpu_loop_exit(env);
-}
-
-void helper_reset_rf(void)
-{
- env->eflags &= ~RF_MASK;
-}
-
-void helper_raise_interrupt(int intno, int next_eip_addend)
-{
- raise_interrupt(intno, 1, 0, next_eip_addend);
-}
-
-void helper_raise_exception(int exception_index)
-{
- raise_exception(exception_index);
-}
-
-void helper_cli(void)
-{
- env->eflags &= ~IF_MASK;
-}
-
-void helper_sti(void)
-{
- env->eflags |= IF_MASK;
-}
-
-#if 0
-/* vm86plus instructions */
-void helper_cli_vm(void)
-{
- env->eflags &= ~VIF_MASK;
-}
-
-void helper_sti_vm(void)
-{
- env->eflags |= VIF_MASK;
- if (env->eflags & VIP_MASK) {
- raise_exception(EXCP0D_GPF);
- }
-}
-#endif
-
-void helper_set_inhibit_irq(void)
-{
- env->hflags |= HF_INHIBIT_IRQ_MASK;
-}
-
-void helper_reset_inhibit_irq(void)
-{
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
-}
-
-void helper_boundw(target_ulong a0, int v)
-{
- int low, high;
- low = ldsw(a0);
- high = ldsw(a0 + 2);
- v = (int16_t)v;
- if (v < low || v > high) {
- raise_exception(EXCP05_BOUND);
- }
-}
-
-void helper_boundl(target_ulong a0, int v)
-{
- int low, high;
- low = ldl(a0);
- high = ldl(a0 + 4);
- if (v < low || v > high) {
- raise_exception(EXCP05_BOUND);
- }
-}
-
-#if !defined(CONFIG_USER_ONLY)
-
-#define MMUSUFFIX _mmu
-
-#define SHIFT 0
-#include "softmmu_template.h"
-
-#define SHIFT 1
-#include "softmmu_template.h"
-
-#define SHIFT 2
-#include "softmmu_template.h"
-
-#define SHIFT 3
-#include "softmmu_template.h"
-
-#endif
-
-#if !defined(CONFIG_USER_ONLY)
-/* try to fill the TLB and return an exception if error. If retaddr is
- NULL, it means that the function was called in C code (i.e. not
- from generated code or from helper.c) */
-/* XXX: fix it to restore all registers */
-void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
- uintptr_t retaddr)
-{
- TranslationBlock *tb;
- int ret;
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
-
- ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
- if (ret) {
- if (retaddr) {
- /* now we have a real cpu fault */
- tb = tb_find_pc(retaddr);
- if (tb) {
- /* the PC is inside the translated code. It means that we have
- a virtual CPU fault */
- cpu_restore_state(tb, env, retaddr);
- }
- }
- raise_exception_err(env->exception_index, env->error_code);
- }
- env = saved_env;
-}
-#endif
-
-/* Secure Virtual Machine helpers */
-
-#if defined(CONFIG_USER_ONLY)
-
-void helper_vmrun(int aflag, int next_eip_addend)
-{
-}
-void helper_vmmcall(void)
-{
-}
-void helper_vmload(int aflag)
-{
-}
-void helper_vmsave(int aflag)
-{
-}
-void helper_stgi(void)
-{
-}
-void helper_clgi(void)
-{
-}
-void helper_skinit(void)
-{
-}
-void helper_invlpga(int aflag)
-{
-}
-void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
-{
-}
-void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
-{
-}
-
-void svm_check_intercept(CPUX86State *env1, uint32_t type)
-{
-}
-
-void helper_svm_check_io(uint32_t port, uint32_t param,
- uint32_t next_eip_addend)
-{
-}
-#else
-
-static inline void svm_save_seg(target_phys_addr_t addr,
- const SegmentCache *sc)
-{
- stw_phys(addr + offsetof(struct vmcb_seg, selector),
- sc->selector);
- stq_phys(addr + offsetof(struct vmcb_seg, base),
- sc->base);
- stl_phys(addr + offsetof(struct vmcb_seg, limit),
- sc->limit);
- stw_phys(addr + offsetof(struct vmcb_seg, attrib),
- ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
-}
-
-static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
-{
- unsigned int flags;
-
- sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
- sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
- sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
- flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
- sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
-}
-
-static inline void svm_load_seg_cache(target_phys_addr_t addr,
- CPUX86State *env, int seg_reg)
-{
- SegmentCache sc1, *sc = &sc1;
- svm_load_seg(addr, sc);
- cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
- sc->base, sc->limit, sc->flags);
-}
-
-void helper_vmrun(int aflag, int next_eip_addend)
-{
- target_ulong addr;
- uint32_t event_inj;
- uint32_t int_ctl;
-
- helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
-
- if (aflag == 2)
- addr = EAX;
- else
- addr = (uint32_t)EAX;
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
-
- env->vm_vmcb = addr;
-
- /* save the current CPU state in the hsave page */
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
- stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
-
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
- stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
-
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
-
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
-
- svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
- &env->segs[R_ES]);
- svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
- &env->segs[R_CS]);
- svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
- &env->segs[R_SS]);
- svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
- &env->segs[R_DS]);
-
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
- EIP + next_eip_addend);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
- stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
-
- /* load the interception bitmaps so we do not need to access the
- vmcb in svm mode */
- env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
- env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
- env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
- env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
- env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
- env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
-
- /* enable intercepts */
- env->hflags |= HF_SVMI_MASK;
-
- env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));
-
- env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
- env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
-
- env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
- env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
-
- /* clear exit_info_2 so we behave like the real hardware */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
-
- cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
- cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
- cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
- env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
- int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
- env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
- if (int_ctl & V_INTR_MASKING_MASK) {
- env->v_tpr = int_ctl & V_TPR_MASK;
- env->hflags2 |= HF2_VINTR_MASK;
- if (env->eflags & IF_MASK)
- env->hflags2 |= HF2_HIF_MASK;
- }
-
- cpu_load_efer(env,
- ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
- env->eflags = 0;
- load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- CC_OP = CC_OP_EFLAGS;
-
- svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
- env, R_ES);
- svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
- env, R_CS);
- svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
- env, R_SS);
- svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
- env, R_DS);
-
- EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
- env->eip = EIP;
- ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
- EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
- env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
- env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
- cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
-
- /* FIXME: guest state consistency checks */
-
- switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
- case TLB_CONTROL_DO_NOTHING:
- break;
- case TLB_CONTROL_FLUSH_ALL_ASID:
- /* FIXME: this is not 100% correct but should work for now */
- tlb_flush(env, 1);
- break;
- }
-
- env->hflags2 |= HF2_GIF_MASK;
-
- if (int_ctl & V_IRQ_MASK) {
- env->interrupt_request |= CPU_INTERRUPT_VIRQ;
- }
-
- /* maybe we need to inject an event */
- event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
- if (event_inj & SVM_EVTINJ_VALID) {
- uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
- uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
- uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
- /* FIXME: need to implement valid_err */
- switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
- case SVM_EVTINJ_TYPE_INTR:
- env->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
- /* XXX: is it always correct ? */
- do_interrupt_all(vector, 0, 0, 0, 1);
- break;
- case SVM_EVTINJ_TYPE_NMI:
- env->exception_index = EXCP02_NMI;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = EIP;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
- cpu_loop_exit(env);
- break;
- case SVM_EVTINJ_TYPE_EXEPT:
- env->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 0;
- env->exception_next_eip = -1;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
- cpu_loop_exit(env);
- break;
- case SVM_EVTINJ_TYPE_SOFT:
- env->exception_index = vector;
- env->error_code = event_inj_err;
- env->exception_is_int = 1;
- env->exception_next_eip = EIP;
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
- cpu_loop_exit(env);
- break;
- }
- qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
- }
-}
-
-void helper_vmmcall(void)
-{
- helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
- raise_exception(EXCP06_ILLOP);
-}
-
-void helper_vmload(int aflag)
-{
- target_ulong addr;
- helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
-
- if (aflag == 2)
- addr = EAX;
- else
- addr = (uint32_t)EAX;
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
- addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
- env->segs[R_FS].base);
-
- svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
- env, R_FS);
- svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
- env, R_GS);
- svm_load_seg(addr + offsetof(struct vmcb, save.tr),
- &env->tr);
- svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
- &env->ldt);
-
-#ifdef TARGET_X86_64
- env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
- env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
- env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
- env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
-#endif
- env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
- env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
- env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
- env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
-}
-
-void helper_vmsave(int aflag)
-{
- target_ulong addr;
- helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
-
- if (aflag == 2)
- addr = EAX;
- else
- addr = (uint32_t)EAX;
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
- addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
- env->segs[R_FS].base);
-
- svm_save_seg(addr + offsetof(struct vmcb, save.fs),
- &env->segs[R_FS]);
- svm_save_seg(addr + offsetof(struct vmcb, save.gs),
- &env->segs[R_GS]);
- svm_save_seg(addr + offsetof(struct vmcb, save.tr),
- &env->tr);
- svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
- &env->ldt);
-
-#ifdef TARGET_X86_64
- stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
- stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
- stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
- stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
-#endif
- stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
- stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
-}
-
-void helper_stgi(void)
-{
- helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
- env->hflags2 |= HF2_GIF_MASK;
-}
-
-void helper_clgi(void)
-{
- helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
- env->hflags2 &= ~HF2_GIF_MASK;
-}
-
-void helper_skinit(void)
-{
- helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
- /* XXX: not implemented */
- raise_exception(EXCP06_ILLOP);
-}
-
-void helper_invlpga(int aflag)
-{
- target_ulong addr;
- helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
-
- if (aflag == 2)
- addr = EAX;
- else
- addr = (uint32_t)EAX;
-
- /* XXX: could use the ASID to see if it is needed to do the
- flush */
- tlb_flush_page(env, addr);
-}
-
-void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
-{
- if (likely(!(env->hflags & HF_SVMI_MASK)))
- return;
- switch(type) {
- case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
- if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
- helper_vmexit(type, param);
- }
- break;
- case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
- if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
- helper_vmexit(type, param);
- }
- break;
- case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
- if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
- helper_vmexit(type, param);
- }
- break;
- case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
- if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
- helper_vmexit(type, param);
- }
- break;
- case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
- if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
- helper_vmexit(type, param);
- }
- break;
- case SVM_EXIT_MSR:
- if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
- /* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
- uint32_t t0, t1;
- switch((uint32_t)ECX) {
- case 0 ... 0x1fff:
- t0 = (ECX * 2) % 8;
- t1 = (ECX * 2) / 8;
- break;
- case 0xc0000000 ... 0xc0001fff:
- t0 = (8192 + ECX - 0xc0000000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- case 0xc0010000 ... 0xc0011fff:
- t0 = (16384 + ECX - 0xc0010000) * 2;
- t1 = (t0 / 8);
- t0 %= 8;
- break;
- default:
- helper_vmexit(type, param);
- t0 = 0;
- t1 = 0;
- break;
- }
- if (ldub_phys(addr + t1) & ((1 << param) << t0))
- helper_vmexit(type, param);
- }
- break;
- default:
- if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
- helper_vmexit(type, param);
- }
- break;
- }
-}
-
-void svm_check_intercept(CPUX86State *env1, uint32_t type)
-{
- CPUX86State *saved_env;
-
- saved_env = env;
- env = env1;
- helper_svm_check_intercept_param(type, 0);
- env = saved_env;
-}
-
-void helper_svm_check_io(uint32_t port, uint32_t param,
- uint32_t next_eip_addend)
-{
- if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
- /* FIXME: this should be read in at vmrun (faster this way?) */
- uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
- uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
- if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
- /* next EIP */
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
- env->eip + next_eip_addend);
- helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
- }
- }
-}
-
-/* Note: currently only 32 bits of exit_code are used */
-void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
-{
- uint32_t int_ctl;
-
- qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
- exit_code, exit_info_1,
- ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
- EIP);
-
- if(env->hflags & HF_INHIBIT_IRQ_MASK) {
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
- } else {
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
- }
-
- /* Save the VM state in the vmcb */
- svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
- &env->segs[R_ES]);
- svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
- &env->segs[R_CS]);
- svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
- &env->segs[R_SS]);
- svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
- &env->segs[R_DS]);
-
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
-
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
-
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
-
- int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
- int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
- int_ctl |= env->v_tpr & V_TPR_MASK;
- if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
- int_ctl |= V_IRQ_MASK;
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
-
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
- stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
-
- /* Reload the host state from vm_hsave */
- env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
- env->hflags &= ~HF_SVMI_MASK;
- env->intercept = 0;
- env->intercept_exceptions = 0;
- env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
- env->tsc_offset = 0;
-
- env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
- env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
-
- env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
- env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
-
- cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
- cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
- cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
- /* we need to set the efer after the crs so the hidden flags get
- set properly */
- cpu_load_efer(env,
- ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
- env->eflags = 0;
- load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
- ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
- CC_OP = CC_OP_EFLAGS;
-
- svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
- env, R_ES);
- svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
- env, R_CS);
- svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
- env, R_SS);
- svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
- env, R_DS);
-
- EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
- ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
- EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
-
- env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
- env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
-
- /* other setups */
- cpu_x86_set_cpl(env, 0);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
- stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
-
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
- ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
- ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
- stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
-
- env->hflags2 &= ~HF2_GIF_MASK;
- /* FIXME: Resets the current ASID register to zero (host ASID). */
-
- /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
-
- /* Clears the TSC_OFFSET inside the processor. */
-
- /* If the host is in PAE mode, the processor reloads the host's PDPEs
- from the page table indicated by the host's CR3. If the PDPEs contain
- illegal state, the processor causes a shutdown. */
-
- /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
- env->cr[0] |= CR0_PE_MASK;
- env->eflags &= ~VM_MASK;
-
- /* Disables all breakpoints in the host DR7 register. */
-
- /* Checks the reloaded host state for consistency. */
-
- /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
- host's code segment or non-canonical (in the case of long mode), a
- #GP fault is delivered inside the host. */
-
- /* remove any pending exception */
- env->exception_index = -1;
- env->error_code = 0;
- env->old_exception = -1;
-
- cpu_loop_exit(env);
-}
-
-#endif
-
-/* MMX/SSE */
-/* XXX: optimize by storing fpstt and fptags in the static cpu state */
-
-#define SSE_DAZ 0x0040
-#define SSE_RC_MASK 0x6000
-#define SSE_RC_NEAR 0x0000
-#define SSE_RC_DOWN 0x2000
-#define SSE_RC_UP 0x4000
-#define SSE_RC_CHOP 0x6000
-#define SSE_FZ 0x8000
-
-static void update_sse_status(void)
-{
- int rnd_type;
-
- /* set rounding mode */
- switch(env->mxcsr & SSE_RC_MASK) {
- default:
- case SSE_RC_NEAR:
- rnd_type = float_round_nearest_even;
- break;
- case SSE_RC_DOWN:
- rnd_type = float_round_down;
- break;
- case SSE_RC_UP:
- rnd_type = float_round_up;
- break;
- case SSE_RC_CHOP:
- rnd_type = float_round_to_zero;
- break;
- }
- set_float_rounding_mode(rnd_type, &env->sse_status);
-
- /* set denormals are zero */
- set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
-
- /* set flush to zero */
- set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
-}
-
-void helper_ldmxcsr(uint32_t val)
-{
- env->mxcsr = val;
- update_sse_status();
-}
-
-void helper_enter_mmx(void)
-{
- env->fpstt = 0;
- *(uint32_t *)(env->fptags) = 0;
- *(uint32_t *)(env->fptags + 4) = 0;
-}
-
-void helper_emms(void)
-{
- /* set to empty state */
- *(uint32_t *)(env->fptags) = 0x01010101;
- *(uint32_t *)(env->fptags + 4) = 0x01010101;
-}
-
-/* XXX: suppress */
-void helper_movq(void *d, void *s)
-{
- *(uint64_t *)d = *(uint64_t *)s;
-}
-
-#define SHIFT 0
-#include "ops_sse.h"
-
-#define SHIFT 1
-#include "ops_sse.h"
-
-#define SHIFT 0
-#include "helper_template.h"
-#undef SHIFT
-
-#define SHIFT 1
-#include "helper_template.h"
-#undef SHIFT
-
-#define SHIFT 2
-#include "helper_template.h"
-#undef SHIFT
-
-#ifdef TARGET_X86_64
-
-#define SHIFT 3
-#include "helper_template.h"
-#undef SHIFT
-
-#endif
-
-/* bit operations */
-target_ulong helper_bsf(target_ulong t0)
-{
- int count;
- target_ulong res;
-
- res = t0;
- count = 0;
- while ((res & 1) == 0) {
- count++;
- res >>= 1;
- }
- return count;
-}
-
-target_ulong helper_lzcnt(target_ulong t0, int wordsize)
-{
- int count;
- target_ulong res, mask;
-
- if (wordsize > 0 && t0 == 0) {
- return wordsize;
- }
- res = t0;
- count = TARGET_LONG_BITS - 1;
- mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
- while ((res & mask) == 0) {
- count--;
- res <<= 1;
- }
- if (wordsize > 0) {
- return wordsize - 1 - count;
- }
- return count;
-}
-
-target_ulong helper_bsr(target_ulong t0)
-{
- return helper_lzcnt(t0, 0);
-}
-
-static int compute_all_eflags(void)
-{
- return CC_SRC;
-}
-
-static int compute_c_eflags(void)
-{
- return CC_SRC & CC_C;
-}
-
-uint32_t helper_cc_compute_all(int op)
-{
- switch (op) {
- default: /* should never happen */ return 0;
-
- case CC_OP_EFLAGS: return compute_all_eflags();
-
- case CC_OP_MULB: return compute_all_mulb();
- case CC_OP_MULW: return compute_all_mulw();
- case CC_OP_MULL: return compute_all_mull();
-
- case CC_OP_ADDB: return compute_all_addb();
- case CC_OP_ADDW: return compute_all_addw();
- case CC_OP_ADDL: return compute_all_addl();
-
- case CC_OP_ADCB: return compute_all_adcb();
- case CC_OP_ADCW: return compute_all_adcw();
- case CC_OP_ADCL: return compute_all_adcl();
-
- case CC_OP_SUBB: return compute_all_subb();
- case CC_OP_SUBW: return compute_all_subw();
- case CC_OP_SUBL: return compute_all_subl();
-
- case CC_OP_SBBB: return compute_all_sbbb();
- case CC_OP_SBBW: return compute_all_sbbw();
- case CC_OP_SBBL: return compute_all_sbbl();
-
- case CC_OP_LOGICB: return compute_all_logicb();
- case CC_OP_LOGICW: return compute_all_logicw();
- case CC_OP_LOGICL: return compute_all_logicl();
-
- case CC_OP_INCB: return compute_all_incb();
- case CC_OP_INCW: return compute_all_incw();
- case CC_OP_INCL: return compute_all_incl();
-
- case CC_OP_DECB: return compute_all_decb();
- case CC_OP_DECW: return compute_all_decw();
- case CC_OP_DECL: return compute_all_decl();
-
- case CC_OP_SHLB: return compute_all_shlb();
- case CC_OP_SHLW: return compute_all_shlw();
- case CC_OP_SHLL: return compute_all_shll();
-
- case CC_OP_SARB: return compute_all_sarb();
- case CC_OP_SARW: return compute_all_sarw();
- case CC_OP_SARL: return compute_all_sarl();
-
-#ifdef TARGET_X86_64
- case CC_OP_MULQ: return compute_all_mulq();
-
- case CC_OP_ADDQ: return compute_all_addq();
-
- case CC_OP_ADCQ: return compute_all_adcq();
-
- case CC_OP_SUBQ: return compute_all_subq();
-
- case CC_OP_SBBQ: return compute_all_sbbq();
-
- case CC_OP_LOGICQ: return compute_all_logicq();
-
- case CC_OP_INCQ: return compute_all_incq();
-
- case CC_OP_DECQ: return compute_all_decq();
-
- case CC_OP_SHLQ: return compute_all_shlq();
-
- case CC_OP_SARQ: return compute_all_sarq();
-#endif
- }
-}
-
-uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
-{
- CPUX86State *saved_env;
- uint32_t ret;
-
- saved_env = env;
- env = env1;
- ret = helper_cc_compute_all(op);
- env = saved_env;
- return ret;
-}
-
-uint32_t helper_cc_compute_c(int op)
-{
- switch (op) {
- default: /* should never happen */ return 0;
-
- case CC_OP_EFLAGS: return compute_c_eflags();
-
- case CC_OP_MULB: return compute_c_mull();
- case CC_OP_MULW: return compute_c_mull();
- case CC_OP_MULL: return compute_c_mull();
-
- case CC_OP_ADDB: return compute_c_addb();
- case CC_OP_ADDW: return compute_c_addw();
- case CC_OP_ADDL: return compute_c_addl();
-
- case CC_OP_ADCB: return compute_c_adcb();
- case CC_OP_ADCW: return compute_c_adcw();
- case CC_OP_ADCL: return compute_c_adcl();
-
- case CC_OP_SUBB: return compute_c_subb();
- case CC_OP_SUBW: return compute_c_subw();
- case CC_OP_SUBL: return compute_c_subl();
-
- case CC_OP_SBBB: return compute_c_sbbb();
- case CC_OP_SBBW: return compute_c_sbbw();
- case CC_OP_SBBL: return compute_c_sbbl();
-
- case CC_OP_LOGICB: return compute_c_logicb();
- case CC_OP_LOGICW: return compute_c_logicw();
- case CC_OP_LOGICL: return compute_c_logicl();
-
- case CC_OP_INCB: return compute_c_incl();
- case CC_OP_INCW: return compute_c_incl();
- case CC_OP_INCL: return compute_c_incl();
-
- case CC_OP_DECB: return compute_c_incl();
- case CC_OP_DECW: return compute_c_incl();
- case CC_OP_DECL: return compute_c_incl();
-
- case CC_OP_SHLB: return compute_c_shlb();
- case CC_OP_SHLW: return compute_c_shlw();
- case CC_OP_SHLL: return compute_c_shll();
-
- case CC_OP_SARB: return compute_c_sarl();
- case CC_OP_SARW: return compute_c_sarl();
- case CC_OP_SARL: return compute_c_sarl();
-
-#ifdef TARGET_X86_64
- case CC_OP_MULQ: return compute_c_mull();
-
- case CC_OP_ADDQ: return compute_c_addq();
-
- case CC_OP_ADCQ: return compute_c_adcq();
-
- case CC_OP_SUBQ: return compute_c_subq();
-
- case CC_OP_SBBQ: return compute_c_sbbq();
-
- case CC_OP_LOGICQ: return compute_c_logicq();
-
- case CC_OP_INCQ: return compute_c_incl();
-
- case CC_OP_DECQ: return compute_c_incl();
-
- case CC_OP_SHLQ: return compute_c_shlq();
-
- case CC_OP_SARQ: return compute_c_sarl();
-#endif
- }
-}
diff --git a/target-i386/ops_sse.h b/target-i386/ops_sse.h
index 0d33ca1985..d109512d5b 100644
--- a/target-i386/ops_sse.h
+++ b/target-i386/ops_sse.h
@@ -203,12 +203,15 @@ void glue(helper_psrldq, SUFFIX)(Reg *d, Reg *s)
int shift, i;
shift = s->L(0);
- if (shift > 16)
+ if (shift > 16) {
shift = 16;
- for(i = 0; i < 16 - shift; i++)
+ }
+ for (i = 0; i < 16 - shift; i++) {
d->B(i) = d->B(i + shift);
- for(i = 16 - shift; i < 16; i++)
+ }
+ for (i = 16 - shift; i < 16; i++) {
d->B(i) = 0;
+ }
}
void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s)
@@ -216,112 +219,119 @@ void glue(helper_pslldq, SUFFIX)(Reg *d, Reg *s)
int shift, i;
shift = s->L(0);
- if (shift > 16)
+ if (shift > 16) {
shift = 16;
- for(i = 15; i >= shift; i--)
+ }
+ for (i = 15; i >= shift; i--) {
d->B(i) = d->B(i - shift);
- for(i = 0; i < shift; i++)
+ }
+ for (i = 0; i < shift; i++) {
d->B(i) = 0;
+ }
}
#endif
-#define SSE_HELPER_B(name, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->B(0) = F(d->B(0), s->B(0));\
- d->B(1) = F(d->B(1), s->B(1));\
- d->B(2) = F(d->B(2), s->B(2));\
- d->B(3) = F(d->B(3), s->B(3));\
- d->B(4) = F(d->B(4), s->B(4));\
- d->B(5) = F(d->B(5), s->B(5));\
- d->B(6) = F(d->B(6), s->B(6));\
- d->B(7) = F(d->B(7), s->B(7));\
- XMM_ONLY(\
- d->B(8) = F(d->B(8), s->B(8));\
- d->B(9) = F(d->B(9), s->B(9));\
- d->B(10) = F(d->B(10), s->B(10));\
- d->B(11) = F(d->B(11), s->B(11));\
- d->B(12) = F(d->B(12), s->B(12));\
- d->B(13) = F(d->B(13), s->B(13));\
- d->B(14) = F(d->B(14), s->B(14));\
- d->B(15) = F(d->B(15), s->B(15));\
- )\
-}
-
-#define SSE_HELPER_W(name, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->W(0) = F(d->W(0), s->W(0));\
- d->W(1) = F(d->W(1), s->W(1));\
- d->W(2) = F(d->W(2), s->W(2));\
- d->W(3) = F(d->W(3), s->W(3));\
- XMM_ONLY(\
- d->W(4) = F(d->W(4), s->W(4));\
- d->W(5) = F(d->W(5), s->W(5));\
- d->W(6) = F(d->W(6), s->W(6));\
- d->W(7) = F(d->W(7), s->W(7));\
- )\
-}
-
-#define SSE_HELPER_L(name, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->L(0) = F(d->L(0), s->L(0));\
- d->L(1) = F(d->L(1), s->L(1));\
- XMM_ONLY(\
- d->L(2) = F(d->L(2), s->L(2));\
- d->L(3) = F(d->L(3), s->L(3));\
- )\
-}
-
-#define SSE_HELPER_Q(name, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->Q(0) = F(d->Q(0), s->Q(0));\
- XMM_ONLY(\
- d->Q(1) = F(d->Q(1), s->Q(1));\
- )\
-}
+#define SSE_HELPER_B(name, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->B(0) = F(d->B(0), s->B(0)); \
+ d->B(1) = F(d->B(1), s->B(1)); \
+ d->B(2) = F(d->B(2), s->B(2)); \
+ d->B(3) = F(d->B(3), s->B(3)); \
+ d->B(4) = F(d->B(4), s->B(4)); \
+ d->B(5) = F(d->B(5), s->B(5)); \
+ d->B(6) = F(d->B(6), s->B(6)); \
+ d->B(7) = F(d->B(7), s->B(7)); \
+ XMM_ONLY( \
+ d->B(8) = F(d->B(8), s->B(8)); \
+ d->B(9) = F(d->B(9), s->B(9)); \
+ d->B(10) = F(d->B(10), s->B(10)); \
+ d->B(11) = F(d->B(11), s->B(11)); \
+ d->B(12) = F(d->B(12), s->B(12)); \
+ d->B(13) = F(d->B(13), s->B(13)); \
+ d->B(14) = F(d->B(14), s->B(14)); \
+ d->B(15) = F(d->B(15), s->B(15)); \
+ ) \
+ }
+
+#define SSE_HELPER_W(name, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->W(0) = F(d->W(0), s->W(0)); \
+ d->W(1) = F(d->W(1), s->W(1)); \
+ d->W(2) = F(d->W(2), s->W(2)); \
+ d->W(3) = F(d->W(3), s->W(3)); \
+ XMM_ONLY( \
+ d->W(4) = F(d->W(4), s->W(4)); \
+ d->W(5) = F(d->W(5), s->W(5)); \
+ d->W(6) = F(d->W(6), s->W(6)); \
+ d->W(7) = F(d->W(7), s->W(7)); \
+ ) \
+ }
+
+#define SSE_HELPER_L(name, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->L(0) = F(d->L(0), s->L(0)); \
+ d->L(1) = F(d->L(1), s->L(1)); \
+ XMM_ONLY( \
+ d->L(2) = F(d->L(2), s->L(2)); \
+ d->L(3) = F(d->L(3), s->L(3)); \
+ ) \
+ }
+
+#define SSE_HELPER_Q(name, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->Q(0) = F(d->Q(0), s->Q(0)); \
+ XMM_ONLY( \
+ d->Q(1) = F(d->Q(1), s->Q(1)); \
+ ) \
+ }
#if SHIFT == 0
static inline int satub(int x)
{
- if (x < 0)
+ if (x < 0) {
return 0;
- else if (x > 255)
+ } else if (x > 255) {
return 255;
- else
+ } else {
return x;
+ }
}
static inline int satuw(int x)
{
- if (x < 0)
+ if (x < 0) {
return 0;
- else if (x > 65535)
+ } else if (x > 65535) {
return 65535;
- else
+ } else {
return x;
+ }
}
static inline int satsb(int x)
{
- if (x < -128)
+ if (x < -128) {
return -128;
- else if (x > 127)
+ } else if (x > 127) {
return 127;
- else
+ } else {
return x;
+ }
}
static inline int satsw(int x)
{
- if (x < -32768)
+ if (x < -32768) {
return -32768;
- else if (x > 32767)
+ } else if (x > 32767) {
return 32767;
- else
+ } else {
return x;
+ }
}
#define FADD(a, b) ((a) + (b))
@@ -340,22 +350,22 @@ static inline int satsw(int x)
#define FMAXUB(a, b) ((a) > (b)) ? (a) : (b)
#define FMAXSW(a, b) ((int16_t)(a) > (int16_t)(b)) ? (a) : (b)
-#define FAND(a, b) (a) & (b)
+#define FAND(a, b) ((a) & (b))
#define FANDN(a, b) ((~(a)) & (b))
-#define FOR(a, b) (a) | (b)
-#define FXOR(a, b) (a) ^ (b)
+#define FOR(a, b) ((a) | (b))
+#define FXOR(a, b) ((a) ^ (b))
-#define FCMPGTB(a, b) (int8_t)(a) > (int8_t)(b) ? -1 : 0
-#define FCMPGTW(a, b) (int16_t)(a) > (int16_t)(b) ? -1 : 0
-#define FCMPGTL(a, b) (int32_t)(a) > (int32_t)(b) ? -1 : 0
-#define FCMPEQ(a, b) (a) == (b) ? -1 : 0
+#define FCMPGTB(a, b) ((int8_t)(a) > (int8_t)(b) ? -1 : 0)
+#define FCMPGTW(a, b) ((int16_t)(a) > (int16_t)(b) ? -1 : 0)
+#define FCMPGTL(a, b) ((int32_t)(a) > (int32_t)(b) ? -1 : 0)
+#define FCMPEQ(a, b) ((a) == (b) ? -1 : 0)
-#define FMULLW(a, b) (a) * (b)
-#define FMULHRW(a, b) ((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16
-#define FMULHUW(a, b) (a) * (b) >> 16
-#define FMULHW(a, b) (int16_t)(a) * (int16_t)(b) >> 16
+#define FMULLW(a, b) ((a) * (b))
+#define FMULHRW(a, b) (((int16_t)(a) * (int16_t)(b) + 0x8000) >> 16)
+#define FMULHUW(a, b) ((a) * (b) >> 16)
+#define FMULHW(a, b) ((int16_t)(a) * (int16_t)(b) >> 16)
-#define FAVG(a, b) ((a) + (b) + 1) >> 1
+#define FAVG(a, b) (((a) + (b) + 1) >> 1)
#endif
SSE_HELPER_B(helper_paddb, FADD)
@@ -407,7 +417,7 @@ SSE_HELPER_W(helper_pmulhw, FMULHW)
SSE_HELPER_B(helper_pavgb, FAVG)
SSE_HELPER_W(helper_pavgw, FAVG)
-void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_pmuludq, SUFFIX)(Reg *d, Reg *s)
{
d->Q(0) = (uint64_t)s->L(0) * (uint64_t)d->L(0);
#if SHIFT == 1
@@ -415,26 +425,27 @@ void glue(helper_pmuludq, SUFFIX) (Reg *d, Reg *s)
#endif
}
-void glue(helper_pmaddwd, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_pmaddwd, SUFFIX)(Reg *d, Reg *s)
{
int i;
- for(i = 0; i < (2 << SHIFT); i++) {
- d->L(i) = (int16_t)s->W(2*i) * (int16_t)d->W(2*i) +
- (int16_t)s->W(2*i+1) * (int16_t)d->W(2*i+1);
+ for (i = 0; i < (2 << SHIFT); i++) {
+ d->L(i) = (int16_t)s->W(2 * i) * (int16_t)d->W(2 * i) +
+ (int16_t)s->W(2 * i + 1) * (int16_t)d->W(2 * i + 1);
}
}
#if SHIFT == 0
static inline int abs1(int a)
{
- if (a < 0)
+ if (a < 0) {
return -a;
- else
+ } else {
return a;
+ }
}
#endif
-void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_psadbw, SUFFIX)(Reg *d, Reg *s)
{
unsigned int val;
@@ -462,16 +473,18 @@ void glue(helper_psadbw, SUFFIX) (Reg *d, Reg *s)
#endif
}
-void glue(helper_maskmov, SUFFIX) (Reg *d, Reg *s, target_ulong a0)
+void glue(helper_maskmov, SUFFIX)(Reg *d, Reg *s, target_ulong a0)
{
int i;
- for(i = 0; i < (8 << SHIFT); i++) {
- if (s->B(i) & 0x80)
+
+ for (i = 0; i < (8 << SHIFT); i++) {
+ if (s->B(i) & 0x80) {
stb(a0 + i, d->B(i));
+ }
}
}
-void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val)
+void glue(helper_movl_mm_T0, SUFFIX)(Reg *d, uint32_t val)
{
d->L(0) = val;
d->L(1) = 0;
@@ -481,7 +494,7 @@ void glue(helper_movl_mm_T0, SUFFIX) (Reg *d, uint32_t val)
}
#ifdef TARGET_X86_64
-void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val)
+void glue(helper_movq_mm_T0, SUFFIX)(Reg *d, uint64_t val)
{
d->Q(0) = val;
#if SHIFT == 1
@@ -491,9 +504,10 @@ void glue(helper_movq_mm_T0, SUFFIX) (Reg *d, uint64_t val)
#endif
#if SHIFT == 0
-void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order)
+void glue(helper_pshufw, SUFFIX)(Reg *d, Reg *s, int order)
{
Reg r;
+
r.W(0) = s->W(order & 3);
r.W(1) = s->W((order >> 2) & 3);
r.W(2) = s->W((order >> 4) & 3);
@@ -504,6 +518,7 @@ void glue(helper_pshufw, SUFFIX) (Reg *d, Reg *s, int order)
void helper_shufps(Reg *d, Reg *s, int order)
{
Reg r;
+
r.L(0) = d->L(order & 3);
r.L(1) = d->L((order >> 2) & 3);
r.L(2) = s->L((order >> 4) & 3);
@@ -514,14 +529,16 @@ void helper_shufps(Reg *d, Reg *s, int order)
void helper_shufpd(Reg *d, Reg *s, int order)
{
Reg r;
+
r.Q(0) = d->Q(order & 1);
r.Q(1) = s->Q((order >> 1) & 1);
*d = r;
}
-void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order)
+void glue(helper_pshufd, SUFFIX)(Reg *d, Reg *s, int order)
{
Reg r;
+
r.L(0) = s->L(order & 3);
r.L(1) = s->L((order >> 2) & 3);
r.L(2) = s->L((order >> 4) & 3);
@@ -529,9 +546,10 @@ void glue(helper_pshufd, SUFFIX) (Reg *d, Reg *s, int order)
*d = r;
}
-void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order)
+void glue(helper_pshuflw, SUFFIX)(Reg *d, Reg *s, int order)
{
Reg r;
+
r.W(0) = s->W(order & 3);
r.W(1) = s->W((order >> 2) & 3);
r.W(2) = s->W((order >> 4) & 3);
@@ -540,9 +558,10 @@ void glue(helper_pshuflw, SUFFIX) (Reg *d, Reg *s, int order)
*d = r;
}
-void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order)
+void glue(helper_pshufhw, SUFFIX)(Reg *d, Reg *s, int order)
{
Reg r;
+
r.Q(0) = s->Q(0);
r.W(4) = s->W(4 + (order & 3));
r.W(5) = s->W(4 + ((order >> 2) & 3));
@@ -556,29 +575,30 @@ void glue(helper_pshufhw, SUFFIX) (Reg *d, Reg *s, int order)
/* FPU ops */
/* XXX: not accurate */
-#define SSE_HELPER_S(name, F)\
-void helper_ ## name ## ps (Reg *d, Reg *s)\
-{\
- d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
- d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
- d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
- d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
-}\
-\
-void helper_ ## name ## ss (Reg *d, Reg *s)\
-{\
- d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
-}\
-void helper_ ## name ## pd (Reg *d, Reg *s)\
-{\
- d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
- d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
-}\
-\
-void helper_ ## name ## sd (Reg *d, Reg *s)\
-{\
- d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
-}
+#define SSE_HELPER_S(name, F) \
+ void helper_ ## name ## ps(Reg *d, Reg *s) \
+ { \
+ d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ d->XMM_S(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
+ d->XMM_S(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
+ d->XMM_S(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(Reg *d, Reg *s) \
+ { \
+ d->XMM_S(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(Reg *d, Reg *s) \
+ { \
+ d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ d->XMM_D(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(Reg *d, Reg *s) \
+ { \
+ d->XMM_D(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ }
#define FPU_ADD(size, a, b) float ## size ## _add(a, b, &env->sse_status)
#define FPU_SUB(size, a, b) float ## size ## _sub(a, b, &env->sse_status)
@@ -590,8 +610,10 @@ void helper_ ## name ## sd (Reg *d, Reg *s)\
* special cases right: for min and max Intel specifies that (-0,0),
* (NaN, anything) and (anything, NaN) return the second argument.
*/
-#define FPU_MIN(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b)
-#define FPU_MAX(size, a, b) float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b)
+#define FPU_MIN(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? (a) : (b))
+#define FPU_MAX(size, a, b) \
+ (float ## size ## _lt(b, a, &env->sse_status) ? (a) : (b))
SSE_HELPER_S(add, FPU_ADD)
SSE_HELPER_S(sub, FPU_SUB)
@@ -606,6 +628,7 @@ SSE_HELPER_S(sqrt, FPU_SQRT)
void helper_cvtps2pd(Reg *d, Reg *s)
{
float32 s0, s1;
+
s0 = s->XMM_S(0);
s1 = s->XMM_S(1);
d->XMM_D(0) = float32_to_float64(s0, &env->sse_status);
@@ -641,6 +664,7 @@ void helper_cvtdq2ps(Reg *d, Reg *s)
void helper_cvtdq2pd(Reg *d, Reg *s)
{
int32_t l0, l1;
+
l0 = (int32_t)s->XMM_L(0);
l1 = (int32_t)s->XMM_L(1);
d->XMM_D(0) = int32_to_float64(l0, &env->sse_status);
@@ -864,6 +888,7 @@ void helper_insertq_i(XMMReg *d, int index, int length)
void helper_haddps(XMMReg *d, XMMReg *s)
{
XMMReg r;
+
r.XMM_S(0) = float32_add(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
r.XMM_S(1) = float32_add(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
r.XMM_S(2) = float32_add(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
@@ -874,6 +899,7 @@ void helper_haddps(XMMReg *d, XMMReg *s)
void helper_haddpd(XMMReg *d, XMMReg *s)
{
XMMReg r;
+
r.XMM_D(0) = float64_add(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
r.XMM_D(1) = float64_add(s->XMM_D(0), s->XMM_D(1), &env->sse_status);
*d = r;
@@ -882,6 +908,7 @@ void helper_haddpd(XMMReg *d, XMMReg *s)
void helper_hsubps(XMMReg *d, XMMReg *s)
{
XMMReg r;
+
r.XMM_S(0) = float32_sub(d->XMM_S(0), d->XMM_S(1), &env->sse_status);
r.XMM_S(1) = float32_sub(d->XMM_S(2), d->XMM_S(3), &env->sse_status);
r.XMM_S(2) = float32_sub(s->XMM_S(0), s->XMM_S(1), &env->sse_status);
@@ -892,6 +919,7 @@ void helper_hsubps(XMMReg *d, XMMReg *s)
void helper_hsubpd(XMMReg *d, XMMReg *s)
{
XMMReg r;
+
r.XMM_D(0) = float64_sub(d->XMM_D(0), d->XMM_D(1), &env->sse_status);
r.XMM_D(1) = float64_sub(s->XMM_D(0), s->XMM_D(1), &env->sse_status);
*d = r;
@@ -912,38 +940,47 @@ void helper_addsubpd(XMMReg *d, XMMReg *s)
}
/* XXX: unordered */
-#define SSE_HELPER_CMP(name, F)\
-void helper_ ## name ## ps (Reg *d, Reg *s)\
-{\
- d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
- d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1));\
- d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2));\
- d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3));\
-}\
-\
-void helper_ ## name ## ss (Reg *d, Reg *s)\
-{\
- d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0));\
-}\
-void helper_ ## name ## pd (Reg *d, Reg *s)\
-{\
- d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
- d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1));\
-}\
-\
-void helper_ ## name ## sd (Reg *d, Reg *s)\
-{\
- d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0));\
-}
-
-#define FPU_CMPEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0
-#define FPU_CMPLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0
-#define FPU_CMPLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? -1 : 0
-#define FPU_CMPUNORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? - 1 : 0
-#define FPU_CMPNEQ(size, a, b) float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1
-#define FPU_CMPNLT(size, a, b) float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1
-#define FPU_CMPNLE(size, a, b) float ## size ## _le(a, b, &env->sse_status) ? 0 : -1
-#define FPU_CMPORD(size, a, b) float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1
+#define SSE_HELPER_CMP(name, F) \
+ void helper_ ## name ## ps(Reg *d, Reg *s) \
+ { \
+ d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ d->XMM_L(1) = F(32, d->XMM_S(1), s->XMM_S(1)); \
+ d->XMM_L(2) = F(32, d->XMM_S(2), s->XMM_S(2)); \
+ d->XMM_L(3) = F(32, d->XMM_S(3), s->XMM_S(3)); \
+ } \
+ \
+ void helper_ ## name ## ss(Reg *d, Reg *s) \
+ { \
+ d->XMM_L(0) = F(32, d->XMM_S(0), s->XMM_S(0)); \
+ } \
+ \
+ void helper_ ## name ## pd(Reg *d, Reg *s) \
+ { \
+ d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ d->XMM_Q(1) = F(64, d->XMM_D(1), s->XMM_D(1)); \
+ } \
+ \
+ void helper_ ## name ## sd(Reg *d, Reg *s) \
+ { \
+ d->XMM_Q(0) = F(64, d->XMM_D(0), s->XMM_D(0)); \
+ }
+
+#define FPU_CMPEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPUNORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? -1 : 0)
+#define FPU_CMPNEQ(size, a, b) \
+ (float ## size ## _eq_quiet(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLT(size, a, b) \
+ (float ## size ## _lt(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPNLE(size, a, b) \
+ (float ## size ## _le(a, b, &env->sse_status) ? 0 : -1)
+#define FPU_CMPORD(size, a, b) \
+ (float ## size ## _unordered_quiet(a, b, &env->sse_status) ? 0 : -1)
SSE_HELPER_CMP(cmpeq, FPU_CMPEQ)
SSE_HELPER_CMP(cmplt, FPU_CMPLT)
@@ -1003,6 +1040,7 @@ void helper_comisd(Reg *d, Reg *s)
uint32_t helper_movmskps(Reg *s)
{
int b0, b1, b2, b3;
+
b0 = s->XMM_L(0) >> 31;
b1 = s->XMM_L(1) >> 31;
b2 = s->XMM_L(2) >> 31;
@@ -1013,6 +1051,7 @@ uint32_t helper_movmskps(Reg *s)
uint32_t helper_movmskpd(Reg *s)
{
int b0, b1;
+
b0 = s->XMM_L(1) >> 31;
b1 = s->XMM_L(3) >> 31;
return b0 | (b1 << 1);
@@ -1023,6 +1062,7 @@ uint32_t helper_movmskpd(Reg *s)
uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s)
{
uint32_t val;
+
val = 0;
val |= (s->B(0) >> 7);
val |= (s->B(1) >> 6) & 0x02;
@@ -1045,7 +1085,7 @@ uint32_t glue(helper_pmovmskb, SUFFIX)(Reg *s)
return val;
}
-void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_packsswb, SUFFIX)(Reg *d, Reg *s)
{
Reg r;
@@ -1072,7 +1112,7 @@ void glue(helper_packsswb, SUFFIX) (Reg *d, Reg *s)
*d = r;
}
-void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_packuswb, SUFFIX)(Reg *d, Reg *s)
{
Reg r;
@@ -1099,7 +1139,7 @@ void glue(helper_packuswb, SUFFIX) (Reg *d, Reg *s)
*d = r;
}
-void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_packssdw, SUFFIX)(Reg *d, Reg *s)
{
Reg r;
@@ -1118,73 +1158,74 @@ void glue(helper_packssdw, SUFFIX) (Reg *d, Reg *s)
*d = r;
}
-#define UNPCK_OP(base_name, base) \
- \
-void glue(helper_punpck ## base_name ## bw, SUFFIX) (Reg *d, Reg *s) \
-{ \
- Reg r; \
- \
- r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
- r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
- r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
- r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
- r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
- r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
- r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
- r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
-XMM_ONLY( \
- r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
- r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
- r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
- r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
- r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
- r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
- r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
- r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
-) \
- *d = r; \
-} \
- \
-void glue(helper_punpck ## base_name ## wd, SUFFIX) (Reg *d, Reg *s) \
-{ \
- Reg r; \
- \
- r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
- r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
- r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
- r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
-XMM_ONLY( \
- r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
- r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
- r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
- r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
-) \
- *d = r; \
-} \
- \
-void glue(helper_punpck ## base_name ## dq, SUFFIX) (Reg *d, Reg *s) \
-{ \
- Reg r; \
- \
- r.L(0) = d->L((base << SHIFT) + 0); \
- r.L(1) = s->L((base << SHIFT) + 0); \
-XMM_ONLY( \
- r.L(2) = d->L((base << SHIFT) + 1); \
- r.L(3) = s->L((base << SHIFT) + 1); \
-) \
- *d = r; \
-} \
- \
-XMM_ONLY( \
-void glue(helper_punpck ## base_name ## qdq, SUFFIX) (Reg *d, Reg *s) \
-{ \
- Reg r; \
- \
- r.Q(0) = d->Q(base); \
- r.Q(1) = s->Q(base); \
- *d = r; \
-} \
-)
+#define UNPCK_OP(base_name, base) \
+ \
+ void glue(helper_punpck ## base_name ## bw, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.B(0) = d->B((base << (SHIFT + 2)) + 0); \
+ r.B(1) = s->B((base << (SHIFT + 2)) + 0); \
+ r.B(2) = d->B((base << (SHIFT + 2)) + 1); \
+ r.B(3) = s->B((base << (SHIFT + 2)) + 1); \
+ r.B(4) = d->B((base << (SHIFT + 2)) + 2); \
+ r.B(5) = s->B((base << (SHIFT + 2)) + 2); \
+ r.B(6) = d->B((base << (SHIFT + 2)) + 3); \
+ r.B(7) = s->B((base << (SHIFT + 2)) + 3); \
+ XMM_ONLY( \
+ r.B(8) = d->B((base << (SHIFT + 2)) + 4); \
+ r.B(9) = s->B((base << (SHIFT + 2)) + 4); \
+ r.B(10) = d->B((base << (SHIFT + 2)) + 5); \
+ r.B(11) = s->B((base << (SHIFT + 2)) + 5); \
+ r.B(12) = d->B((base << (SHIFT + 2)) + 6); \
+ r.B(13) = s->B((base << (SHIFT + 2)) + 6); \
+ r.B(14) = d->B((base << (SHIFT + 2)) + 7); \
+ r.B(15) = s->B((base << (SHIFT + 2)) + 7); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## wd, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.W(0) = d->W((base << (SHIFT + 1)) + 0); \
+ r.W(1) = s->W((base << (SHIFT + 1)) + 0); \
+ r.W(2) = d->W((base << (SHIFT + 1)) + 1); \
+ r.W(3) = s->W((base << (SHIFT + 1)) + 1); \
+ XMM_ONLY( \
+ r.W(4) = d->W((base << (SHIFT + 1)) + 2); \
+ r.W(5) = s->W((base << (SHIFT + 1)) + 2); \
+ r.W(6) = d->W((base << (SHIFT + 1)) + 3); \
+ r.W(7) = s->W((base << (SHIFT + 1)) + 3); \
+ ) \
+ *d = r; \
+ } \
+ \
+ void glue(helper_punpck ## base_name ## dq, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.L(0) = d->L((base << SHIFT) + 0); \
+ r.L(1) = s->L((base << SHIFT) + 0); \
+ XMM_ONLY( \
+ r.L(2) = d->L((base << SHIFT) + 1); \
+ r.L(3) = s->L((base << SHIFT) + 1); \
+ ) \
+ *d = r; \
+ } \
+ \
+ XMM_ONLY( \
+ void glue(helper_punpck ## base_name ## qdq, SUFFIX)(Reg *d, \
+ Reg *s) \
+ { \
+ Reg r; \
+ \
+ r.Q(0) = d->Q(base); \
+ r.Q(1) = s->Q(base); \
+ *d = r; \
+ } \
+ )
UNPCK_OP(l, 0)
UNPCK_OP(h, 1)
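
Illustrative only (the names here are ours, not QEMU's): UNPCK_OP interleaves elements taken from the low half (base 0, the "l" forms) or the high half (base 1, the "h" forms) of destination and source. For the 128-bit punpcklbw case the pattern reduces to:

#include <stdint.h>

/* punpcklbw, 128-bit case: result bytes alternate d[0], s[0], d[1], s[1], ... */
static void punpcklbw_model(uint8_t d[16], const uint8_t s[16])
{
    uint8_t r[16];

    for (int i = 0; i < 8; i++) {
        r[2 * i]     = d[i];   /* even bytes come from the destination */
        r[2 * i + 1] = s[i];   /* odd bytes come from the source */
    }
    for (int i = 0; i < 16; i++) {
        d[i] = r[i];
    }
}
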
@@ -1211,13 +1252,16 @@ void helper_pf2id(MMXReg *d, MMXReg *s)
void helper_pf2iw(MMXReg *d, MMXReg *s)
{
- d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0), &env->mmx_status));
- d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1), &env->mmx_status));
+ d->MMX_L(0) = satsw(float32_to_int32_round_to_zero(s->MMX_S(0),
+ &env->mmx_status));
+ d->MMX_L(1) = satsw(float32_to_int32_round_to_zero(s->MMX_S(1),
+ &env->mmx_status));
}
void helper_pfacc(MMXReg *d, MMXReg *s)
{
MMXReg r;
+
r.MMX_S(0) = float32_add(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
*d = r;
@@ -1231,36 +1275,46 @@ void helper_pfadd(MMXReg *d, MMXReg *s)
void helper_pfcmpeq(MMXReg *d, MMXReg *s)
{
- d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0), &env->mmx_status) ? -1 : 0;
- d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1), &env->mmx_status) ? -1 : 0;
+ d->MMX_L(0) = float32_eq_quiet(d->MMX_S(0), s->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_eq_quiet(d->MMX_S(1), s->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
}
void helper_pfcmpge(MMXReg *d, MMXReg *s)
{
- d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
- d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
+ d->MMX_L(0) = float32_le(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_le(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
}
void helper_pfcmpgt(MMXReg *d, MMXReg *s)
{
- d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status) ? -1 : 0;
- d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status) ? -1 : 0;
+ d->MMX_L(0) = float32_lt(s->MMX_S(0), d->MMX_S(0),
+ &env->mmx_status) ? -1 : 0;
+ d->MMX_L(1) = float32_lt(s->MMX_S(1), d->MMX_S(1),
+ &env->mmx_status) ? -1 : 0;
}
void helper_pfmax(MMXReg *d, MMXReg *s)
{
- if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status))
+ if (float32_lt(d->MMX_S(0), s->MMX_S(0), &env->mmx_status)) {
d->MMX_S(0) = s->MMX_S(0);
- if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status))
+ }
+ if (float32_lt(d->MMX_S(1), s->MMX_S(1), &env->mmx_status)) {
d->MMX_S(1) = s->MMX_S(1);
+ }
}
void helper_pfmin(MMXReg *d, MMXReg *s)
{
- if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status))
+ if (float32_lt(s->MMX_S(0), d->MMX_S(0), &env->mmx_status)) {
d->MMX_S(0) = s->MMX_S(0);
- if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status))
+ }
+ if (float32_lt(s->MMX_S(1), d->MMX_S(1), &env->mmx_status)) {
d->MMX_S(1) = s->MMX_S(1);
+ }
}
void helper_pfmul(MMXReg *d, MMXReg *s)
@@ -1272,6 +1326,7 @@ void helper_pfmul(MMXReg *d, MMXReg *s)
void helper_pfnacc(MMXReg *d, MMXReg *s)
{
MMXReg r;
+
r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
r.MMX_S(1) = float32_sub(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
*d = r;
@@ -1280,6 +1335,7 @@ void helper_pfnacc(MMXReg *d, MMXReg *s)
void helper_pfpnacc(MMXReg *d, MMXReg *s)
{
MMXReg r;
+
r.MMX_S(0) = float32_sub(d->MMX_S(0), d->MMX_S(1), &env->mmx_status);
r.MMX_S(1) = float32_add(s->MMX_S(0), s->MMX_S(1), &env->mmx_status);
*d = r;
@@ -1316,6 +1372,7 @@ void helper_pfsubr(MMXReg *d, MMXReg *s)
void helper_pswapd(MMXReg *d, MMXReg *s)
{
MMXReg r;
+
r.MMX_L(0) = s->MMX_L(1);
r.MMX_L(1) = s->MMX_L(0);
*d = r;
@@ -1323,18 +1380,19 @@ void helper_pswapd(MMXReg *d, MMXReg *s)
#endif
/* SSSE3 op helpers */
-void glue(helper_pshufb, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_pshufb, SUFFIX)(Reg *d, Reg *s)
{
int i;
Reg r;
- for (i = 0; i < (8 << SHIFT); i++)
+ for (i = 0; i < (8 << SHIFT); i++) {
r.B(i) = (s->B(i) & 0x80) ? 0 : (d->B(s->B(i) & ((8 << SHIFT) - 1)));
+ }
*d = r;
}
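
A standalone model of the shuffle, for reference; the helper covers both the MMX and SSE widths through SHIFT, this sketch fixes the 128-bit case. A selector byte with bit 7 set clears the lane, otherwise its low four bits index into the destination.

#include <stdint.h>

/* pshufb, 128-bit case: per-byte table lookup with a zeroing bit. */
static void pshufb_model(uint8_t d[16], const uint8_t sel[16])
{
    uint8_t r[16];

    for (int i = 0; i < 16; i++) {
        r[i] = (sel[i] & 0x80) ? 0 : d[sel[i] & 0x0f];
    }
    for (int i = 0; i < 16; i++) {
        d[i] = r[i];
    }
}
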
-void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phaddw, SUFFIX)(Reg *d, Reg *s)
{
d->W(0) = (int16_t)d->W(0) + (int16_t)d->W(1);
d->W(1) = (int16_t)d->W(2) + (int16_t)d->W(3);
@@ -1346,7 +1404,7 @@ void glue(helper_phaddw, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->W(7) = (int16_t)s->W(6) + (int16_t)s->W(7));
}
-void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phaddd, SUFFIX)(Reg *d, Reg *s)
{
d->L(0) = (int32_t)d->L(0) + (int32_t)d->L(1);
XMM_ONLY(d->L(1) = (int32_t)d->L(2) + (int32_t)d->L(3));
@@ -1354,7 +1412,7 @@ void glue(helper_phaddd, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->L(3) = (int32_t)s->L(2) + (int32_t)s->L(3));
}
-void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phaddsw, SUFFIX)(Reg *d, Reg *s)
{
d->W(0) = satsw((int16_t)d->W(0) + (int16_t)d->W(1));
d->W(1) = satsw((int16_t)d->W(2) + (int16_t)d->W(3));
@@ -1366,19 +1424,19 @@ void glue(helper_phaddsw, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) + (int16_t)s->W(7)));
}
-void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_pmaddubsw, SUFFIX)(Reg *d, Reg *s)
{
- d->W(0) = satsw((int8_t)s->B( 0) * (uint8_t)d->B( 0) +
- (int8_t)s->B( 1) * (uint8_t)d->B( 1));
- d->W(1) = satsw((int8_t)s->B( 2) * (uint8_t)d->B( 2) +
- (int8_t)s->B( 3) * (uint8_t)d->B( 3));
- d->W(2) = satsw((int8_t)s->B( 4) * (uint8_t)d->B( 4) +
- (int8_t)s->B( 5) * (uint8_t)d->B( 5));
- d->W(3) = satsw((int8_t)s->B( 6) * (uint8_t)d->B( 6) +
- (int8_t)s->B( 7) * (uint8_t)d->B( 7));
+ d->W(0) = satsw((int8_t)s->B(0) * (uint8_t)d->B(0) +
+ (int8_t)s->B(1) * (uint8_t)d->B(1));
+ d->W(1) = satsw((int8_t)s->B(2) * (uint8_t)d->B(2) +
+ (int8_t)s->B(3) * (uint8_t)d->B(3));
+ d->W(2) = satsw((int8_t)s->B(4) * (uint8_t)d->B(4) +
+ (int8_t)s->B(5) * (uint8_t)d->B(5));
+ d->W(3) = satsw((int8_t)s->B(6) * (uint8_t)d->B(6) +
+ (int8_t)s->B(7) * (uint8_t)d->B(7));
#if SHIFT == 1
- d->W(4) = satsw((int8_t)s->B( 8) * (uint8_t)d->B( 8) +
- (int8_t)s->B( 9) * (uint8_t)d->B( 9));
+ d->W(4) = satsw((int8_t)s->B(8) * (uint8_t)d->B(8) +
+ (int8_t)s->B(9) * (uint8_t)d->B(9));
d->W(5) = satsw((int8_t)s->B(10) * (uint8_t)d->B(10) +
(int8_t)s->B(11) * (uint8_t)d->B(11));
d->W(6) = satsw((int8_t)s->B(12) * (uint8_t)d->B(12) +
@@ -1388,7 +1446,7 @@ void glue(helper_pmaddubsw, SUFFIX) (Reg *d, Reg *s)
#endif
}
-void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phsubw, SUFFIX)(Reg *d, Reg *s)
{
d->W(0) = (int16_t)d->W(0) - (int16_t)d->W(1);
d->W(1) = (int16_t)d->W(2) - (int16_t)d->W(3);
@@ -1400,7 +1458,7 @@ void glue(helper_phsubw, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->W(7) = (int16_t)s->W(6) - (int16_t)s->W(7));
}
-void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phsubd, SUFFIX)(Reg *d, Reg *s)
{
d->L(0) = (int32_t)d->L(0) - (int32_t)d->L(1);
XMM_ONLY(d->L(1) = (int32_t)d->L(2) - (int32_t)d->L(3));
@@ -1408,7 +1466,7 @@ void glue(helper_phsubd, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->L(3) = (int32_t)s->L(2) - (int32_t)s->L(3));
}
-void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phsubsw, SUFFIX)(Reg *d, Reg *s)
{
d->W(0) = satsw((int16_t)d->W(0) - (int16_t)d->W(1));
d->W(1) = satsw((int16_t)d->W(2) - (int16_t)d->W(3));
@@ -1420,24 +1478,24 @@ void glue(helper_phsubsw, SUFFIX) (Reg *d, Reg *s)
XMM_ONLY(d->W(7) = satsw((int16_t)s->W(6) - (int16_t)s->W(7)));
}
-#define FABSB(_, x) x > INT8_MAX ? -(int8_t ) x : x
-#define FABSW(_, x) x > INT16_MAX ? -(int16_t) x : x
-#define FABSL(_, x) x > INT32_MAX ? -(int32_t) x : x
+#define FABSB(_, x) (x > INT8_MAX ? -(int8_t)x : x)
+#define FABSW(_, x) (x > INT16_MAX ? -(int16_t)x : x)
+#define FABSL(_, x) (x > INT32_MAX ? -(int32_t)x : x)
SSE_HELPER_B(helper_pabsb, FABSB)
SSE_HELPER_W(helper_pabsw, FABSW)
SSE_HELPER_L(helper_pabsd, FABSL)
-#define FMULHRSW(d, s) ((int16_t) d * (int16_t) s + 0x4000) >> 15
+#define FMULHRSW(d, s) (((int16_t) d * (int16_t)s + 0x4000) >> 15)
SSE_HELPER_W(helper_pmulhrsw, FMULHRSW)
-#define FSIGNB(d, s) s <= INT8_MAX ? s ? d : 0 : -(int8_t ) d
-#define FSIGNW(d, s) s <= INT16_MAX ? s ? d : 0 : -(int16_t) d
-#define FSIGNL(d, s) s <= INT32_MAX ? s ? d : 0 : -(int32_t) d
+#define FSIGNB(d, s) (s <= INT8_MAX ? s ? d : 0 : -(int8_t)d)
+#define FSIGNW(d, s) (s <= INT16_MAX ? s ? d : 0 : -(int16_t)d)
+#define FSIGNL(d, s) (s <= INT32_MAX ? s ? d : 0 : -(int32_t)d)
SSE_HELPER_B(helper_psignb, FSIGNB)
SSE_HELPER_W(helper_psignw, FSIGNW)
SSE_HELPER_L(helper_psignd, FSIGNL)
-void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
+void glue(helper_palignr, SUFFIX)(Reg *d, Reg *s, int32_t shift)
{
Reg r;
@@ -1449,17 +1507,17 @@ void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
shift <<= 3;
#define SHR(v, i) (i < 64 && i > -64 ? i > 0 ? v >> (i) : (v << -(i)) : 0)
#if SHIFT == 0
- r.Q(0) = SHR(s->Q(0), shift - 0) |
- SHR(d->Q(0), shift - 64);
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(d->Q(0), shift - 64);
#else
- r.Q(0) = SHR(s->Q(0), shift - 0) |
- SHR(s->Q(1), shift - 64) |
- SHR(d->Q(0), shift - 128) |
- SHR(d->Q(1), shift - 192);
- r.Q(1) = SHR(s->Q(0), shift + 64) |
- SHR(s->Q(1), shift - 0) |
- SHR(d->Q(0), shift - 64) |
- SHR(d->Q(1), shift - 128);
+ r.Q(0) = SHR(s->Q(0), shift - 0) |
+ SHR(s->Q(1), shift - 64) |
+ SHR(d->Q(0), shift - 128) |
+ SHR(d->Q(1), shift - 192);
+ r.Q(1) = SHR(s->Q(0), shift + 64) |
+ SHR(s->Q(1), shift - 0) |
+ SHR(d->Q(0), shift - 64) |
+ SHR(d->Q(1), shift - 128);
#endif
#undef SHR
}
@@ -1467,72 +1525,78 @@ void glue(helper_palignr, SUFFIX) (Reg *d, Reg *s, int32_t shift)
*d = r;
}
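
Not from the patch: the SHR() composition above is a piecewise right shift of the concatenation d:s. For the 64-bit (MMX) case it is equivalent to the following sketch, with the shift given in bytes as in the helper:

#include <stdint.h>

/* palignr, MMX-sized model: shift the 128-bit value d:s right by
 * shift_bytes * 8 bits and keep the low 64 bits. */
static uint64_t palignr64_model(uint64_t d, uint64_t s, unsigned shift_bytes)
{
    unsigned bits = shift_bytes * 8;

    if (bits == 0) {
        return s;
    }
    if (bits < 64) {
        return (s >> bits) | (d << (64 - bits));
    }
    if (bits < 128) {
        return d >> (bits - 64);
    }
    return 0;   /* everything shifted out */
}
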
-#define XMM0 env->xmm_regs[0]
+#define XMM0 (env->xmm_regs[0])
#if SHIFT == 1
-#define SSE_HELPER_V(name, elem, num, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0));\
- d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1));\
- if (num > 2) {\
- d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2));\
- d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3));\
- if (num > 4) {\
- d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4));\
- d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5));\
- d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6));\
- d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7));\
- if (num > 8) {\
- d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8));\
- d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9));\
- d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10));\
- d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11));\
- d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12));\
- d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13));\
- d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14));\
- d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15));\
- }\
- }\
- }\
-}
-
-#define SSE_HELPER_I(name, elem, num, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s, uint32_t imm)\
-{\
- d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1));\
- d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1));\
- if (num > 2) {\
- d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1));\
- d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1));\
- if (num > 4) {\
- d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1));\
- d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1));\
- d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1));\
- d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1));\
- if (num > 8) {\
- d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1));\
- d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1));\
- d->elem(10) = F(d->elem(10), s->elem(10), ((imm >> 10) & 1));\
- d->elem(11) = F(d->elem(11), s->elem(11), ((imm >> 11) & 1));\
- d->elem(12) = F(d->elem(12), s->elem(12), ((imm >> 12) & 1));\
- d->elem(13) = F(d->elem(13), s->elem(13), ((imm >> 13) & 1));\
- d->elem(14) = F(d->elem(14), s->elem(14), ((imm >> 14) & 1));\
- d->elem(15) = F(d->elem(15), s->elem(15), ((imm >> 15) & 1));\
- }\
- }\
- }\
-}
+#define SSE_HELPER_V(name, elem, num, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), XMM0.elem(0)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), XMM0.elem(1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), XMM0.elem(2)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), XMM0.elem(3)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), XMM0.elem(4)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), XMM0.elem(5)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), XMM0.elem(6)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), XMM0.elem(7)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), XMM0.elem(8)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), XMM0.elem(9)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), XMM0.elem(10)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), XMM0.elem(11)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), XMM0.elem(12)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), XMM0.elem(13)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), XMM0.elem(14)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), XMM0.elem(15)); \
+ } \
+ } \
+ } \
+ }
+
+#define SSE_HELPER_I(name, elem, num, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s, uint32_t imm) \
+ { \
+ d->elem(0) = F(d->elem(0), s->elem(0), ((imm >> 0) & 1)); \
+ d->elem(1) = F(d->elem(1), s->elem(1), ((imm >> 1) & 1)); \
+ if (num > 2) { \
+ d->elem(2) = F(d->elem(2), s->elem(2), ((imm >> 2) & 1)); \
+ d->elem(3) = F(d->elem(3), s->elem(3), ((imm >> 3) & 1)); \
+ if (num > 4) { \
+ d->elem(4) = F(d->elem(4), s->elem(4), ((imm >> 4) & 1)); \
+ d->elem(5) = F(d->elem(5), s->elem(5), ((imm >> 5) & 1)); \
+ d->elem(6) = F(d->elem(6), s->elem(6), ((imm >> 6) & 1)); \
+ d->elem(7) = F(d->elem(7), s->elem(7), ((imm >> 7) & 1)); \
+ if (num > 8) { \
+ d->elem(8) = F(d->elem(8), s->elem(8), ((imm >> 8) & 1)); \
+ d->elem(9) = F(d->elem(9), s->elem(9), ((imm >> 9) & 1)); \
+ d->elem(10) = F(d->elem(10), s->elem(10), \
+ ((imm >> 10) & 1)); \
+ d->elem(11) = F(d->elem(11), s->elem(11), \
+ ((imm >> 11) & 1)); \
+ d->elem(12) = F(d->elem(12), s->elem(12), \
+ ((imm >> 12) & 1)); \
+ d->elem(13) = F(d->elem(13), s->elem(13), \
+ ((imm >> 13) & 1)); \
+ d->elem(14) = F(d->elem(14), s->elem(14), \
+ ((imm >> 14) & 1)); \
+ d->elem(15) = F(d->elem(15), s->elem(15), \
+ ((imm >> 15) & 1)); \
+ } \
+ } \
+ } \
+ }
/* SSE4.1 op helpers */
-#define FBLENDVB(d, s, m) (m & 0x80) ? s : d
-#define FBLENDVPS(d, s, m) (m & 0x80000000) ? s : d
-#define FBLENDVPD(d, s, m) (m & 0x8000000000000000LL) ? s : d
+#define FBLENDVB(d, s, m) ((m & 0x80) ? s : d)
+#define FBLENDVPS(d, s, m) ((m & 0x80000000) ? s : d)
+#define FBLENDVPD(d, s, m) ((m & 0x8000000000000000LL) ? s : d)
SSE_HELPER_V(helper_pblendvb, B, 16, FBLENDVB)
SSE_HELPER_V(helper_blendvps, L, 4, FBLENDVPS)
SSE_HELPER_V(helper_blendvpd, Q, 2, FBLENDVPD)
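
As a reading aid, not part of the patch: SSE_HELPER_V takes its third operand implicitly from XMM0, and the FBLENDV* macros select the source element whenever the corresponding mask element has its top bit set. For blendvps that amounts to:

#include <stdint.h>

/* blendvps model: lane i comes from s when xmm0's lane i has its sign bit set. */
static void blendvps_model(uint32_t d[4], const uint32_t s[4],
                           const uint32_t xmm0[4])
{
    for (int i = 0; i < 4; i++) {
        d[i] = (xmm0[i] & 0x80000000u) ? s[i] : d[i];
    }
}
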
-void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_ptest, SUFFIX)(Reg *d, Reg *s)
{
uint64_t zf = (s->Q(0) & d->Q(0)) | (s->Q(1) & d->Q(1));
uint64_t cf = (s->Q(0) & ~d->Q(0)) | (s->Q(1) & ~d->Q(1));
@@ -1540,22 +1604,22 @@ void glue(helper_ptest, SUFFIX) (Reg *d, Reg *s)
CC_SRC = (zf ? 0 : CC_Z) | (cf ? 0 : CC_C);
}
-#define SSE_HELPER_F(name, elem, num, F)\
-void glue(name, SUFFIX) (Reg *d, Reg *s)\
-{\
- d->elem(0) = F(0);\
- d->elem(1) = F(1);\
- if (num > 2) {\
- d->elem(2) = F(2);\
- d->elem(3) = F(3);\
- if (num > 4) {\
- d->elem(4) = F(4);\
- d->elem(5) = F(5);\
- d->elem(6) = F(6);\
- d->elem(7) = F(7);\
- }\
- }\
-}
+#define SSE_HELPER_F(name, elem, num, F) \
+ void glue(name, SUFFIX)(Reg *d, Reg *s) \
+ { \
+ d->elem(0) = F(0); \
+ d->elem(1) = F(1); \
+ if (num > 2) { \
+ d->elem(2) = F(2); \
+ d->elem(3) = F(3); \
+ if (num > 4) { \
+ d->elem(4) = F(4); \
+ d->elem(5) = F(5); \
+ d->elem(6) = F(6); \
+ d->elem(7) = F(7); \
+ } \
+ } \
+ }
SSE_HELPER_F(helper_pmovsxbw, W, 8, (int8_t) s->B)
SSE_HELPER_F(helper_pmovsxbd, L, 4, (int8_t) s->B)
@@ -1570,16 +1634,16 @@ SSE_HELPER_F(helper_pmovzxwd, L, 4, s->W)
SSE_HELPER_F(helper_pmovzxwq, Q, 2, s->W)
SSE_HELPER_F(helper_pmovzxdq, Q, 2, s->L)
-void glue(helper_pmuldq, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_pmuldq, SUFFIX)(Reg *d, Reg *s)
{
- d->Q(0) = (int64_t) (int32_t) d->L(0) * (int32_t) s->L(0);
- d->Q(1) = (int64_t) (int32_t) d->L(2) * (int32_t) s->L(2);
+ d->Q(0) = (int64_t)(int32_t) d->L(0) * (int32_t) s->L(0);
+ d->Q(1) = (int64_t)(int32_t) d->L(2) * (int32_t) s->L(2);
}
-#define FCMPEQQ(d, s) d == s ? -1 : 0
+#define FCMPEQQ(d, s) (d == s ? -1 : 0)
SSE_HELPER_Q(helper_pcmpeqq, FCMPEQQ)
-void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_packusdw, SUFFIX)(Reg *d, Reg *s)
{
d->W(0) = satuw((int32_t) d->L(0));
d->W(1) = satuw((int32_t) d->L(1));
@@ -1591,10 +1655,10 @@ void glue(helper_packusdw, SUFFIX) (Reg *d, Reg *s)
d->W(7) = satuw((int32_t) s->L(3));
}
-#define FMINSB(d, s) MIN((int8_t) d, (int8_t) s)
-#define FMINSD(d, s) MIN((int32_t) d, (int32_t) s)
-#define FMAXSB(d, s) MAX((int8_t) d, (int8_t) s)
-#define FMAXSD(d, s) MAX((int32_t) d, (int32_t) s)
+#define FMINSB(d, s) MIN((int8_t)d, (int8_t)s)
+#define FMINSD(d, s) MIN((int32_t)d, (int32_t)s)
+#define FMAXSB(d, s) MAX((int8_t)d, (int8_t)s)
+#define FMAXSD(d, s) MAX((int32_t)d, (int32_t)s)
SSE_HELPER_B(helper_pminsb, FMINSB)
SSE_HELPER_L(helper_pminsd, FMINSD)
SSE_HELPER_W(helper_pminuw, MIN)
@@ -1604,27 +1668,34 @@ SSE_HELPER_L(helper_pmaxsd, FMAXSD)
SSE_HELPER_W(helper_pmaxuw, MAX)
SSE_HELPER_L(helper_pmaxud, MAX)
-#define FMULLD(d, s) (int32_t) d * (int32_t) s
+#define FMULLD(d, s) ((int32_t)d * (int32_t)s)
SSE_HELPER_L(helper_pmulld, FMULLD)
-void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s)
+void glue(helper_phminposuw, SUFFIX)(Reg *d, Reg *s)
{
int idx = 0;
- if (s->W(1) < s->W(idx))
+ if (s->W(1) < s->W(idx)) {
idx = 1;
- if (s->W(2) < s->W(idx))
+ }
+ if (s->W(2) < s->W(idx)) {
idx = 2;
- if (s->W(3) < s->W(idx))
+ }
+ if (s->W(3) < s->W(idx)) {
idx = 3;
- if (s->W(4) < s->W(idx))
+ }
+ if (s->W(4) < s->W(idx)) {
idx = 4;
- if (s->W(5) < s->W(idx))
+ }
+ if (s->W(5) < s->W(idx)) {
idx = 5;
- if (s->W(6) < s->W(idx))
+ }
+ if (s->W(6) < s->W(idx)) {
idx = 6;
- if (s->W(7) < s->W(idx))
+ }
+ if (s->W(7) < s->W(idx)) {
idx = 7;
+ }
d->Q(1) = 0;
d->L(1) = 0;
@@ -1632,12 +1703,12 @@ void glue(helper_phminposuw, SUFFIX) (Reg *d, Reg *s)
d->W(0) = s->W(idx);
}
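
Illustrative model of the unrolled scan above: phminposuw finds the minimum of the eight unsigned words (strict '<', so the lowest index wins ties), stores it in word 0 and its index in word 1, and clears the rest of the register.

#include <stdint.h>

/* phminposuw model, same tie-breaking as the unrolled helper. */
static void phminposuw_model(uint16_t d[8], const uint16_t s[8])
{
    int idx = 0;

    for (int i = 1; i < 8; i++) {
        if (s[i] < s[idx]) {
            idx = i;
        }
    }
    for (int i = 2; i < 8; i++) {
        d[i] = 0;
    }
    d[1] = (uint16_t)idx;
    d[0] = s[idx];
}
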
-void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
+void glue(helper_roundps, SUFFIX)(Reg *d, Reg *s, uint32_t mode)
{
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
- if (!(mode & (1 << 2)))
+ if (!(mode & (1 << 2))) {
switch (mode & 3) {
case 0:
set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
@@ -1652,6 +1723,7 @@ void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
set_float_rounding_mode(float_round_to_zero, &env->sse_status);
break;
}
+ }
d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
d->XMM_S(1) = float32_round_to_int(s->XMM_S(1), &env->sse_status);
@@ -1659,21 +1731,21 @@ void glue(helper_roundps, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
d->XMM_S(3) = float32_round_to_int(s->XMM_S(3), &env->sse_status);
#if 0 /* TODO */
- if (mode & (1 << 3))
- set_float_exception_flags(
- get_float_exception_flags(&env->sse_status) &
- ~float_flag_inexact,
- &env->sse_status);
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
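
Not part of the patch: all four round* helpers share this imm8 decoding. When bit 2 is set the current MXCSR/sse_status rounding mode is kept; otherwise bits 1:0 select nearest-even, down, up or truncate. A sketch with illustrative names (the enum is not QEMU's):

/* imm8 decoding shared by roundps/roundpd/roundss/roundsd. */
enum round_sel {
    SEL_NEAREST_EVEN,   /* imm & 3 == 0 */
    SEL_DOWN,           /* imm & 3 == 1 */
    SEL_UP,             /* imm & 3 == 2 */
    SEL_TOWARD_ZERO,    /* imm & 3 == 3 */
    SEL_CURRENT_MODE    /* imm bit 2 set: keep env->sse_status mode */
};

static enum round_sel round_mode_sel(unsigned imm)
{
    if (imm & (1u << 2)) {
        return SEL_CURRENT_MODE;
    }
    switch (imm & 3) {
    case 0:  return SEL_NEAREST_EVEN;
    case 1:  return SEL_DOWN;
    case 2:  return SEL_UP;
    default: return SEL_TOWARD_ZERO;
    }
}
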
-void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
+void glue(helper_roundpd, SUFFIX)(Reg *d, Reg *s, uint32_t mode)
{
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
- if (!(mode & (1 << 2)))
+ if (!(mode & (1 << 2))) {
switch (mode & 3) {
case 0:
set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
@@ -1688,26 +1760,27 @@ void glue(helper_roundpd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
set_float_rounding_mode(float_round_to_zero, &env->sse_status);
break;
}
+ }
d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
d->XMM_D(1) = float64_round_to_int(s->XMM_D(1), &env->sse_status);
#if 0 /* TODO */
- if (mode & (1 << 3))
- set_float_exception_flags(
- get_float_exception_flags(&env->sse_status) &
- ~float_flag_inexact,
- &env->sse_status);
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
-void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
+void glue(helper_roundss, SUFFIX)(Reg *d, Reg *s, uint32_t mode)
{
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
- if (!(mode & (1 << 2)))
+ if (!(mode & (1 << 2))) {
switch (mode & 3) {
case 0:
set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
@@ -1722,25 +1795,26 @@ void glue(helper_roundss, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
set_float_rounding_mode(float_round_to_zero, &env->sse_status);
break;
}
+ }
d->XMM_S(0) = float32_round_to_int(s->XMM_S(0), &env->sse_status);
#if 0 /* TODO */
- if (mode & (1 << 3))
- set_float_exception_flags(
- get_float_exception_flags(&env->sse_status) &
- ~float_flag_inexact,
- &env->sse_status);
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
-void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
+void glue(helper_roundsd, SUFFIX)(Reg *d, Reg *s, uint32_t mode)
{
signed char prev_rounding_mode;
prev_rounding_mode = env->sse_status.float_rounding_mode;
- if (!(mode & (1 << 2)))
+ if (!(mode & (1 << 2))) {
switch (mode & 3) {
case 0:
set_float_rounding_mode(float_round_nearest_even, &env->sse_status);
@@ -1755,67 +1829,80 @@ void glue(helper_roundsd, SUFFIX) (Reg *d, Reg *s, uint32_t mode)
set_float_rounding_mode(float_round_to_zero, &env->sse_status);
break;
}
+ }
d->XMM_D(0) = float64_round_to_int(s->XMM_D(0), &env->sse_status);
#if 0 /* TODO */
- if (mode & (1 << 3))
- set_float_exception_flags(
- get_float_exception_flags(&env->sse_status) &
- ~float_flag_inexact,
- &env->sse_status);
+ if (mode & (1 << 3)) {
+ set_float_exception_flags(get_float_exception_flags(&env->sse_status) &
+ ~float_flag_inexact,
+ &env->sse_status);
+ }
#endif
env->sse_status.float_rounding_mode = prev_rounding_mode;
}
-#define FBLENDP(d, s, m) m ? s : d
+#define FBLENDP(d, s, m) (m ? s : d)
SSE_HELPER_I(helper_blendps, L, 4, FBLENDP)
SSE_HELPER_I(helper_blendpd, Q, 2, FBLENDP)
SSE_HELPER_I(helper_pblendw, W, 8, FBLENDP)
-void glue(helper_dpps, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dpps, SUFFIX)(Reg *d, Reg *s, uint32_t mask)
{
float32 iresult = float32_zero;
- if (mask & (1 << 4))
+ if (mask & (1 << 4)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(0), s->XMM_S(0), &env->sse_status),
- &env->sse_status);
- if (mask & (1 << 5))
+ float32_mul(d->XMM_S(0), s->XMM_S(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(1), s->XMM_S(1), &env->sse_status),
- &env->sse_status);
- if (mask & (1 << 6))
+ float32_mul(d->XMM_S(1), s->XMM_S(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 6)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(2), s->XMM_S(2), &env->sse_status),
- &env->sse_status);
- if (mask & (1 << 7))
+ float32_mul(d->XMM_S(2), s->XMM_S(2),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 7)) {
iresult = float32_add(iresult,
- float32_mul(d->XMM_S(3), s->XMM_S(3), &env->sse_status),
- &env->sse_status);
+ float32_mul(d->XMM_S(3), s->XMM_S(3),
+ &env->sse_status),
+ &env->sse_status);
+ }
d->XMM_S(0) = (mask & (1 << 0)) ? iresult : float32_zero;
d->XMM_S(1) = (mask & (1 << 1)) ? iresult : float32_zero;
d->XMM_S(2) = (mask & (1 << 2)) ? iresult : float32_zero;
d->XMM_S(3) = (mask & (1 << 3)) ? iresult : float32_zero;
}
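
A scalar model of the operation, not from the patch, and it ignores the softfloat status threading that the real helper does on every add and multiply: the high nibble of the mask selects which products enter the sum, the low nibble selects which result lanes receive it.

/* dpps model: conditional dot product with broadcast. */
static void dpps_model(float d[4], const float s[4], unsigned mask)
{
    float sum = 0.0f;

    for (int i = 0; i < 4; i++) {
        if (mask & (1u << (4 + i))) {   /* high nibble: include product i */
            sum += d[i] * s[i];
        }
    }
    for (int i = 0; i < 4; i++) {       /* low nibble: write or zero lane i */
        d[i] = (mask & (1u << i)) ? sum : 0.0f;
    }
}
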
-void glue(helper_dppd, SUFFIX) (Reg *d, Reg *s, uint32_t mask)
+void glue(helper_dppd, SUFFIX)(Reg *d, Reg *s, uint32_t mask)
{
float64 iresult = float64_zero;
- if (mask & (1 << 4))
+ if (mask & (1 << 4)) {
iresult = float64_add(iresult,
- float64_mul(d->XMM_D(0), s->XMM_D(0), &env->sse_status),
- &env->sse_status);
- if (mask & (1 << 5))
+ float64_mul(d->XMM_D(0), s->XMM_D(0),
+ &env->sse_status),
+ &env->sse_status);
+ }
+ if (mask & (1 << 5)) {
iresult = float64_add(iresult,
- float64_mul(d->XMM_D(1), s->XMM_D(1), &env->sse_status),
- &env->sse_status);
+ float64_mul(d->XMM_D(1), s->XMM_D(1),
+ &env->sse_status),
+ &env->sse_status);
+ }
d->XMM_D(0) = (mask & (1 << 0)) ? iresult : float64_zero;
d->XMM_D(1) = (mask & (1 << 1)) ? iresult : float64_zero;
}
-void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset)
+void glue(helper_mpsadbw, SUFFIX)(Reg *d, Reg *s, uint32_t offset)
{
int s0 = (offset & 3) << 2;
int d0 = (offset & 4) << 0;
@@ -1835,7 +1922,7 @@ void glue(helper_mpsadbw, SUFFIX) (Reg *d, Reg *s, uint32_t offset)
/* SSE4.2 op helpers */
/* it's unclear whether signed or unsigned */
-#define FCMPGTQ(d, s) d > s ? -1 : 0
+#define FCMPGTQ(d, s) (d > s ? -1 : 0)
SSE_HELPER_Q(helper_pcmpgtq, FCMPGTQ)
static inline int pcmp_elen(int reg, uint32_t ctrl)
@@ -1843,18 +1930,21 @@ static inline int pcmp_elen(int reg, uint32_t ctrl)
int val;
/* Presence of REX.W is indicated by a bit above bit 7 being set */
- if (ctrl >> 8)
- val = abs1((int64_t) env->regs[reg]);
- else
- val = abs1((int32_t) env->regs[reg]);
+ if (ctrl >> 8) {
+ val = abs1((int64_t)env->regs[reg]);
+ } else {
+ val = abs1((int32_t)env->regs[reg]);
+ }
if (ctrl & 1) {
- if (val > 8)
+ if (val > 8) {
return 8;
- } else
- if (val > 16)
+ }
+ } else {
+ if (val > 16) {
return 16;
-
+ }
+ }
return val;
}
@@ -1863,11 +1953,14 @@ static inline int pcmp_ilen(Reg *r, uint8_t ctrl)
int val = 0;
if (ctrl & 1) {
- while (val < 8 && r->W(val))
+ while (val < 8 && r->W(val)) {
val++;
- } else
- while (val < 16 && r->B(val))
+ }
+ } else {
+ while (val < 16 && r->B(val)) {
val++;
+ }
+ }
return val;
}
@@ -1880,15 +1973,15 @@ static inline int pcmp_val(Reg *r, uint8_t ctrl, int i)
case 1:
return r->W(i);
case 2:
- return (int8_t) r->B(i);
+ return (int8_t)r->B(i);
case 3:
default:
- return (int16_t) r->W(i);
+ return (int16_t)r->W(i);
}
}
static inline unsigned pcmpxstrx(Reg *d, Reg *s,
- int8_t ctrl, int valids, int validd)
+ int8_t ctrl, int valids, int validd)
{
unsigned int res = 0;
int v;
@@ -1905,17 +1998,19 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s,
for (j = valids; j >= 0; j--) {
res <<= 1;
v = pcmp_val(s, ctrl, j);
- for (i = validd; i >= 0; i--)
+ for (i = validd; i >= 0; i--) {
res |= (v == pcmp_val(d, ctrl, i));
+ }
}
break;
case 1:
for (j = valids; j >= 0; j--) {
res <<= 1;
v = pcmp_val(s, ctrl, j);
- for (i = ((validd - 1) | 1); i >= 0; i -= 2)
+ for (i = ((validd - 1) | 1); i >= 0; i -= 2) {
res |= (pcmp_val(d, ctrl, i - 0) <= v &&
pcmp_val(d, ctrl, i - 1) >= v);
+ }
}
break;
case 2:
@@ -1931,8 +2026,9 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s,
for (j = valids - validd; j >= 0; j--) {
res <<= 1;
res |= 1;
- for (i = MIN(upper - j, validd); i >= 0; i--)
+ for (i = MIN(upper - j, validd); i >= 0; i--) {
res &= (pcmp_val(s, ctrl, i + j) == pcmp_val(d, ctrl, i));
+ }
}
break;
}
@@ -1946,10 +2042,12 @@ static inline unsigned pcmpxstrx(Reg *d, Reg *s,
break;
}
- if (res)
- CC_SRC |= CC_C;
- if (res & 1)
- CC_SRC |= CC_O;
+ if (res) {
+ CC_SRC |= CC_C;
+ }
+ if (res & 1) {
+ CC_SRC |= CC_O;
+ }
return res;
}
@@ -1958,11 +2056,12 @@ static inline int rffs1(unsigned int val)
{
int ret = 1, hi;
- for (hi = sizeof(val) * 4; hi; hi /= 2)
+ for (hi = sizeof(val) * 4; hi; hi /= 2) {
if (val >> hi) {
val >>= hi;
ret += hi;
}
+ }
return ret;
}
@@ -1971,77 +2070,82 @@ static inline int ffs1(unsigned int val)
{
int ret = 1, hi;
- for (hi = sizeof(val) * 4; hi; hi /= 2)
+ for (hi = sizeof(val) * 4; hi; hi /= 2) {
if (val << hi) {
val <<= hi;
ret += hi;
}
+ }
return ret;
}
-void glue(helper_pcmpestri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
+void glue(helper_pcmpestri, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl)
{
unsigned int res = pcmpxstrx(d, s, ctrl,
- pcmp_elen(R_EDX, ctrl),
- pcmp_elen(R_EAX, ctrl));
+ pcmp_elen(R_EDX, ctrl),
+ pcmp_elen(R_EAX, ctrl));
- if (res)
+ if (res) {
env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
- else
+ } else {
env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
}
-void glue(helper_pcmpestrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
+void glue(helper_pcmpestrm, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl)
{
int i;
unsigned int res = pcmpxstrx(d, s, ctrl,
- pcmp_elen(R_EDX, ctrl),
- pcmp_elen(R_EAX, ctrl));
+ pcmp_elen(R_EDX, ctrl),
+ pcmp_elen(R_EAX, ctrl));
if ((ctrl >> 6) & 1) {
- if (ctrl & 1)
+ if (ctrl & 1) {
for (i = 0; i < 8; i++, res >>= 1) {
d->W(i) = (res & 1) ? ~0 : 0;
}
- else
+ } else {
for (i = 0; i < 16; i++, res >>= 1) {
d->B(i) = (res & 1) ? ~0 : 0;
}
+ }
} else {
d->Q(1) = 0;
d->Q(0) = res;
}
}
-void glue(helper_pcmpistri, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
+void glue(helper_pcmpistri, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl)
{
unsigned int res = pcmpxstrx(d, s, ctrl,
- pcmp_ilen(s, ctrl),
- pcmp_ilen(d, ctrl));
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
- if (res)
+ if (res) {
env->regs[R_ECX] = ((ctrl & (1 << 6)) ? rffs1 : ffs1)(res) - 1;
- else
+ } else {
env->regs[R_ECX] = 16 >> (ctrl & (1 << 0));
+ }
}
-void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
+void glue(helper_pcmpistrm, SUFFIX)(Reg *d, Reg *s, uint32_t ctrl)
{
int i;
unsigned int res = pcmpxstrx(d, s, ctrl,
- pcmp_ilen(s, ctrl),
- pcmp_ilen(d, ctrl));
+ pcmp_ilen(s, ctrl),
+ pcmp_ilen(d, ctrl));
if ((ctrl >> 6) & 1) {
- if (ctrl & 1)
+ if (ctrl & 1) {
for (i = 0; i < 8; i++, res >>= 1) {
d->W(i) = (res & 1) ? ~0 : 0;
}
- else
+ } else {
for (i = 0; i < 16; i++, res >>= 1) {
d->B(i) = (res & 1) ? ~0 : 0;
}
+ }
} else {
d->Q(1) = 0;
d->Q(0) = res;
@@ -2053,16 +2157,17 @@ void glue(helper_pcmpistrm, SUFFIX) (Reg *d, Reg *s, uint32_t ctrl)
target_ulong helper_crc32(uint32_t crc1, target_ulong msg, uint32_t len)
{
target_ulong crc = (msg & ((target_ulong) -1 >>
- (TARGET_LONG_BITS - len))) ^ crc1;
+ (TARGET_LONG_BITS - len))) ^ crc1;
- while (len--)
+ while (len--) {
crc = (crc >> 1) ^ ((crc & 1) ? CRCPOLY_BITREV : 0);
+ }
return crc;
}
#define POPMASK(i) ((target_ulong) -1 / ((1LL << (1 << i)) + 1))
-#define POPCOUNT(n, i) (n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i))
+#define POPCOUNT(n, i) ((n & POPMASK(i)) + ((n >> (1 << i)) & POPMASK(i)))
target_ulong helper_popcnt(target_ulong n, uint32_t type)
{
CC_SRC = n ? 0 : CC_Z;
@@ -2071,15 +2176,17 @@ target_ulong helper_popcnt(target_ulong n, uint32_t type)
n = POPCOUNT(n, 1);
n = POPCOUNT(n, 2);
n = POPCOUNT(n, 3);
- if (type == 1)
+ if (type == 1) {
return n & 0xff;
+ }
n = POPCOUNT(n, 4);
#ifndef TARGET_X86_64
return n;
#else
- if (type == 2)
+ if (type == 2) {
return n & 0xff;
+ }
return POPCOUNT(n, 5);
#endif
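
As a reading aid, not part of the patch: POPMASK/POPCOUNT build the classic SWAR population count, doubling the field width at every stage. The 32-bit version of the same ladder with the constants written out:

#include <stdint.h>

/* SWAR popcount, 32-bit: each step sums adjacent fields of twice the width. */
static unsigned popcount32(uint32_t n)
{
    n = (n & 0x55555555u) + ((n >> 1) & 0x55555555u);   /* 2-bit sums  */
    n = (n & 0x33333333u) + ((n >> 2) & 0x33333333u);   /* 4-bit sums  */
    n = (n & 0x0f0f0f0fu) + ((n >> 4) & 0x0f0f0f0fu);   /* 8-bit sums  */
    n = (n & 0x00ff00ffu) + ((n >> 8) & 0x00ff00ffu);   /* 16-bit sums */
    n = (n & 0x0000ffffu) + (n >> 16);                  /* final total */
    return n;
}
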
diff --git a/target-i386/seg_helper.c b/target-i386/seg_helper.c
new file mode 100644
index 0000000000..a4b8b640a0
--- /dev/null
+++ b/target-i386/seg_helper.c
@@ -0,0 +1,2475 @@
+/*
+ * x86 segmentation related helpers:
+ * TSS, interrupts, system calls, jumps and call/task gates, descriptors
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "qemu-log.h"
+#include "helper.h"
+
+#if !defined(CONFIG_USER_ONLY)
+#include "softmmu_exec.h"
+#endif /* !defined(CONFIG_USER_ONLY) */
+
+//#define DEBUG_PCALL
+
+#ifdef DEBUG_PCALL
+# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
+# define LOG_PCALL_STATE(env) \
+ log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
+#else
+# define LOG_PCALL(...) do { } while (0)
+# define LOG_PCALL_STATE(env) do { } while (0)
+#endif
+
+/* return non-zero on error */
+static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
+ int selector)
+{
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ return -1;
+ }
+ ptr = dt->base + index;
+ *e1_ptr = ldl_kernel(ptr);
+ *e2_ptr = ldl_kernel(ptr + 4);
+ return 0;
+}
+
+static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
+{
+ unsigned int limit;
+
+ limit = (e1 & 0xffff) | (e2 & 0x000f0000);
+ if (e2 & DESC_G_MASK) {
+ limit = (limit << 12) | 0xfff;
+ }
+ return limit;
+}
+
+static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
+{
+ return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
+}
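
Not part of the patch: get_seg_base() and get_seg_limit() undo the split field layout of an 8-byte descriptor, with e1 the low word and e2 the high word of the entry. A standalone sketch plus a worked example; DESC_G_BIT here mirrors QEMU's DESC_G_MASK (bit 23 of e2):

#include <stdint.h>

#define DESC_G_BIT (1u << 23)   /* granularity: limit counts 4 KiB pages */

static uint32_t seg_base_model(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static uint32_t seg_limit_model(uint32_t e1, uint32_t e2)
{
    uint32_t limit = (e1 & 0xffff) | (e2 & 0x000f0000);

    if (e2 & DESC_G_BIT) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

/* e.g. the usual flat code segment e1 = 0x0000ffff, e2 = 0x00cf9a00
 * decodes to base 0 and limit 0xffffffff. */
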
+
+static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
+ uint32_t e2)
+{
+ sc->base = get_seg_base(e1, e2);
+ sc->limit = get_seg_limit(e1, e2);
+ sc->flags = e2;
+}
+
+/* init the segment cache in vm86 mode. */
+static inline void load_seg_vm(int seg, int selector)
+{
+ selector &= 0xffff;
+ cpu_x86_load_seg_cache(env, seg, selector,
+ (selector << 4), 0xffff, 0);
+}
+
+static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
+ uint32_t *esp_ptr, int dpl)
+{
+ int type, index, shift;
+
+#if 0
+ {
+ int i;
+ printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
+ for (i = 0; i < env->tr.limit; i++) {
+ printf("%02x ", env->tr.base[i]);
+ if ((i & 7) == 7) {
+ printf("\n");
+ }
+ }
+ printf("\n");
+ }
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(env, "invalid tss");
+ }
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ cpu_abort(env, "invalid tss type");
+ }
+ shift = type >> 3;
+ index = (dpl * 4 + 2) << shift;
+ if (index + (4 << shift) - 1 > env->tr.limit) {
+ raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+ }
+ if (shift == 0) {
+ *esp_ptr = lduw_kernel(env->tr.base + index);
+ *ss_ptr = lduw_kernel(env->tr.base + index + 2);
+ } else {
+ *esp_ptr = ldl_kernel(env->tr.base + index);
+ *ss_ptr = lduw_kernel(env->tr.base + index + 4);
+ }
+}
+
+/* XXX: merge with load_seg() */
+static void tss_load_seg(int seg_reg, int selector)
+{
+ uint32_t e1, e2;
+ int rpl, dpl, cpl;
+
+ if ((selector & 0xfffc) != 0) {
+ if (load_segment(&e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (seg_reg == R_CS) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ /* XXX: is this correct? */
+ if (dpl != rpl) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ if ((e2 & DESC_C_MASK) && dpl > rpl) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ } else if (seg_reg == R_SS) {
+ /* SS must be writable data */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ if (dpl != cpl || dpl != rpl) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ } else {
+ /* not readable code */
+ if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ /* if data or non-conforming code, check the rights */
+ if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ if (seg_reg == R_SS || seg_reg == R_CS) {
+ raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
+ }
+ }
+}
+
+#define SWITCH_TSS_JMP 0
+#define SWITCH_TSS_IRET 1
+#define SWITCH_TSS_CALL 2
+
+/* XXX: restore CPU state in registers (PowerPC case) */
+static void switch_tss(int tss_selector,
+ uint32_t e1, uint32_t e2, int source,
+ uint32_t next_eip)
+{
+ int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
+ target_ulong tss_base;
+ uint32_t new_regs[8], new_segs[6];
+ uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
+ uint32_t old_eflags, eflags_mask;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
+ source);
+
+ /* if task gate, we read the TSS segment and we load it */
+ if (type == 5) {
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
+ }
+ tss_selector = e1 >> 16;
+ if (tss_selector & 4) {
+ raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ }
+ if (load_segment(&e1, &e2, tss_selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ }
+ if (e2 & DESC_S_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((type & 7) != 1) {
+ raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
+ }
+
+ if (type & 8) {
+ tss_limit_max = 103;
+ } else {
+ tss_limit_max = 43;
+ }
+ tss_limit = get_seg_limit(e1, e2);
+ tss_base = get_seg_base(e1, e2);
+ if ((tss_selector & 4) != 0 ||
+ tss_limit < tss_limit_max) {
+ raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ }
+ old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ if (old_type & 8) {
+ old_tss_limit_max = 103;
+ } else {
+ old_tss_limit_max = 43;
+ }
+
+ /* read all the registers from the new TSS */
+ if (type & 8) {
+ /* 32 bit */
+ new_cr3 = ldl_kernel(tss_base + 0x1c);
+ new_eip = ldl_kernel(tss_base + 0x20);
+ new_eflags = ldl_kernel(tss_base + 0x24);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
+ }
+ for (i = 0; i < 6; i++) {
+ new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
+ }
+ new_ldt = lduw_kernel(tss_base + 0x60);
+ new_trap = ldl_kernel(tss_base + 0x64);
+ } else {
+ /* 16 bit */
+ new_cr3 = 0;
+ new_eip = lduw_kernel(tss_base + 0x0e);
+ new_eflags = lduw_kernel(tss_base + 0x10);
+ for (i = 0; i < 8; i++) {
+ new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
+ }
+ for (i = 0; i < 4; i++) {
+ new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
+ }
+ new_ldt = lduw_kernel(tss_base + 0x2a);
+ new_segs[R_FS] = 0;
+ new_segs[R_GS] = 0;
+ new_trap = 0;
+ }
+ /* XXX: avoid a compiler warning, see
+ http://support.amd.com/us/Processor_TechDocs/24593.pdf
+ chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
+ (void)new_trap;
+
+ /* NOTE: we must avoid memory exceptions during the task switch,
+ so we make dummy accesses beforehand */
+ /* XXX: it can still fail in some cases, so a bigger hack is
+ necessary to validate the TLB after having done the accesses */
+
+ v1 = ldub_kernel(env->tr.base);
+ v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
+ stb_kernel(env->tr.base, v1);
+ stb_kernel(env->tr.base + old_tss_limit_max, v2);
+
+ /* clear busy bit (it is restartable) */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (env->tr.selector & ~7);
+ e2 = ldl_kernel(ptr + 4);
+ e2 &= ~DESC_TSS_BUSY_MASK;
+ stl_kernel(ptr + 4, e2);
+ }
+ old_eflags = cpu_compute_eflags(env);
+ if (source == SWITCH_TSS_IRET) {
+ old_eflags &= ~NT_MASK;
+ }
+
+ /* save the current state in the old TSS */
+ if (type & 8) {
+ /* 32 bit */
+ stl_kernel(env->tr.base + 0x20, next_eip);
+ stl_kernel(env->tr.base + 0x24, old_eflags);
+ stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
+ stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
+ stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
+ stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
+ stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
+ stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
+ stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
+ stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
+ for (i = 0; i < 6; i++) {
+ stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
+ }
+ } else {
+ /* 16 bit */
+ stw_kernel(env->tr.base + 0x0e, next_eip);
+ stw_kernel(env->tr.base + 0x10, old_eflags);
+ stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
+ stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
+ stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
+ stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
+ stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
+ stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
+ stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
+ stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
+ for (i = 0; i < 4; i++) {
+ stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
+ }
+ }
+
+ /* now if an exception occurs, it will occur in the next task
+ context */
+
+ if (source == SWITCH_TSS_CALL) {
+ stw_kernel(tss_base, env->tr.selector);
+ new_eflags |= NT_MASK;
+ }
+
+ /* set busy bit */
+ if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
+ target_ulong ptr;
+ uint32_t e2;
+
+ ptr = env->gdt.base + (tss_selector & ~7);
+ e2 = ldl_kernel(ptr + 4);
+ e2 |= DESC_TSS_BUSY_MASK;
+ stl_kernel(ptr + 4, e2);
+ }
+
+ /* set the new CPU state */
+ /* from this point, any exception which occurs can give problems */
+ env->cr[0] |= CR0_TS_MASK;
+ env->hflags |= HF_TS_MASK;
+ env->tr.selector = tss_selector;
+ env->tr.base = tss_base;
+ env->tr.limit = tss_limit;
+ env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
+
+ if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
+ cpu_x86_update_cr3(env, new_cr3);
+ }
+
+ /* load all registers without an exception, then reload them with
+ possible exception */
+ env->eip = new_eip;
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
+ if (!(type & 8)) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ /* XXX: what to do in 16 bit case? */
+ EAX = new_regs[0];
+ ECX = new_regs[1];
+ EDX = new_regs[2];
+ EBX = new_regs[3];
+ ESP = new_regs[4];
+ EBP = new_regs[5];
+ ESI = new_regs[6];
+ EDI = new_regs[7];
+ if (new_eflags & VM_MASK) {
+ for (i = 0; i < 6; i++) {
+ load_seg_vm(i, new_segs[i]);
+ }
+ /* in vm86, CPL is always 3 */
+ cpu_x86_set_cpl(env, 3);
+ } else {
+ /* CPL is set to the RPL of CS */
+ cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
+ /* first just selectors as the rest may trigger exceptions */
+ for (i = 0; i < 6; i++) {
+ cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
+ }
+ }
+
+ env->ldt.selector = new_ldt & ~4;
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ env->ldt.flags = 0;
+
+ /* load the LDT */
+ if (new_ldt & 4) {
+ raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ }
+
+ if ((new_ldt & 0xfffc) != 0) {
+ dt = &env->gdt;
+ index = new_ldt & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ }
+ ptr = dt->base + index;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
+ }
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+
+ /* load the segments */
+ if (!(new_eflags & VM_MASK)) {
+ tss_load_seg(R_CS, new_segs[R_CS]);
+ tss_load_seg(R_SS, new_segs[R_SS]);
+ tss_load_seg(R_ES, new_segs[R_ES]);
+ tss_load_seg(R_DS, new_segs[R_DS]);
+ tss_load_seg(R_FS, new_segs[R_FS]);
+ tss_load_seg(R_GS, new_segs[R_GS]);
+ }
+
+ /* check that EIP is in the CS segment limits */
+ if (new_eip > env->segs[R_CS].limit) {
+ /* XXX: different exception if CALL? */
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+
+#ifndef CONFIG_USER_ONLY
+ /* reset local breakpoints */
+ if (env->dr[7] & 0x55) {
+ for (i = 0; i < 4; i++) {
+ if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
+ hw_breakpoint_remove(env, i);
+ }
+ }
+ env->dr[7] &= ~0x55;
+ }
+#endif
+}
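
As a reading aid (an illustrative declaration, not something the patch adds): the hard-coded offsets used by switch_tss() and get_ss_esp_from_tss() correspond to the 32-bit TSS layout below.

#include <stdint.h>

struct tss32_layout {
    uint32_t prev_task_link;   /* 0x00: back link written for SWITCH_TSS_CALL */
    uint32_t esp0, ss0;        /* 0x04, 0x08: ring-0 stack */
    uint32_t esp1, ss1;        /* 0x0c, 0x10 */
    uint32_t esp2, ss2;        /* 0x14, 0x18 */
    uint32_t cr3;              /* 0x1c */
    uint32_t eip;              /* 0x20 */
    uint32_t eflags;           /* 0x24 */
    uint32_t regs[8];          /* 0x28: EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI */
    uint32_t segs[6];          /* 0x48: ES, CS, SS, DS, FS, GS (low 16 bits used) */
    uint32_t ldt;              /* 0x60 */
    uint32_t trap_iomap;       /* 0x64: T bit and I/O map base */
};
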
+
+static inline unsigned int get_sp_mask(unsigned int e2)
+{
+ if (e2 & DESC_B_MASK) {
+ return 0xffffffff;
+ } else {
+ return 0xffff;
+ }
+}
+
+static int exception_has_error_code(int intno)
+{
+ switch (intno) {
+ case 8:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 17:
+ return 1;
+ }
+ return 0;
+}
+
+#ifdef TARGET_X86_64
+#define SET_ESP(val, sp_mask) \
+ do { \
+ if ((sp_mask) == 0xffff) { \
+ ESP = (ESP & ~0xffff) | ((val) & 0xffff); \
+ } else if ((sp_mask) == 0xffffffffLL) { \
+ ESP = (uint32_t)(val); \
+ } else { \
+ ESP = (val); \
+ } \
+ } while (0)
+#else
+#define SET_ESP(val, sp_mask) \
+ do { \
+ ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
+ } while (0)
+#endif
+
+/* On 64-bit machines, this can overflow. So this segment addition macro
+ * can be used to trim the value to 32 bits whenever needed */
+#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
+
+/* XXX: add a is_user flag to have proper security support */
+#define PUSHW(ssp, sp, sp_mask, val) \
+ { \
+ sp -= 2; \
+ stw_kernel((ssp) + (sp & (sp_mask)), (val)); \
+ }
+
+#define PUSHL(ssp, sp, sp_mask, val) \
+ { \
+ sp -= 4; \
+ stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \
+ }
+
+#define POPW(ssp, sp, sp_mask, val) \
+ { \
+ val = lduw_kernel((ssp) + (sp & (sp_mask))); \
+ sp += 2; \
+ }
+
+#define POPL(ssp, sp, sp_mask, val) \
+ { \
+ val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask)); \
+ sp += 4; \
+ }
+
+/* protected mode interrupt */
+static void do_interrupt_protected(int intno, int is_int, int error_code,
+ unsigned int next_eip, int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int type, dpl, selector, ss_dpl, cpl;
+ int has_error_code, new_stack, shift;
+ uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
+ uint32_t old_eip, sp_mask;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 8 + 7 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 8;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 5: /* task gate */
+ /* must do that check here to return the correct error code */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
+ if (has_error_code) {
+ int type;
+ uint32_t mask;
+
+ /* push the error code */
+ type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
+ shift = type >> 3;
+ if (env->segs[R_SS].flags & DESC_B_MASK) {
+ mask = 0xffffffff;
+ } else {
+ mask = 0xffff;
+ }
+ esp = (ESP - (2 << shift)) & mask;
+ ssp = env->segs[R_SS].base + esp;
+ if (shift) {
+ stl_kernel(ssp, error_code);
+ } else {
+ stw_kernel(ssp, error_code);
+ }
+ SET_ESP(esp, mask);
+ }
+ return;
+ case 6: /* 286 interrupt gate */
+ case 7: /* 286 trap gate */
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
+ }
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ if (load_segment(&e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege */
+ get_ss_esp_from_tss(&ss, &esp, dpl);
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ new_stack = 1;
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ /* to same privilege */
+ if (env->eflags & VM_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ esp = ESP;
+ dpl = cpl;
+ } else {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ new_stack = 0; /* avoid warning */
+ sp_mask = 0; /* avoid warning */
+ ssp = 0; /* avoid warning */
+ esp = 0; /* avoid warning */
+ }
+
+ shift = type >> 3;
+
+#if 0
+ /* XXX: check that enough room is available */
+ push_size = 6 + (new_stack << 2) + (has_error_code << 1);
+ if (env->eflags & VM_MASK) {
+ push_size += 8;
+ }
+ push_size <<= shift;
+#endif
+ if (shift == 1) {
+ if (new_stack) {
+ if (env->eflags & VM_MASK) {
+ PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHL(ssp, esp, sp_mask, ESP);
+ }
+ PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHL(ssp, esp, sp_mask, error_code);
+ }
+ } else {
+ if (new_stack) {
+ if (env->eflags & VM_MASK) {
+ PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
+ PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
+ }
+ PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
+ PUSHW(ssp, esp, sp_mask, ESP);
+ }
+ PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, esp, sp_mask, old_eip);
+ if (has_error_code) {
+ PUSHW(ssp, esp, sp_mask, error_code);
+ }
+ }
+
+ if (new_stack) {
+ if (env->eflags & VM_MASK) {
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
+ }
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
+ }
+ SET_ESP(esp, sp_mask);
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, dpl);
+ env->eip = offset;
+
+ /* interrupt gates clear the IF mask */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+}
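
Not part of the patch: for the common case of a 32-bit interrupt or trap gate taken from a less privileged ring (new_stack set, not vm86), the PUSHL sequence above leaves the following frame on the inner stack, shown in memory order starting at the final ESP.

#include <stdint.h>

struct intr32_frame {          /* lowest address (new ESP) first */
    uint32_t error_code;       /* only when the exception defines one */
    uint32_t eip;              /* return EIP */
    uint32_t cs;               /* outer CS selector, zero-extended */
    uint32_t eflags;
    uint32_t old_esp;          /* outer ESP */
    uint32_t old_ss;           /* outer SS selector, zero-extended */
};
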
+
+#ifdef TARGET_X86_64
+
+#define PUSHQ(sp, val) \
+ { \
+ sp -= 8; \
+ stq_kernel(sp, (val)); \
+ }
+
+#define POPQ(sp, val) \
+ { \
+ val = ldq_kernel(sp); \
+ sp += 8; \
+ }
+
+static inline target_ulong get_rsp_from_tss(int level)
+{
+ int index;
+
+#if 0
+ printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
+ env->tr.base, env->tr.limit);
+#endif
+
+ if (!(env->tr.flags & DESC_P_MASK)) {
+ cpu_abort(env, "invalid tss");
+ }
+ index = 8 * level + 4;
+ if ((index + 7) > env->tr.limit) {
+ raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
+ }
+ return ldq_kernel(env->tr.base + index);
+}
+
+/* 64 bit interrupt */
+static void do_interrupt64(int intno, int is_int, int error_code,
+ target_ulong next_eip, int is_hw)
+{
+ SegmentCache *dt;
+ target_ulong ptr;
+ int type, dpl, selector, cpl, ist;
+ int has_error_code, new_stack;
+ uint32_t e1, e2, e3, ss;
+ target_ulong old_eip, esp, offset;
+
+ has_error_code = 0;
+ if (!is_int && !is_hw) {
+ has_error_code = exception_has_error_code(intno);
+ }
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+
+ dt = &env->idt;
+ if (intno * 16 + 15 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ ptr = dt->base + intno * 16;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+ e3 = ldl_kernel(ptr + 8);
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ switch (type) {
+ case 14: /* 386 interrupt gate */
+ case 15: /* 386 trap gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ break;
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
+ }
+ selector = e1 >> 16;
+ offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ ist = e2 & 7;
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+
+ if (load_segment(&e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
+ /* to inner privilege */
+ if (ist != 0) {
+ esp = get_rsp_from_tss(ist + 3);
+ } else {
+ esp = get_rsp_from_tss(dpl);
+ }
+ esp &= ~0xfLL; /* align stack */
+ ss = 0;
+ new_stack = 1;
+ } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
+ /* to same privilege */
+ if (env->eflags & VM_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ new_stack = 0;
+ if (ist != 0) {
+ esp = get_rsp_from_tss(ist + 3);
+ } else {
+ esp = ESP;
+ }
+ esp &= ~0xfLL; /* align stack */
+ dpl = cpl;
+ } else {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ new_stack = 0; /* avoid warning */
+ esp = 0; /* avoid warning */
+ }
+
+ PUSHQ(esp, env->segs[R_SS].selector);
+ PUSHQ(esp, ESP);
+ PUSHQ(esp, cpu_compute_eflags(env));
+ PUSHQ(esp, env->segs[R_CS].selector);
+ PUSHQ(esp, old_eip);
+ if (has_error_code) {
+ PUSHQ(esp, error_code);
+ }
+
+ if (new_stack) {
+ ss = 0 | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
+ }
+ ESP = esp;
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, dpl);
+ env->eip = offset;
+
+ /* interrupt gates clear the IF mask (trap gates do not) */
+ if ((type & 1) == 0) {
+ env->eflags &= ~IF_MASK;
+ }
+ env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
+}
+#endif
+
+#ifdef TARGET_X86_64
+#if defined(CONFIG_USER_ONLY)
+void helper_syscall(int next_eip_addend)
+{
+ env->exception_index = EXCP_SYSCALL;
+ env->exception_next_eip = env->eip + next_eip_addend;
+ cpu_loop_exit(env);
+}
+#else
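+/* SYSCALL: save the return RIP in RCX (and RFLAGS in R11 in long mode),
+ then load the flat CS/SS pair selected by the STAR MSR and jump to
+ LSTAR/CSTAR (or to the low 32 bits of STAR outside long mode). */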
+void helper_syscall(int next_eip_addend)
+{
+ int selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err(env, EXCP06_ILLOP, 0);
+ }
+ selector = (env->star >> 32) & 0xffff;
+ if (env->hflags & HF_LMA_MASK) {
+ int code64;
+
+ ECX = env->eip + next_eip_addend;
+ env->regs[11] = cpu_compute_eflags(env);
+
+ code64 = env->hflags & HF_CS64_MASK;
+
+ cpu_x86_set_cpl(env, 0);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eflags &= ~env->fmask;
+ cpu_load_eflags(env, env->eflags, 0);
+ if (code64) {
+ env->eip = env->lstar;
+ } else {
+ env->eip = env->cstar;
+ }
+ } else {
+ ECX = (uint32_t)(env->eip + next_eip_addend);
+
+ cpu_x86_set_cpl(env, 0);
+ cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
+ env->eip = (uint32_t)env->star;
+ }
+}
+#endif
+#endif
+
+#ifdef TARGET_X86_64
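+/* SYSRET: return to CPL 3 using selectors derived from STAR[63:48];
+ in long mode RFLAGS is restored from R11. */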
+void helper_sysret(int dflag)
+{
+ int cpl, selector;
+
+ if (!(env->efer & MSR_EFER_SCE)) {
+ raise_exception_err(env, EXCP06_ILLOP, 0);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ selector = (env->star >> 48) & 0xffff;
+ if (env->hflags & HF_LMA_MASK) {
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ env->eip = ECX;
+ } else {
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)ECX;
+ }
+ cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
+ | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
+ NT_MASK);
+ cpu_x86_set_cpl(env, 3);
+ } else {
+ cpu_x86_load_seg_cache(env, R_CS, selector | 3,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ env->eip = (uint32_t)ECX;
+ cpu_x86_load_seg_cache(env, R_SS, selector + 8,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ env->eflags |= IF_MASK;
+ cpu_x86_set_cpl(env, 3);
+ }
+}
+#endif
+
+/* real mode interrupt */
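+/* The 4-byte IVT entry at intno * 4 supplies the IP:CS target;
+ FLAGS, CS and IP are pushed on the 16-bit stack. */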
+static void do_interrupt_real(int intno, int is_int, int error_code,
+ unsigned int next_eip)
+{
+ SegmentCache *dt;
+ target_ulong ptr, ssp;
+ int selector;
+ uint32_t offset, esp;
+ uint32_t old_cs, old_eip;
+
+ /* real mode (simpler!) */
+ dt = &env->idt;
+ if (intno * 4 + 3 > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
+ }
+ ptr = dt->base + intno * 4;
+ offset = lduw_kernel(ptr);
+ selector = lduw_kernel(ptr + 2);
+ esp = ESP;
+ ssp = env->segs[R_SS].base;
+ if (is_int) {
+ old_eip = next_eip;
+ } else {
+ old_eip = env->eip;
+ }
+ old_cs = env->segs[R_CS].selector;
+ /* XXX: use SS segment size? */
+ PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
+ PUSHW(ssp, esp, 0xffff, old_cs);
+ PUSHW(ssp, esp, 0xffff, old_eip);
+
+ /* update processor state */
+ ESP = (ESP & ~0xffff) | (esp & 0xffff);
+ env->eip = offset;
+ env->segs[R_CS].selector = selector;
+ env->segs[R_CS].base = (selector << 4);
+ env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
+}
+
+#if defined(CONFIG_USER_ONLY)
+/* fake user mode interrupt */
+static void do_interrupt_user(int intno, int is_int, int error_code,
+ target_ulong next_eip)
+{
+ SegmentCache *dt;
+ target_ulong ptr;
+ int dpl, cpl, shift;
+ uint32_t e2;
+
+ dt = &env->idt;
+ if (env->hflags & HF_LMA_MASK) {
+ shift = 4;
+ } else {
+ shift = 3;
+ }
+ ptr = dt->base + (intno << shift);
+ e2 = ldl_kernel(ptr + 4);
+
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ /* check privilege if software int */
+ if (is_int && dpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
+ }
+
+ /* Since we only emulate user space, we cannot do more than
+ exit the emulation with the suitable exception and error
+ code */
+ if (is_int) {
+ EIP = next_eip;
+ }
+}
+
+#else
+
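+/* While inside an SVM guest, record the interrupt or exception being
+ delivered in the VMCB event_inj field; do_interrupt_all clears the
+ VALID bit again once delivery has completed. */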
+static void handle_even_inj(int intno, int is_int, int error_code,
+ int is_hw, int rm)
+{
+ uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+
+ if (!(event_inj & SVM_EVTINJ_VALID)) {
+ int type;
+
+ if (is_int) {
+ type = SVM_EVTINJ_TYPE_SOFT;
+ } else {
+ type = SVM_EVTINJ_TYPE_EXEPT;
+ }
+ event_inj = intno | type | SVM_EVTINJ_VALID;
+ if (!rm && exception_has_error_code(intno)) {
+ event_inj |= SVM_EVTINJ_VALID_ERR;
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err),
+ error_code);
+ }
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj);
+ }
+}
+#endif
+
+/*
+ * Begin execution of an interrupt. is_int is TRUE if coming from
+ * the int instruction. next_eip is the EIP value AFTER the interrupt
+ * instruction; it is only relevant if is_int is TRUE.
+ */
+static void do_interrupt_all(int intno, int is_int, int error_code,
+ target_ulong next_eip, int is_hw)
+{
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
+ if ((env->cr[0] & CR0_PE_MASK)) {
+ static int count;
+
+ qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
+ " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
+ count, intno, error_code, is_int,
+ env->hflags & HF_CPL_MASK,
+ env->segs[R_CS].selector, EIP,
+ (int)env->segs[R_CS].base + EIP,
+ env->segs[R_SS].selector, ESP);
+ if (intno == 0x0e) {
+ qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
+ } else {
+ qemu_log(" EAX=" TARGET_FMT_lx, EAX);
+ }
+ qemu_log("\n");
+ log_cpu_state(env, X86_DUMP_CCOP);
+#if 0
+ {
+ int i;
+ target_ulong ptr;
+
+ qemu_log(" code=");
+ ptr = env->segs[R_CS].base + env->eip;
+ for (i = 0; i < 16; i++) {
+ qemu_log(" %02x", ldub(ptr + i));
+ }
+ qemu_log("\n");
+ }
+#endif
+ count++;
+ }
+ }
+ if (env->cr[0] & CR0_PE_MASK) {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ handle_even_inj(intno, is_int, error_code, is_hw, 0);
+ }
+#endif
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
+ } else
+#endif
+ {
+ do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
+ }
+ } else {
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ handle_even_inj(intno, is_int, error_code, is_hw, 1);
+ }
+#endif
+ do_interrupt_real(intno, is_int, error_code, next_eip);
+ }
+
+#if !defined(CONFIG_USER_ONLY)
+ if (env->hflags & HF_SVMI_MASK) {
+ uint32_t event_inj = ldl_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj));
+
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
+ event_inj & ~SVM_EVTINJ_VALID);
+ }
+#endif
+}
+
+void do_interrupt(CPUX86State *env1)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = env1;
+#if defined(CONFIG_USER_ONLY)
+ /* in user-mode emulation we only raise a fake exception,
+ which is handled outside the CPU execution
+ loop */
+ do_interrupt_user(env->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip);
+ /* successfully delivered */
+ env->old_exception = -1;
+#else
+ /* simulate a real CPU exception. On i386, it can
+ trigger new exceptions, but we do not handle
+ double or triple faults yet. */
+ do_interrupt_all(env->exception_index,
+ env->exception_is_int,
+ env->error_code,
+ env->exception_next_eip, 0);
+ /* successfully delivered */
+ env->old_exception = -1;
+#endif
+ env = saved_env;
+}
+
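+/* Deliver a hardware interrupt requested from outside the translated code
+ (e.g. by the emulated interrupt controller). */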
+void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = env1;
+ do_interrupt_all(intno, 0, 0, 0, is_hw);
+ env = saved_env;
+}
+
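+/* ENTER with a non-zero nesting level: copy level - 1 frame pointers from
+ the old frame, then push the new frame pointer (t1). */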
+void helper_enter_level(int level, int data32, target_ulong t1)
+{
+ target_ulong ssp;
+ uint32_t esp_mask, esp, ebp;
+
+ esp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ ebp = EBP;
+ esp = ESP;
+ if (data32) {
+ /* 32 bit */
+ esp -= 4;
+ while (--level) {
+ esp -= 4;
+ ebp -= 4;
+ stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
+ }
+ esp -= 4;
+ stl(ssp + (esp & esp_mask), t1);
+ } else {
+ /* 16 bit */
+ esp -= 2;
+ while (--level) {
+ esp -= 2;
+ ebp -= 2;
+ stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
+ }
+ esp -= 2;
+ stw(ssp + (esp & esp_mask), t1);
+ }
+}
+
+#ifdef TARGET_X86_64
+void helper_enter64_level(int level, int data64, target_ulong t1)
+{
+ target_ulong esp, ebp;
+
+ ebp = EBP;
+ esp = ESP;
+
+ if (data64) {
+ /* 64 bit */
+ esp -= 8;
+ while (--level) {
+ esp -= 8;
+ ebp -= 8;
+ stq(esp, ldq(ebp));
+ }
+ esp -= 8;
+ stq(esp, t1);
+ } else {
+ /* 16 bit */
+ esp -= 2;
+ while (--level) {
+ esp -= 2;
+ ebp -= 2;
+ stw(esp, lduw(ebp));
+ }
+ esp -= 2;
+ stw(esp, t1);
+ }
+}
+#endif
+
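+/* LLDT: the selector must reference an LDT descriptor in the GDT; in long
+ mode the descriptor is 16 bytes and carries a 64-bit base. */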
+void helper_lldt(int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* XXX: NULL selector case: invalid LDT */
+ env->ldt.base = 0;
+ env->ldt.limit = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ ptr = dt->base + index;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+ if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3;
+
+ e3 = ldl_kernel(ptr + 8);
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ env->ldt.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->ldt, e1, e2);
+ }
+ }
+ env->ldt.selector = selector;
+}
+
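+/* LTR: load the task register from an available TSS descriptor in the GDT
+ and mark that descriptor busy. */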
+void helper_ltr(int selector)
+{
+ SegmentCache *dt;
+ uint32_t e1, e2;
+ int index, type, entry_limit;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ if ((selector & 0xfffc) == 0) {
+ /* NULL selector case: invalid TR */
+ env->tr.base = 0;
+ env->tr.limit = 0;
+ env->tr.flags = 0;
+ } else {
+ if (selector & 0x4) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dt = &env->gdt;
+ index = selector & ~7;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ entry_limit = 15;
+ } else
+#endif
+ {
+ entry_limit = 7;
+ }
+ if ((index + entry_limit) > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ ptr = dt->base + index;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ if ((e2 & DESC_S_MASK) ||
+ (type != 1 && type != 9)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ uint32_t e3, e4;
+
+ e3 = ldl_kernel(ptr + 8);
+ e4 = ldl_kernel(ptr + 12);
+ if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ env->tr.base |= (target_ulong)e3 << 32;
+ } else
+#endif
+ {
+ load_seg_cache_raw_dt(&env->tr, e1, e2);
+ }
+ e2 |= DESC_TSS_BUSY_MASK;
+ stl_kernel(ptr + 4, e2);
+ }
+ env->tr.selector = selector;
+}
+
+/* Only works in protected mode and outside VM86 mode. seg_reg must be != R_CS. */
+void helper_load_seg(int seg_reg, int selector)
+{
+ uint32_t e1, e2;
+ int cpl, dpl, rpl;
+ SegmentCache *dt;
+ int index;
+ target_ulong ptr;
+
+ selector &= 0xffff;
+ cpl = env->hflags & HF_CPL_MASK;
+ if ((selector & 0xfffc) == 0) {
+ /* null selector case */
+ if (seg_reg == R_SS
+#ifdef TARGET_X86_64
+ && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
+#endif
+ ) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
+ } else {
+
+ if (selector & 0x4) {
+ dt = &env->ldt;
+ } else {
+ dt = &env->gdt;
+ }
+ index = selector & ~7;
+ if ((index + 7) > dt->limit) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ ptr = dt->base + index;
+ e1 = ldl_kernel(ptr);
+ e2 = ldl_kernel(ptr + 4);
+
+ if (!(e2 & DESC_S_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (seg_reg == R_SS) {
+ /* must be writable segment */
+ if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (rpl != cpl || dpl != cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ } else {
+ /* must be readable segment */
+ if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+ /* if not conforming code, test rights */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ }
+ }
+
+ if (!(e2 & DESC_P_MASK)) {
+ if (seg_reg == R_SS) {
+ raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
+ } else {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+ }
+
+ /* set the access bit if not already set */
+ if (!(e2 & DESC_A_MASK)) {
+ e2 |= DESC_A_MASK;
+ stl_kernel(ptr + 4, e2);
+ }
+
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+#if 0
+ qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
+ selector, (unsigned long)sc->base, sc->limit, sc->flags);
+#endif
+ }
+}
+
+/* protected mode jump */
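+/* Handles direct far jumps to code segments as well as jumps through
+ call gates and task gates/TSS descriptors. */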
+void helper_ljmp_protected(int new_cs, target_ulong new_eip,
+ int next_eip_addend)
+{
+ int gate_cs, type;
+ uint32_t e1, e2, cpl, dpl, rpl, limit;
+ target_ulong next_eip;
+
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ if (load_segment(&e1, &e2, new_cs) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (dpl != cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit &&
+ !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ EIP = new_eip;
+ } else {
+ /* jump to call or task gate */
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1: /* 286 TSS */
+ case 9: /* 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ next_eip = env->eip + next_eip_addend;
+ switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
+ CC_OP = CC_OP_EFLAGS;
+ break;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ if ((dpl < cpl) || (dpl < rpl)) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ }
+ gate_cs = e1 >> 16;
+ new_eip = (e1 & 0xffff);
+ if (type == 12) {
+ new_eip |= (e2 & 0xffff0000);
+ }
+ if (load_segment(&e1, &e2, gate_cs) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ /* must be code segment */
+ if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
+ (DESC_S_MASK | DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ }
+ if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
+ (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
+ raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
+ }
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ EIP = new_eip;
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ break;
+ }
+ }
+}
+
+/* real mode call */
+void helper_lcall_real(int new_cs, target_ulong new_eip1,
+ int shift, int next_eip)
+{
+ int new_eip;
+ uint32_t esp, esp_mask;
+ target_ulong ssp;
+
+ new_eip = new_eip1;
+ esp = ESP;
+ esp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, esp, esp_mask, next_eip);
+ } else {
+ PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, esp, esp_mask, next_eip);
+ }
+
+ SET_ESP(esp, esp_mask);
+ env->eip = new_eip;
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+}
+
+/* protected mode call */
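+/* A far call through a call gate to a more privileged code segment switches
+ to the inner stack taken from the TSS and copies param_count parameters
+ from the caller's stack. */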
+void helper_lcall_protected(int new_cs, target_ulong new_eip,
+ int shift, int next_eip_addend)
+{
+ int new_stack, i;
+ uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
+ uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
+ uint32_t val, limit, old_sp_mask;
+ target_ulong ssp, old_ssp, next_eip;
+
+ next_eip = env->eip + next_eip_addend;
+ LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
+ LOG_PCALL_STATE(env);
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ if (load_segment(&e1, &e2, new_cs) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ LOG_PCALL("desc=%08x:%08x\n", e1, e2);
+ if (e2 & DESC_S_MASK) {
+ if (!(e2 & DESC_CS_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ /* conforming code segment */
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ } else {
+ /* non conforming code segment */
+ rpl = new_cs & 3;
+ if (rpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (dpl != cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ }
+
+#ifdef TARGET_X86_64
+ /* XXX: check 16/32 bit cases in long mode */
+ if (shift == 2) {
+ target_ulong rsp;
+
+ /* 64 bit case */
+ rsp = ESP;
+ PUSHQ(rsp, env->segs[R_CS].selector);
+ PUSHQ(rsp, next_eip);
+ /* from this point, not restartable */
+ ESP = rsp;
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2), e2);
+ EIP = new_eip;
+ } else
+#endif
+ {
+ sp = ESP;
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ if (shift) {
+ PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, sp, sp_mask, next_eip);
+ } else {
+ PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, sp, sp_mask, next_eip);
+ }
+
+ limit = get_seg_limit(e1, e2);
+ if (new_eip > limit) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ /* from this point, not restartable */
+ SET_ESP(sp, sp_mask);
+ cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
+ get_seg_base(e1, e2), limit, e2);
+ EIP = new_eip;
+ }
+ } else {
+ /* check gate type */
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ rpl = new_cs & 3;
+ switch (type) {
+ case 1: /* available 286 TSS */
+ case 9: /* available 386 TSS */
+ case 5: /* task gate */
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
+ CC_OP = CC_OP_EFLAGS;
+ return;
+ case 4: /* 286 call gate */
+ case 12: /* 386 call gate */
+ break;
+ default:
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ break;
+ }
+ shift = type >> 3;
+
+ if (dpl < cpl || dpl < rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ /* check valid bit */
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ }
+ selector = e1 >> 16;
+ offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
+ param_count = e2 & 0x1f;
+ if ((selector & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+
+ if (load_segment(&e1, &e2, selector) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl > cpl) {
+ raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
+ }
+
+ if (!(e2 & DESC_C_MASK) && dpl < cpl) {
+ /* to inner privilege */
+ get_ss_esp_from_tss(&ss, &sp, dpl);
+ LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
+ "\n",
+ ss, sp, param_count, ESP);
+ if ((ss & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if ((ss & 3) != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (ss_dpl != dpl) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
+ }
+
+ /* push_size = ((param_count * 2) + 8) << shift; */
+
+ old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ old_ssp = env->segs[R_SS].base;
+
+ sp_mask = get_sp_mask(ss_e2);
+ ssp = get_seg_base(ss_e1, ss_e2);
+ if (shift) {
+ PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
+ PUSHL(ssp, sp, sp_mask, ESP);
+ for (i = param_count - 1; i >= 0; i--) {
+ val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
+ PUSHL(ssp, sp, sp_mask, val);
+ }
+ } else {
+ PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
+ PUSHW(ssp, sp, sp_mask, ESP);
+ for (i = param_count - 1; i >= 0; i--) {
+ val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
+ PUSHW(ssp, sp, sp_mask, val);
+ }
+ }
+ new_stack = 1;
+ } else {
+ /* to same privilege */
+ sp = ESP;
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ ssp = env->segs[R_SS].base;
+ /* push_size = (4 << shift); */
+ new_stack = 0;
+ }
+
+ if (shift) {
+ PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
+ PUSHL(ssp, sp, sp_mask, next_eip);
+ } else {
+ PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
+ PUSHW(ssp, sp, sp_mask, next_eip);
+ }
+
+ /* from this point, not restartable */
+
+ if (new_stack) {
+ ss = (ss & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_SS, ss,
+ ssp,
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+
+ selector = (selector & ~3) | dpl;
+ cpu_x86_load_seg_cache(env, R_CS, selector,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, dpl);
+ SET_ESP(sp, sp_mask);
+ EIP = offset;
+ }
+}
+
+/* real and vm86 mode iret */
+void helper_iret_real(int shift)
+{
+ uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
+ target_ulong ssp;
+ int eflags_mask;
+
+ sp_mask = 0xffff; /* XXX: use SS segment size? */
+ sp = ESP;
+ ssp = env->segs[R_SS].base;
+ if (shift == 1) {
+ /* 32 bits */
+ POPL(ssp, sp, sp_mask, new_eip);
+ POPL(ssp, sp, sp_mask, new_cs);
+ new_cs &= 0xffff;
+ POPL(ssp, sp, sp_mask, new_eflags);
+ } else {
+ /* 16 bits */
+ POPW(ssp, sp, sp_mask, new_eip);
+ POPW(ssp, sp, sp_mask, new_cs);
+ POPW(ssp, sp, sp_mask, new_eflags);
+ }
+ ESP = (ESP & ~sp_mask) | (sp & sp_mask);
+ env->segs[R_CS].selector = new_cs;
+ env->segs[R_CS].base = (new_cs << 4);
+ env->eip = new_eip;
+ if (env->eflags & VM_MASK) {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
+ NT_MASK;
+ } else {
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
+ RF_MASK | NT_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
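+/* On a return to an outer privilege level, data segment registers whose
+ DPL is below the new CPL are loaded with the null selector, as the
+ hardware does. */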
+static inline void validate_seg(int seg_reg, int cpl)
+{
+ int dpl;
+ uint32_t e2;
+
+ /* XXX: on x86_64, we do not want to nullify FS and GS because
+ they may still contain a valid base. I would be interested to
+ know how a real x86_64 CPU behaves */
+ if ((seg_reg == R_FS || seg_reg == R_GS) &&
+ (env->segs[seg_reg].selector & 0xfffc) == 0) {
+ return;
+ }
+
+ e2 = env->segs[seg_reg].flags;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
+ /* data or non conforming code segment */
+ if (dpl < cpl) {
+ cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
+ }
+ }
+}
+
+/* protected mode ret/iret */
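+/* Shared tail of lret and iret: pops CS:EIP (plus EFLAGS for iret) and
+ handles returns to an outer privilege level as well as to vm86 mode. */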
+static inline void helper_ret_protected(int shift, int is_iret, int addend)
+{
+ uint32_t new_cs, new_eflags, new_ss;
+ uint32_t new_es, new_ds, new_fs, new_gs;
+ uint32_t e1, e2, ss_e1, ss_e2;
+ int cpl, dpl, rpl, eflags_mask, iopl;
+ target_ulong ssp, sp, new_eip, new_esp, sp_mask;
+
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(env->segs[R_SS].flags);
+ }
+ sp = ESP;
+ ssp = env->segs[R_SS].base;
+ new_eflags = 0; /* avoid warning */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ(sp, new_eip);
+ POPQ(sp, new_cs);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPQ(sp, new_eflags);
+ }
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL(ssp, sp, sp_mask, new_eip);
+ POPL(ssp, sp, sp_mask, new_cs);
+ new_cs &= 0xffff;
+ if (is_iret) {
+ POPL(ssp, sp, sp_mask, new_eflags);
+ if (new_eflags & VM_MASK) {
+ goto return_to_vm86;
+ }
+ }
+ } else {
+ /* 16 bits */
+ POPW(ssp, sp, sp_mask, new_eip);
+ POPW(ssp, sp, sp_mask, new_cs);
+ if (is_iret) {
+ POPW(ssp, sp, sp_mask, new_eflags);
+ }
+ }
+ }
+ LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
+ new_cs, new_eip, shift, addend);
+ LOG_PCALL_STATE(env);
+ if ((new_cs & 0xfffc) == 0) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (load_segment(&e1, &e2, new_cs) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ if (!(e2 & DESC_S_MASK) ||
+ !(e2 & DESC_CS_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ cpl = env->hflags & HF_CPL_MASK;
+ rpl = new_cs & 3;
+ if (rpl < cpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ if (e2 & DESC_C_MASK) {
+ if (dpl > rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ } else {
+ if (dpl != rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
+ }
+ }
+ if (!(e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
+ }
+
+ sp += addend;
+ if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
+ ((env->hflags & HF_CS64_MASK) && !is_iret))) {
+ /* return to same privilege level */
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ } else {
+ /* return to different privilege level */
+#ifdef TARGET_X86_64
+ if (shift == 2) {
+ POPQ(sp, new_esp);
+ POPQ(sp, new_ss);
+ new_ss &= 0xffff;
+ } else
+#endif
+ {
+ if (shift == 1) {
+ /* 32 bits */
+ POPL(ssp, sp, sp_mask, new_esp);
+ POPL(ssp, sp, sp_mask, new_ss);
+ new_ss &= 0xffff;
+ } else {
+ /* 16 bits */
+ POPW(ssp, sp, sp_mask, new_esp);
+ POPW(ssp, sp, sp_mask, new_ss);
+ }
+ }
+ LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
+ new_ss, new_esp);
+ if ((new_ss & 0xfffc) == 0) {
+#ifdef TARGET_X86_64
+ /* NULL ss is allowed in long mode if cpl != 3 */
+ /* XXX: test CS64? */
+ if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
+ } else
+#endif
+ {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ } else {
+ if ((new_ss & 3) != rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ }
+ if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
+ raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_S_MASK) ||
+ (ss_e2 & DESC_CS_MASK) ||
+ !(ss_e2 & DESC_W_MASK)) {
+ raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ }
+ dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
+ if (dpl != rpl) {
+ raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
+ }
+ if (!(ss_e2 & DESC_P_MASK)) {
+ raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, new_ss,
+ get_seg_base(ss_e1, ss_e2),
+ get_seg_limit(ss_e1, ss_e2),
+ ss_e2);
+ }
+
+ cpu_x86_load_seg_cache(env, R_CS, new_cs,
+ get_seg_base(e1, e2),
+ get_seg_limit(e1, e2),
+ e2);
+ cpu_x86_set_cpl(env, rpl);
+ sp = new_esp;
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_CS64_MASK) {
+ sp_mask = -1;
+ } else
+#endif
+ {
+ sp_mask = get_sp_mask(ss_e2);
+ }
+
+ /* validate data segments */
+ validate_seg(R_ES, rpl);
+ validate_seg(R_DS, rpl);
+ validate_seg(R_FS, rpl);
+ validate_seg(R_GS, rpl);
+
+ sp += addend;
+ }
+ SET_ESP(sp, sp_mask);
+ env->eip = new_eip;
+ if (is_iret) {
+ /* NOTE: 'cpl' is the _old_ CPL */
+ eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
+ if (cpl == 0) {
+ eflags_mask |= IOPL_MASK;
+ }
+ iopl = (env->eflags >> IOPL_SHIFT) & 3;
+ if (cpl <= iopl) {
+ eflags_mask |= IF_MASK;
+ }
+ if (shift == 0) {
+ eflags_mask &= 0xffff;
+ }
+ cpu_load_eflags(env, new_eflags, eflags_mask);
+ }
+ return;
+
+ return_to_vm86:
+ POPL(ssp, sp, sp_mask, new_esp);
+ POPL(ssp, sp, sp_mask, new_ss);
+ POPL(ssp, sp, sp_mask, new_es);
+ POPL(ssp, sp, sp_mask, new_ds);
+ POPL(ssp, sp, sp_mask, new_fs);
+ POPL(ssp, sp, sp_mask, new_gs);
+
+ /* modify processor state */
+ cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
+ IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
+ VIP_MASK);
+ load_seg_vm(R_CS, new_cs & 0xffff);
+ cpu_x86_set_cpl(env, 3);
+ load_seg_vm(R_SS, new_ss & 0xffff);
+ load_seg_vm(R_ES, new_es & 0xffff);
+ load_seg_vm(R_DS, new_ds & 0xffff);
+ load_seg_vm(R_FS, new_fs & 0xffff);
+ load_seg_vm(R_GS, new_gs & 0xffff);
+
+ env->eip = new_eip & 0xffff;
+ ESP = new_esp;
+}
+
+void helper_iret_protected(int shift, int next_eip)
+{
+ int tss_selector, type;
+ uint32_t e1, e2;
+
+ /* specific case for TSS */
+ if (env->eflags & NT_MASK) {
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+#endif
+ tss_selector = lduw_kernel(env->tr.base + 0);
+ if (tss_selector & 4) {
+ raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ }
+ if (load_segment(&e1, &e2, tss_selector) != 0) {
+ raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ }
+ type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
+ /* NOTE: we check both segment and busy TSS */
+ if (type != 3) {
+ raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
+ }
+ switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
+ } else {
+ helper_ret_protected(shift, 1, 0);
+ }
+ env->hflags2 &= ~HF2_NMI_MASK;
+}
+
+void helper_lret_protected(int shift, int addend)
+{
+ helper_ret_protected(shift, 0, addend);
+}
+
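+/* SYSENTER: enter CPL 0 with the flat CS/SS pair derived from
+ IA32_SYSENTER_CS and the stack/entry point from the SYSENTER_ESP and
+ SYSENTER_EIP MSRs. */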
+void helper_sysenter(void)
+{
+ if (env->sysenter_cs == 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
+ cpu_x86_set_cpl(env, 0);
+
+#ifdef TARGET_X86_64
+ if (env->hflags & HF_LMA_MASK) {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ }
+ cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
+ 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK |
+ DESC_W_MASK | DESC_A_MASK);
+ ESP = env->sysenter_esp;
+ EIP = env->sysenter_eip;
+}
+
+void helper_sysexit(int dflag)
+{
+ int cpl;
+
+ cpl = env->hflags & HF_CPL_MASK;
+ if (env->sysenter_cs == 0 || cpl != 0) {
+ raise_exception_err(env, EXCP0D_GPF, 0);
+ }
+ cpu_x86_set_cpl(env, 3);
+#ifdef TARGET_X86_64
+ if (dflag == 2) {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
+ DESC_L_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ } else
+#endif
+ {
+ cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
+ cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
+ 3, 0, 0xffffffff,
+ DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
+ DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
+ DESC_W_MASK | DESC_A_MASK);
+ }
+ ESP = ECX;
+ EIP = EDX;
+}
+
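+/* LSL: return the segment limit for the selector and set ZF on success;
+ ZF is cleared (recorded in CC_SRC) when the descriptor is not accessible. */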
+target_ulong helper_lsl(target_ulong selector1)
+{
+ unsigned int limit;
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
+ selector = selector1 & 0xffff;
+ eflags = helper_cc_compute_all(CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment(&e1, &e2, selector) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1:
+ case 2:
+ case 3:
+ case 9:
+ case 11:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ limit = get_seg_limit(e1, e2);
+ CC_SRC = eflags | CC_Z;
+ return limit;
+}
+
+target_ulong helper_lar(target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl, type;
+
+ selector = selector1 & 0xffff;
+ eflags = helper_cc_compute_all(CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment(&e1, &e2, selector) != 0) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_S_MASK) {
+ if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
+ /* conforming */
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
+ switch (type) {
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 9:
+ case 11:
+ case 12:
+ break;
+ default:
+ goto fail;
+ }
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return 0;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+ return e2 & 0x00f0ff00;
+}
+
+void helper_verr(target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = helper_cc_compute_all(CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment(&e1, &e2, selector) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ if (!(e2 & DESC_R_MASK)) {
+ goto fail;
+ }
+ if (!(e2 & DESC_C_MASK)) {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ }
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
+
+void helper_verw(target_ulong selector1)
+{
+ uint32_t e1, e2, eflags, selector;
+ int rpl, dpl, cpl;
+
+ selector = selector1 & 0xffff;
+ eflags = helper_cc_compute_all(CC_OP);
+ if ((selector & 0xfffc) == 0) {
+ goto fail;
+ }
+ if (load_segment(&e1, &e2, selector) != 0) {
+ goto fail;
+ }
+ if (!(e2 & DESC_S_MASK)) {
+ goto fail;
+ }
+ rpl = selector & 3;
+ dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+ cpl = env->hflags & HF_CPL_MASK;
+ if (e2 & DESC_CS_MASK) {
+ goto fail;
+ } else {
+ if (dpl < cpl || dpl < rpl) {
+ goto fail;
+ }
+ if (!(e2 & DESC_W_MASK)) {
+ fail:
+ CC_SRC = eflags & ~CC_Z;
+ return;
+ }
+ }
+ CC_SRC = eflags | CC_Z;
+}
+
+#if defined(CONFIG_USER_ONLY)
+void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = s;
+ if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
+ selector &= 0xffff;
+ cpu_x86_load_seg_cache(env, seg_reg, selector,
+ (selector << 4), 0xffff, 0);
+ } else {
+ helper_load_seg(seg_reg, selector);
+ }
+ env = saved_env;
+}
+#endif
diff --git a/target-i386/shift_helper_template.h b/target-i386/shift_helper_template.h
new file mode 100644
index 0000000000..239ee0973c
--- /dev/null
+++ b/target-i386/shift_helper_template.h
@@ -0,0 +1,110 @@
+/*
+ * x86 shift helpers
+ *
+ * Copyright (c) 2008 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define DATA_BITS (1 << (3 + SHIFT))
+#define SHIFT_MASK (DATA_BITS - 1)
+#if DATA_BITS <= 32
+#define SHIFT1_MASK 0x1f
+#else
+#define SHIFT1_MASK 0x3f
+#endif
+
+#if DATA_BITS == 8
+#define SUFFIX b
+#define DATA_MASK 0xff
+#elif DATA_BITS == 16
+#define SUFFIX w
+#define DATA_MASK 0xffff
+#elif DATA_BITS == 32
+#define SUFFIX l
+#define DATA_MASK 0xffffffff
+#elif DATA_BITS == 64
+#define SUFFIX q
+#define DATA_MASK 0xffffffffffffffffULL
+#else
+#error unhandled operand size
+#endif
+
+target_ulong glue(helper_rcl, SUFFIX)(target_ulong t0, target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = helper_cc_compute_all(CC_OP);
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 << count) | ((target_ulong)(eflags & CC_C) << (count - 1));
+ if (count > 1) {
+ res |= t0 >> (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_tmp = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (DATA_BITS - count)) & CC_C);
+ } else {
+ env->cc_tmp = -1;
+ }
+ return t0;
+}
+
+target_ulong glue(helper_rcr, SUFFIX)(target_ulong t0, target_ulong t1)
+{
+ int count, eflags;
+ target_ulong src;
+ target_long res;
+
+ count = t1 & SHIFT1_MASK;
+#if DATA_BITS == 16
+ count = rclw_table[count];
+#elif DATA_BITS == 8
+ count = rclb_table[count];
+#endif
+ if (count) {
+ eflags = helper_cc_compute_all(CC_OP);
+ t0 &= DATA_MASK;
+ src = t0;
+ res = (t0 >> count) |
+ ((target_ulong)(eflags & CC_C) << (DATA_BITS - count));
+ if (count > 1) {
+ res |= t0 << (DATA_BITS + 1 - count);
+ }
+ t0 = res;
+ env->cc_tmp = (eflags & ~(CC_C | CC_O)) |
+ (lshift(src ^ t0, 11 - (DATA_BITS - 1)) & CC_O) |
+ ((src >> (count - 1)) & CC_C);
+ } else {
+ env->cc_tmp = -1;
+ }
+ return t0;
+}
+
+#undef DATA_BITS
+#undef SHIFT_MASK
+#undef SHIFT1_MASK
+#undef DATA_TYPE
+#undef DATA_MASK
+#undef SUFFIX
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
new file mode 100644
index 0000000000..bc1bfa2a59
--- /dev/null
+++ b/target-i386/smm_helper.c
@@ -0,0 +1,307 @@
+/*
+ * x86 SMM helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+/* SMM support */
+
+#if defined(CONFIG_USER_ONLY)
+
+void do_smm_enter(CPUX86State *env1)
+{
+}
+
+void helper_rsm(void)
+{
+}
+
+#else
+
+#ifdef TARGET_X86_64
+#define SMM_REVISION_ID 0x00020064
+#else
+#define SMM_REVISION_ID 0x00020000
+#endif
+
+void do_smm_enter(CPUX86State *env1)
+{
+ target_ulong sm_state;
+ SegmentCache *dt;
+ int i, offset;
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = env1;
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
+ log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+
+ env->hflags |= HF_SMM_MASK;
+ cpu_smm_update(env);
+
+ sm_state = env->smbase + 0x8000;
+
+#ifdef TARGET_X86_64
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ offset = 0x7e00 + i * 16;
+ stw_phys(sm_state + offset, dt->selector);
+ stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
+ stl_phys(sm_state + offset + 4, dt->limit);
+ stq_phys(sm_state + offset + 8, dt->base);
+ }
+
+ stq_phys(sm_state + 0x7e68, env->gdt.base);
+ stl_phys(sm_state + 0x7e64, env->gdt.limit);
+
+ stw_phys(sm_state + 0x7e70, env->ldt.selector);
+ stq_phys(sm_state + 0x7e78, env->ldt.base);
+ stl_phys(sm_state + 0x7e74, env->ldt.limit);
+ stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
+
+ stq_phys(sm_state + 0x7e88, env->idt.base);
+ stl_phys(sm_state + 0x7e84, env->idt.limit);
+
+ stw_phys(sm_state + 0x7e90, env->tr.selector);
+ stq_phys(sm_state + 0x7e98, env->tr.base);
+ stl_phys(sm_state + 0x7e94, env->tr.limit);
+ stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+
+ stq_phys(sm_state + 0x7ed0, env->efer);
+
+ stq_phys(sm_state + 0x7ff8, EAX);
+ stq_phys(sm_state + 0x7ff0, ECX);
+ stq_phys(sm_state + 0x7fe8, EDX);
+ stq_phys(sm_state + 0x7fe0, EBX);
+ stq_phys(sm_state + 0x7fd8, ESP);
+ stq_phys(sm_state + 0x7fd0, EBP);
+ stq_phys(sm_state + 0x7fc8, ESI);
+ stq_phys(sm_state + 0x7fc0, EDI);
+ for (i = 8; i < 16; i++) {
+ stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
+ }
+ stq_phys(sm_state + 0x7f78, env->eip);
+ stl_phys(sm_state + 0x7f70, cpu_compute_eflags(env));
+ stl_phys(sm_state + 0x7f68, env->dr[6]);
+ stl_phys(sm_state + 0x7f60, env->dr[7]);
+
+ stl_phys(sm_state + 0x7f48, env->cr[4]);
+ stl_phys(sm_state + 0x7f50, env->cr[3]);
+ stl_phys(sm_state + 0x7f58, env->cr[0]);
+
+ stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(sm_state + 0x7f00, env->smbase);
+#else
+ stl_phys(sm_state + 0x7ffc, env->cr[0]);
+ stl_phys(sm_state + 0x7ff8, env->cr[3]);
+ stl_phys(sm_state + 0x7ff4, cpu_compute_eflags(env));
+ stl_phys(sm_state + 0x7ff0, env->eip);
+ stl_phys(sm_state + 0x7fec, EDI);
+ stl_phys(sm_state + 0x7fe8, ESI);
+ stl_phys(sm_state + 0x7fe4, EBP);
+ stl_phys(sm_state + 0x7fe0, ESP);
+ stl_phys(sm_state + 0x7fdc, EBX);
+ stl_phys(sm_state + 0x7fd8, EDX);
+ stl_phys(sm_state + 0x7fd4, ECX);
+ stl_phys(sm_state + 0x7fd0, EAX);
+ stl_phys(sm_state + 0x7fcc, env->dr[6]);
+ stl_phys(sm_state + 0x7fc8, env->dr[7]);
+
+ stl_phys(sm_state + 0x7fc4, env->tr.selector);
+ stl_phys(sm_state + 0x7f64, env->tr.base);
+ stl_phys(sm_state + 0x7f60, env->tr.limit);
+ stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
+
+ stl_phys(sm_state + 0x7fc0, env->ldt.selector);
+ stl_phys(sm_state + 0x7f80, env->ldt.base);
+ stl_phys(sm_state + 0x7f7c, env->ldt.limit);
+ stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
+
+ stl_phys(sm_state + 0x7f74, env->gdt.base);
+ stl_phys(sm_state + 0x7f70, env->gdt.limit);
+
+ stl_phys(sm_state + 0x7f58, env->idt.base);
+ stl_phys(sm_state + 0x7f54, env->idt.limit);
+
+ for (i = 0; i < 6; i++) {
+ dt = &env->segs[i];
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
+ stl_phys(sm_state + offset + 8, dt->base);
+ stl_phys(sm_state + offset + 4, dt->limit);
+ stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
+ }
+ stl_phys(sm_state + 0x7f14, env->cr[4]);
+
+ stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
+ stl_phys(sm_state + 0x7ef8, env->smbase);
+#endif
+ /* init SMM cpu state */
+
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, 0);
+#endif
+ cpu_load_eflags(env, 0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C |
+ DF_MASK));
+ env->eip = 0x00008000;
+ cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
+ 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
+ cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
+
+ cpu_x86_update_cr0(env,
+ env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK |
+ CR0_PG_MASK));
+ cpu_x86_update_cr4(env, 0);
+ env->dr[7] = 0x00000400;
+ CC_OP = CC_OP_EFLAGS;
+ env = saved_env;
+}
+
+void helper_rsm(void)
+{
+ target_ulong sm_state;
+ int i, offset;
+ uint32_t val;
+
+ sm_state = env->smbase + 0x8000;
+#ifdef TARGET_X86_64
+ cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));
+
+ for (i = 0; i < 6; i++) {
+ offset = 0x7e00 + i * 16;
+ cpu_x86_load_seg_cache(env, i,
+ lduw_phys(sm_state + offset),
+ ldq_phys(sm_state + offset + 8),
+ ldl_phys(sm_state + offset + 4),
+ (lduw_phys(sm_state + offset + 2) &
+ 0xf0ff) << 8);
+ }
+
+ env->gdt.base = ldq_phys(sm_state + 0x7e68);
+ env->gdt.limit = ldl_phys(sm_state + 0x7e64);
+
+ env->ldt.selector = lduw_phys(sm_state + 0x7e70);
+ env->ldt.base = ldq_phys(sm_state + 0x7e78);
+ env->ldt.limit = ldl_phys(sm_state + 0x7e74);
+ env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
+
+ env->idt.base = ldq_phys(sm_state + 0x7e88);
+ env->idt.limit = ldl_phys(sm_state + 0x7e84);
+
+ env->tr.selector = lduw_phys(sm_state + 0x7e90);
+ env->tr.base = ldq_phys(sm_state + 0x7e98);
+ env->tr.limit = ldl_phys(sm_state + 0x7e94);
+ env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
+
+ EAX = ldq_phys(sm_state + 0x7ff8);
+ ECX = ldq_phys(sm_state + 0x7ff0);
+ EDX = ldq_phys(sm_state + 0x7fe8);
+ EBX = ldq_phys(sm_state + 0x7fe0);
+ ESP = ldq_phys(sm_state + 0x7fd8);
+ EBP = ldq_phys(sm_state + 0x7fd0);
+ ESI = ldq_phys(sm_state + 0x7fc8);
+ EDI = ldq_phys(sm_state + 0x7fc0);
+ for (i = 8; i < 16; i++) {
+ env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
+ }
+ env->eip = ldq_phys(sm_state + 0x7f78);
+ cpu_load_eflags(env, ldl_phys(sm_state + 0x7f70),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->dr[6] = ldl_phys(sm_state + 0x7f68);
+ env->dr[7] = ldl_phys(sm_state + 0x7f60);
+
+ cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
+ cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
+ cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
+
+ val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
+ }
+#else
+ cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
+ cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
+ cpu_load_eflags(env, ldl_phys(sm_state + 0x7ff4),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ env->eip = ldl_phys(sm_state + 0x7ff0);
+ EDI = ldl_phys(sm_state + 0x7fec);
+ ESI = ldl_phys(sm_state + 0x7fe8);
+ EBP = ldl_phys(sm_state + 0x7fe4);
+ ESP = ldl_phys(sm_state + 0x7fe0);
+ EBX = ldl_phys(sm_state + 0x7fdc);
+ EDX = ldl_phys(sm_state + 0x7fd8);
+ ECX = ldl_phys(sm_state + 0x7fd4);
+ EAX = ldl_phys(sm_state + 0x7fd0);
+ env->dr[6] = ldl_phys(sm_state + 0x7fcc);
+ env->dr[7] = ldl_phys(sm_state + 0x7fc8);
+
+ env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
+ env->tr.base = ldl_phys(sm_state + 0x7f64);
+ env->tr.limit = ldl_phys(sm_state + 0x7f60);
+ env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
+
+ env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
+ env->ldt.base = ldl_phys(sm_state + 0x7f80);
+ env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
+ env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
+
+ env->gdt.base = ldl_phys(sm_state + 0x7f74);
+ env->gdt.limit = ldl_phys(sm_state + 0x7f70);
+
+ env->idt.base = ldl_phys(sm_state + 0x7f58);
+ env->idt.limit = ldl_phys(sm_state + 0x7f54);
+
+ for (i = 0; i < 6; i++) {
+ if (i < 3) {
+ offset = 0x7f84 + i * 12;
+ } else {
+ offset = 0x7f2c + (i - 3) * 12;
+ }
+ cpu_x86_load_seg_cache(env, i,
+ ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
+ ldl_phys(sm_state + offset + 8),
+ ldl_phys(sm_state + offset + 4),
+ (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
+ }
+ cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
+
+ val = ldl_phys(sm_state + 0x7efc); /* revision ID */
+ if (val & 0x20000) {
+ env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
+ }
+#endif
+ CC_OP = CC_OP_EFLAGS;
+ env->hflags &= ~HF_SMM_MASK;
+ cpu_smm_update(env);
+
+ qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
+ log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
+}
+
+#endif /* !CONFIG_USER_ONLY */
diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c
new file mode 100644
index 0000000000..64d842c82c
--- /dev/null
+++ b/target-i386/svm_helper.c
@@ -0,0 +1,716 @@
+/*
+ * x86 SVM helpers
+ *
+ * Copyright (c) 2003 Fabrice Bellard
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "dyngen-exec.h"
+#include "helper.h"
+
+/* Secure Virtual Machine helpers */
+
+#if defined(CONFIG_USER_ONLY)
+
+void helper_vmrun(int aflag, int next_eip_addend)
+{
+}
+
+void helper_vmmcall(void)
+{
+}
+
+void helper_vmload(int aflag)
+{
+}
+
+void helper_vmsave(int aflag)
+{
+}
+
+void helper_stgi(void)
+{
+}
+
+void helper_clgi(void)
+{
+}
+
+void helper_skinit(void)
+{
+}
+
+void helper_invlpga(int aflag)
+{
+}
+
+void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
+{
+}
+
+void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
+{
+}
+
+void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
+ uint64_t param)
+{
+}
+
+void helper_svm_check_io(uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+}
+#else
+
+static inline void svm_save_seg(target_phys_addr_t addr,
+ const SegmentCache *sc)
+{
+ stw_phys(addr + offsetof(struct vmcb_seg, selector),
+ sc->selector);
+ stq_phys(addr + offsetof(struct vmcb_seg, base),
+ sc->base);
+ stl_phys(addr + offsetof(struct vmcb_seg, limit),
+ sc->limit);
+ stw_phys(addr + offsetof(struct vmcb_seg, attrib),
+ ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
+}
+
+static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
+{
+ unsigned int flags;
+
+ sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
+ sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
+ sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
+ flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
+ sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
+}
+
+static inline void svm_load_seg_cache(target_phys_addr_t addr,
+ CPUX86State *env, int seg_reg)
+{
+ SegmentCache sc1, *sc = &sc1;
+
+ svm_load_seg(addr, sc);
+ cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
+ sc->base, sc->limit, sc->flags);
+}
+
+void helper_vmrun(int aflag, int next_eip_addend)
+{
+ target_ulong addr;
+ uint32_t event_inj;
+ uint32_t int_ctl;
+
+ helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);
+
+ if (aflag == 2) {
+ addr = EAX;
+ } else {
+ addr = (uint32_t)EAX;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);
+
+ env->vm_vmcb = addr;
+
+ /* save the current CPU state in the hsave page */
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
+
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+
+ svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
+ EIP + next_eip_addend);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
+ stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
+
+ /* load the interception bitmaps so we do not need to access the
+ vmcb in svm mode */
+ env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.intercept));
+ env->intercept_cr_read = lduw_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_read));
+ env->intercept_cr_write = lduw_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_cr_write));
+ env->intercept_dr_read = lduw_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_read));
+ env->intercept_dr_write = lduw_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_dr_write));
+ env->intercept_exceptions = ldl_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.intercept_exceptions
+ ));
+
+ /* enable intercepts */
+ env->hflags |= HF_SVMI_MASK;
+
+ env->tsc_offset = ldq_phys(env->vm_vmcb +
+ offsetof(struct vmcb, control.tsc_offset));
+
+ env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ /* clear exit_info_2 so we behave like the real hardware */
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
+
+ cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.cr0)));
+ cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.cr4)));
+ cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.cr3)));
+ env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
+ int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ if (int_ctl & V_INTR_MASKING_MASK) {
+ env->v_tpr = int_ctl & V_TPR_MASK;
+ env->hflags2 |= HF2_VINTR_MASK;
+ if (env->eflags & IF_MASK) {
+ env->hflags2 |= HF2_HIF_MASK;
+ }
+ }
+
+ cpu_load_efer(env,
+ ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ CC_OP = CC_OP_EFLAGS;
+
+ svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
+ env, R_ES);
+ svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ env, R_CS);
+ svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ env, R_SS);
+ svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ env, R_DS);
+
+ EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
+ env->eip = EIP;
+ ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
+ EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
+ env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
+ env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
+ cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
+ save.cpl)));
+
+ /* FIXME: guest state consistency checks */
+
+ switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
+ case TLB_CONTROL_DO_NOTHING:
+ break;
+ case TLB_CONTROL_FLUSH_ALL_ASID:
+ /* FIXME: this is not 100% correct but should work for now */
+ tlb_flush(env, 1);
+ break;
+ }
+
+ env->hflags2 |= HF2_GIF_MASK;
+
+ if (int_ctl & V_IRQ_MASK) {
+ env->interrupt_request |= CPU_INTERRUPT_VIRQ;
+ }
+
+ /* maybe we need to inject an event */
+ event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj));
+ if (event_inj & SVM_EVTINJ_VALID) {
+ uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
+ uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
+ uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.event_inj_err));
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
+ /* FIXME: need to implement valid_err */
+ switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
+ case SVM_EVTINJ_TYPE_INTR:
+ env->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
+ /* XXX: is it always correct? */
+ do_interrupt_x86_hardirq(env, vector, 1);
+ break;
+ case SVM_EVTINJ_TYPE_NMI:
+ env->exception_index = EXCP02_NMI;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = EIP;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
+ cpu_loop_exit(env);
+ break;
+ case SVM_EVTINJ_TYPE_EXEPT:
+ env->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 0;
+ env->exception_next_eip = -1;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
+ cpu_loop_exit(env);
+ break;
+ case SVM_EVTINJ_TYPE_SOFT:
+ env->exception_index = vector;
+ env->error_code = event_inj_err;
+ env->exception_is_int = 1;
+ env->exception_next_eip = EIP;
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
+ cpu_loop_exit(env);
+ break;
+ }
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
+ env->error_code);
+ }
+}
+
+void helper_vmmcall(void)
+{
+ helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_vmload(int aflag)
+{
+ target_ulong addr;
+
+ helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);
+
+ if (aflag == 2) {
+ addr = EAX;
+ } else {
+ addr = (uint32_t)EAX;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
+ env, R_FS);
+ svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
+ env, R_GS);
+ svm_load_seg(addr + offsetof(struct vmcb, save.tr),
+ &env->tr);
+ svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
+ &env->ldt);
+
+#ifdef TARGET_X86_64
+ env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
+ save.kernel_gs_base));
+ env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
+ env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
+ env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
+#endif
+ env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
+ env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
+ env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
+ save.sysenter_esp));
+ env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
+ save.sysenter_eip));
+}
+
+void helper_vmsave(int aflag)
+{
+ target_ulong addr;
+
+ helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);
+
+ if (aflag == 2) {
+ addr = EAX;
+ } else {
+ addr = (uint32_t)EAX;
+ }
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
+ "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
+ addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
+ env->segs[R_FS].base);
+
+ svm_save_seg(addr + offsetof(struct vmcb, save.fs),
+ &env->segs[R_FS]);
+ svm_save_seg(addr + offsetof(struct vmcb, save.gs),
+ &env->segs[R_GS]);
+ svm_save_seg(addr + offsetof(struct vmcb, save.tr),
+ &env->tr);
+ svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
+ &env->ldt);
+
+#ifdef TARGET_X86_64
+ stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
+ env->kernelgsbase);
+ stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
+ stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
+ stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
+#endif
+ stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
+ stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
+ stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
+ env->sysenter_esp);
+ stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
+ env->sysenter_eip);
+}
+
+void helper_stgi(void)
+{
+ helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
+ env->hflags2 |= HF2_GIF_MASK;
+}
+
+void helper_clgi(void)
+{
+ helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
+ env->hflags2 &= ~HF2_GIF_MASK;
+}
+
+void helper_skinit(void)
+{
+ helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
+ /* XXX: not implemented */
+ raise_exception(env, EXCP06_ILLOP);
+}
+
+void helper_invlpga(int aflag)
+{
+ target_ulong addr;
+
+ helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);
+
+ if (aflag == 2) {
+ addr = EAX;
+ } else {
+ addr = (uint32_t)EAX;
+ }
+
+ /* XXX: could use the ASID to decide whether the flush is needed */
+ tlb_flush_page(env, addr);
+}
+
+void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
+{
+ if (likely(!(env->hflags & HF_SVMI_MASK))) {
+ return;
+ }
+ switch (type) {
+ case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
+ if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
+ if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
+ if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
+ if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
+ if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ case SVM_EXIT_MSR:
+ if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = ldq_phys(env->vm_vmcb +
+ offsetof(struct vmcb,
+ control.msrpm_base_pa));
+ uint32_t t0, t1;
+
+ switch ((uint32_t)ECX) {
+ case 0 ... 0x1fff:
+ t0 = (ECX * 2) % 8;
+ t1 = (ECX * 2) / 8;
+ break;
+ case 0xc0000000 ... 0xc0001fff:
+ t0 = (8192 + ECX - 0xc0000000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ case 0xc0010000 ... 0xc0011fff:
+ t0 = (16384 + ECX - 0xc0010000) * 2;
+ t1 = (t0 / 8);
+ t0 %= 8;
+ break;
+ default:
+ helper_vmexit(type, param);
+ t0 = 0;
+ t1 = 0;
+ break;
+ }
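+ /* For example, ECX = 0xc0000080 (EFER) falls in the second range:
+ * t0 = (8192 + 0x80) * 2 = 16640, so the byte offset is t1 = 2080 and
+ * the bit offset t0 = 0; the read (param = 0) or write (param = 1)
+ * permission bit is then tested below via (1 << param) << t0. */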
+ if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
+ helper_vmexit(type, param);
+ }
+ }
+ break;
+ default:
+ if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
+ helper_vmexit(type, param);
+ }
+ break;
+ }
+}
+
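+/* The helpers in this file operate on the global 'env' declared via
+ * dyngen-exec.h; this wrapper temporarily swaps in the caller-supplied
+ * CPUX86State so the intercept check can be run from outside generated
+ * code. */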
+void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type,
+ uint64_t param)
+{
+ CPUX86State *saved_env;
+
+ saved_env = env;
+ env = env1;
+ helper_svm_check_intercept_param(type, param);
+ env = saved_env;
+}
+
+void helper_svm_check_io(uint32_t port, uint32_t param,
+ uint32_t next_eip_addend)
+{
+ if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
+ /* FIXME: this should be read in at vmrun (faster this way?) */
+ uint64_t addr = ldq_phys(env->vm_vmcb +
+ offsetof(struct vmcb, control.iopm_base_pa));
+ uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
+
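+ /* The IOPM holds one permission bit per I/O port; assuming param uses
+ * the SVM IOIO exit-info encoding with the access-size bits at 4..6,
+ * a 16-bit access yields mask = (1 << 2) - 1 = 3, so two consecutive
+ * port bits are tested below. */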
+ if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
+ /* next EIP */
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
+ env->eip + next_eip_addend);
+ helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
+ }
+ }
+}
+
+/* Note: currently only 32 bits of exit_code are used */
+void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
+{
+ uint32_t int_ctl;
+
+ qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
+ PRIx64 ", " TARGET_FMT_lx ")!\n",
+ exit_code, exit_info_1,
+ ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.exit_info_2)),
+ EIP);
+
+ if (env->hflags & HF_INHIBIT_IRQ_MASK) {
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
+ SVM_INTERRUPT_SHADOW_MASK);
+ env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+ } else {
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
+ }
+
+ /* Save the VM state in the vmcb */
+ svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
+ &env->segs[R_ES]);
+ svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
+ &env->segs[R_CS]);
+ svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
+ &env->segs[R_SS]);
+ svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
+ &env->segs[R_DS]);
+
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
+ env->gdt.base);
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
+ env->gdt.limit);
+
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
+ env->idt.base);
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
+ env->idt.limit);
+
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
+
+ int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
+ int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
+ int_ctl |= env->v_tpr & V_TPR_MASK;
+ if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
+ int_ctl |= V_IRQ_MASK;
+ }
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
+
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
+ cpu_compute_eflags(env));
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
+ stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
+ env->hflags & HF_CPL_MASK);
+
+ /* Reload the host state from vm_hsave */
+ env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
+ env->hflags &= ~HF_SVMI_MASK;
+ env->intercept = 0;
+ env->intercept_exceptions = 0;
+ env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+ env->tsc_offset = 0;
+
+ env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.base));
+ env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.gdtr.limit));
+
+ env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.base));
+ env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.idtr.limit));
+
+ cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.cr0)) |
+ CR0_PE_MASK);
+ cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.cr4)));
+ cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.cr3)));
+ /* we need to set EFER after the control registers so the hidden flags
+ get set properly */
+ cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.efer)));
+ env->eflags = 0;
+ cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
+ save.rflags)),
+ ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
+ CC_OP = CC_OP_EFLAGS;
+
+ svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
+ env, R_ES);
+ svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
+ env, R_CS);
+ svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
+ env, R_SS);
+ svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
+ env, R_DS);
+
+ EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
+ ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
+ EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
+
+ env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
+ env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
+
+ /* other setups */
+ cpu_x86_set_cpl(env, 0);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
+ exit_code);
+ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
+ exit_info_1);
+
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
+ ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj)));
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
+ ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
+ control.event_inj_err)));
+ stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);
+
+ env->hflags2 &= ~HF2_GIF_MASK;
+ /* FIXME: Resets the current ASID register to zero (host ASID). */
+
+ /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
+
+ /* Clears the TSC_OFFSET inside the processor. */
+
+ /* If the host is in PAE mode, the processor reloads the host's PDPEs
+ from the page table indicated by the host's CR3. If the PDPEs contain
+ illegal state, the processor causes a shutdown. */
+
+ /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
+ env->cr[0] |= CR0_PE_MASK;
+ env->eflags &= ~VM_MASK;
+
+ /* Disables all breakpoints in the host DR7 register. */
+
+ /* Checks the reloaded host state for consistency. */
+
+ /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
+ host's code segment or non-canonical (in the case of long mode), a
+ #GP fault is delivered inside the host. */
+
+ /* remove any pending exception */
+ env->exception_index = -1;
+ env->error_code = 0;
+ env->old_exception = -1;
+
+ cpu_loop_exit(env);
+}
+
+void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1)
+{
+ env = nenv;
+ helper_vmexit(exit_code, exit_info_1);
+}
+
+#endif
diff --git a/target-i386/translate.c b/target-i386/translate.c
index c1ede1a756..2b113333ac 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -38,18 +38,10 @@
#define PREFIX_ADR 0x10
#ifdef TARGET_X86_64
-#define X86_64_ONLY(x) x
-#define X86_64_DEF(...) __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
-/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
-#if 1
-#define BUGGY_64(x) NULL
-#endif
#else
-#define X86_64_ONLY(x) NULL
-#define X86_64_DEF(...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
@@ -271,11 +263,30 @@ static inline void gen_op_andl_A0_ffff(void)
#define REG_LH_OFFSET 4
#endif
+/* In instruction encodings for byte register accesses the
+ * register number usually indicates "low 8 bits of register N";
+ * however, there are some special cases where N in 4..7 indicates
+ * [AH, CH, DH, BH], i.e. "bits 15..8 of register N-4". Return
+ * true for this special case, false otherwise.
+ */
+static inline bool byte_reg_is_xH(int reg)
+{
+ if (reg < 4) {
+ return false;
+ }
+#ifdef TARGET_X86_64
+ if (reg >= 8 || x86_64_hregs) {
+ return false;
+ }
+#endif
+ return true;
+}
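+/* A few illustrative cases:
+ * byte_reg_is_xH(0) -> false (AL, the low byte of EAX/RAX)
+ * byte_reg_is_xH(4) -> true (AH in legacy encodings)
+ * byte_reg_is_xH(4) -> false if x86_64_hregs is set (SPL via a REX prefix)
+ * byte_reg_is_xH(12) -> false (R12B, always a low-byte register)
+ */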
+
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
switch(ot) {
case OT_BYTE:
- if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
+ if (!byte_reg_is_xH(reg)) {
tcg_gen_deposit_tl(cpu_regs[reg], cpu_regs[reg], t0, 0, 8);
} else {
tcg_gen_deposit_tl(cpu_regs[reg - 4], cpu_regs[reg - 4], t0, 8, 8);
@@ -330,19 +341,11 @@ static inline void gen_op_mov_reg_A0(int size, int reg)
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
- switch(ot) {
- case OT_BYTE:
- if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
- goto std_case;
- } else {
- tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
- tcg_gen_ext8u_tl(t0, t0);
- }
- break;
- default:
- std_case:
+ if (ot == OT_BYTE && byte_reg_is_xH(reg)) {
+ tcg_gen_shri_tl(t0, cpu_regs[reg - 4], 8);
+ tcg_gen_ext8u_tl(t0, t0);
+ } else {
tcg_gen_mov_tl(t0, cpu_regs[reg]);
- break;
}
}
@@ -456,12 +459,19 @@ static inline void gen_op_movl_A0_seg(int reg)
tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUX86State, segs[reg].base) + REG_L_OFFSET);
}
-static inline void gen_op_addl_A0_seg(int reg)
+static inline void gen_op_addl_A0_seg(DisasContext *s, int reg)
{
tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State, segs[reg].base));
- tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
- tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
+ if (CODE64(s)) {
+ tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
+ } else {
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
+ tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
+ }
+#else
+ tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#endif
}
@@ -617,7 +627,7 @@ static inline void gen_string_movl_A0_ESI(DisasContext *s)
override = R_DS;
gen_op_movl_A0_reg(R_ESI);
gen_op_andl_A0_ffff();
- gen_op_addl_A0_seg(override);
+ gen_op_addl_A0_seg(s, override);
}
}
@@ -638,7 +648,7 @@ static inline void gen_string_movl_A0_EDI(DisasContext *s)
} else {
gen_op_movl_A0_reg(R_EDI);
gen_op_andl_A0_ffff();
- gen_op_addl_A0_seg(R_ES);
+ gen_op_addl_A0_seg(s, R_ES);
}
}
@@ -2063,7 +2073,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
} else
#endif
{
- gen_op_addl_A0_seg(override);
+ gen_op_addl_A0_seg(s, override);
}
}
} else {
@@ -2130,7 +2140,7 @@ static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_
else
override = R_DS;
}
- gen_op_addl_A0_seg(override);
+ gen_op_addl_A0_seg(s, override);
}
}
@@ -2207,7 +2217,7 @@ static void gen_add_A0_ds_seg(DisasContext *s)
} else
#endif
{
- gen_op_addl_A0_seg(override);
+ gen_op_addl_A0_seg(s, override);
}
}
}
@@ -2460,12 +2470,12 @@ static void gen_push_T0(DisasContext *s)
if (s->ss32) {
if (s->addseg) {
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
} else {
gen_op_andl_A0_ffff();
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
if (s->ss32 && !s->addseg)
@@ -2500,11 +2510,11 @@ static void gen_push_T1(DisasContext *s)
gen_op_addl_A0_im(-4);
if (s->ss32) {
if (s->addseg) {
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
} else {
gen_op_andl_A0_ffff();
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
@@ -2528,10 +2538,10 @@ static void gen_pop_T0(DisasContext *s)
gen_op_movl_A0_reg(R_ESP);
if (s->ss32) {
if (s->addseg)
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
} else {
gen_op_andl_A0_ffff();
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
}
@@ -2556,7 +2566,7 @@ static void gen_stack_A0(DisasContext *s)
gen_op_andl_A0_ffff();
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
if (s->addseg)
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
}
/* NOTE: wrap around in 16 bit not fully handled */
@@ -2569,7 +2579,7 @@ static void gen_pusha(DisasContext *s)
gen_op_andl_A0_ffff();
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
if (s->addseg)
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
for(i = 0;i < 8; i++) {
gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
@@ -2588,7 +2598,7 @@ static void gen_popa(DisasContext *s)
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
if (s->addseg)
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
for(i = 0;i < 8; i++) {
/* ESP is not reloaded */
if (i != 3) {
@@ -2638,7 +2648,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level)
gen_op_andl_A0_ffff();
tcg_gen_mov_tl(cpu_T[1], cpu_A0);
if (s->addseg)
- gen_op_addl_A0_seg(R_SS);
+ gen_op_addl_A0_seg(s, R_SS);
/* push bp */
gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
gen_op_st_T0_A0(ot + s->mem_index);
@@ -2659,7 +2669,7 @@ static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
- gen_helper_raise_exception(tcg_const_i32(trapno));
+ gen_helper_raise_exception(cpu_env, tcg_const_i32(trapno));
s->is_jmp = DISAS_TB_JUMP;
}
@@ -2671,7 +2681,7 @@ static void gen_interrupt(DisasContext *s, int intno,
if (s->cc_op != CC_OP_DYNAMIC)
gen_op_set_cc_op(s->cc_op);
gen_jmp_im(cur_eip);
- gen_helper_raise_interrupt(tcg_const_i32(intno),
+ gen_helper_raise_interrupt(cpu_env, tcg_const_i32(intno),
tcg_const_i32(next_eip - cur_eip));
s->is_jmp = DISAS_TB_JUMP;
}
@@ -2786,6 +2796,14 @@ static inline void gen_op_movq_env_0(int d_offset)
tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
}
+typedef void (*SSEFunc_i_p)(TCGv_i32 val, TCGv_ptr reg);
+typedef void (*SSEFunc_l_p)(TCGv_i64 val, TCGv_ptr reg);
+typedef void (*SSEFunc_0_pi)(TCGv_ptr reg, TCGv_i32 val);
+typedef void (*SSEFunc_0_pl)(TCGv_ptr reg, TCGv_i64 val);
+typedef void (*SSEFunc_0_pp)(TCGv_ptr reg_a, TCGv_ptr reg_b);
+typedef void (*SSEFunc_0_ppi)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv_i32 val);
+typedef void (*SSEFunc_0_ppt)(TCGv_ptr reg_a, TCGv_ptr reg_b, TCGv val);
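+/* In these typedef names the first letter after SSEFunc_ denotes the
+ * destination operand ('0' = none, 'i' = TCGv_i32, 'l' = TCGv_i64) and
+ * the remaining letters the source operands ('p' = TCGv_ptr,
+ * 'i' = TCGv_i32, 'l' = TCGv_i64, 't' = TCGv). */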
+
#define SSE_SPECIAL ((void *)1)
#define SSE_DUMMY ((void *)2)
@@ -2793,7 +2811,7 @@ static inline void gen_op_movq_env_0(int d_offset)
#define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
-static void *sse_op_table1[256][4] = {
+static const SSEFunc_0_pp sse_op_table1[256][4] = {
/* 3DNow! extensions */
[0x0e] = { SSE_DUMMY }, /* femms */
[0x0f] = { SSE_DUMMY }, /* pf... */
@@ -2834,7 +2852,8 @@ static void *sse_op_table1[256][4] = {
[0x5f] = SSE_FOP(max),
[0xc2] = SSE_FOP(cmpeq),
- [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
+ [0xc6] = { (SSEFunc_0_pp)gen_helper_shufps,
+ (SSEFunc_0_pp)gen_helper_shufpd }, /* XXX: casts */
[0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
[0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
@@ -2856,10 +2875,10 @@ static void *sse_op_table1[256][4] = {
[0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
[0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
[0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, , movqdu */
- [0x70] = { gen_helper_pshufw_mmx,
- gen_helper_pshufd_xmm,
- gen_helper_pshufhw_xmm,
- gen_helper_pshuflw_xmm },
+ [0x70] = { (SSEFunc_0_pp)gen_helper_pshufw_mmx,
+ (SSEFunc_0_pp)gen_helper_pshufd_xmm,
+ (SSEFunc_0_pp)gen_helper_pshufhw_xmm,
+ (SSEFunc_0_pp)gen_helper_pshuflw_xmm }, /* XXX: casts */
[0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
[0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
[0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
@@ -2914,7 +2933,8 @@ static void *sse_op_table1[256][4] = {
[0xf4] = MMX_OP2(pmuludq),
[0xf5] = MMX_OP2(pmaddwd),
[0xf6] = MMX_OP2(psadbw),
- [0xf7] = MMX_OP2(maskmov),
+ [0xf7] = { (SSEFunc_0_pp)gen_helper_maskmov_mmx,
+ (SSEFunc_0_pp)gen_helper_maskmov_xmm }, /* XXX: casts */
[0xf8] = MMX_OP2(psubb),
[0xf9] = MMX_OP2(psubw),
[0xfa] = MMX_OP2(psubl),
@@ -2924,7 +2944,7 @@ static void *sse_op_table1[256][4] = {
[0xfe] = MMX_OP2(paddl),
};
-static void *sse_op_table2[3 * 8][2] = {
+static const SSEFunc_0_pp sse_op_table2[3 * 8][2] = {
[0 + 2] = MMX_OP2(psrlw),
[0 + 4] = MMX_OP2(psraw),
[0 + 6] = MMX_OP2(psllw),
@@ -2937,24 +2957,35 @@ static void *sse_op_table2[3 * 8][2] = {
[16 + 7] = { NULL, gen_helper_pslldq_xmm },
};
-static void *sse_op_table3[4 * 3] = {
+static const SSEFunc_0_pi sse_op_table3ai[] = {
gen_helper_cvtsi2ss,
- gen_helper_cvtsi2sd,
- X86_64_ONLY(gen_helper_cvtsq2ss),
- X86_64_ONLY(gen_helper_cvtsq2sd),
+ gen_helper_cvtsi2sd
+};
+#ifdef TARGET_X86_64
+static const SSEFunc_0_pl sse_op_table3aq[] = {
+ gen_helper_cvtsq2ss,
+ gen_helper_cvtsq2sd
+};
+#endif
+
+static const SSEFunc_i_p sse_op_table3bi[] = {
gen_helper_cvttss2si,
+ gen_helper_cvtss2si,
gen_helper_cvttsd2si,
- X86_64_ONLY(gen_helper_cvttss2sq),
- X86_64_ONLY(gen_helper_cvttsd2sq),
+ gen_helper_cvtsd2si
+};
- gen_helper_cvtss2si,
- gen_helper_cvtsd2si,
- X86_64_ONLY(gen_helper_cvtss2sq),
- X86_64_ONLY(gen_helper_cvtsd2sq),
+#ifdef TARGET_X86_64
+static const SSEFunc_l_p sse_op_table3bq[] = {
+ gen_helper_cvttss2sq,
+ gen_helper_cvtss2sq,
+ gen_helper_cvttsd2sq,
+ gen_helper_cvtsd2sq
};
+#endif
-static void *sse_op_table4[8][4] = {
+static const SSEFunc_0_pp sse_op_table4[8][4] = {
SSE_FOP(cmpeq),
SSE_FOP(cmplt),
SSE_FOP(cmple),
@@ -2965,7 +2996,7 @@ static void *sse_op_table4[8][4] = {
SSE_FOP(cmpord),
};
-static void *sse_op_table5[256] = {
+static const SSEFunc_0_pp sse_op_table5[256] = {
[0x0c] = gen_helper_pi2fw,
[0x0d] = gen_helper_pi2fd,
[0x1c] = gen_helper_pf2iw,
@@ -2992,14 +3023,22 @@ static void *sse_op_table5[256] = {
[0xbf] = gen_helper_pavgb_mmx /* pavgusb */
};
-struct sse_op_helper_s {
- void *op[2]; uint32_t ext_mask;
+struct SSEOpHelper_pp {
+ SSEFunc_0_pp op[2];
+ uint32_t ext_mask;
+};
+
+struct SSEOpHelper_ppi {
+ SSEFunc_0_ppi op[2];
+ uint32_t ext_mask;
};
+
#define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
#define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
#define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
#define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
-static struct sse_op_helper_s sse_op_table6[256] = {
+
+static const struct SSEOpHelper_pp sse_op_table6[256] = {
[0x00] = SSSE3_OP(pshufb),
[0x01] = SSSE3_OP(phaddw),
[0x02] = SSSE3_OP(phaddd),
@@ -3048,7 +3087,7 @@ static struct sse_op_helper_s sse_op_table6[256] = {
[0x41] = SSE41_OP(phminposuw),
};
-static struct sse_op_helper_s sse_op_table7[256] = {
+static const struct SSEOpHelper_ppi sse_op_table7[256] = {
[0x08] = SSE41_OP(roundps),
[0x09] = SSE41_OP(roundpd),
[0x0a] = SSE41_OP(roundss),
@@ -3077,7 +3116,9 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
{
int b1, op1_offset, op2_offset, is_xmm, val, ot;
int modrm, mod, rm, reg, reg_addr, offset_addr;
- void *sse_op2;
+ SSEFunc_0_pp sse_fn_pp;
+ SSEFunc_0_ppi sse_fn_ppi;
+ SSEFunc_0_ppt sse_fn_ppt;
b &= 0xff;
if (s->prefix & PREFIX_DATA)
@@ -3088,9 +3129,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
b1 = 3;
else
b1 = 0;
- sse_op2 = sse_op_table1[b][b1];
- if (!sse_op2)
+ sse_fn_pp = sse_op_table1[b][b1];
+ if (!sse_fn_pp) {
goto illegal_op;
+ }
if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
is_xmm = 1;
} else {
@@ -3137,7 +3179,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (is_xmm)
reg |= rex_r;
mod = (modrm >> 6) & 3;
- if (sse_op2 == SSE_SPECIAL) {
+ if (sse_fn_pp == SSE_SPECIAL) {
b |= (b1 << 8);
switch(b) {
case 0x0e7: /* movntq */
@@ -3474,9 +3516,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
op1_offset = offsetof(CPUX86State,mmx_t0);
}
- sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
- if (!sse_op2)
+ sse_fn_pp = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
+ if (!sse_fn_pp) {
goto illegal_op;
+ }
if (is_xmm) {
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
@@ -3486,7 +3529,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
}
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
+ sse_fn_pp(cpu_ptr0, cpu_ptr1);
break;
case 0x050: /* movmskps */
rm = (modrm & 7) | REX_B(s);
@@ -3534,12 +3577,17 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
- sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
if (ot == OT_LONG) {
+ SSEFunc_0_pi sse_fn_pi = sse_op_table3ai[(b >> 8) & 1];
tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
- ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
+ sse_fn_pi(cpu_ptr0, cpu_tmp2_i32);
} else {
- ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
+#ifdef TARGET_X86_64
+ SSEFunc_0_pl sse_fn_pl = sse_op_table3aq[(b >> 8) & 1];
+ sse_fn_pl(cpu_ptr0, cpu_T[0]);
+#else
+ goto illegal_op;
+#endif
}
break;
case 0x02c: /* cvttps2pi */
@@ -3591,14 +3639,20 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
rm = (modrm & 7) | REX_B(s);
op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
}
- sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
- (b & 1) * 4];
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
if (ot == OT_LONG) {
- ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
+ SSEFunc_i_p sse_fn_i_p =
+ sse_op_table3bi[((b >> 7) & 2) | (b & 1)];
+ sse_fn_i_p(cpu_tmp2_i32, cpu_ptr0);
tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
} else {
- ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
+#ifdef TARGET_X86_64
+ SSEFunc_l_p sse_fn_l_p =
+ sse_op_table3bq[((b >> 7) & 2) | (b & 1)];
+ sse_fn_l_p(cpu_T[0], cpu_ptr0);
+#else
+ goto illegal_op;
+#endif
}
gen_op_mov_reg_T0(ot, reg);
break;
@@ -3691,9 +3745,10 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
goto illegal_op;
}
- sse_op2 = sse_op_table6[b].op[b1];
- if (!sse_op2)
+ sse_fn_pp = sse_op_table6[b].op[b1];
+ if (!sse_fn_pp) {
goto illegal_op;
+ }
if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
goto illegal_op;
@@ -3742,12 +3797,13 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
gen_ldq_env_A0(s->mem_index, op2_offset);
}
}
- if (sse_op2 == SSE_SPECIAL)
+ if (sse_fn_pp == SSE_SPECIAL) {
goto illegal_op;
+ }
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
+ sse_fn_pp(cpu_ptr0, cpu_ptr1);
if (b == 0x17)
s->cc_op = CC_OP_EFLAGS;
@@ -3793,13 +3849,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
goto illegal_op;
}
- sse_op2 = sse_op_table7[b].op[b1];
- if (!sse_op2)
+ sse_fn_ppi = sse_op_table7[b].op[b1];
+ if (!sse_fn_ppi) {
goto illegal_op;
+ }
if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
goto illegal_op;
- if (sse_op2 == SSE_SPECIAL) {
+ if (sse_fn_ppi == SSE_SPECIAL) {
ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
rm = (modrm & 7) | REX_B(s);
if (mod != 3)
@@ -3960,7 +4017,7 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+ sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
default:
goto illegal_op;
@@ -4015,29 +4072,33 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
goto illegal_op;
val = ldub_code(s->pc++);
- sse_op2 = sse_op_table5[val];
- if (!sse_op2)
+ sse_fn_pp = sse_op_table5[val];
+ if (!sse_fn_pp) {
goto illegal_op;
+ }
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
+ sse_fn_pp(cpu_ptr0, cpu_ptr1);
break;
case 0x70: /* pshufx insn */
case 0xc6: /* pshufx insn */
val = ldub_code(s->pc++);
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
+ /* XXX: introduce a new table? */
+ sse_fn_ppi = (SSEFunc_0_ppi)sse_fn_pp;
+ sse_fn_ppi(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
break;
case 0xc2:
/* compare insns */
val = ldub_code(s->pc++);
if (val >= 8)
goto illegal_op;
- sse_op2 = sse_op_table4[val][b1];
+ sse_fn_pp = sse_op_table4[val][b1];
+
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
+ sse_fn_pp(cpu_ptr0, cpu_ptr1);
break;
case 0xf7:
/* maskmov : we must prepare A0 */
@@ -4057,12 +4118,14 @@ static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
+ /* XXX: introduce a new table? */
+ sse_fn_ppt = (SSEFunc_0_ppt)sse_fn_pp;
+ sse_fn_ppt(cpu_ptr0, cpu_ptr1, cpu_A0);
break;
default:
tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
- ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
+ sse_fn_pp(cpu_ptr0, cpu_ptr1);
break;
}
if (b == 0x2e || b == 0x2f) {
diff --git a/target-mips/translate.c b/target-mips/translate.c
index 4e15ee36b8..47daf8574f 100644
--- a/target-mips/translate.c
+++ b/target-mips/translate.c
@@ -12763,6 +12763,7 @@ void cpu_state_reset(CPUMIPSState *env)
env->CP0_SRSConf3 = env->cpu_model->CP0_SRSConf3;
env->CP0_SRSConf4_rw_bitmask = env->cpu_model->CP0_SRSConf4_rw_bitmask;
env->CP0_SRSConf4 = env->cpu_model->CP0_SRSConf4;
+ env->active_fpu.fcr0 = env->cpu_model->CP1_fcr0;
env->insn_flags = env->cpu_model->insn_flags;
#if defined(CONFIG_USER_ONLY)
diff --git a/target-openrisc/Makefile.objs b/target-openrisc/Makefile.objs
new file mode 100644
index 0000000000..44dc5399df
--- /dev/null
+++ b/target-openrisc/Makefile.objs
@@ -0,0 +1,4 @@
+obj-$(CONFIG_SOFTMMU) += machine.o
+obj-y += cpu.o exception.o interrupt.o mmu.o translate.o
+obj-y += exception_helper.o fpu_helper.o int_helper.o \
+ interrupt_helper.o mmu_helper.o sys_helper.o
diff --git a/target-openrisc/cpu.c b/target-openrisc/cpu.c
new file mode 100644
index 0000000000..ba35b17581
--- /dev/null
+++ b/target-openrisc/cpu.c
@@ -0,0 +1,220 @@
+/*
+ * QEMU OpenRISC CPU
+ *
+ * Copyright (c) 2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+
+/* CPUClass::reset() */
+static void openrisc_cpu_reset(CPUState *s)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(s);
+ OpenRISCCPUClass *occ = OPENRISC_CPU_GET_CLASS(cpu);
+
+ if (qemu_loglevel_mask(CPU_LOG_RESET)) {
+ qemu_log("CPU Reset (CPU %d)\n", cpu->env.cpu_index);
+ log_cpu_state(&cpu->env, 0);
+ }
+
+ occ->parent_reset(s);
+
+ memset(&cpu->env, 0, offsetof(CPUOpenRISCState, breakpoints));
+
+ tlb_flush(&cpu->env, 1);
+ /*tb_flush(&cpu->env); FIXME: Do we need it? */
+
+ cpu->env.pc = 0x100;
+ cpu->env.sr = SR_FO | SR_SM;
+ cpu->env.exception_index = -1;
+
+ cpu->env.upr = UPR_UP | UPR_DMP | UPR_IMP | UPR_PICP | UPR_TTP;
+ cpu->env.cpucfgr = CPUCFGR_OB32S | CPUCFGR_OF32S;
+ cpu->env.dmmucfgr = (DMMUCFGR_NTW & (0 << 2)) | (DMMUCFGR_NTS & (6 << 2));
+ cpu->env.immucfgr = (IMMUCFGR_NTW & (0 << 2)) | (IMMUCFGR_NTS & (6 << 2));
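+ /* NTS = 6 presumably encodes 2^6 = 64 TLB sets, matching the
+ * DTLB_SIZE/ITLB_SIZE definitions in cpu.h; NTW is left at its
+ * minimum, matching the single-way DTLB_WAYS/ITLB_WAYS. */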
+
+#ifndef CONFIG_USER_ONLY
+ cpu->env.picmr = 0x00000000;
+ cpu->env.picsr = 0x00000000;
+
+ cpu->env.ttmr = 0x00000000;
+ cpu->env.ttcr = 0x00000000;
+#endif
+}
+
+static inline void set_feature(OpenRISCCPU *cpu, int feature)
+{
+ cpu->feature |= feature;
+ cpu->env.cpucfgr = cpu->feature;
+}
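+/* The OPENRISC_FEATURE_* bits share the CPUCFGR_* bit encoding, so the
+ * accumulated feature word can be copied straight into cpucfgr above. */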
+
+void openrisc_cpu_realize(Object *obj, Error **errp)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ qemu_init_vcpu(&cpu->env);
+ cpu_reset(CPU(cpu));
+}
+
+static void openrisc_cpu_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+ static int inited;
+
+ cpu_exec_init(&cpu->env);
+
+#ifndef CONFIG_USER_ONLY
+ cpu_openrisc_mmu_init(cpu);
+#endif
+
+ if (tcg_enabled() && !inited) {
+ inited = 1;
+ openrisc_translate_init();
+ }
+}
+
+/* CPU models */
+static void or1200_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ set_feature(cpu, OPENRISC_FEATURE_OB32S);
+ set_feature(cpu, OPENRISC_FEATURE_OF32S);
+}
+
+static void openrisc_any_initfn(Object *obj)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(obj);
+
+ set_feature(cpu, OPENRISC_FEATURE_OB32S);
+}
+
+typedef struct OpenRISCCPUInfo {
+ const char *name;
+ void (*initfn)(Object *obj);
+} OpenRISCCPUInfo;
+
+static const OpenRISCCPUInfo openrisc_cpus[] = {
+ { .name = "or1200", .initfn = or1200_initfn },
+ { .name = "any", .initfn = openrisc_any_initfn },
+};
+
+static void openrisc_cpu_class_init(ObjectClass *oc, void *data)
+{
+ OpenRISCCPUClass *occ = OPENRISC_CPU_CLASS(oc);
+ CPUClass *cc = CPU_CLASS(occ);
+
+ occ->parent_reset = cc->reset;
+ cc->reset = openrisc_cpu_reset;
+}
+
+static void cpu_register(const OpenRISCCPUInfo *info)
+{
+ TypeInfo type_info = {
+ .name = info->name,
+ .parent = TYPE_OPENRISC_CPU,
+ .instance_size = sizeof(OpenRISCCPU),
+ .instance_init = info->initfn,
+ .class_size = sizeof(OpenRISCCPUClass),
+ };
+
+ type_register_static(&type_info);
+}
+
+static const TypeInfo openrisc_cpu_type_info = {
+ .name = TYPE_OPENRISC_CPU,
+ .parent = TYPE_CPU,
+ .instance_size = sizeof(OpenRISCCPU),
+ .instance_init = openrisc_cpu_initfn,
+ .abstract = false,
+ .class_size = sizeof(OpenRISCCPUClass),
+ .class_init = openrisc_cpu_class_init,
+};
+
+static void openrisc_cpu_register_types(void)
+{
+ int i;
+
+ type_register_static(&openrisc_cpu_type_info);
+ for (i = 0; i < ARRAY_SIZE(openrisc_cpus); i++) {
+ cpu_register(&openrisc_cpus[i]);
+ }
+}
+
+OpenRISCCPU *cpu_openrisc_init(const char *cpu_model)
+{
+ OpenRISCCPU *cpu;
+
+ if (!object_class_by_name(cpu_model)) {
+ return NULL;
+ }
+ cpu = OPENRISC_CPU(object_new(cpu_model));
+ cpu->env.cpu_model_str = cpu_model;
+
+ openrisc_cpu_realize(OBJECT(cpu), NULL);
+
+ return cpu;
+}
+
+typedef struct OpenRISCCPUList {
+ fprintf_function cpu_fprintf;
+ FILE *file;
+} OpenRISCCPUList;
+
+/* Sort alphabetically by type name, except for "any". */
+static gint openrisc_cpu_list_compare(gconstpointer a, gconstpointer b)
+{
+ ObjectClass *class_a = (ObjectClass *)a;
+ ObjectClass *class_b = (ObjectClass *)b;
+ const char *name_a, *name_b;
+
+ name_a = object_class_get_name(class_a);
+ name_b = object_class_get_name(class_b);
+ if (strcmp(name_a, "any") == 0) {
+ return 1;
+ } else if (strcmp(name_b, "any") == 0) {
+ return -1;
+ } else {
+ return strcmp(name_a, name_b);
+ }
+}
+
+static void openrisc_cpu_list_entry(gpointer data, gpointer user_data)
+{
+ ObjectClass *oc = data;
+ OpenRISCCPUList *s = user_data;
+
+ (*s->cpu_fprintf)(s->file, " %s\n",
+ object_class_get_name(oc));
+}
+
+void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf)
+{
+ OpenRISCCPUList s = {
+ .file = f,
+ .cpu_fprintf = cpu_fprintf,
+ };
+ GSList *list;
+
+ list = object_class_get_list(TYPE_OPENRISC_CPU, false);
+ list = g_slist_sort(list, openrisc_cpu_list_compare);
+ (*cpu_fprintf)(f, "Available CPUs:\n");
+ g_slist_foreach(list, openrisc_cpu_list_entry, &s);
+ g_slist_free(list);
+}
+
+type_init(openrisc_cpu_register_types)
diff --git a/target-openrisc/cpu.h b/target-openrisc/cpu.h
new file mode 100644
index 0000000000..de21a877d3
--- /dev/null
+++ b/target-openrisc/cpu.h
@@ -0,0 +1,458 @@
+/*
+ * OpenRISC virtual CPU header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef CPU_OPENRISC_H
+#define CPU_OPENRISC_H
+
+#define TARGET_LONG_BITS 32
+#define ELF_MACHINE EM_OPENRISC
+
+#define CPUArchState struct CPUOpenRISCState
+
+/* The cpu_openrisc_map_address_* hooks in CPUOpenRISCTLBContext need this declaration. */
+struct OpenRISCCPU;
+
+#include "config.h"
+#include "qemu-common.h"
+#include "cpu-defs.h"
+#include "softfloat.h"
+#include "qemu/cpu.h"
+#include "error.h"
+
+#define TYPE_OPENRISC_CPU "or32-cpu"
+
+#define OPENRISC_CPU_CLASS(klass) \
+ OBJECT_CLASS_CHECK(OpenRISCCPUClass, (klass), TYPE_OPENRISC_CPU)
+#define OPENRISC_CPU(obj) \
+ OBJECT_CHECK(OpenRISCCPU, (obj), TYPE_OPENRISC_CPU)
+#define OPENRISC_CPU_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(OpenRISCCPUClass, (obj), TYPE_OPENRISC_CPU)
+
+/**
+ * OpenRISCCPUClass:
+ * @parent_reset: The parent class' reset handler.
+ *
+ * An OpenRISC CPU model.
+ */
+typedef struct OpenRISCCPUClass {
+ /*< private >*/
+ CPUClass parent_class;
+ /*< public >*/
+
+ void (*parent_reset)(CPUState *cpu);
+} OpenRISCCPUClass;
+
+#define NB_MMU_MODES 3
+
+enum {
+ MMU_NOMMU_IDX = 0,
+ MMU_SUPERVISOR_IDX = 1,
+ MMU_USER_IDX = 2,
+};
+
+#define TARGET_PAGE_BITS 13
+
+#define TARGET_PHYS_ADDR_SPACE_BITS 32
+#define TARGET_VIRT_ADDR_SPACE_BITS 32
+
+#define SET_FP_CAUSE(reg, v) do {\
+ (reg) = ((reg) & ~(0x3f << 12)) | \
+ ((v & 0x3f) << 12);\
+ } while (0)
+#define GET_FP_ENABLE(reg) (((reg) >> 7) & 0x1f)
+#define UPDATE_FP_FLAGS(reg, v) do {\
+ (reg) |= ((v & 0x1f) << 2);\
+ } while (0)
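+/* As written, these macros put the cause field in FPCSR bits 12..17,
+ * read the enable mask from bits 7..11 and OR the supplied exception
+ * flags into bits 2..6. */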
+
+/* Version Register */
+#define SPR_VR 0xFFFF003F
+
+/* Internal flags, delay slot flag */
+#define D_FLAG 1
+
+/* Interrupt */
+#define NR_IRQS 32
+
+/* Registers */
+enum {
+ R0 = 0, R1, R2, R3, R4, R5, R6, R7, R8, R9, R10,
+ R11, R12, R13, R14, R15, R16, R17, R18, R19, R20,
+ R21, R22, R23, R24, R25, R26, R27, R28, R29, R30,
+ R31
+};
+
+/* Register aliases */
+enum {
+ R_ZERO = R0,
+ R_SP = R1,
+ R_FP = R2,
+ R_LR = R9,
+ R_RV = R11,
+ R_RVH = R12
+};
+
+/* Unit presence register */
+enum {
+ UPR_UP = (1 << 0),
+ UPR_DCP = (1 << 1),
+ UPR_ICP = (1 << 2),
+ UPR_DMP = (1 << 3),
+ UPR_IMP = (1 << 4),
+ UPR_MP = (1 << 5),
+ UPR_DUP = (1 << 6),
+ UPR_PCUR = (1 << 7),
+ UPR_PMP = (1 << 8),
+ UPR_PICP = (1 << 9),
+ UPR_TTP = (1 << 10),
+ UPR_CUP = (255 << 24),
+};
+
+/* CPU configuration register */
+enum {
+ CPUCFGR_NSGF = (15 << 0),
+ CPUCFGR_CGF = (1 << 4),
+ CPUCFGR_OB32S = (1 << 5),
+ CPUCFGR_OB64S = (1 << 6),
+ CPUCFGR_OF32S = (1 << 7),
+ CPUCFGR_OF64S = (1 << 8),
+ CPUCFGR_OV64S = (1 << 9),
+};
+
+/* DMMU configuration register */
+enum {
+ DMMUCFGR_NTW = (3 << 0),
+ DMMUCFGR_NTS = (7 << 2),
+ DMMUCFGR_NAE = (7 << 5),
+ DMMUCFGR_CRI = (1 << 8),
+ DMMUCFGR_PRI = (1 << 9),
+ DMMUCFGR_TEIRI = (1 << 10),
+ DMMUCFGR_HTR = (1 << 11),
+};
+
+/* IMMU configuration register */
+enum {
+ IMMUCFGR_NTW = (3 << 0),
+ IMMUCFGR_NTS = (7 << 2),
+ IMMUCFGR_NAE = (7 << 5),
+ IMMUCFGR_CRI = (1 << 8),
+ IMMUCFGR_PRI = (1 << 9),
+ IMMUCFGR_TEIRI = (1 << 10),
+ IMMUCFGR_HTR = (1 << 11),
+};
+
+/* Floating point control/status register */
+enum {
+ FPCSR_FPEE = 1,
+ FPCSR_RM = (3 << 1),
+ FPCSR_OVF = (1 << 3),
+ FPCSR_UNF = (1 << 4),
+ FPCSR_SNF = (1 << 5),
+ FPCSR_QNF = (1 << 6),
+ FPCSR_ZF = (1 << 7),
+ FPCSR_IXF = (1 << 8),
+ FPCSR_IVF = (1 << 9),
+ FPCSR_INF = (1 << 10),
+ FPCSR_DZF = (1 << 11),
+};
+
+/* Exception indices */
+enum {
+ EXCP_RESET = 0x1,
+ EXCP_BUSERR = 0x2,
+ EXCP_DPF = 0x3,
+ EXCP_IPF = 0x4,
+ EXCP_TICK = 0x5,
+ EXCP_ALIGN = 0x6,
+ EXCP_ILLEGAL = 0x7,
+ EXCP_INT = 0x8,
+ EXCP_DTLBMISS = 0x9,
+ EXCP_ITLBMISS = 0xa,
+ EXCP_RANGE = 0xb,
+ EXCP_SYSCALL = 0xc,
+ EXCP_FPE = 0xd,
+ EXCP_TRAP = 0xe,
+ EXCP_NR,
+};
+
+/* Supervisor register */
+enum {
+ SR_SM = (1 << 0),
+ SR_TEE = (1 << 1),
+ SR_IEE = (1 << 2),
+ SR_DCE = (1 << 3),
+ SR_ICE = (1 << 4),
+ SR_DME = (1 << 5),
+ SR_IME = (1 << 6),
+ SR_LEE = (1 << 7),
+ SR_CE = (1 << 8),
+ SR_F = (1 << 9),
+ SR_CY = (1 << 10),
+ SR_OV = (1 << 11),
+ SR_OVE = (1 << 12),
+ SR_DSX = (1 << 13),
+ SR_EPH = (1 << 14),
+ SR_FO = (1 << 15),
+ SR_SUMRA = (1 << 16),
+ SR_SCE = (1 << 17),
+};
+
+/* OpenRISC Hardware Capabilities */
+enum {
+ OPENRISC_FEATURE_NSGF = (15 << 0),
+ OPENRISC_FEATURE_CGF = (1 << 4),
+ OPENRISC_FEATURE_OB32S = (1 << 5),
+ OPENRISC_FEATURE_OB64S = (1 << 6),
+ OPENRISC_FEATURE_OF32S = (1 << 7),
+ OPENRISC_FEATURE_OF64S = (1 << 8),
+ OPENRISC_FEATURE_OV64S = (1 << 9),
+};
+
+/* Tick Timer Mode Register */
+enum {
+ TTMR_TP = (0xfffffff),
+ TTMR_IP = (1 << 28),
+ TTMR_IE = (1 << 29),
+ TTMR_M = (3 << 30),
+};
+
+/* Timer Mode */
+enum {
+ TIMER_NONE = (0 << 30),
+ TIMER_INTR = (1 << 30),
+ TIMER_SHOT = (2 << 30),
+ TIMER_CONT = (3 << 30),
+};
+
+/* TLB size */
+enum {
+ DTLB_WAYS = 1,
+ DTLB_SIZE = 64,
+ DTLB_MASK = (DTLB_SIZE-1),
+ ITLB_WAYS = 1,
+ ITLB_SIZE = 64,
+ ITLB_MASK = (ITLB_SIZE-1),
+};
+
+/* TLB prot */
+enum {
+ URE = (1 << 6),
+ UWE = (1 << 7),
+ SRE = (1 << 8),
+ SWE = (1 << 9),
+
+ SXE = (1 << 6),
+ UXE = (1 << 7),
+};
+
+/* TLB lookup result codes */
+enum {
+ TLBRET_INVALID = -3,
+ TLBRET_NOMATCH = -2,
+ TLBRET_BADADDR = -1,
+ TLBRET_MATCH = 0
+};
+
+typedef struct OpenRISCTLBEntry {
+ uint32_t mr;
+ uint32_t tr;
+} OpenRISCTLBEntry;
+
+#ifndef CONFIG_USER_ONLY
+typedef struct CPUOpenRISCTLBContext {
+ OpenRISCTLBEntry itlb[ITLB_WAYS][ITLB_SIZE];
+ OpenRISCTLBEntry dtlb[DTLB_WAYS][DTLB_SIZE];
+
+ int (*cpu_openrisc_map_address_code)(struct OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot,
+ target_ulong address, int rw);
+ int (*cpu_openrisc_map_address_data)(struct OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot,
+ target_ulong address, int rw);
+} CPUOpenRISCTLBContext;
+#endif
+
+typedef struct CPUOpenRISCState {
+ target_ulong gpr[32]; /* General registers */
+ target_ulong pc; /* Program counter */
+ target_ulong npc; /* Next PC */
+ target_ulong ppc; /* Prev PC */
+ target_ulong jmp_pc; /* Jump PC */
+
+ target_ulong machi; /* Multiply register MACHI */
+ target_ulong maclo; /* Multiply register MACLO */
+
+ target_ulong fpmaddhi; /* Multiply and add float register FPMADDHI */
+ target_ulong fpmaddlo; /* Multiply and add float register FPMADDLO */
+
+ target_ulong epcr; /* Exception PC register */
+ target_ulong eear; /* Exception EA register */
+
+ uint32_t sr; /* Supervisor register */
+ uint32_t vr; /* Version register */
+ uint32_t upr; /* Unit presence register */
+ uint32_t cpucfgr; /* CPU configuration register */
+ uint32_t dmmucfgr; /* DMMU configuration register */
+ uint32_t immucfgr; /* IMMU configuration register */
+ uint32_t esr; /* Exception supervisor register */
+ uint32_t fpcsr; /* Floating point control/status register */
+ float_status fp_status;
+
+ uint32_t flags; /* cpu_flags; so far only used for exceptions
+ in a delay slot. */
+ uint32_t btaken; /* the SR_F bit */
+
+ CPU_COMMON
+
+#ifndef CONFIG_USER_ONLY
+ CPUOpenRISCTLBContext *tlb;
+
+ struct QEMUTimer *timer;
+ uint32_t ttmr; /* Timer tick mode register */
+ uint32_t ttcr; /* Timer tick count register */
+
+ uint32_t picmr; /* Interrupt mask register */
+ uint32_t picsr; /* Interrupt control register */
+#endif
+ void *irq[32]; /* IRQ inputs */
+} CPUOpenRISCState;
+
+/**
+ * OpenRISCCPU:
+ * @env: #CPUOpenRISCState
+ *
+ * An OpenRISC CPU.
+ */
+typedef struct OpenRISCCPU {
+ /*< private >*/
+ CPUState parent_obj;
+ /*< public >*/
+
+ CPUOpenRISCState env;
+
+ uint32_t feature; /* CPU Capabilities */
+} OpenRISCCPU;
+
+static inline OpenRISCCPU *openrisc_env_get_cpu(CPUOpenRISCState *env)
+{
+ return OPENRISC_CPU(container_of(env, OpenRISCCPU, env));
+}
+
+#define ENV_GET_CPU(e) CPU(openrisc_env_get_cpu(e))
+
+OpenRISCCPU *cpu_openrisc_init(const char *cpu_model);
+void openrisc_cpu_realize(Object *obj, Error **errp);
+
+void cpu_openrisc_list(FILE *f, fprintf_function cpu_fprintf);
+int cpu_openrisc_exec(CPUOpenRISCState *s);
+void do_interrupt(CPUOpenRISCState *env);
+void openrisc_translate_init(void);
+int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
+ target_ulong address,
+ int rw, int mmu_idx);
+int cpu_openrisc_signal_handler(int host_signum, void *pinfo, void *puc);
+
+#define cpu_list cpu_openrisc_list
+#define cpu_exec cpu_openrisc_exec
+#define cpu_gen_code cpu_openrisc_gen_code
+#define cpu_handle_mmu_fault cpu_openrisc_handle_mmu_fault
+#define cpu_signal_handler cpu_openrisc_signal_handler
+
+#ifndef CONFIG_USER_ONLY
+/* hw/openrisc_pic.c */
+void cpu_openrisc_pic_init(OpenRISCCPU *cpu);
+
+/* hw/openrisc_timer.c */
+void cpu_openrisc_clock_init(OpenRISCCPU *cpu);
+void cpu_openrisc_count_update(OpenRISCCPU *cpu);
+void cpu_openrisc_count_start(OpenRISCCPU *cpu);
+void cpu_openrisc_count_stop(OpenRISCCPU *cpu);
+
+void cpu_openrisc_mmu_init(OpenRISCCPU *cpu);
+int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw);
+int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw);
+int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw);
+#endif
+
+static inline CPUOpenRISCState *cpu_init(const char *cpu_model)
+{
+ OpenRISCCPU *cpu = cpu_openrisc_init(cpu_model);
+ if (cpu) {
+ return &cpu->env;
+ }
+ return NULL;
+}
+
+#if defined(CONFIG_USER_ONLY)
+static inline void cpu_clone_regs(CPUOpenRISCState *env, target_ulong newsp)
+{
+ if (newsp) {
+ env->gpr[1] = newsp;
+ }
+ env->gpr[2] = 0;
+}
+#endif
+
+#include "cpu-all.h"
+
+static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
+ target_ulong *pc,
+ target_ulong *cs_base, int *flags)
+{
+ *pc = env->pc;
+ *cs_base = 0;
+ /* D_FLAG -- branch instruction exception */
+ *flags = (env->flags & D_FLAG);
+}
+
+static inline int cpu_mmu_index(CPUOpenRISCState *env)
+{
+ if (!(env->sr & SR_IME)) {
+ return MMU_NOMMU_IDX;
+ }
+ return (env->sr & SR_SM) == 0 ? MMU_USER_IDX : MMU_SUPERVISOR_IDX;
+}
+
+#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_INT_0
+static inline bool cpu_has_work(CPUOpenRISCState *env)
+{
+ return env->interrupt_request & (CPU_INTERRUPT_HARD |
+ CPU_INTERRUPT_TIMER);
+}
+
+#include "exec-all.h"
+
+static inline target_ulong cpu_get_pc(CPUOpenRISCState *env)
+{
+ return env->pc;
+}
+
+static inline void cpu_pc_from_tb(CPUOpenRISCState *env, TranslationBlock *tb)
+{
+ env->pc = tb->pc;
+}
+
+#endif /* CPU_OPENRISC_H */
diff --git a/target-openrisc/exception.c b/target-openrisc/exception.c
new file mode 100644
index 0000000000..58e53c6c98
--- /dev/null
+++ b/target-openrisc/exception.c
@@ -0,0 +1,27 @@
+/*
+ * OpenRISC exception.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exception.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp)
+{
+ cpu->env.exception_index = excp;
+ cpu_loop_exit(&cpu->env);
+}
diff --git a/target-openrisc/exception.h b/target-openrisc/exception.h
new file mode 100644
index 0000000000..4b64430df1
--- /dev/null
+++ b/target-openrisc/exception.h
@@ -0,0 +1,28 @@
+/*
+ * OpenRISC exception header.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef QEMU_OPENRISC_EXCP_H
+#define QEMU_OPENRISC_EXCP_H
+
+#include "cpu.h"
+#include "qemu-common.h"
+
+void QEMU_NORETURN raise_exception(OpenRISCCPU *cpu, uint32_t excp);
+
+#endif /* QEMU_OPENRISC_EXCP_H */
diff --git a/target-openrisc/exception_helper.c b/target-openrisc/exception_helper.c
new file mode 100644
index 0000000000..dab4148151
--- /dev/null
+++ b/target-openrisc/exception_helper.c
@@ -0,0 +1,29 @@
+/*
+ * OpenRISC exception helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+#include "exception.h"
+
+void HELPER(exception)(CPUOpenRISCState *env, uint32_t excp)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ raise_exception(cpu, excp);
+}
diff --git a/target-openrisc/fpu_helper.c b/target-openrisc/fpu_helper.c
new file mode 100644
index 0000000000..b184d5ef73
--- /dev/null
+++ b/target-openrisc/fpu_helper.c
@@ -0,0 +1,300 @@
+/*
+ * OpenRISC float helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+#include "exception.h"
+
+static inline uint32_t ieee_ex_to_openrisc(OpenRISCCPU *cpu, int fexcp)
+{
+ int ret = 0;
+ if (fexcp) {
+ if (fexcp & float_flag_invalid) {
+ cpu->env.fpcsr |= FPCSR_IVF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_overflow) {
+ cpu->env.fpcsr |= FPCSR_OVF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_underflow) {
+ cpu->env.fpcsr |= FPCSR_UNF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_divbyzero) {
+ cpu->env.fpcsr |= FPCSR_DZF;
+ ret = 1;
+ }
+ if (fexcp & float_flag_inexact) {
+ cpu->env.fpcsr |= FPCSR_IXF;
+ ret = 1;
+ }
+ }
+
+ return ret;
+}
+
+static inline void update_fpcsr(OpenRISCCPU *cpu)
+{
+ int tmp = ieee_ex_to_openrisc(cpu,
+ get_float_exception_flags(&cpu->env.fp_status));
+
+ SET_FP_CAUSE(cpu->env.fpcsr, tmp);
+ if ((GET_FP_ENABLE(cpu->env.fpcsr) & tmp) &&
+ (cpu->env.fpcsr & FPCSR_FPEE)) {
+ helper_exception(&cpu->env, EXCP_FPE);
+ } else {
+ UPDATE_FP_FLAGS(cpu->env.fpcsr, tmp);
+ }
+}
+
+uint64_t HELPER(itofd)(CPUOpenRISCState *env, uint64_t val)
+{
+ uint64_t itofd;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ itofd = int32_to_float64(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return itofd;
+}
+
+uint32_t HELPER(itofs)(CPUOpenRISCState *env, uint32_t val)
+{
+ uint32_t itofs;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ itofs = int32_to_float32(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return itofs;
+}
+
+uint64_t HELPER(ftoid)(CPUOpenRISCState *env, uint64_t val)
+{
+ uint64_t ftoid;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ ftoid = float32_to_int64(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return ftoid;
+}
+
+uint32_t HELPER(ftois)(CPUOpenRISCState *env, uint32_t val)
+{
+ uint32_t ftois;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ set_float_exception_flags(0, &cpu->env.fp_status);
+ ftois = float32_to_int32(val, &cpu->env.fp_status);
+ update_fpcsr(cpu);
+
+ return ftois;
+}
+
+#define FLOAT_OP(name, p) void helper_float_##name##_##p(void)
+
+#define FLOAT_CALC(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ uint64_t result; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return result; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ uint32_t result; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float32_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return result; \
+} \
+
+FLOAT_CALC(add)
+FLOAT_CALC(sub)
+FLOAT_CALC(mul)
+FLOAT_CALC(div)
+FLOAT_CALC(rem)
+#undef FLOAT_CALC
+
+#define FLOAT_TERNOP(name1, name2) \
+uint64_t helper_float_ ## name1 ## name2 ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, \
+ uint64_t fdt1) \
+{ \
+ uint64_t result, temp, hi, lo; \
+ uint32_t val1, val2; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ hi = env->fpmaddhi; \
+ lo = env->fpmaddlo; \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name1(fdt0, fdt1, &cpu->env.fp_status); \
+ lo &= 0xffffffff; \
+ hi &= 0xffffffff; \
+ temp = (hi << 32) | lo; \
+ result = float64_ ## name2(result, temp, &cpu->env.fp_status); \
+ val1 = result >> 32; \
+ val2 = (uint32_t) (result & 0xffffffff); \
+ update_fpcsr(cpu); \
+ cpu->env.fpmaddlo = val2; \
+ cpu->env.fpmaddhi = val1; \
+ return 0; \
+} \
+ \
+uint32_t helper_float_ ## name1 ## name2 ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ uint64_t result, temp, hi, lo; \
+ uint32_t val1, val2; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ hi = cpu->env.fpmaddhi; \
+ lo = cpu->env.fpmaddlo; \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ result = float64_ ## name1(fdt0, fdt1, &cpu->env.fp_status); \
+ temp = (hi << 32) | lo; \
+ result = float64_ ## name2(result, temp, &cpu->env.fp_status); \
+ val1 = result >> 32; \
+ val2 = (uint32_t) (result & 0xffffffff); \
+ update_fpcsr(cpu); \
+ cpu->env.fpmaddlo = val2; \
+ cpu->env.fpmaddhi = val1; \
+ return 0; \
+}
+
+FLOAT_TERNOP(mul, add)
+#undef FLOAT_TERNOP
+
+
+#define FLOAT_CMP(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = float64_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1)\
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = float32_ ## name(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMP(le)
+FLOAT_CMP(eq)
+FLOAT_CMP(lt)
+#undef FLOAT_CMP
+
+
+#define FLOAT_CMPNE(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_eq_quiet(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_eq_quiet(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMPNE(ne)
+#undef FLOAT_CMPNE
+
+#define FLOAT_CMPGT(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_le(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_le(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+FLOAT_CMPGT(gt)
+#undef FLOAT_CMPGT
+
+#define FLOAT_CMPGE(name) \
+uint64_t helper_float_ ## name ## _d(CPUOpenRISCState *env, \
+ uint64_t fdt0, uint64_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float64_lt(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+} \
+ \
+uint32_t helper_float_ ## name ## _s(CPUOpenRISCState *env, \
+ uint32_t fdt0, uint32_t fdt1) \
+{ \
+ int res; \
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env)); \
+ set_float_exception_flags(0, &cpu->env.fp_status); \
+ res = !float32_lt(fdt0, fdt1, &cpu->env.fp_status); \
+ update_fpcsr(cpu); \
+ return res; \
+}
+
+FLOAT_CMPGE(ge)
+#undef FLOAT_CMPGE
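A standalone sketch of the flag accumulation that ieee_ex_to_openrisc()/update_fpcsr() above implement: each raised softfloat flag sets the corresponding FPCSR sticky bit and records that an exception condition occurred. The softfloat flag values and FPCSR bit positions below are assumptions for illustration; only the mapping logic mirrors the code.

#include <stdio.h>
#include <stdint.h>

/* assumed softfloat flag values (illustrative only) */
#define float_flag_invalid   0x01
#define float_flag_divbyzero 0x02
#define float_flag_overflow  0x04
#define float_flag_underflow 0x08
#define float_flag_inexact   0x10

/* assumed FPCSR bit positions (illustrative only) */
#define FPCSR_IVF 0x001
#define FPCSR_DZF 0x002
#define FPCSR_OVF 0x004
#define FPCSR_UNF 0x008
#define FPCSR_IXF 0x010

static int map_ieee_flags(int fexcp, uint32_t *fpcsr)
{
    int ret = 0;
    if (fexcp & float_flag_invalid)   { *fpcsr |= FPCSR_IVF; ret = 1; }
    if (fexcp & float_flag_overflow)  { *fpcsr |= FPCSR_OVF; ret = 1; }
    if (fexcp & float_flag_underflow) { *fpcsr |= FPCSR_UNF; ret = 1; }
    if (fexcp & float_flag_divbyzero) { *fpcsr |= FPCSR_DZF; ret = 1; }
    if (fexcp & float_flag_inexact)   { *fpcsr |= FPCSR_IXF; ret = 1; }
    return ret;
}

int main(void)
{
    uint32_t fpcsr = 0;
    int hit = map_ieee_flags(float_flag_overflow | float_flag_inexact, &fpcsr);
    printf("hit=%d fpcsr=0x%02x\n", hit, fpcsr);   /* OVF and IXF accumulate */
    return 0;
}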
diff --git a/target-openrisc/helper.h b/target-openrisc/helper.h
new file mode 100644
index 0000000000..404d46447f
--- /dev/null
+++ b/target-openrisc/helper.h
@@ -0,0 +1,70 @@
+/*
+ * OpenRISC helper defines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "def-helper.h"
+
+/* exception */
+DEF_HELPER_FLAGS_2(exception, 0, void, env, i32)
+
+/* float */
+DEF_HELPER_FLAGS_2(itofd, 0, i64, env, i64)
+DEF_HELPER_FLAGS_2(itofs, 0, i32, env, i32)
+DEF_HELPER_FLAGS_2(ftoid, 0, i64, env, i64)
+DEF_HELPER_FLAGS_2(ftois, 0, i32, env, i32)
+
+#define FOP_MADD(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_MADD(muladd)
+#undef FOP_MADD
+
+#define FOP_CALC(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_CALC(add)
+FOP_CALC(sub)
+FOP_CALC(mul)
+FOP_CALC(div)
+FOP_CALC(rem)
+#undef FOP_CALC
+
+#define FOP_CMP(op) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _s, 0, i32, env, i32, i32) \
+DEF_HELPER_FLAGS_3(float_ ## op ## _d, 0, i64, env, i64, i64)
+FOP_CMP(eq)
+FOP_CMP(lt)
+FOP_CMP(le)
+FOP_CMP(ne)
+FOP_CMP(gt)
+FOP_CMP(ge)
+#undef FOP_CMP
+
+/* int */
+DEF_HELPER_FLAGS_1(ff1, 0, tl, tl)
+DEF_HELPER_FLAGS_1(fl1, 0, tl, tl)
+DEF_HELPER_FLAGS_3(mul32, 0, i32, env, i32, i32)
+
+/* interrupt */
+DEF_HELPER_FLAGS_1(rfe, 0, void, env)
+
+/* sys */
+DEF_HELPER_FLAGS_4(mtspr, 0, void, env, tl, tl, tl)
+DEF_HELPER_FLAGS_4(mfspr, 0, tl, env, tl, tl, tl)
+
+#include "def-helper.h"
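For reference, the helper-declaration macros above expand mechanically; for example FOP_CALC(add) produces exactly these two declarations, one per precision:

DEF_HELPER_FLAGS_3(float_add_s, 0, i32, env, i32, i32)
DEF_HELPER_FLAGS_3(float_add_d, 0, i64, env, i64, i64)

def-helper.h then turns each declaration into the helper_float_add_s()/helper_float_add_d() prototypes implemented in fpu_helper.c and, on the second pass (GEN_HELPER), into the gen_helper_float_add_s()/gen_helper_float_add_d() emitters that translate.c calls.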
diff --git a/target-openrisc/int_helper.c b/target-openrisc/int_helper.c
new file mode 100644
index 0000000000..2fdfd27712
--- /dev/null
+++ b/target-openrisc/int_helper.c
@@ -0,0 +1,79 @@
+/*
+ * OpenRISC int helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+#include "exception.h"
+#include "host-utils.h"
+
+target_ulong HELPER(ff1)(target_ulong x)
+{
+/*#ifdef TARGET_OPENRISC64
+ return x ? ctz64(x) + 1 : 0;
+#else*/
+ return x ? ctz32(x) + 1 : 0;
+/*#endif*/
+}
+
+target_ulong HELPER(fl1)(target_ulong x)
+{
+/* not used yet, enable it when we need or64. */
+/*#ifdef TARGET_OPENRISC64
+ return 64 - clz64(x);
+#else*/
+ return 32 - clz32(x);
+/*#endif*/
+}
+
+uint32_t HELPER(mul32)(CPUOpenRISCState *env,
+ uint32_t ra, uint32_t rb)
+{
+ uint64_t result;
+ uint32_t high, cy;
+
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ result = (uint64_t)ra * rb;
+ /* Registers in or32 are 32 bits wide, so 32 is NOT a magic number here.
+ or64 is not handled by this function and is not implemented yet;
+ TARGET_LONG_BITS is 64 for or64 and would break this function,
+ so we deliberately do not use TARGET_LONG_BITS here. */
+ high = result >> 32;
+ cy = result >> (32 - 1);
+
+ if ((cy & 0x1) == 0x0) {
+ if (high == 0x0) {
+ return result;
+ }
+ }
+
+ if ((cy & 0x1) == 0x1) {
+ if (high == 0xffffffff) {
+ return result;
+ }
+ }
+
+ cpu->env.sr |= (SR_OV | SR_CY);
+ if (cpu->env.sr & SR_OVE) {
+ raise_exception(cpu, EXCP_RANGE);
+ }
+
+ return result;
+}
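A standalone sketch of the fits-in-32-bits test used by HELPER(mul32) above: the 64-bit product is accepted only when bit 31 and the upper 32 bits agree (all zero, or bit 31 set with the upper word all ones); anything else sets SR_OV/SR_CY and may raise EXCP_RANGE. This only mirrors that check, not the CPU state handling.

#include <stdio.h>
#include <stdint.h>

static int mul32_fits(uint32_t ra, uint32_t rb)
{
    uint64_t result = (uint64_t)ra * rb;
    uint32_t high = result >> 32;          /* upper 32 bits of the product */
    uint32_t cy = (result >> 31) & 0x1;    /* bit 31 of the product */

    return (cy == 0 && high == 0x0) || (cy == 1 && high == 0xffffffff);
}

int main(void)
{
    printf("%d %d\n",
           mul32_fits(3, 5),               /* 15 fits: prints 1 */
           mul32_fits(0x10000, 0x10000));  /* 0x100000000 does not: prints 0 */
    return 0;
}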
diff --git a/target-openrisc/interrupt.c b/target-openrisc/interrupt.c
new file mode 100644
index 0000000000..642da7de49
--- /dev/null
+++ b/target-openrisc/interrupt.c
@@ -0,0 +1,74 @@
+/*
+ * OpenRISC interrupt.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "gdbstub.h"
+#include "host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/loader.h"
+#endif
+
+void do_interrupt(CPUOpenRISCState *env)
+{
+#ifndef CONFIG_USER_ONLY
+ if (env->flags & D_FLAG) { /* Delay Slot insn */
+ env->flags &= ~D_FLAG;
+ env->sr |= SR_DSX;
+ if (env->exception_index == EXCP_TICK ||
+ env->exception_index == EXCP_INT ||
+ env->exception_index == EXCP_SYSCALL ||
+ env->exception_index == EXCP_FPE) {
+ env->epcr = env->jmp_pc;
+ } else {
+ env->epcr = env->pc - 4;
+ }
+ } else {
+ if (env->exception_index == EXCP_TICK ||
+ env->exception_index == EXCP_INT ||
+ env->exception_index == EXCP_SYSCALL ||
+ env->exception_index == EXCP_FPE) {
+ env->epcr = env->npc;
+ } else {
+ env->epcr = env->pc;
+ }
+ }
+
+ /* The machine state may change between user mode and supervisor mode,
+ so we need to flush the TLB when entering and exiting an exception. */
+ tlb_flush(env, 1);
+
+ env->esr = env->sr;
+ env->sr &= ~SR_DME;
+ env->sr &= ~SR_IME;
+ env->sr |= SR_SM;
+ env->sr &= ~SR_IEE;
+ env->sr &= ~SR_TEE;
+ env->tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
+ env->tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
+
+ if (env->exception_index > 0 && env->exception_index < EXCP_NR) {
+ env->pc = (env->exception_index << 8);
+ } else {
+ cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ }
+#endif
+
+ env->exception_index = -1;
+}
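A small sketch of the vector dispatch at the end of do_interrupt(): the new program counter is simply the exception number shifted into the vector table. The index 0x5 below is an assumption for illustration (a tick-timer exception would land at 0x500 under this scheme).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t exception_index = 0x5;       /* assumed example exception number */
    uint32_t pc = exception_index << 8;   /* vector table entry, as in do_interrupt() */
    printf("handler entry at 0x%03x\n", pc);   /* prints 0x500 */
    return 0;
}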
diff --git a/target-openrisc/interrupt_helper.c b/target-openrisc/interrupt_helper.c
new file mode 100644
index 0000000000..79f5afed44
--- /dev/null
+++ b/target-openrisc/interrupt_helper.c
@@ -0,0 +1,57 @@
+/*
+ * OpenRISC interrupt helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+
+void HELPER(rfe)(CPUOpenRISCState *env)
+{
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+#ifndef CONFIG_USER_ONLY
+ int need_flush_tlb = (cpu->env.sr & (SR_SM | SR_IME | SR_DME)) ^
+ (cpu->env.esr & (SR_SM | SR_IME | SR_DME));
+#endif
+ cpu->env.pc = cpu->env.epcr;
+ cpu->env.npc = cpu->env.epcr;
+ cpu->env.sr = cpu->env.esr;
+
+#ifndef CONFIG_USER_ONLY
+ if (cpu->env.sr & SR_DME) {
+ cpu->env.tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_data;
+ } else {
+ cpu->env.tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (cpu->env.sr & SR_IME) {
+ cpu->env.tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_code;
+ } else {
+ cpu->env.tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (need_flush_tlb) {
+ tlb_flush(&cpu->env, 1);
+ }
+#endif
+ cpu->env.interrupt_request |= CPU_INTERRUPT_EXITTB;
+}
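A standalone sketch of the need_flush_tlb test in HELPER(rfe) above: the TLB only has to be flushed when l.rfe changes one of the translation-relevant SR bits (SM, IME, DME). The bit positions below are assumptions for illustration.

#include <stdio.h>
#include <stdint.h>

#define SR_SM  (1u << 0)   /* assumed bit positions */
#define SR_DME (1u << 5)
#define SR_IME (1u << 6)

static int need_flush_tlb(uint32_t sr, uint32_t esr)
{
    return ((sr & (SR_SM | SR_IME | SR_DME)) ^
            (esr & (SR_SM | SR_IME | SR_DME))) != 0;
}

int main(void)
{
    printf("%d %d\n",
           need_flush_tlb(SR_SM, SR_SM),            /* nothing changes: 0 */
           need_flush_tlb(SR_SM, SR_SM | SR_IME));  /* IME toggles: 1 */
    return 0;
}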
diff --git a/target-openrisc/machine.c b/target-openrisc/machine.c
new file mode 100644
index 0000000000..cba9811ea5
--- /dev/null
+++ b/target-openrisc/machine.c
@@ -0,0 +1,47 @@
+/*
+ * OpenRISC Machine
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "hw/hw.h"
+#include "hw/boards.h"
+
+static const VMStateDescription vmstate_cpu = {
+ .name = "cpu",
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32_ARRAY(gpr, CPUOpenRISCState, 32),
+ VMSTATE_UINT32(sr, CPUOpenRISCState),
+ VMSTATE_UINT32(epcr, CPUOpenRISCState),
+ VMSTATE_UINT32(eear, CPUOpenRISCState),
+ VMSTATE_UINT32(esr, CPUOpenRISCState),
+ VMSTATE_UINT32(fpcsr, CPUOpenRISCState),
+ VMSTATE_UINT32(pc, CPUOpenRISCState),
+ VMSTATE_UINT32(npc, CPUOpenRISCState),
+ VMSTATE_UINT32(ppc, CPUOpenRISCState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+void cpu_save(QEMUFile *f, void *opaque)
+{
+ vmstate_save_state(f, &vmstate_cpu, opaque);
+}
+
+int cpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ return vmstate_load_state(f, &vmstate_cpu, opaque, version_id);
+}
diff --git a/target-openrisc/mmu.c b/target-openrisc/mmu.c
new file mode 100644
index 0000000000..0be1d413c9
--- /dev/null
+++ b/target-openrisc/mmu.c
@@ -0,0 +1,243 @@
+/*
+ * OpenRISC MMU.
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "qemu-common.h"
+#include "gdbstub.h"
+#include "host-utils.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/loader.h"
+#endif
+
+#ifndef CONFIG_USER_ONLY
+int cpu_openrisc_get_phys_nommu(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw)
+{
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE;
+ return TLBRET_MATCH;
+}
+
+int cpu_openrisc_get_phys_code(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw)
+{
+ int vpn = address >> TARGET_PAGE_BITS;
+ int idx = vpn & ITLB_MASK;
+ int right = 0;
+
+ if ((cpu->env.tlb->itlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
+ return TLBRET_NOMATCH;
+ }
+ if (!(cpu->env.tlb->itlb[0][idx].mr & 1)) {
+ return TLBRET_INVALID;
+ }
+
+ if (cpu->env.sr & SR_SM) { /* supervisor mode */
+ if (cpu->env.tlb->itlb[0][idx].tr & SXE) {
+ right |= PAGE_EXEC;
+ }
+ } else {
+ if (cpu->env.tlb->itlb[0][idx].tr & UXE) {
+ right |= PAGE_EXEC;
+ }
+ }
+
+ if ((rw & 2) && ((right & PAGE_EXEC) == 0)) {
+ return TLBRET_BADADDR;
+ }
+
+ *physical = (cpu->env.tlb->itlb[0][idx].tr & TARGET_PAGE_MASK) |
+ (address & (TARGET_PAGE_SIZE-1));
+ *prot = right;
+ return TLBRET_MATCH;
+}
+
+int cpu_openrisc_get_phys_data(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address, int rw)
+{
+ int vpn = address >> TARGET_PAGE_BITS;
+ int idx = vpn & DTLB_MASK;
+ int right = 0;
+
+ if ((cpu->env.tlb->dtlb[0][idx].mr >> TARGET_PAGE_BITS) != vpn) {
+ return TLBRET_NOMATCH;
+ }
+ if (!(cpu->env.tlb->dtlb[0][idx].mr & 1)) {
+ return TLBRET_INVALID;
+ }
+
+ if (cpu->env.sr & SR_SM) { /* supervisor mode */
+ if (cpu->env.tlb->dtlb[0][idx].tr & SRE) {
+ right |= PAGE_READ;
+ }
+ if (cpu->env.tlb->dtlb[0][idx].tr & SWE) {
+ right |= PAGE_WRITE;
+ }
+ } else {
+ if (cpu->env.tlb->dtlb[0][idx].tr & URE) {
+ right |= PAGE_READ;
+ }
+ if (cpu->env.tlb->dtlb[0][idx].tr & UWE) {
+ right |= PAGE_WRITE;
+ }
+ }
+
+ if ((rw == 0) && ((right & PAGE_READ) == 0)) {
+ return TLBRET_BADADDR;
+ }
+ if ((rw & 1) && ((right & PAGE_WRITE) == 0)) {
+ return TLBRET_BADADDR;
+ }
+
+ *physical = (cpu->env.tlb->dtlb[0][idx].tr & TARGET_PAGE_MASK) |
+ (address & (TARGET_PAGE_SIZE-1));
+ *prot = right;
+ return TLBRET_MATCH;
+}
+
+static int cpu_openrisc_get_phys_addr(OpenRISCCPU *cpu,
+ target_phys_addr_t *physical,
+ int *prot, target_ulong address,
+ int rw)
+{
+ int ret = TLBRET_MATCH;
+
+ /* [0x0000--0x2000]: unmapped */
+ if (address < 0x2000 && (cpu->env.sr & SR_SM)) {
+ *physical = address;
+ *prot = PAGE_READ | PAGE_WRITE;
+ return ret;
+ }
+
+ if (rw == 2) { /* ITLB */
+ *physical = 0;
+ ret = cpu->env.tlb->cpu_openrisc_map_address_code(cpu, physical,
+ prot, address, rw);
+ } else { /* DTLB */
+ ret = cpu->env.tlb->cpu_openrisc_map_address_data(cpu, physical,
+ prot, address, rw);
+ }
+
+ return ret;
+}
+#endif
+
+static void cpu_openrisc_raise_mmu_exception(OpenRISCCPU *cpu,
+ target_ulong address,
+ int rw, int tlb_error)
+{
+ int exception = 0;
+
+ switch (tlb_error) {
+ default:
+ if (rw == 2) {
+ exception = EXCP_IPF;
+ } else {
+ exception = EXCP_DPF;
+ }
+ break;
+#ifndef CONFIG_USER_ONLY
+ case TLBRET_BADADDR:
+ if (rw == 2) {
+ exception = EXCP_IPF;
+ } else {
+ exception = EXCP_DPF;
+ }
+ break;
+ case TLBRET_INVALID:
+ case TLBRET_NOMATCH:
+ /* No TLB match for a mapped address */
+ if (rw == 2) {
+ exception = EXCP_ITLBMISS;
+ } else {
+ exception = EXCP_DTLBMISS;
+ }
+ break;
+#endif
+ }
+
+ cpu->env.exception_index = exception;
+ cpu->env.eear = address;
+}
+
+#ifndef CONFIG_USER_ONLY
+int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
+ target_ulong address, int rw, int mmu_idx)
+{
+ int ret = 0;
+ target_phys_addr_t physical = 0;
+ int prot = 0;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ ret = cpu_openrisc_get_phys_addr(cpu, &physical, &prot,
+ address, rw);
+
+ if (ret == TLBRET_MATCH) {
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
+ } else if (ret < 0) {
+ cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
+ ret = 1;
+ }
+
+ return ret;
+}
+#else
+int cpu_openrisc_handle_mmu_fault(CPUOpenRISCState *env,
+ target_ulong address, int rw, int mmu_idx)
+{
+ int ret = 0;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ cpu_openrisc_raise_mmu_exception(cpu, address, rw, ret);
+ ret = 1;
+
+ return ret;
+}
+#endif
+
+#ifndef CONFIG_USER_ONLY
+target_phys_addr_t cpu_get_phys_page_debug(CPUOpenRISCState *env,
+ target_ulong addr)
+{
+ target_phys_addr_t phys_addr;
+ int prot;
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ if (cpu_openrisc_get_phys_addr(cpu, &phys_addr, &prot, addr, 0)) {
+ return -1;
+ }
+
+ return phys_addr;
+}
+
+void cpu_openrisc_mmu_init(OpenRISCCPU *cpu)
+{
+ cpu->env.tlb = g_malloc0(sizeof(CPUOpenRISCTLBContext));
+
+ cpu->env.tlb->cpu_openrisc_map_address_code = &cpu_openrisc_get_phys_nommu;
+ cpu->env.tlb->cpu_openrisc_map_address_data = &cpu_openrisc_get_phys_nommu;
+}
+#endif
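A worked sketch of the direct-mapped TLB indexing used by cpu_openrisc_get_phys_code()/_data() above: the virtual page number both selects the entry and must match the tag held in that entry's match register. The page-size and mask values below are assumptions for illustration (one way of 128 entries, 8 KB pages).

#include <stdio.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 13     /* assumed: 8 KB pages */
#define DTLB_MASK        0x7f   /* assumed: 128 direct-mapped entries */

int main(void)
{
    uint32_t address = 0x00402468;
    uint32_t vpn = address >> TARGET_PAGE_BITS;   /* virtual page number (tag) */
    uint32_t idx = vpn & DTLB_MASK;               /* entry selected in way 0 */
    printf("vpn=0x%x idx=%u\n", vpn, idx);        /* vpn=0x201 idx=1 */
    return 0;
}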
diff --git a/target-openrisc/mmu_helper.c b/target-openrisc/mmu_helper.c
new file mode 100644
index 0000000000..59ed371ae0
--- /dev/null
+++ b/target-openrisc/mmu_helper.c
@@ -0,0 +1,63 @@
+/*
+ * OpenRISC MMU helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+#include "softmmu_exec.h"
+#define MMUSUFFIX _mmu
+
+#define SHIFT 0
+#include "softmmu_template.h"
+
+#define SHIFT 1
+#include "softmmu_template.h"
+
+#define SHIFT 2
+#include "softmmu_template.h"
+
+#define SHIFT 3
+#include "softmmu_template.h"
+
+void tlb_fill(CPUOpenRISCState *env, target_ulong addr, int is_write,
+ int mmu_idx, uintptr_t retaddr)
+{
+ TranslationBlock *tb;
+ unsigned long pc;
+ int ret;
+
+ ret = cpu_openrisc_handle_mmu_fault(env, addr, is_write, mmu_idx);
+
+ if (ret) {
+ if (retaddr) {
+ /* now we have a real cpu fault. */
+ pc = (unsigned long)retaddr;
+ tb = tb_find_pc(pc);
+ if (tb) {
+ /* the PC is inside the translated code. It means that we
+ have a virtual CPU fault. */
+ cpu_restore_state(tb, env, pc);
+ }
+ }
+ /* Raise Exception. */
+ cpu_loop_exit(env);
+ }
+}
+#endif
diff --git a/target-openrisc/sys_helper.c b/target-openrisc/sys_helper.c
new file mode 100644
index 0000000000..f160dc397c
--- /dev/null
+++ b/target-openrisc/sys_helper.c
@@ -0,0 +1,287 @@
+/*
+ * OpenRISC system instructions helper routines
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Zhizhou Zhang <etouzh@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "helper.h"
+
+#define TO_SPR(group, number) (((group) << 11) + (number))
+
+void HELPER(mtspr)(CPUOpenRISCState *env,
+ target_ulong ra, target_ulong rb, target_ulong offset)
+{
+#ifndef CONFIG_USER_ONLY
+ int spr = (ra | offset);
+ int idx;
+
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ switch (spr) {
+ case TO_SPR(0, 0): /* VR */
+ env->vr = rb;
+ break;
+
+ case TO_SPR(0, 16): /* NPC */
+ env->npc = rb;
+ break;
+
+ case TO_SPR(0, 17): /* SR */
+ if ((env->sr & (SR_IME | SR_DME | SR_SM)) ^
+ (rb & (SR_IME | SR_DME | SR_SM))) {
+ tlb_flush(env, 1);
+ }
+ env->sr = rb;
+ env->sr |= SR_FO; /* the FO bit is fixed to 1 */
+ if (env->sr & SR_DME) {
+ env->tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_data;
+ } else {
+ env->tlb->cpu_openrisc_map_address_data =
+ &cpu_openrisc_get_phys_nommu;
+ }
+
+ if (env->sr & SR_IME) {
+ env->tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_code;
+ } else {
+ env->tlb->cpu_openrisc_map_address_code =
+ &cpu_openrisc_get_phys_nommu;
+ }
+ break;
+
+ case TO_SPR(0, 18): /* PPC */
+ env->ppc = rb;
+ break;
+
+ case TO_SPR(0, 32): /* EPCR */
+ env->epcr = rb;
+ break;
+
+ case TO_SPR(0, 48): /* EEAR */
+ env->eear = rb;
+ break;
+
+ case TO_SPR(0, 64): /* ESR */
+ env->esr = rb;
+ break;
+ case TO_SPR(1, 512) ... TO_SPR(1, 639): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ if (!(rb & 1)) {
+ tlb_flush_page(env, env->tlb->dtlb[0][idx].mr & TARGET_PAGE_MASK);
+ }
+ env->tlb->dtlb[0][idx].mr = rb;
+ break;
+
+ case TO_SPR(1, 640) ... TO_SPR(1, 767): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ env->tlb->dtlb[0][idx].tr = rb;
+ break;
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+ case TO_SPR(2, 512) ... TO_SPR(2, 639): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ if (!(rb & 1)) {
+ tlb_flush_page(env, env->tlb->itlb[0][idx].mr & TARGET_PAGE_MASK);
+ }
+ env->tlb->itlb[0][idx].mr = rb;
+ break;
+
+ case TO_SPR(2, 640) ... TO_SPR(2, 767): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ env->tlb->itlb[0][idx].tr = rb;
+ break;
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+ case TO_SPR(9, 0): /* PICMR */
+ env->picmr |= rb;
+ break;
+ case TO_SPR(9, 2): /* PICSR */
+ env->picsr &= ~rb;
+ break;
+ case TO_SPR(10, 0): /* TTMR */
+ {
+ int ip = env->ttmr & TTMR_IP;
+
+ if (rb & TTMR_IP) { /* Keep IP bit. */
+ env->ttmr = (rb & ~TTMR_IP) + ip;
+ } else { /* Clear IP bit. */
+ env->ttmr = rb & ~TTMR_IP;
+ env->interrupt_request &= ~CPU_INTERRUPT_TIMER;
+ }
+
+ cpu_openrisc_count_update(cpu);
+
+ switch (env->ttmr & TTMR_M) {
+ case TIMER_NONE:
+ cpu_openrisc_count_stop(cpu);
+ break;
+ case TIMER_INTR:
+ cpu_openrisc_count_start(cpu);
+ break;
+ case TIMER_SHOT:
+ cpu_openrisc_count_start(cpu);
+ break;
+ case TIMER_CONT:
+ cpu_openrisc_count_start(cpu);
+ break;
+ default:
+ break;
+ }
+ }
+ break;
+
+ case TO_SPR(10, 1): /* TTCR */
+ env->ttcr = rb;
+ if (env->ttmr & TIMER_NONE) {
+ return;
+ }
+ cpu_openrisc_count_start(cpu);
+ break;
+ default:
+
+ break;
+ }
+#endif
+}
+
+target_ulong HELPER(mfspr)(CPUOpenRISCState *env,
+ target_ulong rd, target_ulong ra, uint32_t offset)
+{
+#ifndef CONFIG_USER_ONLY
+ int spr = (ra | offset);
+ int idx;
+
+ OpenRISCCPU *cpu = OPENRISC_CPU(ENV_GET_CPU(env));
+
+ switch (spr) {
+ case TO_SPR(0, 0): /* VR */
+ return env->vr & SPR_VR;
+
+ case TO_SPR(0, 1): /* UPR */
+ return env->upr; /* TT, DM, IM, UP present */
+
+ case TO_SPR(0, 2): /* CPUCFGR */
+ return env->cpucfgr;
+
+ case TO_SPR(0, 3): /* DMMUCFGR */
+ return env->dmmucfgr; /* 1Way, 64 entries */
+
+ case TO_SPR(0, 4): /* IMMUCFGR */
+ return env->immucfgr;
+
+ case TO_SPR(0, 16): /* NPC */
+ return env->npc;
+
+ case TO_SPR(0, 17): /* SR */
+ return env->sr;
+
+ case TO_SPR(0, 18): /* PPC */
+ return env->ppc;
+
+ case TO_SPR(0, 32): /* EPCR */
+ return env->epcr;
+
+ case TO_SPR(0, 48): /* EEAR */
+ return env->eear;
+
+ case TO_SPR(0, 64): /* ESR */
+ return env->esr;
+
+ case TO_SPR(1, 512) ... TO_SPR(1, 639): /* DTLBW0MR 0-127 */
+ idx = spr - TO_SPR(1, 512);
+ return env->tlb->dtlb[0][idx].mr;
+
+ case TO_SPR(1, 640) ... TO_SPR(1, 767): /* DTLBW0TR 0-127 */
+ idx = spr - TO_SPR(1, 640);
+ return env->tlb->dtlb[0][idx].tr;
+
+ case TO_SPR(1, 768) ... TO_SPR(1, 895): /* DTLBW1MR 0-127 */
+ case TO_SPR(1, 896) ... TO_SPR(1, 1023): /* DTLBW1TR 0-127 */
+ case TO_SPR(1, 1024) ... TO_SPR(1, 1151): /* DTLBW2MR 0-127 */
+ case TO_SPR(1, 1152) ... TO_SPR(1, 1279): /* DTLBW2TR 0-127 */
+ case TO_SPR(1, 1280) ... TO_SPR(1, 1407): /* DTLBW3MR 0-127 */
+ case TO_SPR(1, 1408) ... TO_SPR(1, 1535): /* DTLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(2, 512) ... TO_SPR(2, 639): /* ITLBW0MR 0-127 */
+ idx = spr - TO_SPR(2, 512);
+ return env->tlb->itlb[0][idx].mr;
+
+ case TO_SPR(2, 640) ... TO_SPR(2, 767): /* ITLBW0TR 0-127 */
+ idx = spr - TO_SPR(2, 640);
+ return env->tlb->itlb[0][idx].tr;
+
+ case TO_SPR(2, 768) ... TO_SPR(2, 895): /* ITLBW1MR 0-127 */
+ case TO_SPR(2, 896) ... TO_SPR(2, 1023): /* ITLBW1TR 0-127 */
+ case TO_SPR(2, 1024) ... TO_SPR(2, 1151): /* ITLBW2MR 0-127 */
+ case TO_SPR(2, 1152) ... TO_SPR(2, 1279): /* ITLBW2TR 0-127 */
+ case TO_SPR(2, 1280) ... TO_SPR(2, 1407): /* ITLBW3MR 0-127 */
+ case TO_SPR(2, 1408) ... TO_SPR(2, 1535): /* ITLBW3TR 0-127 */
+ break;
+
+ case TO_SPR(9, 0): /* PICMR */
+ return env->picmr;
+
+ case TO_SPR(9, 2): /* PICSR */
+ return env->picsr;
+
+ case TO_SPR(10, 0): /* TTMR */
+ return env->ttmr;
+
+ case TO_SPR(10, 1): /* TTCR */
+ cpu_openrisc_count_update(cpu);
+ return env->ttcr;
+
+ default:
+ break;
+ }
+#endif
+
+/* If we later need to add tracepoints (or debug printfs) for the return
+value, it may be useful to restructure the code like this:
+
+target_ulong ret = 0;
+
+switch (spr) {
+case x:
+ ret = y;
+ break;
+case z:
+ ret = 42;
+ break;
+...
+}
+
+and later call something like trace_spr_read(ret);
+
+return ret; */
+ /* rd is passed in; if it was left unchanged above, just hand it back. */
+ return rd;
+}
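A worked example of the TO_SPR() encoding used throughout this file: the SPR group number occupies the upper bits and the register number the lower 11. Using the group/register pairs already named in the switch above, group 10 register 0 (TTMR) is SPR 0x5000 and group 0 register 17 (SR) is SPR 0x0011.

#include <stdio.h>

#define TO_SPR(group, number) (((group) << 11) + (number))

int main(void)
{
    printf("TTMR = 0x%x, SR = 0x%x\n", TO_SPR(10, 0), TO_SPR(0, 17));
    return 0;
}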
diff --git a/target-openrisc/translate.c b/target-openrisc/translate.c
new file mode 100644
index 0000000000..325ba09cb5
--- /dev/null
+++ b/target-openrisc/translate.c
@@ -0,0 +1,1835 @@
+/*
+ * OpenRISC translation
+ *
+ * Copyright (c) 2011-2012 Jia Liu <proljc@gmail.com>
+ * Feng Gao <gf91597@gmail.com>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec-all.h"
+#include "disas.h"
+#include "tcg-op.h"
+#include "qemu-common.h"
+#include "qemu-log.h"
+#include "config.h"
+#include "bitops.h"
+
+#include "helper.h"
+#define GEN_HELPER 1
+#include "helper.h"
+
+#define OPENRISC_DISAS
+
+#ifdef OPENRISC_DISAS
+# define LOG_DIS(...) qemu_log_mask(CPU_LOG_TB_IN_ASM, ## __VA_ARGS__)
+#else
+# define LOG_DIS(...) do { } while (0)
+#endif
+
+typedef struct DisasContext {
+ TranslationBlock *tb;
+ target_ulong pc, ppc, npc;
+ uint32_t tb_flags, synced_flags, flags;
+ uint32_t is_jmp;
+ uint32_t mem_idx;
+ int singlestep_enabled;
+ uint32_t delayed_branch;
+} DisasContext;
+
+static TCGv_ptr cpu_env;
+static TCGv cpu_sr;
+static TCGv cpu_R[32];
+static TCGv cpu_pc;
+static TCGv jmp_pc; /* l.jr/l.jalr temp pc */
+static TCGv cpu_npc;
+static TCGv cpu_ppc;
+static TCGv_i32 env_btaken; /* bf/bnf, F flag taken */
+static TCGv_i32 fpcsr;
+static TCGv machi, maclo;
+static TCGv fpmaddhi, fpmaddlo;
+static TCGv_i32 env_flags;
+#include "gen-icount.h"
+
+void openrisc_translate_init(void)
+{
+ static const char * const regnames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ };
+ int i;
+
+ cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
+ cpu_sr = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, sr), "sr");
+ env_flags = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUOpenRISCState, flags),
+ "flags");
+ cpu_pc = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, pc), "pc");
+ cpu_npc = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, npc), "npc");
+ cpu_ppc = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, ppc), "ppc");
+ jmp_pc = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, jmp_pc), "jmp_pc");
+ env_btaken = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUOpenRISCState, btaken),
+ "btaken");
+ fpcsr = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUOpenRISCState, fpcsr),
+ "fpcsr");
+ machi = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, machi),
+ "machi");
+ maclo = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, maclo),
+ "maclo");
+ fpmaddhi = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, fpmaddhi),
+ "fpmaddhi");
+ fpmaddlo = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, fpmaddlo),
+ "fpmaddlo");
+ for (i = 0; i < 32; i++) {
+ cpu_R[i] = tcg_global_mem_new(TCG_AREG0,
+ offsetof(CPUOpenRISCState, gpr[i]),
+ regnames[i]);
+ }
+#define GEN_HELPER 2
+#include "helper.h"
+}
+
+/* Write back SR_F from translation-space to execution-space. */
+static inline void wb_SR_F(void)
+{
+ int label;
+
+ label = gen_new_label();
+ tcg_gen_andi_tl(cpu_sr, cpu_sr, ~SR_F);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, env_btaken, 0, label);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, SR_F);
+ gen_set_label(label);
+}
+
+static inline int zero_extend(unsigned int val, int width)
+{
+ return val & ((1 << width) - 1);
+}
+
+static inline int sign_extend(unsigned int val, int width)
+{
+ int sval;
+
+ /* LSL */
+ val <<= TARGET_LONG_BITS - width;
+ sval = val;
+ /* ASR. */
+ sval >>= TARGET_LONG_BITS - width;
+ return sval;
+}
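A standalone worked example of the zero_extend()/sign_extend() helpers above, assuming TARGET_LONG_BITS == 32 (or32): the 16-bit value 0x8001 zero-extends to 0x00008001 but sign-extends to 0xffff8001.

#include <stdio.h>

#define TARGET_LONG_BITS 32   /* assumed: or32 */

static int zero_extend(unsigned int val, int width)
{
    return val & ((1 << width) - 1);
}

static int sign_extend(unsigned int val, int width)
{
    int sval;
    val <<= TARGET_LONG_BITS - width;    /* logical shift left */
    sval = val;
    sval >>= TARGET_LONG_BITS - width;   /* arithmetic shift right */
    return sval;
}

int main(void)
{
    printf("0x%x 0x%x\n",
           (unsigned)zero_extend(0x8001, 16),    /* 0x8001 */
           (unsigned)sign_extend(0x8001, 16));   /* 0xffff8001 */
    return 0;
}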
+
+static inline void gen_sync_flags(DisasContext *dc)
+{
+ /* Sync the TB-dependent flags between translation and runtime. */
+ if (dc->tb_flags != dc->synced_flags) {
+ tcg_gen_movi_tl(env_flags, dc->tb_flags);
+ dc->synced_flags = dc->tb_flags;
+ }
+}
+
+static void gen_exception(DisasContext *dc, unsigned int excp)
+{
+ TCGv_i32 tmp = tcg_const_i32(excp);
+ gen_helper_exception(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
+}
+
+static void gen_illegal_exception(DisasContext *dc)
+{
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_ILLEGAL);
+ dc->is_jmp = DISAS_UPDATE;
+}
+
+/* not used yet, enable it when we need or64. */
+/*#ifdef TARGET_OPENRISC64
+static void check_ob64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OB64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+
+static void check_of64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OF64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+
+static void check_ov64s(DisasContext *dc)
+{
+ if (!(dc->flags & CPUCFGR_OV64S)) {
+ gen_illegal_exception(dc);
+ }
+}
+#endif*/
+
+static void gen_goto_tb(DisasContext *dc, int n, target_ulong dest)
+{
+ TranslationBlock *tb;
+ tb = dc->tb;
+ if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK) &&
+ likely(!dc->singlestep_enabled)) {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ tcg_gen_goto_tb(n);
+ tcg_gen_exit_tb((tcg_target_long)tb + n);
+ } else {
+ tcg_gen_movi_tl(cpu_pc, dest);
+ if (dc->singlestep_enabled) {
+ gen_exception(dc, EXCP_DEBUG);
+ }
+ tcg_gen_exit_tb(0);
+ }
+}
+
+static void gen_jump(DisasContext *dc, uint32_t imm, uint32_t reg, uint32_t op0)
+{
+ target_ulong tmp_pc;
+ int lab = gen_new_label();
+ TCGv sr_f = tcg_temp_new();
+ /* N26, 26bits imm */
+ tmp_pc = sign_extend((imm<<2), 26) + dc->pc;
+ tcg_gen_andi_tl(sr_f, cpu_sr, SR_F);
+
+ if (op0 == 0x00) { /* l.j */
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ } else if (op0 == 0x01) { /* l.jal */
+ tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ } else if (op0 == 0x03) { /* l.bnf */
+ tcg_gen_movi_tl(jmp_pc, dc->pc+8);
+ tcg_gen_brcondi_i32(TCG_COND_EQ, sr_f, SR_F, lab);
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ gen_set_label(lab);
+ } else if (op0 == 0x04) { /* l.bf */
+ tcg_gen_movi_tl(jmp_pc, dc->pc+8);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_f, SR_F, lab);
+ tcg_gen_movi_tl(jmp_pc, tmp_pc);
+ gen_set_label(lab);
+ } else if (op0 == 0x11) { /* l.jr */
+ tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
+ } else if (op0 == 0x12) { /* l.jalr */
+ tcg_gen_movi_tl(cpu_R[9], (dc->pc + 8));
+ tcg_gen_mov_tl(jmp_pc, cpu_R[reg]);
+ } else {
+ gen_illegal_exception(dc);
+ }
+
+ tcg_temp_free(sr_f);
+ dc->delayed_branch = 2;
+ dc->tb_flags |= D_FLAG;
+ gen_sync_flags(dc);
+}
+
+static void dec_calc(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0, op1, op2;
+ uint32_t ra, rb, rd;
+ op0 = extract32(insn, 0, 4);
+ op1 = extract32(insn, 8, 2);
+ op2 = extract32(insn, 6, 2);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+
+ switch (op0) {
+ case 0x0000:
+ switch (op1) {
+ case 0x00: /* l.add */
+ LOG_DIS("l.add r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_add_i64(td, ta, tb);
+ tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 31);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0001: /* l.addc */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.addc r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 tcy = tcg_temp_local_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_cy = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY);
+ tcg_gen_extu_i32_i64(tcy, sr_cy);
+ tcg_gen_shri_i64(tcy, tcy, 10);
+ tcg_gen_add_i64(td, ta, tb);
+ tcg_gen_add_i64(td, td, tcy);
+ tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(tcy);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_cy);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0002: /* l.sub */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.sub r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 tb = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(tb, cpu_R[rb]);
+ tcg_gen_sub_i64(td, ta, tb);
+ tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 31);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(tb);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0003: /* l.and */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.and r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_and_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0004: /* l.or */
+ switch (op1) {
+ case 0x00:
+ LOG_DIS("l.or r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_or_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0005:
+ switch (op1) {
+ case 0x00: /* l.xor */
+ LOG_DIS("l.xor r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_xor_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0006:
+ switch (op1) {
+ case 0x03: /* l.mul */
+ LOG_DIS("l.mul r%d, r%d, r%d\n", rd, ra, rb);
+ if (ra != 0 && rb != 0) {
+ gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0x0);
+ }
+ break;
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0009:
+ switch (op1) {
+ case 0x03: /* l.div */
+ LOG_DIS("l.div r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab0 = gen_new_label();
+ int lab1 = gen_new_label();
+ int lab2 = gen_new_label();
+ int lab3 = gen_new_label();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ if (rb == 0) {
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab0);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_R[rb],
+ 0x00000000, lab1);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[ra],
+ 0x80000000, lab2);
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb],
+ 0xffffffff, lab2);
+ gen_set_label(lab1);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab3);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab2);
+ tcg_gen_div_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ gen_set_label(lab3);
+ }
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000a:
+ switch (op1) {
+ case 0x03: /* l.divu */
+ LOG_DIS("l.divu r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab0 = gen_new_label();
+ int lab1 = gen_new_label();
+ int lab2 = gen_new_label();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ if (rb == 0) {
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab0);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab0);
+ } else {
+ tcg_gen_brcondi_tl(TCG_COND_NE, cpu_R[rb],
+ 0x00000000, lab1);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab2);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab1);
+ tcg_gen_divu_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ gen_set_label(lab2);
+ }
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000b:
+ switch (op1) {
+ case 0x03: /* l.mulu */
+ LOG_DIS("l.mulu r%d, r%d, r%d\n", rd, ra, rb);
+ if (rb != 0 && ra != 0) {
+ TCGv_i64 result = tcg_temp_local_new_i64();
+ TCGv_i64 tra = tcg_temp_local_new_i64();
+ TCGv_i64 trb = tcg_temp_local_new_i64();
+ TCGv_i64 high = tcg_temp_new_i64();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ int lab = gen_new_label();
+ /* Calculate the result. */
+ tcg_gen_extu_i32_i64(tra, cpu_R[ra]);
+ tcg_gen_extu_i32_i64(trb, cpu_R[rb]);
+ tcg_gen_mul_i64(result, tra, trb);
+ tcg_temp_free_i64(tra);
+ tcg_temp_free_i64(trb);
+ tcg_gen_shri_i64(high, result, TARGET_LONG_BITS);
+ /* Overflow or not. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, high, 0x00000000, lab);
+ tcg_gen_ori_tl(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_tl(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_temp_free_i64(high);
+ tcg_gen_trunc_i64_tl(cpu_R[rd], result);
+ tcg_temp_free_i64(result);
+ tcg_temp_free_i32(sr_ove);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000e:
+ switch (op1) {
+ case 0x00: /* l.cmov */
+ LOG_DIS("l.cmov r%d, r%d, r%d\n", rd, ra, rb);
+ {
+ int lab = gen_new_label();
+ TCGv res = tcg_temp_local_new();
+ TCGv sr_f = tcg_temp_new();
+ tcg_gen_andi_tl(sr_f, cpu_sr, SR_F);
+ tcg_gen_mov_tl(res, cpu_R[rb]);
+ tcg_gen_brcondi_tl(TCG_COND_NE, sr_f, SR_F, lab);
+ tcg_gen_mov_tl(res, cpu_R[ra]);
+ gen_set_label(lab);
+ tcg_gen_mov_tl(cpu_R[rd], res);
+ tcg_temp_free(sr_f);
+ tcg_temp_free(res);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000f:
+ switch (op1) {
+ case 0x00: /* l.ff1 */
+ LOG_DIS("l.ff1 r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_ff1(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.fl1 */
+ LOG_DIS("l.fl1 r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_fl1(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x0008:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.sll */
+ LOG_DIS("l.sll r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_shl_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x01: /* l.srl */
+ LOG_DIS("l.srl r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_shr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x02: /* l.sra */
+ LOG_DIS("l.sra r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_sar_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+ case 0x03: /* l.ror */
+ LOG_DIS("l.ror r%d, r%d, r%d\n", rd, ra, rb);
+ tcg_gen_rotr_tl(cpu_R[rd], cpu_R[ra], cpu_R[rb]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000c:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.exths */
+ LOG_DIS("l.exths r%d, r%d\n", rd, ra);
+ tcg_gen_ext16s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.extbs */
+ LOG_DIS("l.extbs r%d, r%d\n", rd, ra);
+ tcg_gen_ext8s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x02: /* l.exthz */
+ LOG_DIS("l.exthz r%d, r%d\n", rd, ra);
+ tcg_gen_ext16u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x03: /* l.extbz */
+ LOG_DIS("l.extbz r%d, r%d\n", rd, ra);
+ tcg_gen_ext8u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x000d:
+ switch (op1) {
+ case 0x00:
+ switch (op2) {
+ case 0x00: /* l.extws */
+ LOG_DIS("l.extws r%d, r%d\n", rd, ra);
+ tcg_gen_ext32s_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+ case 0x01: /* l.extwz */
+ LOG_DIS("l.extwz r%d, r%d\n", rd, ra);
+ tcg_gen_ext32u_tl(cpu_R[rd], cpu_R[ra]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
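A standalone sketch of the overflow test that the l.add/l.addc/l.sub (and l.addi below) cases emit: the operands are zero-extended to 64 bits, the operation is done in 64 bits, and the result is accepted only when bits 32..31 are 00 or 11; otherwise SR_OV/SR_CY are set and, if SR_OVE is enabled, EXCP_RANGE is raised. This only mirrors that generated check, not the TCG plumbing around it.

#include <stdio.h>
#include <stdint.h>

/* Returns nonzero when the generated check would flag the addition. */
static int add_flags_overflow(uint32_t a, uint32_t b)
{
    uint64_t td = (uint64_t)a + (uint64_t)b;   /* extu_i32_i64 + add_i64 */
    uint64_t bits = (td >> 31) & 0x3;          /* bit 32 (carry out) and bit 31 (sign) */
    return bits != 0x0 && bits != 0x3;
}

int main(void)
{
    printf("%d %d\n",
           add_flags_overflow(0x7fffffff, 1),  /* 1: sum wraps into the sign bit */
           add_flags_overflow(1, 2));          /* 0: small sum */
    return 0;
}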
+
+static void dec_misc(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0, op1;
+ uint32_t ra, rb, rd;
+#ifdef OPENRISC_DISAS
+ uint32_t L6, K5;
+#endif
+ uint32_t I16, I5, I11, N26, tmp;
+ op0 = extract32(insn, 26, 6);
+ op1 = extract32(insn, 24, 2);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+#ifdef OPENRISC_DISAS
+ L6 = extract32(insn, 5, 6);
+ K5 = extract32(insn, 0, 5);
+#endif
+ I16 = extract32(insn, 0, 16);
+ I5 = extract32(insn, 21, 5);
+ I11 = extract32(insn, 0, 11);
+ N26 = extract32(insn, 0, 26);
+ tmp = (I5<<11) + I11;
+
+ switch (op0) {
+ case 0x00: /* l.j */
+ LOG_DIS("l.j %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x01: /* l.jal */
+ LOG_DIS("l.jal %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x03: /* l.bnf */
+ LOG_DIS("l.bnf %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x04: /* l.bf */
+ LOG_DIS("l.bf %d\n", N26);
+ gen_jump(dc, N26, 0, op0);
+ break;
+
+ case 0x05:
+ switch (op1) {
+ case 0x01: /* l.nop */
+ LOG_DIS("l.nop %d\n", I16);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ break;
+
+ case 0x11: /* l.jr */
+ LOG_DIS("l.jr r%d\n", rb);
+ gen_jump(dc, 0, rb, op0);
+ break;
+
+ case 0x12: /* l.jalr */
+ LOG_DIS("l.jalr r%d\n", rb);
+ gen_jump(dc, 0, rb, op0);
+ break;
+
+ case 0x13: /* l.maci */
+ LOG_DIS("l.maci %d, r%d, %d\n", I5, ra, I11);
+ {
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ TCGv_i32 dst = tcg_temp_new_i32();
+ TCGv ttmp = tcg_const_tl(tmp);
+ tcg_gen_mul_tl(dst, cpu_R[ra], ttmp);
+ tcg_gen_ext_i32_i64(t1, dst);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_add_i64(t2, t2, t1);
+ tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_temp_free_i32(dst);
+ tcg_temp_free(ttmp);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ case 0x09: /* l.rfe */
+ LOG_DIS("l.rfe\n");
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_rfe(cpu_env);
+ dc->is_jmp = DISAS_UPDATE;
+#endif
+ }
+ break;
+
+ case 0x1c: /* l.cust1 */
+ LOG_DIS("l.cust1\n");
+ break;
+
+ case 0x1d: /* l.cust2 */
+ LOG_DIS("l.cust2\n");
+ break;
+
+ case 0x1e: /* l.cust3 */
+ LOG_DIS("l.cust3\n");
+ break;
+
+ case 0x1f: /* l.cust4 */
+ LOG_DIS("l.cust4\n");
+ break;
+
+ case 0x3c: /* l.cust5 */
+ LOG_DIS("l.cust5 r%d, r%d, r%d, %d, %d\n", rd, ra, rb, L6, K5);
+ break;
+
+ case 0x3d: /* l.cust6 */
+ LOG_DIS("l.cust6\n");
+ break;
+
+ case 0x3e: /* l.cust7 */
+ LOG_DIS("l.cust7\n");
+ break;
+
+ case 0x3f: /* l.cust8 */
+ LOG_DIS("l.cust8\n");
+ break;
+
+/* Not used yet; enable this block when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x20: l.ld
+ LOG_DIS("l.ld r%d, r%d, %d\n", rd, ra, I16);
+ {
+ check_ob64s(dc);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_addi_i64(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld64(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free_i64(t0);
+ }
+ break;
+#endif*/
+
+ case 0x21: /* l.lwz */
+ LOG_DIS("l.lwz r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld32u(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x22: /* l.lws */
+ LOG_DIS("l.lws r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld32s(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x23: /* l.lbz */
+ LOG_DIS("l.lbz r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld8u(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x24: /* l.lbs */
+ LOG_DIS("l.lbs r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld8s(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x25: /* l.lhz */
+ LOG_DIS("l.lhz r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld16u(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x26: /* l.lhs */
+ LOG_DIS("l.lhs r%d, r%d, %d\n", rd, ra, I16);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(I16, 16));
+ tcg_gen_qemu_ld16s(cpu_R[rd], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x27: /* l.addi */
+ LOG_DIS("l.addi r%d, r%d, %d\n", rd, ra, I16);
+ {
+ int lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
+ tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ case 0x28: /* l.addic */
+ LOG_DIS("l.addic r%d, r%d, %d\n", rd, ra, I16);
+ {
+ int lab = gen_new_label();
+ TCGv_i64 ta = tcg_temp_new_i64();
+ TCGv_i64 td = tcg_temp_local_new_i64();
+ TCGv_i64 tcy = tcg_temp_local_new_i64();
+ TCGv_i32 res = tcg_temp_local_new_i32();
+ TCGv_i32 sr_cy = tcg_temp_local_new_i32();
+ TCGv_i32 sr_ove = tcg_temp_local_new_i32();
+ tcg_gen_extu_i32_i64(ta, cpu_R[ra]);
+ tcg_gen_andi_i32(sr_cy, cpu_sr, SR_CY);
+ tcg_gen_shri_i32(sr_cy, sr_cy, 10);
+ tcg_gen_extu_i32_i64(tcy, sr_cy);
+ tcg_gen_addi_i64(td, ta, sign_extend(I16, 16));
+ tcg_gen_add_i64(td, td, tcy);
+ tcg_gen_trunc_i64_i32(res, td);
+ tcg_gen_shri_i64(td, td, 32);
+ tcg_gen_andi_i64(td, td, 0x3);
+ /* Jump to lab when no overflow. */
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x0, lab);
+ tcg_gen_brcondi_i64(TCG_COND_EQ, td, 0x3, lab);
+ tcg_gen_ori_i32(cpu_sr, cpu_sr, (SR_OV | SR_CY));
+ tcg_gen_andi_i32(sr_ove, cpu_sr, SR_OVE);
+ tcg_gen_brcondi_i32(TCG_COND_NE, sr_ove, SR_OVE, lab);
+ gen_exception(dc, EXCP_RANGE);
+ gen_set_label(lab);
+ tcg_gen_mov_i32(cpu_R[rd], res);
+ tcg_temp_free_i64(ta);
+ tcg_temp_free_i64(td);
+ tcg_temp_free_i64(tcy);
+ tcg_temp_free_i32(res);
+ tcg_temp_free_i32(sr_cy);
+ tcg_temp_free_i32(sr_ove);
+ }
+ break;
+
+ case 0x29: /* l.andi */
+ LOG_DIS("l.andi r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_andi_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16));
+ break;
+
+ case 0x2a: /* l.ori */
+ LOG_DIS("l.ori r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_ori_tl(cpu_R[rd], cpu_R[ra], zero_extend(I16, 16));
+ break;
+
+ case 0x2b: /* l.xori */
+ LOG_DIS("l.xori r%d, r%d, %d\n", rd, ra, I16);
+ tcg_gen_xori_tl(cpu_R[rd], cpu_R[ra], sign_extend(I16, 16));
+ break;
+
+ case 0x2c: /* l.muli */
+ LOG_DIS("l.muli r%d, r%d, %d\n", rd, ra, I16);
+ if (ra != 0 && I16 != 0) {
+ TCGv_i32 im = tcg_const_i32(I16);
+ gen_helper_mul32(cpu_R[rd], cpu_env, cpu_R[ra], im);
+ tcg_temp_free_i32(im);
+ } else {
+ tcg_gen_movi_tl(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x2d: /* l.mfspr */
+ LOG_DIS("l.mfspr r%d, r%d, %d\n", rd, ra, I16);
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ TCGv_i32 ti = tcg_const_i32(I16);
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_mfspr(cpu_R[rd], cpu_env, cpu_R[rd], cpu_R[ra], ti);
+ tcg_temp_free_i32(ti);
+#endif
+ }
+ break;
+
+ case 0x30: /* l.mtspr */
+ LOG_DIS("l.mtspr %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ TCGv_i32 im = tcg_const_i32(tmp);
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ gen_helper_mtspr(cpu_env, cpu_R[ra], cpu_R[rb], im);
+ tcg_temp_free_i32(im);
+#endif
+ }
+ break;
+
+/* Not used yet; enable this block when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x34: l.sd
+ LOG_DIS("l.sd %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+ check_ob64s(dc);
+ TCGv_i64 t0 = tcg_temp_new_i64();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16));
+ tcg_gen_qemu_st64(cpu_R[rb], t0, dc->mem_idx);
+ tcg_temp_free_i64(t0);
+ }
+ break;
+#endif*/
+
+ case 0x35: /* l.sw */
+ LOG_DIS("l.sw %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16));
+ tcg_gen_qemu_st32(cpu_R[rb], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x36: /* l.sb */
+ LOG_DIS("l.sb %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16));
+ tcg_gen_qemu_st8(cpu_R[rb], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ case 0x37: /* l.sh */
+ LOG_DIS("l.sh %d, r%d, r%d, %d\n", I5, ra, rb, I11);
+ {
+ TCGv t0 = tcg_temp_new();
+ tcg_gen_addi_tl(t0, cpu_R[ra], sign_extend(tmp, 16));
+ tcg_gen_qemu_st16(cpu_R[rb], t0, dc->mem_idx);
+ tcg_temp_free(t0);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_mac(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb;
+ op0 = extract32(insn, 0, 4);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+
+ switch (op0) {
+ case 0x0001: /* l.mac */
+ LOG_DIS("l.mac r%d, r%d\n", ra, rb);
+ {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_mul_tl(t0, cpu_R[ra], cpu_R[rb]);
+ tcg_gen_ext_i32_i64(t1, t0);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_add_i64(t2, t2, t1);
+ tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ case 0x0002: /* l.msb */
+ LOG_DIS("l.msb r%d, r%d\n", ra, rb);
+ {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ TCGv_i64 t2 = tcg_temp_new_i64();
+ tcg_gen_mul_tl(t0, cpu_R[ra], cpu_R[rb]);
+ tcg_gen_ext_i32_i64(t1, t0);
+ tcg_gen_concat_i32_i64(t2, maclo, machi);
+ tcg_gen_sub_i64(t2, t2, t1);
+ tcg_gen_trunc_i64_i32(maclo, t2);
+ tcg_gen_shri_i64(t2, t2, 32);
+ tcg_gen_trunc_i64_i32(machi, t2);
+ tcg_temp_free_i32(t0);
+ tcg_temp_free_i64(t1);
+ tcg_temp_free_i64(t2);
+ }
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_logic(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t rd, ra, L6;
+ op0 = extract32(insn, 6, 2);
+ rd = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ L6 = extract32(insn, 0, 6);
+
+ switch (op0) {
+ case 0x00: /* l.slli */
+ LOG_DIS("l.slli r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_shli_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x01: /* l.srli */
+ LOG_DIS("l.srli r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_shri_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x02: /* l.srai */
+ LOG_DIS("l.srai r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_sari_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ case 0x03: /* l.rori */
+ LOG_DIS("l.rori r%d, r%d, %d\n", rd, ra, L6);
+ tcg_gen_rotri_tl(cpu_R[rd], cpu_R[ra], (L6 & 0x1f));
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_M(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t rd;
+ uint32_t K16;
+ op0 = extract32(insn, 16, 1);
+ rd = extract32(insn, 21, 5);
+ K16 = extract32(insn, 0, 16);
+
+ switch (op0) {
+ case 0x0: /* l.movhi */
+ LOG_DIS("l.movhi r%d, %d\n", rd, K16);
+ tcg_gen_movi_tl(cpu_R[rd], (K16 << 16));
+ break;
+
+ case 0x1: /* l.macrc */
+ LOG_DIS("l.macrc r%d\n", rd);
+ tcg_gen_mov_tl(cpu_R[rd], maclo);
+ tcg_gen_movi_tl(maclo, 0x0);
+ tcg_gen_movi_tl(machi, 0x0);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_comp(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb;
+
+ op0 = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+
+ tcg_gen_movi_i32(env_btaken, 0x0);
+ /* unsigned integers */
+ tcg_gen_ext32u_tl(cpu_R[ra], cpu_R[ra]);
+ tcg_gen_ext32u_tl(cpu_R[rb], cpu_R[rb]);
+
+ switch (op0) {
+ case 0x0: /* l.sfeq */
+ LOG_DIS("l.sfeq r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_EQ, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1: /* l.sfne */
+ LOG_DIS("l.sfne r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_NE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x2: /* l.sfgtu */
+ LOG_DIS("l.sfgtu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GTU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x3: /* l.sfgeu */
+ LOG_DIS("l.sfgeu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GEU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x4: /* l.sfltu */
+ LOG_DIS("l.sfltu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LTU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x5: /* l.sfleu */
+ LOG_DIS("l.sfleu r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LEU, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xa: /* l.sfgts */
+ LOG_DIS("l.sfgts r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GT, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xb: /* l.sfges */
+ LOG_DIS("l.sfges r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_GE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xc: /* l.sflts */
+ LOG_DIS("l.sflts r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LT, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0xd: /* l.sfles */
+ LOG_DIS("l.sfles r%d, r%d\n", ra, rb);
+ tcg_gen_setcond_tl(TCG_COND_LE, env_btaken, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void dec_compi(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, I16;
+
+ op0 = extract32(insn, 21, 5);
+ ra = extract32(insn, 16, 5);
+ I16 = extract32(insn, 0, 16);
+
+ tcg_gen_movi_i32(env_btaken, 0x0);
+ I16 = sign_extend(I16, 16);
+
+ switch (op0) {
+ case 0x0: /* l.sfeqi */
+ LOG_DIS("l.sfeqi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_EQ, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x1: /* l.sfnei */
+ LOG_DIS("l.sfnei r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_NE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x2: /* l.sfgtui */
+ LOG_DIS("l.sfgtui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GTU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x3: /* l.sfgeui */
+ LOG_DIS("l.sfgeui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GEU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x4: /* l.sfltui */
+ LOG_DIS("l.sfltui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LTU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0x5: /* l.sfleui */
+ LOG_DIS("l.sfleui r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LEU, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xa: /* l.sfgtsi */
+ LOG_DIS("l.sfgtsi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GT, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xb: /* l.sfgesi */
+ LOG_DIS("l.sfgesi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_GE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xc: /* l.sfltsi */
+ LOG_DIS("l.sfltsi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LT, env_btaken, cpu_R[ra], I16);
+ break;
+
+ case 0xd: /* l.sflesi */
+ LOG_DIS("l.sflesi r%d, %d\n", ra, I16);
+ tcg_gen_setcondi_tl(TCG_COND_LE, env_btaken, cpu_R[ra], I16);
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void dec_sys(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+#ifdef OPENRISC_DISAS
+ uint32_t K16;
+#endif
+ op0 = extract32(insn, 16, 8);
+#ifdef OPENRISC_DISAS
+ K16 = extract32(insn, 0, 16);
+#endif
+
+ switch (op0) {
+ case 0x000: /* l.sys */
+ LOG_DIS("l.sys %d\n", K16);
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_SYSCALL);
+ dc->is_jmp = DISAS_UPDATE;
+ break;
+
+ case 0x100: /* l.trap */
+ LOG_DIS("l.trap %d\n", K16);
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_TRAP);
+#endif
+ break;
+
+ case 0x300: /* l.csync */
+ LOG_DIS("l.csync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ case 0x200: /* l.msync */
+ LOG_DIS("l.msync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ case 0x270: /* l.psync */
+ LOG_DIS("l.psync\n");
+#if defined(CONFIG_USER_ONLY)
+ return;
+#else
+ if (dc->mem_idx == MMU_USER_IDX) {
+ gen_illegal_exception(dc);
+ return;
+ }
+#endif
+ break;
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+}
+
+static void dec_float(DisasContext *dc, uint32_t insn)
+{
+ uint32_t op0;
+ uint32_t ra, rb, rd;
+ op0 = extract32(insn, 0, 8);
+ ra = extract32(insn, 16, 5);
+ rb = extract32(insn, 11, 5);
+ rd = extract32(insn, 21, 5);
+
+ switch (op0) {
+ case 0x00: /* lf.add.s */
+ LOG_DIS("lf.add.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_add_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x01: /* lf.sub.s */
+ LOG_DIS("lf.sub.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_sub_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x02: /* lf.mul.s */
+ LOG_DIS("lf.mul.s r%d, r%d, r%d\n", rd, ra, rb);
+ if (ra != 0 && rb != 0) {
+ gen_helper_float_mul_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF);
+ tcg_gen_movi_i32(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x03: /* lf.div.s */
+ LOG_DIS("lf.div.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_div_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x04: /* lf.itof.s */
+ LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
+ gen_helper_itofs(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x05: /* lf.ftoi.s */
+ LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
+ gen_helper_ftois(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x06: /* lf.rem.s */
+ LOG_DIS("lf.rem.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_rem_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x07: /* lf.madd.s */
+ LOG_DIS("lf.madd.s r%d, r%d, r%d\n", rd, ra, rb);
+ gen_helper_float_muladd_s(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x08: /* lf.sfeq.s */
+ LOG_DIS("lf.sfeq.s r%d, r%d\n", ra, rb);
+ gen_helper_float_eq_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x09: /* lf.sfne.s */
+ LOG_DIS("lf.sfne.s r%d, r%d\n", ra, rb);
+ gen_helper_float_ne_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0a: /* lf.sfgt.s */
+ LOG_DIS("lf.sfgt.s r%d, r%d\n", ra, rb);
+ gen_helper_float_gt_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0b: /* lf.sfge.s */
+ LOG_DIS("lf.sfge.s r%d, r%d\n", ra, rb);
+ gen_helper_float_ge_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0c: /* lf.sflt.s */
+ LOG_DIS("lf.sflt.s r%d, r%d\n", ra, rb);
+ gen_helper_float_lt_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x0d: /* lf.sfle.s */
+ LOG_DIS("lf.sfle.s r%d, r%d\n", ra, rb);
+ gen_helper_float_le_s(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+/* Not used yet; enable this block when or64 support is needed. */
+/*#ifdef TARGET_OPENRISC64
+ case 0x10: lf.add.d
+ LOG_DIS("lf.add.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_add_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x11: lf.sub.d
+ LOG_DIS("lf.sub.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_sub_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x12: lf.mul.d
+ LOG_DIS("lf.mul.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ if (ra != 0 && rb != 0) {
+ gen_helper_float_mul_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ } else {
+ tcg_gen_ori_tl(fpcsr, fpcsr, FPCSR_ZF);
+ tcg_gen_movi_i64(cpu_R[rd], 0x0);
+ }
+ break;
+
+ case 0x13: lf.div.d
+ LOG_DIS("lf.div.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_div_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x14: lf.itof.d
+ LOG_DIS("lf.itof r%d, r%d\n", rd, ra);
+ check_of64s(dc);
+ gen_helper_itofd(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x15: lf.ftoi.d
+ LOG_DIS("lf.ftoi r%d, r%d\n", rd, ra);
+ check_of64s(dc);
+ gen_helper_ftoid(cpu_R[rd], cpu_env, cpu_R[ra]);
+ break;
+
+ case 0x16: lf.rem.d
+ LOG_DIS("lf.rem.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_rem_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x17: lf.madd.d
+ LOG_DIS("lf.madd.d r%d, r%d, r%d\n", rd, ra, rb);
+ check_of64s(dc);
+ gen_helper_float_muladd_d(cpu_R[rd], cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x18: lf.sfeq.d
+ LOG_DIS("lf.sfeq.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_eq_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1a: lf.sfgt.d
+ LOG_DIS("lf.sfgt.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_gt_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1b: lf.sfge.d
+ LOG_DIS("lf.sfge.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_ge_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x19: lf.sfne.d
+ LOG_DIS("lf.sfne.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_ne_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1c: lf.sflt.d
+ LOG_DIS("lf.sflt.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_lt_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+
+ case 0x1d: lf.sfle.d
+ LOG_DIS("lf.sfle.d r%d, r%d\n", ra, rb);
+ check_of64s(dc);
+ gen_helper_float_le_d(env_btaken, cpu_env, cpu_R[ra], cpu_R[rb]);
+ break;
+#endif*/
+
+ default:
+ gen_illegal_exception(dc);
+ break;
+ }
+ wb_SR_F();
+}
+
+static void disas_openrisc_insn(DisasContext *dc, OpenRISCCPU *cpu)
+{
+ uint32_t op0;
+ uint32_t insn;
+ insn = cpu_ldl_code(&cpu->env, dc->pc);
+ op0 = extract32(insn, 26, 6);
+
+ switch (op0) {
+ case 0x06:
+ dec_M(dc, insn);
+ break;
+
+ case 0x08:
+ dec_sys(dc, insn);
+ break;
+
+ case 0x2e:
+ dec_logic(dc, insn);
+ break;
+
+ case 0x2f:
+ dec_compi(dc, insn);
+ break;
+
+ case 0x31:
+ dec_mac(dc, insn);
+ break;
+
+ case 0x32:
+ dec_float(dc, insn);
+ break;
+
+ case 0x38:
+ dec_calc(dc, insn);
+ break;
+
+ case 0x39:
+ dec_comp(dc, insn);
+ break;
+
+ default:
+ dec_misc(dc, insn);
+ break;
+ }
+}
+
+static void check_breakpoint(OpenRISCCPU *cpu, DisasContext *dc)
+{
+ CPUBreakpoint *bp;
+
+ if (unlikely(!QTAILQ_EMPTY(&cpu->env.breakpoints))) {
+ QTAILQ_FOREACH(bp, &cpu->env.breakpoints, entry) {
+ if (bp->pc == dc->pc) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ gen_exception(dc, EXCP_DEBUG);
+ dc->is_jmp = DISAS_UPDATE;
+ }
+ }
+ }
+}
+
+static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
+ TranslationBlock *tb,
+ int search_pc)
+{
+ struct DisasContext ctx, *dc = &ctx;
+ uint16_t *gen_opc_end;
+ uint32_t pc_start;
+ int j, k;
+ uint32_t next_page_start;
+ int num_insns;
+ int max_insns;
+
+ qemu_log_try_set_file(stderr);
+
+ pc_start = tb->pc;
+ dc->tb = tb;
+
+ gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
+ dc->is_jmp = DISAS_NEXT;
+ dc->ppc = pc_start;
+ dc->pc = pc_start;
+ dc->flags = cpu->env.cpucfgr;
+ dc->mem_idx = cpu_mmu_index(&cpu->env);
+ dc->synced_flags = dc->tb_flags = tb->flags;
+ dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
+ dc->singlestep_enabled = cpu->env.singlestep_enabled;
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("-----------------------------------------\n");
+ log_cpu_state(&cpu->env, 0);
+ }
+
+ next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+ k = -1;
+ num_insns = 0;
+ max_insns = tb->cflags & CF_COUNT_MASK;
+
+ if (max_insns == 0) {
+ max_insns = CF_COUNT_MASK;
+ }
+
+ gen_icount_start();
+
+ do {
+ check_breakpoint(cpu, dc);
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ if (k < j) {
+ k++;
+ while (k < j) {
+ gen_opc_instr_start[k++] = 0;
+ }
+ }
+ gen_opc_pc[k] = dc->pc;
+ gen_opc_instr_start[k] = 1;
+ gen_opc_icount[k] = num_insns;
+ }
+
+ if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP))) {
+ tcg_gen_debug_insn_start(dc->pc);
+ }
+
+ if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO)) {
+ gen_io_start();
+ }
+ dc->ppc = dc->pc - 4;
+ dc->npc = dc->pc + 4;
+ tcg_gen_movi_tl(cpu_ppc, dc->ppc);
+ tcg_gen_movi_tl(cpu_npc, dc->npc);
+ disas_openrisc_insn(dc, cpu);
+ dc->pc = dc->npc;
+ num_insns++;
+ /* delay slot */
+ if (dc->delayed_branch) {
+ dc->delayed_branch--;
+ if (!dc->delayed_branch) {
+ dc->tb_flags &= ~D_FLAG;
+ gen_sync_flags(dc);
+ tcg_gen_mov_tl(cpu_pc, jmp_pc);
+ tcg_gen_mov_tl(cpu_npc, jmp_pc);
+ tcg_gen_movi_tl(jmp_pc, 0);
+ tcg_gen_exit_tb(0);
+ dc->is_jmp = DISAS_JUMP;
+ break;
+ }
+ }
+ } while (!dc->is_jmp
+ && gen_opc_ptr < gen_opc_end
+ && !cpu->env.singlestep_enabled
+ && !singlestep
+ && (dc->pc < next_page_start)
+ && num_insns < max_insns);
+
+ if (tb->cflags & CF_LAST_IO) {
+ gen_io_end();
+ }
+ if (dc->is_jmp == DISAS_NEXT) {
+ dc->is_jmp = DISAS_UPDATE;
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ if (unlikely(cpu->env.singlestep_enabled)) {
+ if (dc->is_jmp == DISAS_NEXT) {
+ tcg_gen_movi_tl(cpu_pc, dc->pc);
+ }
+ gen_exception(dc, EXCP_DEBUG);
+ } else {
+ switch (dc->is_jmp) {
+ case DISAS_NEXT:
+ gen_goto_tb(dc, 0, dc->pc);
+ break;
+ default:
+ case DISAS_JUMP:
+ break;
+ case DISAS_UPDATE:
+ /* indicate that the hash table must be used
+ to find the next TB */
+ tcg_gen_exit_tb(0);
+ break;
+ case DISAS_TB_JUMP:
+ /* nothing more to generate */
+ break;
+ }
+ }
+
+ gen_icount_end(tb, num_insns);
+ *gen_opc_ptr = INDEX_op_end;
+ if (search_pc) {
+ j = gen_opc_ptr - gen_opc_buf;
+ k++;
+ while (k <= j) {
+ gen_opc_instr_start[k++] = 0;
+ }
+ } else {
+ tb->size = dc->pc - pc_start;
+ tb->icount = num_insns;
+ }
+
+#ifdef DEBUG_DISAS
+ if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
+ qemu_log("\n");
+ log_target_disas(pc_start, dc->pc - pc_start, 0);
+ qemu_log("\nisize=%d osize=%td\n",
+ dc->pc - pc_start, gen_opc_ptr - gen_opc_buf);
+ }
+#endif
+}
+
+void gen_intermediate_code(CPUOpenRISCState *env, struct TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(openrisc_env_get_cpu(env), tb, 0);
+}
+
+void gen_intermediate_code_pc(CPUOpenRISCState *env,
+ struct TranslationBlock *tb)
+{
+ gen_intermediate_code_internal(openrisc_env_get_cpu(env), tb, 1);
+}
+
+void cpu_dump_state(CPUOpenRISCState *env, FILE *f,
+ fprintf_function cpu_fprintf,
+ int flags)
+{
+ int i;
+ uint32_t *regs = env->gpr;
+ cpu_fprintf(f, "PC=%08x\n", env->pc);
+ for (i = 0; i < 32; ++i) {
+ cpu_fprintf(f, "R%02d=%08x%c", i, regs[i],
+ (i % 4) == 3 ? '\n' : ' ');
+ }
+}
+
+void restore_state_to_opc(CPUOpenRISCState *env, TranslationBlock *tb,
+ int pc_pos)
+{
+ env->pc = gen_opc_pc[pc_pos];
+}
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index b6ef72d16b..829e180f8b 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1067,7 +1067,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
return NULL;
}
- len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE);
+ len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
/* FIXME: round this up to page size */
table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
@@ -1090,7 +1090,7 @@ int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t window_size)
return -1;
}
- len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE)*sizeof(VIOsPAPR_RTCE);
+ len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE);
if ((munmap(table, len) < 0) ||
(close(fd) < 0)) {
fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index ec08dd0474..47008c24f2 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -135,6 +135,41 @@ int kvm_arch_get_registers(CPUS390XState *env)
return 0;
}
+/*
+ * Legacy layout for s390:
+ * Older S390 KVM requires the topmost vma of the RAM to be
+ * smaller than a system-defined value, which is at least 256GB.
+ * Larger systems have larger values. We put the guest between
+ * the end of the data segment (system break) and this value. We
+ * use 32GB as a base to have enough room for the system break
+ * to grow. We also have to use MAP parameters that avoid
+ * read-only mapping of guest pages.
+ */
+static void *legacy_s390_alloc(ram_addr_t size)
+{
+ void *mem;
+
+ mem = mmap((void *) 0x800000000ULL, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
+ if (mem == MAP_FAILED) {
+ fprintf(stderr, "Allocating RAM failed\n");
+ abort();
+ }
+ return mem;
+}
+
+void *kvm_arch_vmalloc(ram_addr_t size)
+{
+ /* Can we use the standard allocation? */
+ if (kvm_check_extension(kvm_state, KVM_CAP_S390_GMAP) &&
+ kvm_check_extension(kvm_state, KVM_CAP_S390_COW)) {
+ return NULL;
+ } else {
+ return legacy_s390_alloc(size);
+ }
+}
+
int kvm_arch_insert_sw_breakpoint(CPUS390XState *env, struct kvm_sw_breakpoint *bp)
{
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
diff --git a/target-xtensa/cpu.h b/target-xtensa/cpu.h
index f7db116400..177094ae9a 100644
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -351,6 +351,12 @@ typedef struct CPUXtensaState {
#define cpu_signal_handler cpu_xtensa_signal_handler
#define cpu_list xtensa_cpu_list
+#ifdef TARGET_WORDS_BIGENDIAN
+#define XTENSA_DEFAULT_CPU_MODEL "fsf"
+#else
+#define XTENSA_DEFAULT_CPU_MODEL "dc232b"
+#endif
+
XtensaCPU *cpu_xtensa_init(const char *cpu_model);
static inline CPUXtensaState *cpu_init(const char *cpu_model)
diff --git a/target-xtensa/translate.c b/target-xtensa/translate.c
index b883e6bb72..1900bd5d44 100644
--- a/target-xtensa/translate.c
+++ b/target-xtensa/translate.c
@@ -2366,10 +2366,18 @@ static void disas_xtensa_insn(DisasContext *dc)
case 5: /*BBC*/ /*BBS*/
gen_window_check2(dc, RRI8_S, RRI8_T);
{
- TCGv_i32 bit = tcg_const_i32(1);
+#ifdef TARGET_WORDS_BIGENDIAN
+ TCGv_i32 bit = tcg_const_i32(0x80000000);
+#else
+ TCGv_i32 bit = tcg_const_i32(0x00000001);
+#endif
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_R[RRI8_T], 0x1f);
+#ifdef TARGET_WORDS_BIGENDIAN
+ tcg_gen_shr_i32(bit, bit, tmp);
+#else
tcg_gen_shl_i32(bit, bit, tmp);
+#endif
tcg_gen_and_i32(tmp, cpu_R[RRI8_S], bit);
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
tcg_temp_free(tmp);
@@ -2383,7 +2391,11 @@ static void disas_xtensa_insn(DisasContext *dc)
{
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_andi_i32(tmp, cpu_R[RRI8_S],
- 1 << (((RRI8_R & 1) << 4) | RRI8_T));
+#ifdef TARGET_WORDS_BIGENDIAN
+ 0x80000000 >> (((RRI8_R & 1) << 4) | RRI8_T));
+#else
+ 0x00000001 << (((RRI8_R & 1) << 4) | RRI8_T));
+#endif
gen_brcondi(dc, eq_ne, tmp, 0, 4 + RRI8_IMM8_SE);
tcg_temp_free(tmp);
}
diff --git a/targphys.h b/targphys.h
index 95648d6882..bd4938fc02 100644
--- a/targphys.h
+++ b/targphys.h
@@ -11,10 +11,26 @@
typedef uint32_t target_phys_addr_t;
#define TARGET_PHYS_ADDR_MAX UINT32_MAX
#define TARGET_FMT_plx "%08x"
+/* Format strings for printing target_phys_addr_t types.
+ * These are recommended over the less flexible TARGET_FMT_plx,
+ * which is retained for the benefit of existing code.
+ */
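+/* A minimal usage sketch (addr is an assumed variable of type
+ * target_phys_addr_t, not something defined in this header):
+ *
+ *     printf("addr=0x%08" TARGET_PRIxPHYS "\n", addr);
+ *
+ * Each macro expands to the inttypes.h length modifier matching
+ * TARGET_PHYS_ADDR_BITS, so the same format string works for both
+ * 32-bit and 64-bit physical address builds.
+ */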
+#define TARGET_PRIdPHYS PRId32
+#define TARGET_PRIiPHYS PRIi32
+#define TARGET_PRIoPHYS PRIo32
+#define TARGET_PRIuPHYS PRIu32
+#define TARGET_PRIxPHYS PRIx32
+#define TARGET_PRIXPHYS PRIX32
#elif TARGET_PHYS_ADDR_BITS == 64
typedef uint64_t target_phys_addr_t;
#define TARGET_PHYS_ADDR_MAX UINT64_MAX
#define TARGET_FMT_plx "%016" PRIx64
+#define TARGET_PRIdPHYS PRId64
+#define TARGET_PRIiPHYS PRIi64
+#define TARGET_PRIoPHYS PRIo64
+#define TARGET_PRIuPHYS PRIu64
+#define TARGET_PRIxPHYS PRIx64
+#define TARGET_PRIXPHYS PRIX64
#endif
#endif
diff --git a/tci.c b/tci.c
index a412a4ed93..c79350d242 100644
--- a/tci.c
+++ b/tci.c
@@ -1014,7 +1014,6 @@ tcg_target_ulong tcg_qemu_tb_exec(CPUArchState *cpustate, uint8_t *tb_ptr)
#endif
#if TCG_TARGET_HAS_bswap64_i64
case INDEX_op_bswap64_i64:
- TODO();
t0 = *tb_ptr++;
t1 = tci_read_r64(&tb_ptr);
tci_write_reg64(t0, bswap64(t1));
diff --git a/tests/Makefile b/tests/Makefile
index d66ab196a7..f3f4159c25 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -14,12 +14,14 @@ check-unit-y += tests/test-string-input-visitor$(EXESUF)
check-unit-y += tests/test-string-output-visitor$(EXESUF)
check-unit-y += tests/test-coroutine$(EXESUF)
check-unit-y += tests/test-visitor-serialization$(EXESUF)
+check-unit-y += tests/test-iov$(EXESUF)
check-block-$(CONFIG_POSIX) += tests/qemu-iotests-quick.sh
# All QTests for now are POSIX-only, but the dependencies are
# really in libqtest, not in the testcases themselves.
check-qtest-i386-y = tests/fdc-test$(EXESUF)
+check-qtest-i386-y += tests/hd-geo-test$(EXESUF)
check-qtest-i386-y += tests/rtc-test$(EXESUF)
check-qtest-x86_64-y = $(check-qtest-i386-y)
check-qtest-sparc-y = tests/m48t59-test$(EXESUF)
@@ -47,6 +49,7 @@ tests/check-qlist$(EXESUF): tests/check-qlist.o qlist.o qint.o $(tools-obj-y)
tests/check-qfloat$(EXESUF): tests/check-qfloat.o qfloat.o $(tools-obj-y)
tests/check-qjson$(EXESUF): tests/check-qjson.o $(qobject-obj-y) $(tools-obj-y)
tests/test-coroutine$(EXESUF): tests/test-coroutine.o $(coroutine-obj-y) $(tools-obj-y)
+tests/test-iov$(EXESUF): tests/test-iov.o iov.o
tests/test-qapi-types.c tests/test-qapi-types.h :\
$(SRC_PATH)/qapi-schema-test.json $(SRC_PATH)/scripts/qapi-types.py
@@ -70,6 +73,7 @@ tests/test-visitor-serialization$(EXESUF): tests/test-visitor-serialization.o $(
tests/rtc-test$(EXESUF): tests/rtc-test.o $(trace-obj-y)
tests/m48t59-test$(EXESUF): tests/m48t59-test.o $(trace-obj-y)
tests/fdc-test$(EXESUF): tests/fdc-test.o tests/libqtest.o $(trace-obj-y)
+tests/hd-geo-test$(EXESUF): tests/hd-geo-test.o tests/libqtest.o $(trace-obj-y)
# QTest rules
@@ -143,3 +147,5 @@ check-qtest: $(patsubst %,check-qtest-%, $(QTEST_TARGETS))
check-unit: $(patsubst %,check-%, $(check-unit-y))
check-block: $(patsubst %,check-%, $(check-block-y))
check: check-unit check-qtest
+
+-include $(wildcard tests/*.d)
diff --git a/tests/fdc-test.c b/tests/fdc-test.c
index 610e2f1e26..fa7441110d 100644
--- a/tests/fdc-test.c
+++ b/tests/fdc-test.c
@@ -47,9 +47,11 @@ enum {
};
enum {
- CMD_SENSE_INT = 0x08,
- CMD_SEEK = 0x0f,
- CMD_READ = 0xe6,
+ CMD_SENSE_INT = 0x08,
+ CMD_SEEK = 0x0f,
+ CMD_READ = 0xe6,
+ CMD_RELATIVE_SEEK_OUT = 0x8f,
+ CMD_RELATIVE_SEEK_IN = 0xcf,
};
enum {
@@ -91,12 +93,20 @@ static uint8_t floppy_recv(void)
return inb(FLOPPY_BASE + reg_fifo);
}
-static void ack_irq(void)
+/* pcn: Present Cylinder Number */
+static void ack_irq(uint8_t *pcn)
{
+ uint8_t ret;
+
g_assert(get_irq(FLOPPY_IRQ));
floppy_send(CMD_SENSE_INT);
floppy_recv();
- floppy_recv();
+
+ ret = floppy_recv();
+ if (pcn != NULL) {
+ *pcn = ret;
+ }
+
g_assert(!get_irq(FLOPPY_IRQ));
}
@@ -142,7 +152,7 @@ static uint8_t send_read_command(void)
}
st0 = floppy_recv();
- if (st0 != 0x40) {
+ if (st0 != 0x60) {
ret = 1;
}
@@ -156,19 +166,16 @@ static uint8_t send_read_command(void)
return ret;
}
-static void send_step_pulse(void)
+static void send_seek(int cyl)
{
int drive = 0;
int head = 0;
- static int cyl = 0;
floppy_send(CMD_SEEK);
floppy_send(head << 2 | drive);
g_assert(!get_irq(FLOPPY_IRQ));
floppy_send(cyl);
- ack_irq();
-
- cyl = (cyl + 1) % 4;
+ ack_irq(NULL);
}
static uint8_t cmos_read(uint8_t reg)
@@ -195,8 +202,7 @@ static void test_no_media_on_start(void)
assert_bit_set(dir, DSKCHG);
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
- send_step_pulse();
- send_step_pulse();
+ send_seek(1);
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
dir = inb(FLOPPY_BASE + reg_dir);
@@ -227,7 +233,14 @@ static void test_media_change(void)
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
- send_step_pulse();
+ send_seek(0);
+ dir = inb(FLOPPY_BASE + reg_dir);
+ assert_bit_set(dir, DSKCHG);
+ dir = inb(FLOPPY_BASE + reg_dir);
+ assert_bit_set(dir, DSKCHG);
+
+ /* Step to next track should clear DSKCHG bit. */
+ send_seek(1);
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_clear(dir, DSKCHG);
dir = inb(FLOPPY_BASE + reg_dir);
@@ -243,11 +256,68 @@ static void test_media_change(void)
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
- send_step_pulse();
+ send_seek(0);
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
dir = inb(FLOPPY_BASE + reg_dir);
assert_bit_set(dir, DSKCHG);
+
+ send_seek(1);
+ dir = inb(FLOPPY_BASE + reg_dir);
+ assert_bit_set(dir, DSKCHG);
+ dir = inb(FLOPPY_BASE + reg_dir);
+ assert_bit_set(dir, DSKCHG);
+}
+
+static void test_sense_interrupt(void)
+{
+ int drive = 0;
+ int head = 0;
+ int cyl = 0;
+ int ret = 0;
+
+ floppy_send(CMD_SENSE_INT);
+ ret = floppy_recv();
+ g_assert(ret == 0x80);
+
+ floppy_send(CMD_SEEK);
+ floppy_send(head << 2 | drive);
+ g_assert(!get_irq(FLOPPY_IRQ));
+ floppy_send(cyl);
+
+ floppy_send(CMD_SENSE_INT);
+ ret = floppy_recv();
+ g_assert(ret == 0x20);
+ floppy_recv();
+}
+
+static void test_relative_seek(void)
+{
+ uint8_t drive = 0;
+ uint8_t head = 0;
+ uint8_t cyl = 1;
+ uint8_t pcn;
+
+ /* Send seek to track 0 */
+ send_seek(0);
+
+ /* Send relative seek to increase track by 1 */
+ floppy_send(CMD_RELATIVE_SEEK_IN);
+ floppy_send(head << 2 | drive);
+ g_assert(!get_irq(FLOPPY_IRQ));
+ floppy_send(cyl);
+
+ ack_irq(&pcn);
+ g_assert(pcn == 1);
+
+ /* Send relative seek to decrease track by 1 */
+ floppy_send(CMD_RELATIVE_SEEK_OUT);
+ floppy_send(head << 2 | drive);
+ g_assert(!get_irq(FLOPPY_IRQ));
+ floppy_send(cyl);
+
+ ack_irq(&pcn);
+ g_assert(pcn == 0);
}
/* success if no crash or abort */
@@ -297,6 +367,8 @@ int main(int argc, char **argv)
qtest_add_func("/fdc/no_media_on_start", test_no_media_on_start);
qtest_add_func("/fdc/read_without_media", test_read_without_media);
qtest_add_func("/fdc/media_change", test_media_change);
+ qtest_add_func("/fdc/sense_interrupt", test_sense_interrupt);
+ qtest_add_func("/fdc/relative_seek", test_relative_seek);
qtest_add_func("/fdc/fuzz-registers", fuzz_registers);
ret = g_test_run();
diff --git a/tests/hd-geo-test.c b/tests/hd-geo-test.c
new file mode 100644
index 0000000000..9a31e8587f
--- /dev/null
+++ b/tests/hd-geo-test.c
@@ -0,0 +1,428 @@
+/*
+ * Hard disk geometry test cases.
+ *
+ * Copyright (c) 2012 Red Hat Inc.
+ *
+ * Authors:
+ * Markus Armbruster <armbru@redhat.com>,
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+/*
+ * Covers only IDE and tests only CMOS contents. Better than nothing.
+ * Improvements welcome.
+ */
+
+#include <glib.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include "qemu-common.h"
+#include "libqtest.h"
+
+static const char test_image[] = "/tmp/qtest.XXXXXX";
+
+static char *create_test_img(int secs)
+{
+ char *template = strdup("/tmp/qtest.XXXXXX");
+ int fd, ret;
+
+ fd = mkstemp(template);
+ g_assert(fd >= 0);
+ ret = ftruncate(fd, (off_t)secs * 512);
+ g_assert(ret == 0);
+ close(fd);
+ return template;
+}
+
+typedef struct {
+ int cyls, heads, secs, trans;
+} CHST;
+
+typedef enum {
+ mbr_blank, mbr_lba, mbr_chs,
+ mbr_last
+} MBRcontents;
+
+typedef enum {
+ /* order is relevant */
+ backend_small, backend_large, backend_empty,
+ backend_last
+} Backend;
+
+static const int img_secs[backend_last] = {
+ [backend_small] = 61440,
+ [backend_large] = 8388608,
+ [backend_empty] = -1,
+};
+
+static const CHST hd_chst[backend_last][mbr_last] = {
+ [backend_small] = {
+ [mbr_blank] = { 60, 16, 63, 0 },
+ [mbr_lba] = { 60, 16, 63, 2 },
+ [mbr_chs] = { 60, 16, 63, 0 }
+ },
+ [backend_large] = {
+ [mbr_blank] = { 8322, 16, 63, 1 },
+ [mbr_lba] = { 8322, 16, 63, 1 },
+ [mbr_chs] = { 8322, 16, 63, 0 }
+ },
+};
+
+static const char *img_file_name[backend_last];
+
+static const CHST *cur_ide[4];
+
+static bool is_hd(const CHST *expected_chst)
+{
+ return expected_chst && expected_chst->cyls;
+}
+
+static void test_cmos_byte(int reg, int expected)
+{
+ enum { cmos_base = 0x70 };
+ int actual;
+
+ outb(cmos_base + 0, reg);
+ actual = inb(cmos_base + 1);
+ g_assert(actual == expected);
+}
+
+static void test_cmos_bytes(int reg0, int n, uint8_t expected[])
+{
+ int i;
+
+ for (i = 0; i < n; i++) {
+ test_cmos_byte(reg0 + i, expected[i]);
+ }
+}
+
+static void test_cmos_disk_data(void)
+{
+ test_cmos_byte(0x12,
+ (is_hd(cur_ide[0]) ? 0xf0 : 0) |
+ (is_hd(cur_ide[1]) ? 0x0f : 0));
+}
+
+static void test_cmos_drive_cyl(int reg0, const CHST *expected_chst)
+{
+ if (is_hd(expected_chst)) {
+ int c = expected_chst->cyls;
+ int h = expected_chst->heads;
+ int s = expected_chst->secs;
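+ /* The nine bytes below follow the classic AT CMOS drive parameter
+ layout: cylinders (lo, hi), heads, write precompensation (lo, hi;
+ 0xffff meaning none), a control byte (bit 3 set when there are more
+ than 8 heads), landing zone (lo, hi; reusing the cylinder count),
+ and sectors per track. */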
+ uint8_t expected_bytes[9] = {
+ c & 0xff, c >> 8, h, 0xff, 0xff, 0xc0 | ((h > 8) << 3),
+ c & 0xff, c >> 8, s
+ };
+ test_cmos_bytes(reg0, 9, expected_bytes);
+ } else {
+ int i;
+
+ for (i = 0; i < 9; i++) {
+ test_cmos_byte(reg0 + i, 0);
+ }
+ }
+}
+
+static void test_cmos_drive1(void)
+{
+ test_cmos_byte(0x19, is_hd(cur_ide[0]) ? 47 : 0);
+ test_cmos_drive_cyl(0x1b, cur_ide[0]);
+}
+
+static void test_cmos_drive2(void)
+{
+ test_cmos_byte(0x1a, is_hd(cur_ide[1]) ? 47 : 0);
+ test_cmos_drive_cyl(0x24, cur_ide[1]);
+}
+
+static void test_cmos_disktransflag(void)
+{
+ int val, i;
+
+ val = 0;
+ for (i = 0; i < ARRAY_SIZE(cur_ide); i++) {
+ if (is_hd(cur_ide[i])) {
+ val |= cur_ide[i]->trans << (2 * i);
+ }
+ }
+ test_cmos_byte(0x39, val);
+}
+
+static void test_cmos(void)
+{
+ test_cmos_disk_data();
+ test_cmos_drive1();
+ test_cmos_drive2();
+ test_cmos_disktransflag();
+}
+
+static int append_arg(int argc, char *argv[], int argv_sz, char *arg)
+{
+ g_assert(argc + 1 < argv_sz);
+ argv[argc++] = arg;
+ argv[argc] = NULL;
+ return argc;
+}
+
+static int setup_common(char *argv[], int argv_sz)
+{
+ memset(cur_ide, 0, sizeof(cur_ide));
+ return append_arg(0, argv, argv_sz,
+ g_strdup("-nodefaults -display none"));
+}
+
+static void setup_mbr(int img_idx, MBRcontents mbr)
+{
+ static const uint8_t part_lba[16] = {
+ /* chs 0,1,1 (lba 63) to chs 0,127,63 (8001 sectors) */
+ 0x80, 1, 1, 0, 6, 127, 63, 0, 63, 0, 0, 0, 0x41, 0x1F, 0, 0,
+ };
+ static const uint8_t part_chs[16] = {
+ /* chs 0,1,1 (lba 63) to chs 7,15,63 (8001 sectors) */
+ 0x80, 1, 1, 0, 6, 15, 63, 7, 63, 0, 0, 0, 0x41, 0x1F, 0, 0,
+ };
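+ /* Each 16-byte entry above is a standard MBR partition entry:
+ boot flag, CHS of the first sector (head, sector/cyl-high,
+ cyl-low), partition type, CHS of the last sector, 32-bit LBA of
+ the first sector, and 32-bit sector count (0x1f41 == 8001). */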
+ uint8_t buf[512];
+ int fd, ret;
+
+ memset(buf, 0, sizeof(buf));
+
+ if (mbr != mbr_blank) {
+ buf[0x1fe] = 0x55;
+ buf[0x1ff] = 0xAA;
+ memcpy(buf + 0x1BE, mbr == mbr_lba ? part_lba : part_chs, 16);
+ }
+
+ fd = open(img_file_name[img_idx], O_WRONLY);
+ g_assert(fd >= 0);
+ ret = write(fd, buf, sizeof(buf));
+ g_assert(ret == sizeof(buf));
+ close(fd);
+}
+
+static int setup_ide(int argc, char *argv[], int argv_sz,
+ int ide_idx, const char *dev, int img_idx,
+ MBRcontents mbr, const char *opts)
+{
+ char *s1, *s2, *s3;
+
+ s1 = g_strdup_printf("-drive id=drive%d,if=%s",
+ ide_idx, dev ? "none" : "ide");
+ s2 = dev ? g_strdup("") : g_strdup_printf(",index=%d", ide_idx);
+
+ if (img_secs[img_idx] >= 0) {
+ setup_mbr(img_idx, mbr);
+ s3 = g_strdup_printf(",file=%s", img_file_name[img_idx]);
+ } else {
+ s3 = g_strdup(",media=cdrom");
+ }
+ argc = append_arg(argc, argv, argv_sz,
+ g_strdup_printf("%s%s%s%s", s1, s2, s3, opts));
+ g_free(s1);
+ g_free(s2);
+ g_free(s3);
+
+ if (dev) {
+ argc = append_arg(argc, argv, argv_sz,
+ g_strdup_printf("-device %s,drive=drive%d,"
+ "bus=ide.%d,unit=%d",
+ dev, ide_idx,
+ ide_idx / 2, ide_idx % 2));
+ }
+ return argc;
+}
+
+/*
+ * Test case: no IDE devices
+ */
+static void test_ide_none(void)
+{
+ char *argv[256];
+
+ setup_common(argv, ARRAY_SIZE(argv));
+ qtest_start(g_strjoinv(" ", argv));
+ test_cmos();
+ qtest_quit(global_qtest);
+}
+
+static void test_ide_mbr(bool use_device, MBRcontents mbr)
+{
+ char *argv[256];
+ int argc;
+ Backend i;
+ const char *dev;
+
+ argc = setup_common(argv, ARRAY_SIZE(argv));
+ for (i = 0; i < backend_last; i++) {
+ cur_ide[i] = &hd_chst[i][mbr];
+ dev = use_device ? (is_hd(cur_ide[i]) ? "ide-hd" : "ide-cd") : NULL;
+ argc = setup_ide(argc, argv, ARRAY_SIZE(argv), i, dev, i, mbr, "");
+ }
+ qtest_start(g_strjoinv(" ", argv));
+ test_cmos();
+ qtest_quit(global_qtest);
+}
+
+/*
+ * Test case: IDE devices (if=ide) with blank MBRs
+ */
+static void test_ide_drive_mbr_blank(void)
+{
+ test_ide_mbr(false, mbr_blank);
+}
+
+/*
+ * Test case: IDE devices (if=ide) with MBRs indicating LBA is in use
+ */
+static void test_ide_drive_mbr_lba(void)
+{
+ test_ide_mbr(false, mbr_lba);
+}
+
+/*
+ * Test case: IDE devices (if=ide) with MBRs indicating CHS is in use
+ */
+static void test_ide_drive_mbr_chs(void)
+{
+ test_ide_mbr(false, mbr_chs);
+}
+
+/*
+ * Test case: IDE devices (if=none) with blank MBRs
+ */
+static void test_ide_device_mbr_blank(void)
+{
+ test_ide_mbr(true, mbr_blank);
+}
+
+/*
+ * Test case: IDE devices (if=none) with MBRs indicating LBA is in use
+ */
+static void test_ide_device_mbr_lba(void)
+{
+ test_ide_mbr(true, mbr_lba);
+}
+
+/*
+ * Test case: IDE devices (if=none) with MBRs indicating CHS is in use
+ */
+static void test_ide_device_mbr_chs(void)
+{
+ test_ide_mbr(true, mbr_chs);
+}
+
+static void test_ide_drive_user(const char *dev, bool trans)
+{
+ char *argv[256], *opts;
+ int argc;
+ int secs = img_secs[backend_small];
+ const CHST expected_chst = { secs / (4 * 32), 4, 32, trans };
+
+ argc = setup_common(argv, ARRAY_SIZE(argv));
+ opts = g_strdup_printf("%s,%s%scyls=%d,heads=%d,secs=%d",
+ dev ?: "",
+ trans && dev ? "bios-chs-" : "",
+ trans ? "trans=lba," : "",
+ expected_chst.cyls, expected_chst.heads,
+ expected_chst.secs);
+ cur_ide[0] = &expected_chst;
+ argc = setup_ide(argc, argv, ARRAY_SIZE(argv),
+ 0, dev ? opts : NULL, backend_small, mbr_chs,
+ dev ? "" : opts);
+ g_free(opts);
+ qtest_start(g_strjoinv(" ", argv));
+ test_cmos();
+ qtest_quit(global_qtest);
+}
+
+/*
+ * Test case: IDE device (if=ide) with explicit CHS
+ */
+static void test_ide_drive_user_chs(void)
+{
+ test_ide_drive_user(NULL, false);
+}
+
+/*
+ * Test case: IDE device (if=ide) with explicit CHS and translation
+ */
+static void test_ide_drive_user_chst(void)
+{
+ test_ide_drive_user(NULL, true);
+}
+
+/*
+ * Test case: IDE device (if=none) with explicit CHS
+ */
+static void test_ide_device_user_chs(void)
+{
+ test_ide_drive_user("ide-hd", false);
+}
+
+/*
+ * Test case: IDE device (if=none) with explicit CHS and translation
+ */
+static void test_ide_device_user_chst(void)
+{
+ test_ide_drive_user("ide-hd", true);
+}
+
+/*
+ * Test case: IDE devices (if=ide), but use index=0 for CD-ROM
+ */
+static void test_ide_drive_cd_0(void)
+{
+ char *argv[256];
+ int argc, ide_idx;
+ Backend i;
+
+ argc = setup_common(argv, ARRAY_SIZE(argv));
+ for (i = 0; i <= backend_empty; i++) {
+ ide_idx = backend_empty - i;
+ cur_ide[ide_idx] = &hd_chst[i][mbr_blank];
+ argc = setup_ide(argc, argv, ARRAY_SIZE(argv),
+ ide_idx, NULL, i, mbr_blank, "");
+ }
+ qtest_start(g_strjoinv(" ", argv));
+ test_cmos();
+ qtest_quit(global_qtest);
+}
+
+int main(int argc, char **argv)
+{
+ Backend i;
+ int ret;
+
+ g_test_init(&argc, &argv, NULL);
+
+ for (i = 0; i < backend_last; i++) {
+ if (img_secs[i] >= 0) {
+ img_file_name[i] = create_test_img(img_secs[i]);
+ } else {
+ img_file_name[i] = NULL;
+ }
+ }
+
+ qtest_add_func("hd-geo/ide/none", test_ide_none);
+ qtest_add_func("hd-geo/ide/drive/mbr/blank", test_ide_drive_mbr_blank);
+ qtest_add_func("hd-geo/ide/drive/mbr/lba", test_ide_drive_mbr_lba);
+ qtest_add_func("hd-geo/ide/drive/mbr/chs", test_ide_drive_mbr_chs);
+ qtest_add_func("hd-geo/ide/drive/user/chs", test_ide_drive_user_chs);
+ qtest_add_func("hd-geo/ide/drive/user/chst", test_ide_drive_user_chst);
+ qtest_add_func("hd-geo/ide/drive/cd_0", test_ide_drive_cd_0);
+ qtest_add_func("hd-geo/ide/device/mbr/blank", test_ide_device_mbr_blank);
+ qtest_add_func("hd-geo/ide/device/mbr/lba", test_ide_device_mbr_lba);
+ qtest_add_func("hd-geo/ide/device/mbr/chs", test_ide_device_mbr_chs);
+ qtest_add_func("hd-geo/ide/device/user/chs", test_ide_device_user_chs);
+ qtest_add_func("hd-geo/ide/device/user/chst", test_ide_device_user_chst);
+
+ ret = g_test_run();
+
+ for (i = 0; i < backend_last; i++) {
+ unlink(img_file_name[i]);
+ }
+
+ return ret;
+}
diff --git a/tests/libqtest.c b/tests/libqtest.c
index 6d333ef0ac..02d039218d 100644
--- a/tests/libqtest.c
+++ b/tests/libqtest.c
@@ -40,6 +40,7 @@ struct QTestState
bool irq_level[MAX_IRQ];
GString *rx;
gchar *pid_file;
+ char *socket_path, *qmp_socket_path;
};
#define g_assert_no_errno(ret) do { \
@@ -74,6 +75,7 @@ static int socket_accept(int sock)
socklen_t addrlen;
int ret;
+ addrlen = sizeof(addr);
do {
ret = accept(sock, (struct sockaddr *)&addr, &addrlen);
} while (ret == -1 && errno == EINTR);
@@ -87,8 +89,6 @@ QTestState *qtest_init(const char *extra_args)
{
QTestState *s;
int sock, qmpsock, ret, i;
- gchar *socket_path;
- gchar *qmp_socket_path;
gchar *pid_file;
gchar *command;
const char *qemu_binary;
@@ -97,14 +97,14 @@ QTestState *qtest_init(const char *extra_args)
qemu_binary = getenv("QTEST_QEMU_BINARY");
g_assert(qemu_binary != NULL);
- socket_path = g_strdup_printf("/tmp/qtest-%d.sock", getpid());
- qmp_socket_path = g_strdup_printf("/tmp/qtest-%d.qmp", getpid());
- pid_file = g_strdup_printf("/tmp/qtest-%d.pid", getpid());
-
s = g_malloc(sizeof(*s));
- sock = init_socket(socket_path);
- qmpsock = init_socket(qmp_socket_path);
+ s->socket_path = g_strdup_printf("/tmp/qtest-%d.sock", getpid());
+ s->qmp_socket_path = g_strdup_printf("/tmp/qtest-%d.qmp", getpid());
+ pid_file = g_strdup_printf("/tmp/qtest-%d.pid", getpid());
+
+ sock = init_socket(s->socket_path);
+ qmpsock = init_socket(s->qmp_socket_path);
pid = fork();
if (pid == 0) {
@@ -114,8 +114,8 @@ QTestState *qtest_init(const char *extra_args)
"-qmp unix:%s,nowait "
"-pidfile %s "
"-machine accel=qtest "
- "%s", qemu_binary, socket_path,
- qmp_socket_path, pid_file,
+ "%s", qemu_binary, s->socket_path,
+ s->qmp_socket_path, pid_file,
extra_args ?: "");
ret = system(command);
@@ -132,9 +132,6 @@ QTestState *qtest_init(const char *extra_args)
s->irq_level[i] = false;
}
- g_free(socket_path);
- g_free(qmp_socket_path);
-
/* Read the QMP greeting and then do the handshake */
qtest_qmp(s, "");
qtest_qmp(s, "{ 'execute': 'qmp_capabilities' }");
@@ -159,6 +156,13 @@ void qtest_quit(QTestState *s)
fclose(f);
}
+
+ unlink(s->pid_file);
+ unlink(s->socket_path);
+ unlink(s->qmp_socket_path);
+ g_free(s->pid_file);
+ g_free(s->socket_path);
+ g_free(s->qmp_socket_path);
}
static void socket_sendf(int fd, const char *fmt, va_list ap)
@@ -290,6 +294,11 @@ void qtest_qmp(QTestState *s, const char *fmt, ...)
continue;
}
+ if (len == -1 || len == 0) {
+ fprintf(stderr, "Broken pipe\n");
+ exit(1);
+ }
+
switch (c) {
case '{':
nesting++;
diff --git a/tests/qemu-iotests/031.out b/tests/qemu-iotests/031.out
index d3cab301d4..796c993df2 100644
--- a/tests/qemu-iotests/031.out
+++ b/tests/qemu-iotests/031.out
@@ -54,8 +54,8 @@ header_length 72
Header extension:
magic 0x6803f857
-length 0
-data ''
+length 96
+data <binary>
Header extension:
magic 0x12345678
@@ -68,7 +68,7 @@ No errors were found on the image.
magic 0x514649fb
version 2
-backing_file_offset 0x98
+backing_file_offset 0xf8
backing_file_size 0x17
cluster_bits 16
size 67108864
@@ -92,8 +92,8 @@ data 'host_device'
Header extension:
magic 0x6803f857
-length 0
-data ''
+length 96
+data <binary>
Header extension:
magic 0x12345678
@@ -155,8 +155,8 @@ header_length 104
Header extension:
magic 0x6803f857
-length 0
-data ''
+length 96
+data <binary>
Header extension:
magic 0x12345678
@@ -169,7 +169,7 @@ No errors were found on the image.
magic 0x514649fb
version 3
-backing_file_offset 0xb8
+backing_file_offset 0x118
backing_file_size 0x17
cluster_bits 16
size 67108864
@@ -193,8 +193,8 @@ data 'host_device'
Header extension:
magic 0x6803f857
-length 0
-data ''
+length 96
+data <binary>
Header extension:
magic 0x12345678
diff --git a/tests/qemu-iotests/036.out b/tests/qemu-iotests/036.out
index 6953e37ab6..063ca22d66 100644
--- a/tests/qemu-iotests/036.out
+++ b/tests/qemu-iotests/036.out
@@ -46,7 +46,7 @@ header_length 104
Header extension:
magic 0x6803f857
-length 0
-data ''
+length 96
+data <binary>
*** done
diff --git a/tests/qemu-iotests/039 b/tests/qemu-iotests/039
new file mode 100755
index 0000000000..a749fcf23b
--- /dev/null
+++ b/tests/qemu-iotests/039
@@ -0,0 +1,136 @@
+#!/bin/bash
+#
+# Test qcow2 lazy refcounts
+#
+# Copyright (C) 2012 Red Hat, Inc.
+# Copyright IBM, Corp. 2010
+#
+# Based on test 038.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+
+# creator
+owner=stefanha@linux.vnet.ibm.com
+
+seq=`basename $0`
+echo "QA output created by $seq"
+
+here=`pwd`
+tmp=/tmp/$$
+status=1 # failure is the default!
+
+_cleanup()
+{
+ _cleanup_test_img
+}
+trap "_cleanup; exit \$status" 0 1 2 3 15
+
+# get standard environment, filters and checks
+. ./common.rc
+. ./common.filter
+
+_supported_fmt qcow2
+_supported_proto generic
+_supported_os Linux
+
+size=128M
+
+echo
+echo "== Checking that image is clean on shutdown =="
+
+IMGOPTS="compat=1.1,lazy_refcounts=on"
+_make_test_img $size
+
+$QEMU_IO -c "write -P 0x5a 0 512" $TEST_IMG | _filter_qemu_io
+
+# The dirty bit must not be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+_check_test_img
+
+echo
+echo "== Creating a dirty image file =="
+
+IMGOPTS="compat=1.1,lazy_refcounts=on"
+_make_test_img $size
+
+old_ulimit=$(ulimit -c)
+ulimit -c 0 # do not produce a core dump on abort(3)
+$QEMU_IO -c "write -P 0x5a 0 512" -c "abort" $TEST_IMG | _filter_qemu_io
+ulimit -c "$old_ulimit"
+
+# The dirty bit must be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+_check_test_img
+
+echo
+echo "== Read-only access must still work =="
+
+$QEMU_IO -r -c "read -P 0x5a 0 512" $TEST_IMG | _filter_qemu_io
+
+# The dirty bit must be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+
+echo
+echo "== Repairing the image file must succeed =="
+
+$QEMU_IMG check -r all $TEST_IMG
+
+# The dirty bit must not be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+
+echo
+echo "== Data should still be accessible after repair =="
+
+$QEMU_IO -c "read -P 0x5a 0 512" $TEST_IMG | _filter_qemu_io
+
+echo
+echo "== Opening a dirty image read/write should repair it =="
+
+IMGOPTS="compat=1.1,lazy_refcounts=on"
+_make_test_img $size
+
+old_ulimit=$(ulimit -c)
+ulimit -c 0 # do not produce a core dump on abort(3)
+$QEMU_IO -c "write -P 0x5a 0 512" -c "abort" $TEST_IMG | _filter_qemu_io
+ulimit -c "$old_ulimit"
+
+# The dirty bit must be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+
+$QEMU_IO -c "write 0 512" $TEST_IMG | _filter_qemu_io
+
+# The dirty bit must not be set
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+
+echo
+echo "== Creating an image file with lazy_refcounts=off =="
+
+IMGOPTS="compat=1.1,lazy_refcounts=off"
+_make_test_img $size
+
+old_ulimit=$(ulimit -c)
+ulimit -c 0 # do not produce a core dump on abort(3)
+$QEMU_IO -c "write -P 0x5a 0 512" -c "abort" $TEST_IMG | _filter_qemu_io
+ulimit -c "$old_ulimit"
+
+# The dirty bit must not be set since lazy_refcounts=off
+./qcow2.py $TEST_IMG dump-header | grep incompatible_features
+_check_test_img
+
+# success, all done
+echo "*** done"
+rm -f $seq.full
+status=0
+
diff --git a/tests/qemu-iotests/039.out b/tests/qemu-iotests/039.out
new file mode 100644
index 0000000000..155a05e109
--- /dev/null
+++ b/tests/qemu-iotests/039.out
@@ -0,0 +1,53 @@
+QA output created by 039
+
+== Checking that image is clean on shutdown ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x0
+No errors were found on the image.
+
+== Creating a dirty image file ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x1
+ERROR OFLAG_COPIED: offset=8000000000050000 refcount=0
+ERROR cluster 5 refcount=0 reference=1
+
+2 errors were found on the image.
+Data may be corrupted, or further writes to the image may corrupt it.
+
+== Read-only access must still work ==
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x1
+
+== Repairing the image file must succeed ==
+ERROR OFLAG_COPIED: offset=8000000000050000 refcount=0
+Repairing cluster 5 refcount=0 reference=1
+No errors were found on the image.
+incompatible_features 0x0
+
+== Data should still be accessible after repair ==
+read 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+
+== Opening a dirty image read/write should repair it ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x1
+ERROR OFLAG_COPIED: offset=8000000000050000 refcount=0
+Repairing cluster 5 refcount=0 reference=1
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x0
+
+== Creating an image file with lazy_refcounts=off ==
+Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=134217728
+wrote 512/512 bytes at offset 0
+512 bytes, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec)
+incompatible_features 0x0
+No errors were found on the image.
+*** done
diff --git a/tests/qemu-iotests/common b/tests/qemu-iotests/common
index eeb70cbcdc..1f6fdf5c56 100644
--- a/tests/qemu-iotests/common
+++ b/tests/qemu-iotests/common
@@ -41,6 +41,7 @@ sortme=false
expunge=true
have_test_arg=false
randomize=false
+valgrind=false
rm -f $tmp.list $tmp.tmp $tmp.sed
export IMGFMT=raw
@@ -212,6 +213,11 @@ testlist options
xpand=false
;;
+ -valgrind)
+ valgrind=true
+ xpand=false
+ ;;
+
-g) # -g group ... pick from group file
group=true
xpand=false
@@ -345,3 +351,8 @@ fi
[ "$QEMU" = "" ] && _fatal "qemu not found"
[ "$QEMU_IMG" = "" ] && _fatal "qemu-img not found"
[ "$QEMU_IO" = "" ] && _fatal "qemu-img not found"
+
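+# When -valgrind was given, route qemu-io invocations through the
+# valgrind_qemu_io wrapper defined in common.rc instead of the binary itself.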
+if $valgrind; then
+ export REAL_QEMU_IO="$QEMU_IO_PROG"
+ export QEMU_IO_PROG=valgrind_qemu_io
+fi
diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc
index e535874e4c..7782808a26 100644
--- a/tests/qemu-iotests/common.rc
+++ b/tests/qemu-iotests/common.rc
@@ -53,6 +53,16 @@ else
TEST_IMG=$IMGPROTO:$TEST_DIR/t.$IMGFMT
fi
+function valgrind_qemu_io()
+{
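+ # Keep valgrind quiet on success: the log is dumped only when valgrind
+ # (or qemu-io itself) exits with an error.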
+ valgrind --log-file=/tmp/$$.valgrind --error-exitcode=99 $REAL_QEMU_IO "$@"
+ if [ $? != 0 ]; then
+ cat /tmp/$$.valgrind
+ fi
+ rm -f /tmp/$$.valgrind
+}
+
+
_optstr_add()
{
if [ -n "$1" ]; then
@@ -100,10 +110,11 @@ _make_test_img()
sed -e "s#$IMGFMT#IMGFMT#g" | \
sed -e "s# encryption=off##g" | \
sed -e "s# cluster_size=[0-9]\\+##g" | \
- sed -e "s# table_size=0##g" | \
+ sed -e "s# table_size=[0-9]\\+##g" | \
sed -e "s# compat='[^']*'##g" | \
- sed -e "s# compat6=off##g" | \
- sed -e "s# static=off##g"
+ sed -e "s# compat6=\\(on\\|off\\)##g" | \
+ sed -e "s# static=\\(on\\|off\\)##g" | \
+ sed -e "s# lazy_refcounts=\\(on\\|off\\)##g"
}
_cleanup_test_img()
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index 7a2c92b6e9..ebb5ca4b41 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -45,3 +45,4 @@
036 rw auto quick
037 rw auto backing
038 rw auto backing
+039 rw auto
diff --git a/tests/qemu-iotests/qed.py b/tests/qemu-iotests/qed.py
new file mode 100755
index 0000000000..52ff845590
--- /dev/null
+++ b/tests/qemu-iotests/qed.py
@@ -0,0 +1,235 @@
+#!/usr/bin/env python
+#
+# Tool to manipulate QED image files
+#
+# Copyright (C) 2010 IBM, Corp.
+#
+# Authors:
+# Stefan Hajnoczi <stefanha@linux.vnet.ibm.com>
+#
+# This work is licensed under the terms of the GNU GPL, version 2 or later.
+# See the COPYING file in the top-level directory.
+
+import sys
+import struct
+import random
+import optparse
+
+# This can be used as a module
+__all__ = ['QED_F_NEED_CHECK', 'QED']
+
+QED_F_NEED_CHECK = 0x02
+
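+# Little-endian on-disk layout of the QED header and of L1/L2 table elements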
+header_fmt = '<IIIIQQQQQII'
+header_size = struct.calcsize(header_fmt)
+field_names = ['magic', 'cluster_size', 'table_size',
+ 'header_size', 'features', 'compat_features',
+ 'autoclear_features', 'l1_table_offset', 'image_size',
+ 'backing_filename_offset', 'backing_filename_size']
+table_elem_fmt = '<Q'
+table_elem_size = struct.calcsize(table_elem_fmt)
+
+def err(msg):
+ sys.stderr.write(msg + '\n')
+ sys.exit(1)
+
+def unpack_header(s):
+ fields = struct.unpack(header_fmt, s)
+ return dict((field_names[idx], val) for idx, val in enumerate(fields))
+
+def pack_header(header):
+ fields = tuple(header[x] for x in field_names)
+ return struct.pack(header_fmt, *fields)
+
+def unpack_table_elem(s):
+ return struct.unpack(table_elem_fmt, s)[0]
+
+def pack_table_elem(elem):
+ return struct.pack(table_elem_fmt, elem)
+
+class QED(object):
+ def __init__(self, f):
+ self.f = f
+
+ self.f.seek(0, 2)
+ self.filesize = f.tell()
+
+ self.load_header()
+ self.load_l1_table()
+
+ def raw_pread(self, offset, size):
+ self.f.seek(offset)
+ return self.f.read(size)
+
+ def raw_pwrite(self, offset, data):
+ self.f.seek(offset)
+ return self.f.write(data)
+
+ def load_header(self):
+ self.header = unpack_header(self.raw_pread(0, header_size))
+
+ def store_header(self):
+ self.raw_pwrite(0, pack_header(self.header))
+
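+ # L1 and L2 tables are arrays of 64-bit offsets spanning table_size clusters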
+ def read_table(self, offset):
+ size = self.header['table_size'] * self.header['cluster_size']
+ s = self.raw_pread(offset, size)
+ table = [unpack_table_elem(s[i:i + table_elem_size]) for i in xrange(0, size, table_elem_size)]
+ return table
+
+ def load_l1_table(self):
+ self.l1_table = self.read_table(self.header['l1_table_offset'])
+ self.table_nelems = self.header['table_size'] * self.header['cluster_size'] / table_elem_size
+
+ def write_table(self, offset, table):
+ s = ''.join(pack_table_elem(x) for x in table)
+ self.raw_pwrite(offset, s)
+
+def random_table_item(table):
+ vals = [(index, offset) for index, offset in enumerate(table) if offset != 0]
+ if not vals:
+ err('cannot pick random item because table is empty')
+ return random.choice(vals)
+
+def corrupt_table_duplicate(table):
+ '''Corrupt a table by introducing a duplicate offset'''
+ victim_idx, victim_val = random_table_item(table)
+ unique_vals = set(table)
+ if len(unique_vals) == 1:
+ err('no duplication corruption possible in table')
+ dup_val = random.choice(list(unique_vals.difference([victim_val])))
+ table[victim_idx] = dup_val
+
+def corrupt_table_invalidate(qed, table):
+ '''Corrupt a table by introducing an invalid offset'''
+ index, _ = random_table_item(table)
+ table[index] = qed.filesize + random.randint(0, 100 * 1024 * 1024 * 1024 * 1024)
+
+def cmd_show(qed, *args):
+ '''show [header|l1|l2 <offset>] - Show header or l1/l2 tables'''
+ if not args or args[0] == 'header':
+ print qed.header
+ elif args[0] == 'l1':
+ print qed.l1_table
+ elif len(args) == 2 and args[0] == 'l2':
+ offset = int(args[1])
+ print qed.read_table(offset)
+ else:
+ err('unrecognized sub-command')
+
+def cmd_duplicate(qed, table_level):
+ '''duplicate l1|l2 - Duplicate a random table element'''
+ if table_level == 'l1':
+ offset = qed.header['l1_table_offset']
+ table = qed.l1_table
+ elif table_level == 'l2':
+ _, offset = random_table_item(qed.l1_table)
+ table = qed.read_table(offset)
+ else:
+ err('unrecognized sub-command')
+ corrupt_table_duplicate(table)
+ qed.write_table(offset, table)
+
+def cmd_invalidate(qed, table_level):
+ '''invalidate l1|l2 - Plant an invalid table element at random'''
+ if table_level == 'l1':
+ offset = qed.header['l1_table_offset']
+ table = qed.l1_table
+ elif table_level == 'l2':
+ _, offset = random_table_item(qed.l1_table)
+ table = qed.read_table(offset)
+ else:
+ err('unrecognized sub-command')
+ corrupt_table_invalidate(qed, table)
+ qed.write_table(offset, table)
+
+def cmd_need_check(qed, *args):
+ '''need-check [on|off] - Test, set, or clear the QED_F_NEED_CHECK header bit'''
+ if not args:
+ print bool(qed.header['features'] & QED_F_NEED_CHECK)
+ return
+
+ if args[0] == 'on':
+ qed.header['features'] |= QED_F_NEED_CHECK
+ elif args[0] == 'off':
+ qed.header['features'] &= ~QED_F_NEED_CHECK
+ else:
+ err('unrecognized sub-command')
+ qed.store_header()
+
+def cmd_zero_cluster(qed, pos, *args):
+ '''zero-cluster <pos> [<n>] - Zero data clusters'''
+ pos, n = int(pos), 1
+ if args:
+ if len(args) != 1:
+ err('expected one argument')
+ n = int(args[0])
+
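+ # For each cluster, locate its L2 table via the L1 index and mark the
+ # data cluster entry with the zero-cluster value (1)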
+ for i in xrange(n):
+ l1_index = pos / qed.header['cluster_size'] / len(qed.l1_table)
+ if qed.l1_table[l1_index] == 0:
+ err('no l2 table allocated')
+
+ l2_offset = qed.l1_table[l1_index]
+ l2_table = qed.read_table(l2_offset)
+
+ l2_index = (pos / qed.header['cluster_size']) % len(qed.l1_table)
+ l2_table[l2_index] = 1 # zero the data cluster
+ qed.write_table(l2_offset, l2_table)
+ pos += qed.header['cluster_size']
+
+def cmd_copy_metadata(qed, outfile):
+ '''copy-metadata <outfile> - Copy metadata only (for scrubbing corrupted images)'''
+ out = open(outfile, 'wb')
+
+ # Match file size
+ out.seek(qed.filesize - 1)
+ out.write('\0')
+
+ # Copy header clusters
+ out.seek(0)
+ header_size_bytes = qed.header['header_size'] * qed.header['cluster_size']
+ out.write(qed.raw_pread(0, header_size_bytes))
+
+ # Copy L1 table
+ out.seek(qed.header['l1_table_offset'])
+ s = ''.join(pack_table_elem(x) for x in qed.l1_table)
+ out.write(s)
+
+ # Copy L2 tables
+ for l2_offset in qed.l1_table:
+ if l2_offset == 0:
+ continue
+ l2_table = qed.read_table(l2_offset)
+ out.seek(l2_offset)
+ s = ''.join(pack_table_elem(x) for x in l2_table)
+ out.write(s)
+
+ out.close()
+
+def usage():
+ print 'Usage: %s <file> <cmd> [<arg>, ...]' % sys.argv[0]
+ print
+ print 'Supported commands:'
+ for cmd in sorted(x for x in globals() if x.startswith('cmd_')):
+ print globals()[cmd].__doc__
+ sys.exit(1)
+
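+# Example invocations (image.qed is a placeholder for an existing QED image):
+#   ./qed.py image.qed show header
+#   ./qed.py image.qed need-check on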
+def main():
+ if len(sys.argv) < 3:
+ usage()
+ filename, cmd = sys.argv[1:3]
+
+ cmd = 'cmd_' + cmd.replace('-', '_')
+ if cmd not in globals():
+ usage()
+
+ qed = QED(open(filename, 'r+b'))
+ try:
+ globals()[cmd](qed, *sys.argv[3:])
+ except TypeError, e:
+ sys.stderr.write(globals()[cmd].__doc__ + '\n')
+ sys.exit(1)
+
+if __name__ == '__main__':
+ main()
diff --git a/tests/tcg/openrisc/Makefile b/tests/tcg/openrisc/Makefile
new file mode 100644
index 0000000000..7e65888761
--- /dev/null
+++ b/tests/tcg/openrisc/Makefile
@@ -0,0 +1,71 @@
+-include ../../config-host.mak
+
+CROSS = or32-linux-
+
+SIM = qemu-or32
+
+CC = $(CROSS)gcc
+
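+# Each test case is built as a statically linked or32-linux binary and run
+# under the qemu-or32 user-mode emulator.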
+TESTCASES = test_add.tst
+TESTCASES += test_sub.tst
+TESTCASES += test_addc.tst
+TESTCASES += test_addi.tst
+TESTCASES += test_addic.tst
+TESTCASES += test_and_or.tst
+TESTCASES += test_bf.tst
+TESTCASES += test_bnf.tst
+TESTCASES += test_div.tst
+TESTCASES += test_divu.tst
+TESTCASES += test_extx.tst
+TESTCASES += test_fx.tst
+TESTCASES += test_jal.tst
+TESTCASES += test_j.tst
+TESTCASES += test_lf_div.tst
+TESTCASES += test_lf_eqs.tst
+TESTCASES += test_lf_ges.tst
+TESTCASES += test_lf_gts.tst
+TESTCASES += test_lf_les.tst
+TESTCASES += test_lf_lts.tst
+TESTCASES += test_lf_mul.tst
+TESTCASES += test_lf_nes.tst
+TESTCASES += test_lf_rem.tst
+TESTCASES += test_lf_sub.tst
+TESTCASES += test_lf_add.tst
+TESTCASES += test_logic.tst
+TESTCASES += test_lx.tst
+TESTCASES += test_movhi.tst
+TESTCASES += test_mul.tst
+TESTCASES += test_mulu.tst
+TESTCASES += test_muli.tst
+TESTCASES += test_sfeq.tst
+TESTCASES += test_sfeqi.tst
+TESTCASES += test_sfges.tst
+TESTCASES += test_sfgesi.tst
+TESTCASES += test_sfgeu.tst
+TESTCASES += test_sfgeui.tst
+TESTCASES += test_sfgts.tst
+TESTCASES += test_sfgtsi.tst
+TESTCASES += test_sfgtu.tst
+TESTCASES += test_sfgtui.tst
+TESTCASES += test_sfles.tst
+TESTCASES += test_sflesi.tst
+TESTCASES += test_sfleu.tst
+TESTCASES += test_sfleui.tst
+TESTCASES += test_sflts.tst
+TESTCASES += test_sfltsi.tst
+TESTCASES += test_sfltu.tst
+TESTCASES += test_sfltui.tst
+TESTCASES += test_sfne.tst
+TESTCASES += test_sfnei.tst
+
+all: $(TESTCASES)
+
+%.tst: %.c
+ $(CC) -static $< -o $@
+
+
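+# Run all test binaries in sequence under the user-mode emulator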
+check: $(TESTCASES)
+ @for case in $(TESTCASES); do $(SIM) $$case; echo $$case pass!; sleep 0.2; done
+
+clean:
+ $(RM) $(TESTCASES)
diff --git a/tests/tcg/openrisc/test_add.c b/tests/tcg/openrisc/test_add.c
new file mode 100644
index 0000000000..3d23592e76
--- /dev/null
+++ b/tests/tcg/openrisc/test_add.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, d;
+ int result;
+
+ a = 0x100;
+ b = 0x100;
+ result = 0x200;
+ __asm
+ ("l.add %0, %0, %1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("add error\n");
+ return -1;
+ }
+
+ a = 0xffff;
+ b = 0x1;
+ result = 0x10000;
+ __asm
+ ("l.add %0, %0, %1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("add error\n");
+ return -1;
+ }
+
+ a = 0x7fffffff;
+ b = 0x1;
+ __asm
+ ("l.add %0, %1, %2\n\t"
+ : "=r"(d)
+ : "r"(b), "r"(a)
+ );
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_addc.c b/tests/tcg/openrisc/test_addc.c
new file mode 100644
index 0000000000..05d18f8ce5
--- /dev/null
+++ b/tests/tcg/openrisc/test_addc.c
@@ -0,0 +1,38 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x01;
+ c = 0xffffffff;
+ result = 1;
+ __asm
+ ("l.addc %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("first addc error\n");
+ return -1;
+ }
+
+ b = 0x01;
+ c = 0xffffffff;
+ result = 0x80000001;
+ __asm
+ ("l.addc %0, %1, %2\n\t"
+ "l.movhi %2, 0x7fff\n\t"
+ "l.ori %2, %2, 0xffff\n\t"
+ "l.addc %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("addc error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_addi.c b/tests/tcg/openrisc/test_addi.c
new file mode 100644
index 0000000000..bbf5a5ffab
--- /dev/null
+++ b/tests/tcg/openrisc/test_addi.c
@@ -0,0 +1,33 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ b = 0x01;
+ result = 0x00;
+ __asm
+ ("l.addi %0, %1, 0xffff\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("addi error\n\t");
+ return -1;
+ }
+
+ b = 0x010000;
+ result = 0xffff;
+ __asm
+ ("l.addi %0, %1, 0xffff\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("addi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_addic.c b/tests/tcg/openrisc/test_addic.c
new file mode 100644
index 0000000000..4ba7432521
--- /dev/null
+++ b/tests/tcg/openrisc/test_addic.c
@@ -0,0 +1,33 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 1;
+ result = 0x1;
+ __asm
+ ("l.addic %0, %0, 0xffff\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("first addic error\n");
+ return -1;
+ }
+
+ a = 0x1;
+ result = 0x201;
+ __asm
+ ("l.addic %0, %0, 0xffff\n\t"
+ "l.ori %0, r0, 0x100\n\t"
+ "l.addic %0, %0, 0x100\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("second addic error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_and_or.c b/tests/tcg/openrisc/test_and_or.c
new file mode 100644
index 0000000000..810d868c7b
--- /dev/null
+++ b/tests/tcg/openrisc/test_and_or.c
@@ -0,0 +1,65 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x2;
+ c = 0x1;
+ result = 0;
+ __asm
+ ("l.and %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("and error\n");
+ return -1;
+ }
+
+ result = 0x2;
+ __asm
+ ("l.andi %0, %1, 0x3\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("andi error %x\n", a);
+ return -1;
+ }
+
+ result = 0x3;
+ __asm
+ ("l.or %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("or error\n");
+ return -1;
+ }
+
+ result = 0x3;
+ __asm
+ ("l.xor %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("xor error\n");
+ return -1;
+ }
+
+ __asm
+ ("l.xori %0, %1, 0x1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("xori error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_bf.c b/tests/tcg/openrisc/test_bf.c
new file mode 100644
index 0000000000..79f3fb99aa
--- /dev/null
+++ b/tests/tcg/openrisc/test_bf.c
@@ -0,0 +1,47 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 0;
+ b = 10;
+ c = 11;
+ result = 0x2;
+ __asm
+ ("1:\n\t"
+ "l.addi %1, %1, 0x01\n\t"
+ "l.addi %0, %0, 0x01\n\t"
+ "l.sfeq %1, %2\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfeq error\n");
+ return -1;
+ }
+
+ a = 0x00;
+ b = 0x11;
+ c = 0x11;
+ result = 0x01;
+ __asm
+ ("1:\n\t"
+ "l.addi %1, %1, 0x01\n\t"
+ "l.addi %0, %0, 0x01\n\t"
+ "l.sfeq %1, %2\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfeq error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_bnf.c b/tests/tcg/openrisc/test_bnf.c
new file mode 100644
index 0000000000..f716215f10
--- /dev/null
+++ b/tests/tcg/openrisc/test_bnf.c
@@ -0,0 +1,51 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 0;
+ result = 0x3;
+ __asm
+ ("l.sfeqi %1, 0x0\n\t"
+ "l.bnf 1f\n\t"
+ "l.nop\n\t"
+ "\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "\n\t"
+ "1:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("l.bnf error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 0;
+ result = 1;
+ __asm
+ ("l.sfeqi %1, 0x1\n\t"
+ "l.bnf 1f\n\t"
+ "l.nop\n\t"
+ "\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "\n\t"
+ "1:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("l.bnf error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_div.c b/tests/tcg/openrisc/test_div.c
new file mode 100644
index 0000000000..9b65f6e673
--- /dev/null
+++ b/tests/tcg/openrisc/test_div.c
@@ -0,0 +1,54 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x120;
+ c = 0x4;
+ result = 0x48;
+ __asm
+ ("l.div %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("div error\n");
+ return -1;
+ }
+
+ result = 0x4;
+ __asm
+ ("l.div %0, %1, %0\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("div error\n");
+ return -1;
+ }
+
+ b = 0xffffffff;
+ c = 0x80000000;
+ result = 0;
+ __asm
+ ("l.div %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("div error\n");
+ return -1;
+ }
+
+ b = 0x80000000;
+ c = 0xffffffff;
+ __asm
+ ("l.div %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_divu.c b/tests/tcg/openrisc/test_divu.c
new file mode 100644
index 0000000000..bff9e3ea59
--- /dev/null
+++ b/tests/tcg/openrisc/test_divu.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x120;
+ c = 0x4;
+ result = 0x48;
+
+ __asm
+ ("l.divu %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("divu error\n");
+ return -1;
+ }
+
+ result = 0x4;
+ __asm
+ ("l.divu %0, %1, %0\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("divu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_extx.c b/tests/tcg/openrisc/test_extx.c
new file mode 100644
index 0000000000..09221484a6
--- /dev/null
+++ b/tests/tcg/openrisc/test_extx.c
@@ -0,0 +1,78 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ b = 0x83;
+ result = 0xffffff83;
+ __asm
+ ("l.extbs %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("extbs error\n");
+ return -1;
+ }
+
+ result = 0x83;
+ __asm
+ ("l.extbz %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("extbz error\n");
+ return -1;
+ }
+
+ b = 0x8083;
+ result = 0xffff8083;
+ __asm
+ ("l.exths %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("exths error\n");
+ return -1;
+ }
+
+ result = 0x8083;
+ __asm
+ ("l.exthz %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("exthz error\n");
+ return -1;
+ }
+
+ b = 0x11;
+ result = 0x11;
+ __asm
+ ("l.extws %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+
+ if (a != result) {
+ printf("extws error\n");
+ return -1;
+ }
+
+ __asm
+ ("l.extwz %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("extwz error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_fx.c b/tests/tcg/openrisc/test_fx.c
new file mode 100644
index 0000000000..df86000d90
--- /dev/null
+++ b/tests/tcg/openrisc/test_fx.c
@@ -0,0 +1,57 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ b = 0x123;
+ result = 1;
+ __asm
+ ("l.ff1 %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("ff1 error\n");
+ return -1;
+ }
+
+ b = 0x0;
+ result = 0;
+ __asm
+ ("l.ff1 %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("ff1 error\n");
+ return -1;
+ }
+
+ b = 0x123;
+ result = 9;
+ __asm
+ ("l.fl1 %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("fl1 error\n");
+ return -1;
+ }
+
+ b = 0x0;
+ result = 0;
+ __asm
+ ("l.fl1 %0, %1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("fl1 error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_j.c b/tests/tcg/openrisc/test_j.c
new file mode 100644
index 0000000000..9ddf8bfbb5
--- /dev/null
+++ b/tests/tcg/openrisc/test_j.c
@@ -0,0 +1,26 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 2;
+ __asm
+ ("l.addi %0, %0, 1\n\t"
+ "l.j j\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.nop\n\t"
+ "j:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("j error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_jal.c b/tests/tcg/openrisc/test_jal.c
new file mode 100644
index 0000000000..7e2da40163
--- /dev/null
+++ b/tests/tcg/openrisc/test_jal.c
@@ -0,0 +1,26 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 2;
+ __asm
+ ("l.addi %0, %0, 1\n\t"
+ "l.jal jal\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.nop\n\t"
+ "jal:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("jal error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_add.c b/tests/tcg/openrisc/test_lf_add.c
new file mode 100644
index 0000000000..e00212dad6
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_add.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ float a, b;
+ float res2;
+
+ a = 1.5;
+ b = 2.5;
+ res2 = 4.0;
+ __asm
+ ("lf.add.s %0, %0, %1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != res2) {
+ printf("lf.add.s error, %f\n", a);
+ return -1;
+ }
+
+/* double c, d;
+ double res1;
+
+ c = 1.5;
+ d = 1.5;
+ res1 = 3.00;
+ __asm
+ ("lf.add.d %0, %1, %2\n\t"
+ : "+r"(c)
+ : "r"(d)
+ );
+
+ if ((e - res1) > 0.002) {
+ printf("lf.add.d error, %f\n", e - res1);
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_div.c b/tests/tcg/openrisc/test_lf_div.c
new file mode 100644
index 0000000000..70b5d1c17b
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_div.c
@@ -0,0 +1,37 @@
+#include <stdio.h>
+
+int main(void)
+{
+ float a, b, c;
+ float result;
+
+ b = 1.5;
+ c = 0.5;
+ result = 3.0;
+ __asm
+ ("lf.div.s %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.div.s error\n");
+ return -1;
+ }
+
+/* double a, b, c, res;
+
+ b = 0x80000000;
+ c = 0x40;
+ result = 0x2000000;
+ __asm
+ ("lf.div.d %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.div.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_eqs.c b/tests/tcg/openrisc/test_lf_eqs.c
new file mode 100644
index 0000000000..a176bd6fe0
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_eqs.c
@@ -0,0 +1,88 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, result;
+ float b, c;
+
+ a = 0x1;
+ b = 122.5;
+ c = 123.5;
+ result = 0x3;
+ __asm
+ ("lfeqd:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfeq.s %1, %2\n\t"
+ "l.bf lfeqd\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfeq.s error\n");
+ return -1;
+ }
+
+ b = 13.5;
+ c = 13.5;
+ result = 0x3;
+ __asm
+ ("lf.sfeq.s %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi r4, r4, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfeq.s error\n");
+ return -1;
+ }
+
+/* double b, c;
+ double result;
+ int a;
+
+ a = 0x1;
+ b = 122.5;
+ c = 133.5;
+ result = 0x3;
+
+ __asm
+ ("lfeqd:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfeq.d %1, %2\n\t"
+ "l.bf lfeqd\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfeq.d error\n");
+ return -1;
+ }
+
+ double c, d, res;
+ int e = 0;
+ c = 11.5;
+ d = 11.5;
+ res = 1;
+ __asm
+ ("lf.sfeq.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(e)
+ : "r"(c), "r"(d)
+ );
+ if (e != res) {
+ printf("lf.sfeq.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_ges.c b/tests/tcg/openrisc/test_lf_ges.c
new file mode 100644
index 0000000000..98e7f50b6e
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_ges.c
@@ -0,0 +1,88 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, result;
+ float b, c;
+
+ a = 0;
+ b = 122.5;
+ c = 123.5;
+ result = 0x1;
+ __asm
+ ("lfges:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfge.s %1, %2\n\t"
+ "l.bf lfges\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfge.s error\n");
+ return -1;
+ }
+
+ b = 133.5;
+ c = 13.5;
+ result = 0x3;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfge.s %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfge.s error\n");
+ return -1;
+ }
+
+/* int a, result;
+ double b, c;
+
+ a = 0x1;
+ b = 122.5;
+ c = 123.5;
+ result = 0x2;
+ __asm
+ ("lfged:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfge.d %1, %2\n\t"
+ "l.bf lfged\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfge.d error\n");
+ return -1;
+ }
+
+ b = 133.5;
+ c = 13.5;
+ result = 0x4;
+ __asm
+ ("lf.sfge.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfge.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_gts.c b/tests/tcg/openrisc/test_lf_gts.c
new file mode 100644
index 0000000000..f3df27958e
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_gts.c
@@ -0,0 +1,86 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, result;
+ float b, c;
+
+ a = 0;
+ b = 122.5;
+ c = 123.5;
+ result = 0x1;
+ __asm
+ ("lfgts:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfgt.s %1, %2\n\t"
+ "l.bf lfgts\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfgt.s error\n");
+ return -1;
+ }
+
+ b = 133.5;
+ c = 13.5;
+ result = 0x1;
+ __asm
+ ("lf.sfgt.s %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfgt.s error\n");
+ return -1;
+ }
+
+/* int a, result;
+ double b, c;
+
+ a = 0;
+ b = 122.5;
+ c = 123.5;
+ result = 0x1;
+ __asm
+ ("lfgtd:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfgt.d %1, %2\n\t"
+ "l.bf lfgtd\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfgt.d error\n");
+ return -1;
+ }
+
+ b = 133.5;
+ c = 13.5;
+ result = 0x3;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfgt.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfgt.d error, %x\n", a);
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_les.c b/tests/tcg/openrisc/test_lf_les.c
new file mode 100644
index 0000000000..046c511d93
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_les.c
@@ -0,0 +1,88 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ float b, c;
+ int result;
+
+ a = 0;
+ b = 1234.2;
+ c = 12.4;
+ result = 0x1;
+ __asm
+ ("lfles:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfle.s %1, %2\n\t"
+ "l.bf lfles\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfle.s error\n");
+ return -1;
+ }
+
+ b = 1.1;
+ c = 19.4;
+ result = 0x3;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfle.s %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfle.s error\n");
+ return -1;
+ }
+
+/* int a;
+ double b, c;
+ int result;
+
+ a = 0;
+ b = 1212.5;
+ c = 123.5;
+ result = 0x1;
+ __asm
+ ("lfled:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfle.d %1, %2\n\t"
+ "l.bf lfled\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfle.d error\n");
+ return -1;
+ }
+
+ b = 13.5;
+ c = 113.5;
+ result = 0x2;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "lf.sfle.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfle.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_lts.c b/tests/tcg/openrisc/test_lf_lts.c
new file mode 100644
index 0000000000..fa56721dfa
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_lts.c
@@ -0,0 +1,92 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ float b, c, d;
+ int result;
+
+ a = 0;
+ b = 124.5;
+ c = 1.4;
+ result = 1;
+ __asm
+ ("lfltd:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sflt.s %1, %2\n\t"
+ "l.bf lfltd\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sflt.s error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 11.1;
+ c = 13.1;
+ d = 1.0;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "lf.add.s %1, %1, %3\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "lf.sflt.s %1, %2\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c), "r"(d)
+ );
+ if (a != result) {
+ printf("lf.sflt.s error\n");
+ return -1;
+ }
+
+/* int a;
+ double b, c;
+ int result;
+
+ a = 0;
+ b = 1432.1;
+ c = 2.4;
+ result = 0x1;
+ __asm
+ ("lfltd:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sflt.d %1, %2\n\t"
+ "l.bf lfltd\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sflt.d error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 1.1;
+ c = 19.7;
+ result = 2;
+ __asm
+ ("lf.sflt.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.addi %0, %0, 1\n\t"
+ : "+r"(a), "+r"(b)
+ : "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sflt.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_mul.c b/tests/tcg/openrisc/test_lf_mul.c
new file mode 100644
index 0000000000..bc8ad800c7
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_mul.c
@@ -0,0 +1,22 @@
+#include <stdio.h>
+
+int main(void)
+{
+ float a, b, c;
+ float result;
+
+ b = 1.5;
+ c = 4.0;
+ result = 6.0;
+ __asm
+ ("lf.mul.s %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.mul.s error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_nes.c b/tests/tcg/openrisc/test_lf_nes.c
new file mode 100644
index 0000000000..613631005b
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_nes.c
@@ -0,0 +1,89 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ float b, c;
+ int result;
+
+ a = 0;
+ b = 23.1;
+ c = 23.1;
+ result = 0x1;
+ __asm
+ ("lfnes:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfne.s %1, %2\n\t"
+ "l.bf lfnes\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfne.s error");
+ return -1;
+ }
+
+ b = 12.4;
+ c = 7.8;
+ result = 0x3;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfne.s %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfne.s error\n");
+ return -1;
+ }
+/* int a;
+ double b, c;
+ int result;
+
+ a = 0;
+ b = 124.3;
+ c = 124.3;
+ result = 0x1;
+ __asm
+ ("lfned:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfne.d %1, %2\n\t"
+ "l.bf lfned\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfne.d error\n");
+ return -1;
+ }
+
+ b = 11.5;
+ c = 16.7;
+ result = 0x3;
+ __asm
+ ("l.addi %0, %0, 0x1\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "lf.sfne.d %1, %2\n\t"
+ "l.bf 1f\n\t"
+ "l.nop\n\t"
+ "l.addi r4, r4, 0x1\n\t"
+ "l.addi r4, r4, 0x1\n\t"
+ "1:\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sfne.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_rem.c b/tests/tcg/openrisc/test_lf_rem.c
new file mode 100644
index 0000000000..bd6090d694
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_rem.c
@@ -0,0 +1,32 @@
+#include <stdio.h>
+
+int main(void)
+{
+ float a, b, c;
+ float result;
+
+ b = 101.5;
+ c = 10;
+ result = 1.5;
+/* __asm
+ ("lf.rem.d %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.rem.d error\n");
+ return -1;
+ }*/
+
+ __asm
+ ("lf.rem.s %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.rem.s error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lf_sub.c b/tests/tcg/openrisc/test_lf_sub.c
new file mode 100644
index 0000000000..5ee9b03910
--- /dev/null
+++ b/tests/tcg/openrisc/test_lf_sub.c
@@ -0,0 +1,35 @@
+#include <stdio.h>
+
+int main(void)
+{
+ float a, b, c;
+ float result;
+
+ b = 10.5;
+ c = 1.5;
+ result = 9.0;
+ __asm
+ ("lf.sub.s %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sub.s error\n");
+ return -1;
+ }
+
+/* b = 0x999;
+ c = 0x654;
+ result = 0x345;
+ __asm
+ ("lf.sub.d %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("lf.sub.d error\n");
+ return -1;
+ }*/
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_logic.c b/tests/tcg/openrisc/test_logic.c
new file mode 100644
index 0000000000..46d173f481
--- /dev/null
+++ b/tests/tcg/openrisc/test_logic.c
@@ -0,0 +1,105 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x9743;
+ c = 0x2;
+ result = 0x25d0c;
+ __asm
+ ("l.sll %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sll error\n");
+ return -1;
+ }
+
+ b = 0x9743;
+ result = 0x25d0c;
+ __asm
+ ("l.slli %0, %1, 0x2\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("slli error\n");
+ return -1;
+ }
+
+ b = 0x7654;
+ c = 0x03;
+ result = 0xeca;
+ __asm
+ ("l.srl %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("srl error\n");
+ return -1;
+ }
+
+ b = 0x7654;
+ result = 0xeca;
+ __asm
+ ("l.srli %0, %1, 0x3\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("srli error\n");
+ return -1;
+ }
+
+ b = 0x80000001;
+ c = 0x4;
+ result = 0x18000000;
+ __asm
+ ("l.ror %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("ror error\n");
+ return -1;
+ }
+
+ b = 0x80000001;
+ result = 0x18000000;
+ __asm
+ ("l.rori %0, %1, 0x4\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("rori error\n");
+ return -1;
+ }
+
+ b = 0x80000001;
+ c = 0x03;
+ result = 0xf0000000;
+ __asm
+ ("l.sra %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sra error\n");
+ return -1;
+ }
+
+ b = 0x80000001;
+ result = 0xf0000000;
+ __asm
+ ("l.srai %0, %1, 0x3\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("srai error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_lx.c b/tests/tcg/openrisc/test_lx.c
new file mode 100644
index 0000000000..792e3d5c7f
--- /dev/null
+++ b/tests/tcg/openrisc/test_lx.c
@@ -0,0 +1,84 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int p[50];
+ int result;
+
+ result = 0x23;
+ __asm
+ ("l.ori r8, r0, 0x123\n\t"
+ "l.sb 0x4 + %1, r8\n\t"
+ "\n\t"
+ "l.lbz %0, 0x4 + %1\n\t"
+ : "=r"(a), "+m"(*p)
+ );
+ if (a != result) {
+ printf("lbz error, %x\n", a);
+ return -1;
+ }
+
+ result = 0x23;
+ __asm
+ ("l.lbs %0, 0x4 + %1\n\t"
+ : "=r"(a)
+ : "m"(*p)
+ );
+ if (a != result) {
+ printf("lbs error\n");
+ return -1;
+ }
+
+ result = 0x1111;
+ __asm
+ ("l.ori r8, r0, 0x1111\n\t"
+ "l.sh 0x20 + %1, r8\n\t"
+ "\n\t"
+ "l.lhs %0, 0x20 + %1\n\t"
+ : "=r"(a), "=m"(*p)
+ );
+ if (a != result) {
+ printf("lhs error, %x\n", a);
+ return -1;
+ }
+
+ result = 0x1111;
+ __asm
+ ("l.lhz %0, 0x20 + %1\n\t"
+ : "=r"(a)
+ : "m"(*p)
+ );
+ if (a != result) {
+ printf("lhz error\n");
+ return -1;
+ }
+
+ result = 0x1111233;
+ __asm
+ ("l.ori r8, r0, 0x1233\n\t"
+ "l.movhi r1, 0x111\n\t"
+ "l.or r8, r8, r1\n\t"
+ "l.sw 0x123 + %1, r8\n\t"
+ "\n\t"
+ "l.lws %0, 0x123 + %1\n\t"
+ : "=r"(a), "+m"(*p)
+ );
+ if (a != result) {
+ printf("lws error, %x\n", a);
+ return -1;
+ }
+
+ result = 0x1111233;
+ __asm
+ ("l.lwz %0, 0x123 + %1\n\t"
+ : "=r"(a)
+ : "m"(*p)
+ );
+ if (a != result) {
+ printf("lwz error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_movhi.c b/tests/tcg/openrisc/test_movhi.c
new file mode 100644
index 0000000000..737f75b9fd
--- /dev/null
+++ b/tests/tcg/openrisc/test_movhi.c
@@ -0,0 +1,31 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ result = 0x1222;
+ __asm
+ ("l.movhi r3, 0x1222\n\t"
+ "l.srli %0, r3, 16\n\t"
+ : "=r"(a)
+ );
+ if (a != result) {
+ printf("movhi error\n");
+ return -1;
+ }
+
+ result = 0x1111;
+ __asm
+ ("l.movhi r8, 0x1111\n\t"
+ "l.srli %0, r8, 16\n\t"
+ : "=r"(a)
+ );
+ if (a != result) {
+ printf("movhi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_mul.c b/tests/tcg/openrisc/test_mul.c
new file mode 100644
index 0000000000..130101fdee
--- /dev/null
+++ b/tests/tcg/openrisc/test_mul.c
@@ -0,0 +1,61 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x4;
+ c = 0x1;
+ result = 0x4;
+ __asm
+ ("l.mul %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mul error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0x0;
+ result = 0x0;
+ __asm
+ ("l.mul %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mul error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0xff;
+ result = 0xff;
+ __asm
+ ("l.mul %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mul error\n");
+ return -1;
+ }
+
+ b = 0x7fffffff;
+ c = 0x2;
+ result = 0xfffffffe;
+ __asm
+ ("l.mul %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mul error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_muli.c b/tests/tcg/openrisc/test_muli.c
new file mode 100644
index 0000000000..f1042e98de
--- /dev/null
+++ b/tests/tcg/openrisc/test_muli.c
@@ -0,0 +1,48 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x4;
+ c = 0x1;
+ result = 0x4;
+ __asm
+ ("l.muli %0, %1, 0x1\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("muli error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0x0;
+ result = 0x0;
+ __asm
+ ("l.muli %0, %1, 0x0\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("muli error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0xff;
+ result = 0xff;
+ __asm
+ ("l.muli %0, %1, 0xff\n\t"
+ : "=r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("muli error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_mulu.c b/tests/tcg/openrisc/test_mulu.c
new file mode 100644
index 0000000000..2d1e97d16e
--- /dev/null
+++ b/tests/tcg/openrisc/test_mulu.c
@@ -0,0 +1,48 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ b = 0x4;
+ c = 0x1;
+ result = 0x4;
+ __asm
+ ("l.mulu %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mulu error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0x0;
+ result = 0x0;
+ __asm
+ ("l.mulu %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mulu error\n");
+ return -1;
+ }
+
+ b = 0x1;
+ c = 0xff;
+ result = 0xff;
+ __asm
+ ("l.mulu %0, %1, %2\n\t"
+ : "=r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("mulu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfeq.c b/tests/tcg/openrisc/test_sfeq.c
new file mode 100644
index 0000000000..bd7f875b71
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfeq.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0x1;
+ b = 0x80;
+ result = 0x2;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.sfeq %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfeq error\n");
+ return -1;
+ }
+
+ a = 0x7f;
+ b = 0x80;
+ result = 0x81;
+ __asm
+ ("2:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.sfeq %0, %1\n\t"
+ "l.bf 2b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfeq error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfeqi.c b/tests/tcg/openrisc/test_sfeqi.c
new file mode 100644
index 0000000000..574261321b
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfeqi.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 1;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.sfeqi %0, 0x80\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfeqi error\n");
+ return -1;
+ }
+
+ a = 0x7f;
+ result = 0x81;
+ __asm
+ ("2:\n\t"
+ "l.addi %0, %0, 0x1\n\t"
+ "l.sfeqi %0, 0x80\n\t"
+ "l.bf 2b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfeqi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfges.c b/tests/tcg/openrisc/test_sfges.c
new file mode 100644
index 0000000000..23761d7f5a
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfges.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfges %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfges error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 3;
+ c = 0x1;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %2\n\t"
+ "l.sfges %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfges error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgesi.c b/tests/tcg/openrisc/test_sfgesi.c
new file mode 100644
index 0000000000..54a2d51cd5
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgesi.c
@@ -0,0 +1,40 @@
+#include <stdio.h>
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgesi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfgesi error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 1;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %1\n\t"
+ "l.sfgesi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgesi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgeu.c b/tests/tcg/openrisc/test_sfgeu.c
new file mode 100644
index 0000000000..2a491d91ea
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgeu.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgeu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgeu error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 3;
+ c = 1;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %2\n\t"
+ "l.sfgeu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfgeu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgeui.c b/tests/tcg/openrisc/test_sfgeui.c
new file mode 100644
index 0000000000..40af35c68f
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgeui.c
@@ -0,0 +1,41 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgeui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfgeui error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 1;
+ result = 2;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %1\n\t"
+ "l.sfgeui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgeui error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgts.c b/tests/tcg/openrisc/test_sfgts.c
new file mode 100644
index 0000000000..4481a9cc3d
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgts.c
@@ -0,0 +1,45 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgts %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgts error\n");
+ return -1;
+ }
+
+
+ a = 0xff;
+ b = 3;
+ c = 1;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %2\n\t"
+ "l.sfgts %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfgts error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgtsi.c b/tests/tcg/openrisc/test_sfgtsi.c
new file mode 100644
index 0000000000..7366e1292c
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgtsi.c
@@ -0,0 +1,41 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgtsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfgtsi error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 1;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %1\n\t"
+ "l.sfgtsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgtsi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgtu.c b/tests/tcg/openrisc/test_sfgtu.c
new file mode 100644
index 0000000000..da2868916d
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgtu.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+int main(void)
+{
+ int a, b, c;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgtu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgtu error\n");
+ return -1;
+ }
+
+ a = 0xff;
+ b = 3;
+ c = 1;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %2\n\t"
+ "l.sfgtu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b), "r"(c)
+ );
+ if (a != result) {
+ printf("sfgtu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfgtui.c b/tests/tcg/openrisc/test_sfgtui.c
new file mode 100644
index 0000000000..565d44f112
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfgtui.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ result = 1;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfgtui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfgtui error\n");
+ return -1;
+ }
+
+
+ a = 0xff;
+ b = 1;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.sub %0, %0, %1\n\t"
+ "l.sfgtui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfgtui error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfles.c b/tests/tcg/openrisc/test_sfles.c
new file mode 100644
index 0000000000..f5735228fe
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfles.c
@@ -0,0 +1,26 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfles %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfles error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sflesi.c b/tests/tcg/openrisc/test_sflesi.c
new file mode 100644
index 0000000000..16fe6053e5
--- /dev/null
+++ b/tests/tcg/openrisc/test_sflesi.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sflesi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sflesi error\n");
+ return -1;
+ }
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sflesi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sflesi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfleu.c b/tests/tcg/openrisc/test_sfleu.c
new file mode 100644
index 0000000000..be0a3c3f48
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfleu.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfleu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfleu error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 3;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfleu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfleu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfleui.c b/tests/tcg/openrisc/test_sfleui.c
new file mode 100644
index 0000000000..38d3c89709
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfleui.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfleui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfleui error\n");
+ return -1;
+ }
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfleui %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfleui error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sflts.c b/tests/tcg/openrisc/test_sflts.c
new file mode 100644
index 0000000000..7deeb48d09
--- /dev/null
+++ b/tests/tcg/openrisc/test_sflts.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sflts %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sflts error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 3;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sflts %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sflts error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfltsi.c b/tests/tcg/openrisc/test_sfltsi.c
new file mode 100644
index 0000000000..3cb1f02857
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfltsi.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfltsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfltsi error\n");
+ return -1;
+ }
+
+ a = 0;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfltsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfltsi error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfltu.c b/tests/tcg/openrisc/test_sfltu.c
new file mode 100644
index 0000000000..7ed3b26858
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfltu.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfltu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfltu error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 3;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfltu %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfltu error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfltui.c b/tests/tcg/openrisc/test_sfltui.c
new file mode 100644
index 0000000000..a5cb9f6a97
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfltui.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 4;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 4\n\t"
+ "l.sfltsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfltui error\n");
+ return -1;
+ }
+
+ a = 0;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfltsi %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfltui error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfne.c b/tests/tcg/openrisc/test_sfne.c
new file mode 100644
index 0000000000..b33a35cf96
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfne.c
@@ -0,0 +1,43 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0;
+ b = 3;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 3\n\t"
+ "l.sfne %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfne error\n");
+ return -1;
+ }
+
+ a = 0;
+ b = 3;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfne %0, %1\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sfne error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sfnei.c b/tests/tcg/openrisc/test_sfnei.c
new file mode 100644
index 0000000000..d311c9e660
--- /dev/null
+++ b/tests/tcg/openrisc/test_sfnei.c
@@ -0,0 +1,39 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a;
+ int result;
+
+ a = 0;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 3\n\t"
+ "l.sfnei %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfnei error\n");
+ return -1;
+ }
+
+ a = 0;
+ result = 3;
+ __asm
+ ("1:\n\t"
+ "l.addi %0, %0, 1\n\t"
+ "l.sfnei %0, 0x3\n\t"
+ "l.bf 1b\n\t"
+ "l.nop\n\t"
+ : "+r"(a)
+ );
+ if (a != result) {
+ printf("sfnei error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/tcg/openrisc/test_sub.c b/tests/tcg/openrisc/test_sub.c
new file mode 100644
index 0000000000..474ec6055b
--- /dev/null
+++ b/tests/tcg/openrisc/test_sub.c
@@ -0,0 +1,35 @@
+#include <stdio.h>
+
+int main(void)
+{
+ int a, b;
+ int result;
+
+ a = 0x100;
+ b = 0x100;
+ result = 0x0;
+ __asm
+ ("l.sub %0, %0, %1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sub error\n");
+ return -1;
+ }
+
+ a = 0xffff;
+ b = 0x1;
+ result = 0xfffe;
+ __asm
+ ("l.sub %0, %0, %1\n\t"
+ : "+r"(a)
+ : "r"(b)
+ );
+ if (a != result) {
+ printf("sub error\n");
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/tests/test-iov.c b/tests/test-iov.c
new file mode 100644
index 0000000000..cbe7a8955c
--- /dev/null
+++ b/tests/test-iov.c
@@ -0,0 +1,260 @@
+#include <glib.h>
+#include "qemu-common.h"
+#include "iov.h"
+#include "qemu_socket.h"
+
+/* create a randomly-sized iovec with random vectors */
+static void iov_random(struct iovec **iovp, unsigned *iov_cntp)
+{
+ unsigned niov = g_test_rand_int_range(3,8);
+ struct iovec *iov = g_malloc(niov * sizeof(*iov));
+ unsigned i;
+ for (i = 0; i < niov; ++i) {
+ iov[i].iov_len = g_test_rand_int_range(5,20);
+ iov[i].iov_base = g_malloc(iov[i].iov_len);
+ }
+ *iovp = iov;
+ *iov_cntp = niov;
+}
+
+static void iov_free(struct iovec *iov, unsigned niov)
+{
+ unsigned i;
+ for (i = 0; i < niov; ++i) {
+ g_free(iov[i].iov_base);
+ }
+ g_free(iov);
+}
+
+static void test_iov_bytes(struct iovec *iov, unsigned niov,
+ size_t offset, size_t bytes)
+{
+ unsigned i;
+ size_t j, o;
+ unsigned char *b;
+ o = 0;
+
+ /* we walk over all elements, */
+ for (i = 0; i < niov; ++i) {
+ b = iov[i].iov_base;
+ /* over each char of each element, */
+ for (j = 0; j < iov[i].iov_len; ++j) {
+ /* counting each of them and
+ * verifying that the ones within [offset,offset+bytes)
+ * range are equal to the position number (o) */
+ if (o >= offset && o < offset + bytes) {
+ g_assert(b[j] == (o & 255));
+ } else {
+ g_assert(b[j] == 0xff);
+ }
+ ++o;
+ }
+ }
+}
+
+static void test_to_from_buf_1(void)
+{
+ unsigned niov;
+ struct iovec *iov;
+ size_t sz;
+ unsigned char *ibuf, *obuf;
+ unsigned i, j, n;
+
+ iov_random(&iov, &niov);
+
+ sz = iov_size(iov, niov);
+
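+ /* Surround both buffers with 4-byte guard patterns so that any
+  * out-of-bounds access by the iov helpers is caught by the checks below. */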
+ ibuf = g_malloc(sz + 8) + 4;
+ memcpy(ibuf-4, "aaaa", 4); memcpy(ibuf + sz, "bbbb", 4);
+ obuf = g_malloc(sz + 8) + 4;
+ memcpy(obuf-4, "xxxx", 4); memcpy(obuf + sz, "yyyy", 4);
+
+ /* fill in ibuf with 0123456... */
+ for (i = 0; i < sz; ++i) {
+ ibuf[i] = i & 255;
+ }
+
+ for (i = 0; i <= sz; ++i) {
+
+ /* Test from/to buf for offset(i) in [0..sz] up to the end of buffer.
+ * For last iteration with offset == sz, the procedure should
+ * skip whole vector and process exactly 0 bytes */
+
+ /* first set bytes [i..sz) to some "random" value */
+ n = iov_memset(iov, niov, 0, 0xff, -1);
+ g_assert(n == sz);
+
+ /* next copy bytes [i..sz) from ibuf to iovec */
+ n = iov_from_buf(iov, niov, i, ibuf + i, -1);
+ g_assert(n == sz - i);
+
+ /* clear part of obuf */
+ memset(obuf + i, 0, sz - i);
+ /* and set this part of obuf to values from iovec */
+ n = iov_to_buf(iov, niov, i, obuf + i, -1);
+ g_assert(n == sz - i);
+
+ /* now compare resulting buffers */
+ g_assert(memcmp(ibuf, obuf, sz) == 0);
+
+ /* test just one char */
+ n = iov_to_buf(iov, niov, i, obuf + i, 1);
+ g_assert(n == (i < sz));
+ if (n) {
+ g_assert(obuf[i] == (i & 255));
+ }
+
+ for (j = i; j <= sz; ++j) {
+ /* now test num of bytes cap up to byte no. j,
+ * with j in [i..sz]. */
+
+ /* clear iovec */
+ n = iov_memset(iov, niov, 0, 0xff, -1);
+ g_assert(n == sz);
+
+ /* copy bytes [i..j) from ibuf to iovec */
+ n = iov_from_buf(iov, niov, i, ibuf + i, j - i);
+ g_assert(n == j - i);
+
+ /* clear part of obuf */
+ memset(obuf + i, 0, j - i);
+
+ /* copy bytes [i..j) from iovec to obuf */
+ n = iov_to_buf(iov, niov, i, obuf + i, j - i);
+ g_assert(n == j - i);
+
+ /* verify result */
+ g_assert(memcmp(ibuf, obuf, sz) == 0);
+
+ /* now actually check if the iovec contains the right data */
+ test_iov_bytes(iov, niov, i, j - i);
+ }
+ }
+ g_assert(!memcmp(ibuf-4, "aaaa", 4) && !memcmp(ibuf+sz, "bbbb", 4));
+ g_free(ibuf-4);
+ g_assert(!memcmp(obuf-4, "xxxx", 4) && !memcmp(obuf+sz, "yyyy", 4));
+ g_free(obuf-4);
+ iov_free(iov, niov);
+}
+
+static void test_to_from_buf(void)
+{
+ int x;
+ for (x = 0; x < 4; ++x) {
+ test_to_from_buf_1();
+ }
+}
+
+static void test_io(void)
+{
+#ifndef _WIN32
+/* socketpair(PF_UNIX) does not exist on Windows, so skip this test there */
+
+ int sv[2];
+ int r;
+ unsigned i, j, k, s, t;
+ fd_set fds;
+ unsigned niov;
+ struct iovec *iov, *siov;
+ unsigned char *buf;
+ size_t sz;
+
+ iov_random(&iov, &niov);
+ sz = iov_size(iov, niov);
+ buf = g_malloc(sz);
+ for (i = 0; i < sz; ++i) {
+ buf[i] = i & 255;
+ }
+ iov_from_buf(iov, niov, 0, buf, sz);
+
+ siov = g_malloc(sizeof(*iov) * niov);
+ memcpy(siov, iov, sizeof(*iov) * niov);
+
+ if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) < 0) {
+ perror("socketpair");
+ exit(1);
+ }
+
+ FD_ZERO(&fds);
+
+ t = 0;
+ if (fork() == 0) {
+ /* writer */
+
+ close(sv[0]);
+ FD_SET(sv[1], &fds);
+ fcntl(sv[1], F_SETFL, O_RDWR|O_NONBLOCK);
+ r = g_test_rand_int_range(sz / 2, sz);
+ setsockopt(sv[1], SOL_SOCKET, SO_SNDBUF, &r, sizeof(r));
+
+ for (i = 0; i <= sz; ++i) {
+ for (j = i; j <= sz; ++j) {
+ k = i;
+ do {
+ s = g_test_rand_int_range(0, j - k + 1);
+ r = iov_send(sv[1], iov, niov, k, s);
+ g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0);
+ if (r >= 0) {
+ k += r;
+ t += r;
+ usleep(g_test_rand_int_range(0, 30));
+ } else if (errno == EAGAIN) {
+ select(sv[1]+1, NULL, &fds, NULL, NULL);
+ continue;
+ } else {
+ perror("send");
+ exit(1);
+ }
+ } while(k < j);
+ }
+ }
+ exit(0);
+
+ } else {
+ /* reader & verifier */
+
+ close(sv[1]);
+ FD_SET(sv[0], &fds);
+ fcntl(sv[0], F_SETFL, O_RDWR|O_NONBLOCK);
+ r = g_test_rand_int_range(sz / 2, sz);
+ setsockopt(sv[0], SOL_SOCKET, SO_RCVBUF, &r, sizeof(r));
+ usleep(500000);
+
+ for (i = 0; i <= sz; ++i) {
+ for (j = i; j <= sz; ++j) {
+ k = i;
+ iov_memset(iov, niov, 0, 0xff, -1);
+ do {
+ s = g_test_rand_int_range(0, j - k + 1);
+ r = iov_recv(sv[0], iov, niov, k, s);
+ g_assert(memcmp(iov, siov, sizeof(*iov)*niov) == 0);
+ if (r > 0) {
+ k += r;
+ t += r;
+ } else if (!r) {
+ if (s) {
+ break;
+ }
+ } else if (errno == EAGAIN) {
+ select(sv[0]+1, &fds, NULL, NULL, NULL);
+ continue;
+ } else {
+ perror("recv");
+ exit(1);
+ }
+ } while(k < j);
+ test_iov_bytes(iov, niov, i, j - i);
+ }
+ }
+ }
+#endif
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+ g_test_rand_int();
+ g_test_add_func("/basic/iov/from-to-buf", test_to_from_buf);
+ g_test_add_func("/basic/iov/io", test_io);
+ return g_test_run();
+}
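
The helpers exercised by this test come from iov.h; below is a minimal sketch of the flat-buffer round trip the test builds on, using the same iov_size/iov_from_buf/iov_to_buf signatures the test itself calls and assuming the same includes (glib.h, qemu-common.h, iov.h). The helper name copy_round_trip is a placeholder, not part of the patch.

/* Sketch: copy a flat buffer into a scatter/gather list and back out,
 * assuming iov/niov were set up as in iov_random() above. */
static void copy_round_trip(struct iovec *iov, unsigned niov)
{
    size_t sz = iov_size(iov, niov);          /* total bytes across all elements */
    uint8_t *in = g_malloc(sz);
    uint8_t *out = g_malloc(sz);

    memset(in, 0xab, sz);
    iov_from_buf(iov, niov, 0, in, sz);       /* flat buffer -> iovec */
    iov_to_buf(iov, niov, 0, out, sz);        /* iovec -> flat buffer */
    g_assert(memcmp(in, out, sz) == 0);

    g_free(in);
    g_free(out);
}
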
diff --git a/tests/test-qmp-commands.c b/tests/test-qmp-commands.c
index 60cbf019bb..dc3c507f2b 100644
--- a/tests/test-qmp-commands.c
+++ b/tests/test-qmp-commands.c
@@ -3,6 +3,9 @@
#include "test-qmp-commands.h"
#include "qapi/qmp-core.h"
#include "module.h"
+#include "qapi/qmp-input-visitor.h"
+#include "tests/test-qapi-types.h"
+#include "tests/test-qapi-visit.h"
void qmp_user_def_cmd(Error **errp)
{
@@ -123,6 +126,44 @@ static void test_dealloc_types(void)
qapi_free_UserDefOneList(ud1list);
}
+/* test generated deallocation on an object whose construction was prematurely
+ * terminated due to an error */
+static void test_dealloc_partial(void)
+{
+ static const char text[] = "don't leak me";
+
+ UserDefTwo *ud2 = NULL;
+ Error *err = NULL;
+
+ /* create partial object */
+ {
+ QDict *ud2_dict;
+ QmpInputVisitor *qiv;
+
+ ud2_dict = qdict_new();
+ qdict_put_obj(ud2_dict, "string", QOBJECT(qstring_from_str(text)));
+
+ qiv = qmp_input_visitor_new(QOBJECT(ud2_dict));
+ visit_type_UserDefTwo(qmp_input_get_visitor(qiv), &ud2, NULL, &err);
+ qmp_input_visitor_cleanup(qiv);
+ QDECREF(ud2_dict);
+ }
+
+ /* verify partial success */
+ assert(ud2 != NULL);
+ assert(ud2->string != NULL);
+ assert(strcmp(ud2->string, text) == 0);
+ assert(ud2->dict.dict.userdef == NULL);
+
+ /* confirm & release construction error */
+ assert(err != NULL);
+ error_free(err);
+
+ /* tear down partial object */
+ qapi_free_UserDefTwo(ud2);
+}
+
+
int main(int argc, char **argv)
{
g_test_init(&argc, &argv, NULL);
@@ -131,6 +172,7 @@ int main(int argc, char **argv)
g_test_add_func("/0.15/dispatch_cmd_error", test_dispatch_cmd_error);
g_test_add_func("/0.15/dispatch_cmd_io", test_dispatch_cmd_io);
g_test_add_func("/0.15/dealloc_types", test_dealloc_types);
+ g_test_add_func("/0.15/dealloc_partial", test_dealloc_partial);
module_call_init(MODULE_INIT_QAPI);
g_test_run();
diff --git a/tests/test-qmp-input-visitor.c b/tests/test-qmp-input-visitor.c
index c30fdc4e59..8f5a509582 100644
--- a/tests/test-qmp-input-visitor.c
+++ b/tests/test-qmp-input-visitor.c
@@ -151,14 +151,22 @@ typedef struct TestStruct
static void visit_type_TestStruct(Visitor *v, TestStruct **obj,
const char *name, Error **errp)
{
- visit_start_struct(v, (void **)obj, "TestStruct", name, sizeof(TestStruct),
- errp);
-
- visit_type_int(v, &(*obj)->integer, "integer", errp);
- visit_type_bool(v, &(*obj)->boolean, "boolean", errp);
- visit_type_str(v, &(*obj)->string, "string", errp);
-
- visit_end_struct(v, errp);
+ Error *err = NULL;
+ if (!error_is_set(errp)) {
+ visit_start_struct(v, (void **)obj, "TestStruct", name, sizeof(TestStruct),
+ &err);
+ if (!err) {
+ visit_type_int(v, &(*obj)->integer, "integer", &err);
+ visit_type_bool(v, &(*obj)->boolean, "boolean", &err);
+ visit_type_str(v, &(*obj)->string, "string", &err);
+
+ /* Always call end_struct if start_struct succeeded. */
+ error_propagate(errp, err);
+ err = NULL;
+ visit_end_struct(v, &err);
+ }
+ error_propagate(errp, err);
+ }
}
static void test_visitor_in_struct(TestInputVisitorData *data,
diff --git a/trace-events b/trace-events
index 5c82b3acf2..6b12f83de0 100644
--- a/trace-events
+++ b/trace-events
@@ -141,6 +141,10 @@ ecc_mem_readl_ecr1(uint32_t ret) "Read event count 2 %08x"
ecc_diag_mem_writeb(uint64_t addr, uint32_t val) "Write diagnostic %"PRId64" = %02x"
ecc_diag_mem_readb(uint64_t addr, uint32_t ret) "Read diagnostic %"PRId64"= %02x"
+# hw/hd-geometry.c
+hd_geometry_lchs_guess(void *bs, int cyls, int heads, int secs) "bs %p LCHS %d %d %d"
+hd_geometry_guess(void *bs, uint32_t cyls, uint32_t heads, uint32_t secs, int trans) "bs %p CHS %u %u %u trans %d"
+
# hw/jazz-led.c
jazz_led_read(uint64_t addr, uint8_t val) "read addr=0x%"PRIx64": 0x%x"
jazz_led_write(uint64_t addr, uint8_t new) "write addr=0x%"PRIx64": 0x%x"
@@ -252,12 +256,13 @@ usb_ehci_qtd_fields(uint32_t addr, int tbytes, int cpage, int cerr, int pid) "QT
usb_ehci_qtd_bits(uint32_t addr, int ioc, int active, int halt, int babble, int xacterr) "QTD @ %08x - ioc %d, active %d, halt %d, babble %d, xacterr %d"
usb_ehci_itd(uint32_t addr, uint32_t nxt, uint32_t mplen, uint32_t mult, uint32_t ep, uint32_t devaddr) "ITD @ %08x: next %08x - mplen %d, mult %d, ep %d, dev %d"
usb_ehci_sitd(uint32_t addr, uint32_t nxt, uint32_t active) "ITD @ %08x: next %08x - active %d"
-usb_ehci_port_attach(uint32_t port, const char *device) "attach port #%d - %s"
-usb_ehci_port_detach(uint32_t port) "detach port #%d"
+usb_ehci_port_attach(uint32_t port, const char *owner, const char *device) "attach port #%d, owner %s, device %s"
+usb_ehci_port_detach(uint32_t port, const char *owner) "detach port #%d, owner %s"
usb_ehci_port_reset(uint32_t port, int enable) "reset port #%d - %d"
usb_ehci_data(int rw, uint32_t cpage, uint32_t offset, uint32_t addr, uint32_t len, uint32_t bufpos) "write %d, cpage %d, offset 0x%03x, addr 0x%08x, len %d, bufpos %d"
usb_ehci_queue_action(void *q, const char *action) "q %p: %s"
usb_ehci_packet_action(void *q, void *p, const char *action) "q %p p %p: %s"
+usb_ehci_irq(uint32_t level, uint32_t frindex, uint32_t sts, uint32_t mask) "level %d, frindex 0x%04x, sts 0x%x, mask 0x%x"
# hw/usb/hcd-uhci.c
usb_uhci_reset(void) "=== RESET ==="
@@ -346,6 +351,20 @@ usb_hub_clear_port_feature(int addr, int nr, const char *f) "dev %d, port %d, fe
usb_hub_attach(int addr, int nr) "dev %d, port %d"
usb_hub_detach(int addr, int nr) "dev %d, port %d"
+# hw/usb/dev-uas.c
+usb_uas_reset(int addr) "dev %d"
+usb_uas_command(int addr, uint16_t tag, int lun, uint32_t lun64_1, uint32_t lun64_2) "dev %d, tag 0x%x, lun %d, lun64 %08x-%08x"
+usb_uas_response(int addr, uint16_t tag, uint8_t code) "dev %d, tag 0x%x, code 0x%x"
+usb_uas_sense(int addr, uint16_t tag, uint8_t status) "dev %d, tag 0x%x, status 0x%x"
+usb_uas_read_ready(int addr, uint16_t tag) "dev %d, tag 0x%x"
+usb_uas_write_ready(int addr, uint16_t tag) "dev %d, tag 0x%x"
+usb_uas_xfer_data(int addr, uint16_t tag, uint32_t copy, uint32_t uoff, uint32_t usize, uint32_t soff, uint32_t ssize) "dev %d, tag 0x%x, copy %d, usb-pkt %d/%d, scsi-buf %d/%d"
+usb_uas_scsi_data(int addr, uint16_t tag, uint32_t bytes) "dev %d, tag 0x%x, bytes %d"
+usb_uas_scsi_complete(int addr, uint16_t tag, uint32_t status, uint32_t resid) "dev %d, tag 0x%x, status 0x%x, residue %d"
+usb_uas_tmf_abort_task(int addr, uint16_t tag, uint16_t task_tag) "dev %d, tag 0x%x, task-tag 0x%x"
+usb_uas_tmf_logical_unit_reset(int addr, uint16_t tag, int lun) "dev %d, tag 0x%x, lun %d"
+usb_uas_tmf_unsupported(int addr, uint16_t tag, uint32_t function) "dev %d, tag 0x%x, function 0x%x"
+
# hw/usb/host-linux.c
usb_host_open_started(int bus, int addr) "dev %d:%d"
usb_host_open_success(int bus, int addr) "dev %d:%d"
@@ -367,8 +386,10 @@ usb_host_urb_complete(int bus, int addr, void *aurb, int status, int length, int
usb_host_urb_canceled(int bus, int addr, void *aurb) "dev %d:%d, aurb %p"
usb_host_ep_set_halt(int bus, int addr, int ep) "dev %d:%d, ep %d"
usb_host_ep_clear_halt(int bus, int addr, int ep) "dev %d:%d, ep %d"
-usb_host_ep_start_iso(int bus, int addr, int ep) "dev %d:%d, ep %d"
-usb_host_ep_stop_iso(int bus, int addr, int ep) "dev %d:%d, ep %d"
+usb_host_iso_start(int bus, int addr, int ep) "dev %d:%d, ep %d"
+usb_host_iso_stop(int bus, int addr, int ep) "dev %d:%d, ep %d"
+usb_host_iso_out_of_bufs(int bus, int addr, int ep) "dev %d:%d, ep %d"
+usb_host_iso_many_urbs(int bus, int addr, int count) "dev %d:%d, count %d"
usb_host_reset(int bus, int addr) "dev %d:%d"
usb_host_auto_scan_enabled(void)
usb_host_auto_scan_disabled(void)
@@ -382,6 +403,7 @@ usb_host_parse_error(int bus, int addr, const char *errmsg) "dev %d:%d, msg %s"
# hw/scsi-bus.c
scsi_req_alloc(int target, int lun, int tag) "target %d lun %d tag %d"
+scsi_req_cancel(int target, int lun, int tag) "target %d lun %d tag %d"
scsi_req_data(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d"
scsi_req_data_canceled(int target, int lun, int tag, int len) "target %d lun %d tag %d len %d"
scsi_req_dequeue(int target, int lun, int tag) "target %d lun %d tag %d"
@@ -390,6 +412,7 @@ scsi_req_parsed(int target, int lun, int tag, int cmd, int mode, int xfer) "targ
scsi_req_parsed_lba(int target, int lun, int tag, int cmd, uint64_t lba) "target %d lun %d tag %d command %d lba %"PRIu64
scsi_req_parse_bad(int target, int lun, int tag, int cmd) "target %d lun %d tag %d command %d"
scsi_req_build_sense(int target, int lun, int tag, int key, int asc, int ascq) "target %d lun %d tag %d key %#02x asc %#02x ascq %#02x"
+scsi_device_set_ua(int target, int lun, int key, int asc, int ascq) "target %d lun %d key %#02x asc %#02x ascq %#02x"
scsi_report_luns(int target, int lun, int tag) "target %d lun %d tag %d"
scsi_inquiry(int target, int lun, int tag, int cdb1, int cdb2) "target %d lun %d tag %d page %#02x/%#02x"
scsi_test_unit_ready(int target, int lun, int tag) "target %d lun %d tag %d"
@@ -511,6 +534,85 @@ lm32_uart_irq_state(int level) "irq state %d"
# hw/lm32_sys.c
lm32_sys_memory_write(uint32_t addr, uint32_t value) "addr 0x%08x value 0x%08x"
+# hw/megasas.c
+megasas_init_firmware(uint64_t pa) "pa %" PRIx64 " "
+megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at %" PRIx64 " len %d head %" PRIx64 " tail %" PRIx64 " flags %x"
+megasas_initq_map_failed(int frame) "scmd %d: failed to map queue"
+megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d"
+megasas_qf_found(unsigned int index, uint64_t pa) "found mapped frame %x pa %" PRIx64 ""
+megasas_qf_new(unsigned int index, void *cmd) "return new frame %x cmd %p"
+megasas_qf_failed(unsigned long pa) "all frames busy for frame %lx"
+megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int tail, int busy) "enqueue frame %x count %d context %" PRIx64 " tail %x busy %d"
+megasas_qf_update(unsigned int head, unsigned int busy) "update reply queue head %x busy %d"
+megasas_qf_dequeue(unsigned int index) "dequeue frame %x"
+megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu"
+megasas_qf_complete_noirq(uint64_t context) "context %" PRIx64 " "
+megasas_qf_complete(uint64_t context, unsigned int tail, unsigned int offset, int busy, unsigned int doorbell) "context %" PRIx64 " tail %x offset %d busy %d doorbell %x"
+megasas_handle_frame(const char *cmd, uint64_t addr, uint64_t context, uint32_t count) "MFI cmd %s addr %" PRIx64 " context %" PRIx64 " count %d"
+megasas_frame_busy(uint64_t addr) "frame %" PRIx64 " busy"
+megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: Unhandled MFI cmd %x"
+megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu"
+megasas_scsi_target_not_present(const char *frame, int bus, int dev, int lun) "%s dev %x/%x/%x target not present"
+megasas_scsi_invalid_cdb_len(const char *frame, int bus, int dev, int lun, int len) "%s dev %x/%x/%x invalid cdb len %d"
+megasas_iov_read_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes"
+megasas_iov_write_overflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes"
+megasas_iov_read_underflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes"
+megasas_iov_write_underflow(int cmd, int bytes, int len) "scmd %d: %d/%d bytes"
+megasas_scsi_req_alloc_failed(const char *frame, int dev, int lun) "%s dev %x/%x req allocation failed"
+megasas_scsi_read_start(int cmd, int len) "scmd %d: transfer %d bytes of data"
+megasas_scsi_write_start(int cmd, int len) "scmd %d: transfer %d bytes of data"
+megasas_scsi_nodata(int cmd) "scmd %d: no data to be transferred"
+megasas_scsi_complete(int cmd, uint32_t status, int len, int xfer) "scmd %d: finished with status %x, len %u/%u"
+megasas_command_complete(int cmd, uint32_t status, uint32_t resid) "scmd %d: command completed, status %x, residual %d"
+megasas_handle_io(int cmd, const char *frame, int dev, int lun, unsigned long lba, unsigned long count) "scmd %d: %s dev %x/%x lba %lx count %lu"
+megasas_io_target_not_present(int cmd, const char *frame, int dev, int lun) "scmd %d: %s dev 1/%x/%x LUN not present"
+megasas_io_read_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA %lx %lu blocks (%lu bytes)"
+megasas_io_write_start(int cmd, unsigned long lba, unsigned long count, unsigned long len) "scmd %d: start LBA %lx %lu blocks (%lu bytes)"
+megasas_io_complete(int cmd, uint32_t len) "scmd %d: %d bytes completed"
+megasas_io_read(int cmd, int bytes, int len, unsigned long offset) "scmd %d: %d/%d bytes, iov offset %lu"
+megasas_io_write(int cmd, int bytes, int len, unsigned long offset) "scmd %d: %d/%d bytes, iov offset %lu"
+megasas_io_continue(int cmd, int bytes) "scmd %d: %d bytes left"
+megasas_iovec_map_failed(int cmd, int index, unsigned long iov_size) "scmd %d: iovec %d size %lu"
+megasas_iovec_sgl_overflow(int cmd, int index, int limit) "scmd %d: iovec count %d limit %d"
+megasas_iovec_sgl_underflow(int cmd, int index) "scmd %d: iovec count %d"
+megasas_iovec_sgl_invalid(int cmd, int index, uint64_t pa, uint32_t len) "scmd %d: element %d pa %" PRIx64 " len %u"
+megasas_iovec_overflow(int cmd, int len, int limit) "scmd %d: len %d limit %d"
+megasas_iovec_underflow(int cmd, int len, int limit) "scmd %d: len %d limit %d"
+megasas_handle_dcmd(int cmd, int opcode) "scmd %d: MFI DCMD opcode %x"
+megasas_finish_dcmd(int cmd, int size) "scmd %d: MFI DCMD wrote %d bytes"
+megasas_dcmd_req_alloc_failed(int cmd, const char *desc) "scmd %d: %s alloc failed"
+megasas_dcmd_internal_submit(int cmd, const char *desc, int dev) "scmd %d: %s to dev %d"
+megasas_dcmd_internal_finish(int cmd, int opcode, int lun) "scmd %d: DCMD finish internal cmd %x lun %d"
+megasas_dcmd_internal_invalid(int cmd, int opcode) "scmd %d: Invalid internal DCMD %x"
+megasas_dcmd_unhandled(int cmd, int opcode, int len) "scmd %d: opcode %x, len %d"
+megasas_dcmd_zero_sge(int cmd) "scmd %d: zero DCMD sge count"
+megasas_dcmd_invalid_sge(int cmd, int count) "scmd %d: invalid DCMD sge count %d"
+megasas_dcmd_map_failed(int cmd) "scmd %d: Failed to map DCMD buffer"
+megasas_dcmd_invalid_xfer_len(int cmd, unsigned long size, unsigned long max) "scmd %d: invalid xfer len %ld, max %ld"
+megasas_dcmd_enter(int cmd, const char *dcmd, int len) "scmd %d: DCMD %s len %d"
+megasas_dcmd_dummy(int cmd, unsigned long size) "scmd %d: DCMD dummy xfer len %ld"
+megasas_dcmd_set_fw_time(int cmd, unsigned long time) "scmd %d: Set FW time %lx"
+megasas_dcmd_pd_get_list(int cmd, int num, int max, int offset) "scmd %d: DCMD PD get list: %d / %d PDs, size %d"
+megasas_dcmd_ld_get_list(int cmd, int num, int max) "scmd %d: DCMD LD get list: found %d / %d LDs"
+megasas_dcmd_ld_get_info(int cmd, int ld_id) "scmd %d: DCMD LD get info for dev %d"
+megasas_dcmd_pd_get_info(int cmd, int pd_id) "scmd %d: DCMD PD get info for dev %d"
+megasas_dcmd_pd_list_query(int cmd, int flags) "scmd %d: DCMD PD list query flags %x"
+megasas_dcmd_unsupported(int cmd, unsigned long size) "scmd %d: set properties len %ld"
+megasas_abort_frame(int cmd, int abort_cmd) "scmd %d: aborting frame %x"
+megasas_abort_no_cmd(int cmd, uint64_t context) "scmd %d: no active command for frame context %" PRIx64 ""
+megasas_abort_invalid_context(int cmd, uint64_t context, int abort_cmd) "scmd %d: invalid frame context %" PRIx64 " for abort frame %x"
+megasas_reset(void) "Reset"
+megasas_init(int sges, int cmds, const char *intr, const char *mode) "Using %d sges, %d cmds, %s, %s mode"
+megasas_msix_raise(int vector) "vector %d"
+megasas_irq_lower(void) "INTx"
+megasas_irq_raise(void) "INTx"
+megasas_intr_enabled(void) "Interrupts enabled"
+megasas_intr_disabled(void) "Interrupts disabled"
+megasas_mmio_readl(unsigned long addr, uint32_t val) "addr 0x%lx: 0x%x"
+megasas_mmio_invalid_readl(unsigned long addr) "addr 0x%lx"
+megasas_mmio_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x"
+megasas_mmio_invalid_writel(uint32_t addr, uint32_t val) "addr 0x%x: 0x%x"
+
# hw/milkymist-ac97.c
milkymist_ac97_memory_read(uint32_t addr, uint32_t value) "addr %08x value %08x"
milkymist_ac97_memory_write(uint32_t addr, uint32_t value) "addr %08x value %08x"
@@ -644,6 +746,9 @@ iscsi_aio_read16_cb(void *iscsi, int status, void *acb, int canceled) "iscsi %p
iscsi_aio_readv(void *iscsi, int64_t sector_num, int nb_sectors, void *opaque, void *acb) "iscsi %p sector_num %"PRId64" nb_sectors %d opaque %p acb %p"
# hw/esp.c
+esp_error_fifo_overrun(void) "FIFO overrun"
+esp_error_unhandled_command(uint32_t val) "unhandled command (%2.2x)"
+esp_error_invalid_write(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]"
esp_raise_irq(void) "Raise IRQ"
esp_lower_irq(void) "Lower IRQ"
esp_dma_enable(void) "Raise enable"
@@ -669,10 +774,24 @@ esp_mem_writeb_cmd_iccs(uint32_t val) "Initiator Command Complete Sequence (%2.2
esp_mem_writeb_cmd_msgacc(uint32_t val) "Message Accepted (%2.2x)"
esp_mem_writeb_cmd_pad(uint32_t val) "Transfer padding (%2.2x)"
esp_mem_writeb_cmd_satn(uint32_t val) "Set ATN (%2.2x)"
+esp_mem_writeb_cmd_rstatn(uint32_t val) "Reset ATN (%2.2x)"
esp_mem_writeb_cmd_sel(uint32_t val) "Select without ATN (%2.2x)"
esp_mem_writeb_cmd_selatn(uint32_t val) "Select with ATN (%2.2x)"
esp_mem_writeb_cmd_selatns(uint32_t val) "Select with ATN & stop (%2.2x)"
esp_mem_writeb_cmd_ensel(uint32_t val) "Enable selection (%2.2x)"
+esp_mem_writeb_cmd_dissel(uint32_t val) "Disable selection (%2.2x)"
+esp_pci_error_invalid_dma_direction(void) "invalid DMA transfer direction"
+esp_pci_error_invalid_read(uint32_t reg) "read access outside bounds (reg 0x%x)"
+esp_pci_error_invalid_write(uint32_t reg) "write access outside bounds (reg 0x%x)"
+esp_pci_error_invalid_write_dma(uint32_t val, uint32_t addr) "invalid write of 0x%02x at [0x%x]"
+esp_pci_dma_read(uint32_t saddr, uint32_t reg) "reg[%d]: 0x%8.8x"
+esp_pci_dma_write(uint32_t saddr, uint32_t reg, uint32_t val) "reg[%d]: 0x%8.8x -> 0x%8.8x"
+esp_pci_dma_idle(uint32_t val) "IDLE (%.8x)"
+esp_pci_dma_blast(uint32_t val) "BLAST (%.8x)"
+esp_pci_dma_abort(uint32_t val) "ABORT (%.8x)"
+esp_pci_dma_start(uint32_t val) "START (%.8x)"
+esp_pci_sbac_read(uint32_t reg) "sbac: 0x%8.8x"
+esp_pci_sbac_write(uint32_t reg, uint32_t val) "sbac: 0x%8.8x -> 0x%8.8x"
# monitor.c
handle_qmp_command(void *mon, const char *cmd_name) "mon %p cmd_name \"%s\""
@@ -782,6 +901,11 @@ displaysurface_resize(void *display_state, void *display_surface, int width, int
# vga.c
ppm_save(const char *filename, void *display_surface) "%s surface=%p"
+# savevm.c
+
+savevm_section_start(void) ""
+savevm_section_end(unsigned int section_id) "section_id %u"
+
# hw/qxl.c
disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"
disable qxl_io_write_vga(int qid, const char *mode, uint32_t addr, uint32_t val) "%d %s addr=%u val=%u"
diff --git a/trace/control.c b/trace/control.c
index 4c5527d20a..22d5863eeb 100644
--- a/trace/control.c
+++ b/trace/control.c
@@ -27,6 +27,9 @@ void trace_backend_init_events(const char *fname)
size_t len = strlen(line_buf);
if (len > 1) { /* skip empty lines */
line_buf[len - 1] = '\0';
+ if (line_buf[0] == '#') { /* skip commented lines */
+ continue;
+ }
if (!trace_event_set_state(line_buf, true)) {
fprintf(stderr,
"error: trace event '%s' does not exist\n", line_buf);
diff --git a/trace/simple.c b/trace/simple.c
index b4a3c6e950..b700ea317c 100644
--- a/trace/simple.c
+++ b/trace/simple.c
@@ -27,7 +27,7 @@
#define HEADER_MAGIC 0xf2b177cb0aa429b4ULL
/** Trace file version number, bump if format changes */
-#define HEADER_VERSION 0
+#define HEADER_VERSION 2
/** Records were dropped event ID */
#define DROPPED_EVENT_ID (~(uint64_t)0 - 1)
@@ -35,23 +35,6 @@
/** Trace record is valid */
#define TRACE_RECORD_VALID ((uint64_t)1 << 63)
-/** Trace buffer entry */
-typedef struct {
- uint64_t event;
- uint64_t timestamp_ns;
- uint64_t x1;
- uint64_t x2;
- uint64_t x3;
- uint64_t x4;
- uint64_t x5;
- uint64_t x6;
-} TraceRecord;
-
-enum {
- TRACE_BUF_LEN = 4096,
- TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
-};
-
/*
* Trace records are written out by a dedicated thread. The thread waits for
* records to become available, writes them out, and then waits again.
@@ -62,11 +45,48 @@ static GCond *trace_empty_cond;
static bool trace_available;
static bool trace_writeout_enabled;
-static TraceRecord trace_buf[TRACE_BUF_LEN];
+enum {
+ TRACE_BUF_LEN = 4096 * 64,
+ TRACE_BUF_FLUSH_THRESHOLD = TRACE_BUF_LEN / 4,
+};
+
+uint8_t trace_buf[TRACE_BUF_LEN];
static unsigned int trace_idx;
+static unsigned int writeout_idx;
+static uint64_t dropped_events;
static FILE *trace_fp;
static char *trace_file_name = NULL;
+/* Trace buffer entry */
+typedef struct {
+ uint64_t event; /* TraceEventID */
+ uint64_t timestamp_ns;
+ uint32_t length; /* in bytes */
+ uint32_t reserved; /* unused */
+ uint8_t arguments[];
+} TraceRecord;
+
+typedef struct {
+ uint64_t header_event_id; /* HEADER_EVENT_ID */
+ uint64_t header_magic; /* HEADER_MAGIC */
+ uint64_t header_version; /* HEADER_VERSION */
+} TraceRecordHeader;
+
+
+static void read_from_buffer(unsigned int idx, void *dataptr, size_t size);
+static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size);
+
+static void clear_buffer_range(unsigned int idx, size_t len)
+{
+ uint32_t num = 0;
+ while (num < len) {
+ if (idx >= TRACE_BUF_LEN) {
+ idx = idx % TRACE_BUF_LEN;
+ }
+ trace_buf[idx++] = 0;
+ num++;
+ }
+}
/**
* Read a trace record from the trace buffer
*
@@ -75,16 +95,30 @@ static char *trace_file_name = NULL;
*
* Returns false if the record is not valid.
*/
-static bool get_trace_record(unsigned int idx, TraceRecord *record)
+static bool get_trace_record(unsigned int idx, TraceRecord **recordptr)
{
- if (!(trace_buf[idx].event & TRACE_RECORD_VALID)) {
+ uint64_t event_flag = 0;
+ TraceRecord record;
+ /* read the event flag to see if it's a valid record */
+ read_from_buffer(idx, &record, sizeof(event_flag));
+
+ if (!(record.event & TRACE_RECORD_VALID)) {
return false;
}
- __sync_synchronize(); /* read memory barrier before accessing record */
-
- *record = trace_buf[idx];
- record->event &= ~TRACE_RECORD_VALID;
+ smp_rmb(); /* read memory barrier before accessing record */
+ /* read the record header to know record length */
+ read_from_buffer(idx, &record, sizeof(TraceRecord));
+ *recordptr = malloc(record.length); /* don't use g_malloc, it can deadlock when traced */
+ /* make a copy of record to avoid being overwritten */
+ read_from_buffer(idx, *recordptr, record.length);
+ smp_rmb(); /* memory barrier before clearing valid flag */
+ (*recordptr)->event &= ~TRACE_RECORD_VALID;
+ /* clear the trace buffer range for the consumed record; otherwise any byte
+ * with its MSB set may be taken for a valid event ID when the writer
+ * thread crosses this range of the buffer again.
+ */
+ clear_buffer_range(idx, record.length);
return true;
}
@@ -120,29 +154,39 @@ static void wait_for_trace_records_available(void)
static gpointer writeout_thread(gpointer opaque)
{
- TraceRecord record;
- unsigned int writeout_idx = 0;
- unsigned int num_available, idx;
+ TraceRecord *recordptr;
+ union {
+ TraceRecord rec;
+ uint8_t bytes[sizeof(TraceRecord) + sizeof(uint64_t)];
+ } dropped;
+ unsigned int idx = 0;
+ uint64_t dropped_count;
size_t unused __attribute__ ((unused));
for (;;) {
wait_for_trace_records_available();
- num_available = trace_idx - writeout_idx;
- if (num_available > TRACE_BUF_LEN) {
- record = (TraceRecord){
- .event = DROPPED_EVENT_ID,
- .x1 = num_available,
- };
- unused = fwrite(&record, sizeof(record), 1, trace_fp);
- writeout_idx += num_available;
+ if (dropped_events) {
+ dropped.rec.event = DROPPED_EVENT_ID;
+ dropped.rec.timestamp_ns = get_clock();
+ dropped.rec.length = sizeof(TraceRecord) + sizeof(dropped_events);
+ dropped.rec.reserved = 0;
+ while (1) {
+ dropped_count = dropped_events;
+ if (g_atomic_int_compare_and_exchange((gint *)&dropped_events,
+ dropped_count, 0)) {
+ break;
+ }
+ }
+ memcpy(dropped.rec.arguments, &dropped_count, sizeof(uint64_t));
+ unused = fwrite(&dropped.rec, dropped.rec.length, 1, trace_fp);
}
- idx = writeout_idx % TRACE_BUF_LEN;
- while (get_trace_record(idx, &record)) {
- trace_buf[idx].event = 0; /* clear valid bit */
- unused = fwrite(&record, sizeof(record), 1, trace_fp);
- idx = ++writeout_idx % TRACE_BUF_LEN;
+ while (get_trace_record(idx, &recordptr)) {
+ unused = fwrite(recordptr, recordptr->length, 1, trace_fp);
+ writeout_idx += recordptr->length;
+ free(recordptr); /* don't use g_free, it can deadlock when traced */
+ idx = writeout_idx % TRACE_BUF_LEN;
}
fflush(trace_fp);
@@ -150,73 +194,93 @@ static gpointer writeout_thread(gpointer opaque)
return NULL;
}
-static void trace(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3,
- uint64_t x4, uint64_t x5, uint64_t x6)
+void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val)
{
- unsigned int idx;
- uint64_t timestamp;
-
- if (!trace_list[event].state) {
- return;
- }
-
- timestamp = get_clock();
-#if GLIB_CHECK_VERSION(2, 30, 0)
- idx = g_atomic_int_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
-#else
- idx = g_atomic_int_exchange_and_add((gint *)&trace_idx, 1) % TRACE_BUF_LEN;
-#endif
- trace_buf[idx] = (TraceRecord){
- .event = event,
- .timestamp_ns = timestamp,
- .x1 = x1,
- .x2 = x2,
- .x3 = x3,
- .x4 = x4,
- .x5 = x5,
- .x6 = x6,
- };
- __sync_synchronize(); /* write barrier before marking as valid */
- trace_buf[idx].event |= TRACE_RECORD_VALID;
-
- if ((idx + 1) % TRACE_BUF_FLUSH_THRESHOLD == 0) {
- flush_trace_file(false);
- }
+ rec->rec_off = write_to_buffer(rec->rec_off, &val, sizeof(uint64_t));
}
-void trace0(TraceEventID event)
+void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen)
{
- trace(event, 0, 0, 0, 0, 0, 0);
+ /* Write string length first */
+ rec->rec_off = write_to_buffer(rec->rec_off, &slen, sizeof(slen));
+ /* Write actual string now */
+ rec->rec_off = write_to_buffer(rec->rec_off, (void*)s, slen);
}
-void trace1(TraceEventID event, uint64_t x1)
+int trace_record_start(TraceBufferRecord *rec, TraceEventID event, size_t datasize)
{
- trace(event, x1, 0, 0, 0, 0, 0);
-}
+ unsigned int idx, rec_off, old_idx, new_idx;
+ uint32_t rec_len = sizeof(TraceRecord) + datasize;
+ uint64_t timestamp_ns = get_clock();
+
+ while (1) {
+ old_idx = trace_idx;
+ smp_rmb();
+ new_idx = old_idx + rec_len;
+
+ if (new_idx - writeout_idx > TRACE_BUF_LEN) {
+ /* trace buffer full, event dropped! */
+ g_atomic_int_inc((gint *)&dropped_events);
+ return -ENOSPC;
+ }
-void trace2(TraceEventID event, uint64_t x1, uint64_t x2)
-{
- trace(event, x1, x2, 0, 0, 0, 0);
-}
+ if (g_atomic_int_compare_and_exchange((gint *)&trace_idx,
+ old_idx, new_idx)) {
+ break;
+ }
+ }
-void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3)
-{
- trace(event, x1, x2, x3, 0, 0, 0);
+ idx = old_idx % TRACE_BUF_LEN;
+ /* to check later whether the flush threshold was crossed */
+ rec->next_tbuf_idx = new_idx % TRACE_BUF_LEN;
+
+ rec_off = idx;
+ rec_off = write_to_buffer(rec_off, (uint8_t*)&event, sizeof(event));
+ rec_off = write_to_buffer(rec_off, (uint8_t*)&timestamp_ns, sizeof(timestamp_ns));
+ rec_off = write_to_buffer(rec_off, (uint8_t*)&rec_len, sizeof(rec_len));
+
+ rec->tbuf_idx = idx;
+ rec->rec_off = (idx + sizeof(TraceRecord)) % TRACE_BUF_LEN;
+ return 0;
}
-void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4)
+static void read_from_buffer(unsigned int idx, void *dataptr, size_t size)
{
- trace(event, x1, x2, x3, x4, 0, 0);
+ uint8_t *data_ptr = dataptr;
+ uint32_t x = 0;
+ while (x < size) {
+ if (idx >= TRACE_BUF_LEN) {
+ idx = idx % TRACE_BUF_LEN;
+ }
+ data_ptr[x++] = trace_buf[idx++];
+ }
}
-void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5)
+static unsigned int write_to_buffer(unsigned int idx, void *dataptr, size_t size)
{
- trace(event, x1, x2, x3, x4, x5, 0);
+ uint8_t *data_ptr = dataptr;
+ uint32_t x = 0;
+ while (x < size) {
+ if (idx >= TRACE_BUF_LEN) {
+ idx = idx % TRACE_BUF_LEN;
+ }
+ trace_buf[idx++] = data_ptr[x++];
+ }
+ return idx; /* most callers want to know where to write next */
}
-void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6)
+void trace_record_finish(TraceBufferRecord *rec)
{
- trace(event, x1, x2, x3, x4, x5, x6);
+ uint8_t temp_rec[sizeof(TraceRecord)];
+ TraceRecord *record = (TraceRecord *) temp_rec;
+ read_from_buffer(rec->tbuf_idx, temp_rec, sizeof(TraceRecord));
+ smp_wmb(); /* write barrier before marking as valid */
+ record->event |= TRACE_RECORD_VALID;
+ write_to_buffer(rec->tbuf_idx, temp_rec, sizeof(TraceRecord));
+
+ if ((trace_idx - writeout_idx) > TRACE_BUF_FLUSH_THRESHOLD) {
+ flush_trace_file(false);
+ }
}
void st_set_trace_file_enabled(bool enable)
@@ -231,10 +295,11 @@ void st_set_trace_file_enabled(bool enable)
flush_trace_file(true);
if (enable) {
- static const TraceRecord header = {
- .event = HEADER_EVENT_ID,
- .timestamp_ns = HEADER_MAGIC,
- .x1 = HEADER_VERSION,
+ static const TraceRecordHeader header = {
+ .header_event_id = HEADER_EVENT_ID,
+ .header_magic = HEADER_MAGIC,
+ /* Older log readers will check for version at next location */
+ .header_version = HEADER_VERSION,
};
trace_fp = fopen(trace_file_name, "wb");
@@ -291,24 +356,6 @@ void st_print_trace_file_status(FILE *stream, int (*stream_printf)(FILE *stream,
trace_file_name, trace_fp ? "on" : "off");
}
-void st_print_trace(FILE *stream, int (*stream_printf)(FILE *stream, const char *fmt, ...))
-{
- unsigned int i;
-
- for (i = 0; i < TRACE_BUF_LEN; i++) {
- TraceRecord record;
-
- if (!get_trace_record(i, &record)) {
- continue;
- }
- stream_printf(stream, "Event %" PRIu64 " : %" PRIx64 " %" PRIx64
- " %" PRIx64 " %" PRIx64 " %" PRIx64 " %" PRIx64 "\n",
- record.event, record.x1, record.x2,
- record.x3, record.x4, record.x5,
- record.x6);
- }
-}
-
void st_flush_trace_buffer(void)
{
flush_trace_file(true);
diff --git a/trace/simple.h b/trace/simple.h
index 466e75b4ff..7e521c1e1f 100644
--- a/trace/simple.h
+++ b/trace/simple.h
@@ -22,17 +22,41 @@ typedef struct {
bool state;
} TraceEvent;
-void trace0(TraceEventID event);
-void trace1(TraceEventID event, uint64_t x1);
-void trace2(TraceEventID event, uint64_t x1, uint64_t x2);
-void trace3(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3);
-void trace4(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4);
-void trace5(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5);
-void trace6(TraceEventID event, uint64_t x1, uint64_t x2, uint64_t x3, uint64_t x4, uint64_t x5, uint64_t x6);
-void st_print_trace(FILE *stream, fprintf_function stream_printf);
void st_print_trace_file_status(FILE *stream, fprintf_function stream_printf);
void st_set_trace_file_enabled(bool enable);
bool st_set_trace_file(const char *file);
void st_flush_trace_buffer(void);
+typedef struct {
+ unsigned int tbuf_idx;
+ unsigned int next_tbuf_idx;
+ unsigned int rec_off;
+} TraceBufferRecord;
+
+/* Note for hackers: Make sure MAX_TRACE_STRLEN fits in the uint32_t length prefix */
+#define MAX_TRACE_STRLEN 512
+/**
+ * Initialize a trace record and claim space for it in the buffer
+ *
+ * @arglen number of bytes required for arguments
+ */
+int trace_record_start(TraceBufferRecord *rec, TraceEventID id, size_t arglen);
+
+/**
+ * Append a 64-bit argument to a trace record
+ */
+void trace_record_write_u64(TraceBufferRecord *rec, uint64_t val);
+
+/**
+ * Append a string argument to a trace record
+ */
+void trace_record_write_str(TraceBufferRecord *rec, const char *s, uint32_t slen);
+
+/**
+ * Mark a trace record completed
+ *
+ * Don't append any more arguments to the trace record after calling this.
+ */
+void trace_record_finish(TraceBufferRecord *rec);
+
#endif /* TRACE_SIMPLE_H */
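
A minimal sketch of how a generated per-event wrapper might drive the new record API, assuming trace/simple.h and <string.h> are included; the event ID TRACE_EXAMPLE and the arguments are placeholders (the real wrappers are emitted by tracetool):

static void trace_example(uint64_t addr, const char *name)
{
    TraceBufferRecord rec;
    uint32_t slen = strlen(name);
    /* one u64 argument plus the length-prefixed string */
    size_t arglen = sizeof(uint64_t) + sizeof(uint32_t) + slen;

    if (trace_record_start(&rec, TRACE_EXAMPLE, arglen)) {
        return; /* buffer full, the event is counted as dropped */
    }
    trace_record_write_u64(&rec, addr);
    trace_record_write_str(&rec, name, slen);
    trace_record_finish(&rec);
}
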
diff --git a/ui/Makefile.objs b/ui/Makefile.objs
index 3687c8a518..adc07be761 100644
--- a/ui/Makefile.objs
+++ b/ui/Makefile.objs
@@ -4,11 +4,7 @@ vnc-obj-y += vnc-enc-tight.o vnc-palette.o
vnc-obj-y += vnc-enc-zrle.o
vnc-obj-$(CONFIG_VNC_TLS) += vnc-tls.o vnc-auth-vencrypt.o
vnc-obj-$(CONFIG_VNC_SASL) += vnc-auth-sasl.o
-ifdef CONFIG_VNC_THREAD
-vnc-obj-y += vnc-jobs-async.o
-else
-vnc-obj-y += vnc-jobs-sync.o
-endif
+vnc-obj-y += vnc-jobs.o
common-obj-y += keymaps.o
common-obj-$(CONFIG_SPICE) += spice-core.o spice-input.o spice-display.o
diff --git a/ui/spice-display.c b/ui/spice-display.c
index 5418eb3c7c..3e8f0b3ad5 100644
--- a/ui/spice-display.c
+++ b/ui/spice-display.c
@@ -244,6 +244,8 @@ void qemu_spice_create_host_primary(SimpleSpiceDisplay *ssd)
{
QXLDevSurfaceCreate surface;
+ memset(&surface, 0, sizeof(surface));
+
dprint(1, "%s: %dx%d\n", __FUNCTION__,
ds_get_width(ssd->ds), ds_get_height(ssd->ds));
diff --git a/ui/vnc-auth-vencrypt.c b/ui/vnc-auth-vencrypt.c
index 674ba97dc7..c59b188602 100644
--- a/ui/vnc-auth-vencrypt.c
+++ b/ui/vnc-auth-vencrypt.c
@@ -47,7 +47,8 @@ static void start_auth_vencrypt_subauth(VncState *vs)
case VNC_AUTH_VENCRYPT_TLSSASL:
case VNC_AUTH_VENCRYPT_X509SASL:
VNC_DEBUG("Start TLS auth SASL\n");
- return start_auth_sasl(vs);
+ start_auth_sasl(vs);
+ break;
#endif /* CONFIG_VNC_SASL */
default: /* Should not be possible, but just in case */
diff --git a/ui/vnc-jobs-async.c b/ui/vnc-jobs.c
index 087b84d319..087b84d319 100644
--- a/ui/vnc-jobs-async.c
+++ b/ui/vnc-jobs.c
diff --git a/ui/vnc-jobs.h b/ui/vnc-jobs.h
index 4c661f95e5..86e6d888c6 100644
--- a/ui/vnc-jobs.h
+++ b/ui/vnc-jobs.h
@@ -38,51 +38,35 @@ bool vnc_has_job(VncState *vs);
void vnc_jobs_clear(VncState *vs);
void vnc_jobs_join(VncState *vs);
-#ifdef CONFIG_VNC_THREAD
-
void vnc_jobs_consume_buffer(VncState *vs);
void vnc_start_worker_thread(void);
bool vnc_worker_thread_running(void);
void vnc_stop_worker_thread(void);
-#endif /* CONFIG_VNC_THREAD */
-
/* Locks */
static inline int vnc_trylock_display(VncDisplay *vd)
{
-#ifdef CONFIG_VNC_THREAD
return qemu_mutex_trylock(&vd->mutex);
-#else
- return 0;
-#endif
}
static inline void vnc_lock_display(VncDisplay *vd)
{
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_lock(&vd->mutex);
-#endif
}
static inline void vnc_unlock_display(VncDisplay *vd)
{
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_unlock(&vd->mutex);
-#endif
}
static inline void vnc_lock_output(VncState *vs)
{
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_lock(&vs->output_mutex);
-#endif
}
static inline void vnc_unlock_output(VncState *vs)
{
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_unlock(&vs->output_mutex);
-#endif
}
#endif /* VNC_JOBS_H */
diff --git a/ui/vnc.c b/ui/vnc.c
index 54bc5adab6..312ad7fe36 100644
--- a/ui/vnc.c
+++ b/ui/vnc.c
@@ -32,6 +32,7 @@
#include "acl.h"
#include "qemu-objects.h"
#include "qmp-commands.h"
+#include "osdep.h"
#define VNC_REFRESH_INTERVAL_BASE 30
#define VNC_REFRESH_INTERVAL_INC 50
@@ -526,7 +527,6 @@ static void vnc_desktop_resize(VncState *vs)
vnc_flush(vs);
}
-#ifdef CONFIG_VNC_THREAD
static void vnc_abort_display_jobs(VncDisplay *vd)
{
VncState *vs;
@@ -545,11 +545,6 @@ static void vnc_abort_display_jobs(VncDisplay *vd)
vnc_unlock_output(vs);
}
}
-#else
-static void vnc_abort_display_jobs(VncDisplay *vd)
-{
-}
-#endif
static void vnc_dpy_resize(DisplayState *ds)
{
@@ -867,19 +862,12 @@ static int find_and_clear_dirty_height(struct VncState *vs,
return h;
}
-#ifdef CONFIG_VNC_THREAD
static int vnc_update_client_sync(VncState *vs, int has_dirty)
{
int ret = vnc_update_client(vs, has_dirty);
vnc_jobs_join(vs);
return ret;
}
-#else
-static int vnc_update_client_sync(VncState *vs, int has_dirty)
-{
- return vnc_update_client(vs, has_dirty);
-}
-#endif
static int vnc_update_client(VncState *vs, int has_dirty)
{
@@ -1066,11 +1054,9 @@ static void vnc_disconnect_finish(VncState *vs)
qemu_remove_led_event_handler(vs->led);
vnc_unlock_output(vs);
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_destroy(&vs->output_mutex);
qemu_bh_delete(vs->bh);
buffer_free(&vs->jobs_buffer);
-#endif
for (i = 0; i < VNC_STAT_ROWS; ++i) {
g_free(vs->lossy_rect[i]);
@@ -1286,14 +1272,12 @@ static long vnc_client_read_plain(VncState *vs)
return ret;
}
-#ifdef CONFIG_VNC_THREAD
static void vnc_jobs_bh(void *opaque)
{
VncState *vs = opaque;
vnc_jobs_consume_buffer(vs);
}
-#endif
/*
* First function called whenever there is more data to be read from
@@ -2699,10 +2683,8 @@ static void vnc_connect(VncDisplay *vd, int csock, int skipauth)
vs->as.fmt = AUD_FMT_S16;
vs->as.endianness = 0;
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_init(&vs->output_mutex);
vs->bh = qemu_bh_new(vnc_jobs_bh, vs);
-#endif
QTAILQ_INSERT_HEAD(&vd->clients, vs, next);
@@ -2762,10 +2744,8 @@ void vnc_display_init(DisplayState *ds)
if (!vs->kbd_layout)
exit(1);
-#ifdef CONFIG_VNC_THREAD
qemu_mutex_init(&vs->mutex);
vnc_start_worker_thread();
-#endif
dcl->dpy_copy = vnc_dpy_copy;
dcl->dpy_update = vnc_dpy_update;
@@ -2896,6 +2876,15 @@ int vnc_display_open(DisplayState *ds, const char *display)
while ((options = strchr(options, ','))) {
options++;
if (strncmp(options, "password", 8) == 0) {
+ if (fips_get_state()) {
+ fprintf(stderr,
+ "VNC password auth disabled due to FIPS mode, "
+ "consider using the VeNCrypt or SASL authentication "
+ "methods as an alternative\n");
+ g_free(vs->display);
+ vs->display = NULL;
+ return -1;
+ }
password = 1; /* Require password auth */
} else if (strncmp(options, "reverse", 7) == 0) {
reverse = 1;
@@ -3110,5 +3099,5 @@ void vnc_display_add_client(DisplayState *ds, int csock, int skipauth)
{
VncDisplay *vs = ds ? (VncDisplay *)ds->opaque : vnc_display;
- return vnc_connect(vs, csock, skipauth);
+ vnc_connect(vs, csock, skipauth);
}
diff --git a/ui/vnc.h b/ui/vnc.h
index a851ebd8ea..068c2fcda5 100644
--- a/ui/vnc.h
+++ b/ui/vnc.h
@@ -29,9 +29,7 @@
#include "qemu-common.h"
#include "qemu-queue.h"
-#ifdef CONFIG_VNC_THREAD
#include "qemu-thread.h"
-#endif
#include "console.h"
#include "monitor.h"
#include "audio/audio.h"
@@ -146,9 +144,7 @@ struct VncDisplay
DisplayState *ds;
kbd_layout_t *kbd_layout;
int lock_key_sync;
-#ifdef CONFIG_VNC_THREAD
QemuMutex mutex;
-#endif
QEMUCursor *cursor;
int cursor_msize;
@@ -216,7 +212,6 @@ typedef struct VncZywrle {
int buf[VNC_ZRLE_TILE_WIDTH * VNC_ZRLE_TILE_HEIGHT];
} VncZywrle;
-#ifdef CONFIG_VNC_THREAD
struct VncRect
{
int x;
@@ -238,14 +233,6 @@ struct VncJob
QLIST_HEAD(, VncRectEntry) rectangles;
QTAILQ_ENTRY(VncJob) next;
};
-#else
-struct VncJob
-{
- VncState *vs;
- int rectangles;
- size_t saved_offset;
-};
-#endif
struct VncState
{
@@ -300,13 +287,9 @@ struct VncState
QEMUPutLEDEntry *led;
bool abort;
-#ifndef CONFIG_VNC_THREAD
- VncJob job;
-#else
QemuMutex output_mutex;
QEMUBH *bh;
Buffer jobs_buffer;
-#endif
/* Encoding specific, if you add something here, don't forget to
* update vnc_async_encoding_start()
diff --git a/user-exec.c b/user-exec.c
index d8c2ad9f2f..b9ea9dd32f 100644
--- a/user-exec.c
+++ b/user-exec.c
@@ -18,7 +18,9 @@
*/
#include "config.h"
#include "cpu.h"
+#ifndef CONFIG_TCG_PASS_AREG0
#include "dyngen-exec.h"
+#endif
#include "disas.h"
#include "tcg.h"
@@ -41,7 +43,7 @@
static void exception_action(CPUArchState *env1)
{
#if defined(TARGET_I386)
- raise_exception_err_env(env1, env1->exception_index, env1->error_code);
+ raise_exception_err(env1, env1->exception_index, env1->error_code);
#else
cpu_loop_exit(env1);
#endif
@@ -58,9 +60,11 @@ void cpu_resume_from_signal(CPUArchState *env1, void *puc)
struct sigcontext *uc = puc;
#endif
+#ifndef CONFIG_TCG_PASS_AREG0
env = env1;
/* XXX: restore cpu registers saved in host registers */
+#endif
if (puc) {
/* XXX: use siglongjmp ? */
@@ -74,8 +78,8 @@ void cpu_resume_from_signal(CPUArchState *env1, void *puc)
sigprocmask(SIG_SETMASK, &uc->sc_mask, NULL);
#endif
}
- env->exception_index = -1;
- longjmp(env->jmp_env, 1);
+ env1->exception_index = -1;
+ longjmp(env1->jmp_env, 1);
}
/* 'pc' is the host PC at which the exception was raised. 'address' is
@@ -89,9 +93,11 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
TranslationBlock *tb;
int ret;
+#ifndef CONFIG_TCG_PASS_AREG0
if (cpu_single_env) {
env = cpu_single_env; /* XXX: find a correct solution for multithread */
}
+#endif
#if defined(DEBUG_SIGNAL)
qemu_printf("qemu: SIGSEGV pc=0x%08lx address=%08lx w=%d oldset=0x%08lx\n",
pc, address, is_write, *(unsigned long *)old_set);
@@ -103,7 +109,8 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
}
/* see if it is an MMU fault */
- ret = cpu_handle_mmu_fault(env, address, is_write, MMU_USER_IDX);
+ ret = cpu_handle_mmu_fault(cpu_single_env, address, is_write,
+ MMU_USER_IDX);
if (ret < 0) {
return 0; /* not an MMU fault */
}
@@ -115,13 +122,13 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
if (tb) {
/* the PC is inside the translated code. It means that we have
a virtual CPU fault */
- cpu_restore_state(tb, env, pc);
+ cpu_restore_state(tb, cpu_single_env, pc);
}
/* we restore the process signal mask as the sigreturn should
do it (XXX: use sigsetjmp) */
sigprocmask(SIG_SETMASK, old_set, NULL);
- exception_action(env);
+ exception_action(cpu_single_env);
/* never comes here */
return 1;
@@ -588,7 +595,7 @@ int cpu_signal_handler(int host_signum, void *pinfo,
int cpu_signal_handler(int host_signum, void *pinfo,
void *puc)
{
- struct siginfo *info = pinfo;
+ siginfo_t *info = pinfo;
struct ucontext *uc = puc;
unsigned long pc = uc->uc_mcontext.sc_iaoq[0];
uint32_t insn = *(uint32_t *)pc;
diff --git a/vl.c b/vl.c
index 1329c30e6a..e71cb30ecf 100644
--- a/vl.c
+++ b/vl.c
@@ -28,6 +28,7 @@
#include <errno.h>
#include <sys/time.h>
#include <zlib.h>
+#include "bitmap.h"
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"
@@ -130,8 +131,8 @@ int main(int argc, char **argv)
#include "qemu-timer.h"
#include "qemu-char.h"
#include "cache-utils.h"
-#include "block.h"
#include "blockdev.h"
+#include "hw/block-common.h"
#include "block-migration.h"
#include "dma.h"
#include "audio/audio.h"
@@ -159,6 +160,7 @@ int main(int argc, char **argv)
#include "qemu-queue.h"
#include "cpus.h"
#include "arch_init.h"
+#include "osdep.h"
#include "ui/qemu-spice.h"
@@ -240,7 +242,7 @@ QTAILQ_HEAD(, FWBootEntry) fw_boot_order = QTAILQ_HEAD_INITIALIZER(fw_boot_order
int nb_numa_nodes;
uint64_t node_mem[MAX_NODES];
-uint64_t node_cpumask[MAX_NODES];
+unsigned long *node_cpumask[MAX_NODES];
uint8_t qemu_uuid[16];
@@ -951,6 +953,8 @@ static void numa_add(const char *optarg)
unsigned long long value, endvalue;
int nodenr;
+ value = endvalue = 0ULL;
+
optarg = get_opt_name(option, 128, optarg, ',') + 1;
if (!strcmp(option, "node")) {
if (get_param_value(option, 128, "nodeid", optarg) == 0) {
@@ -970,27 +974,22 @@ static void numa_add(const char *optarg)
}
node_mem[nodenr] = sval;
}
- if (get_param_value(option, 128, "cpus", optarg) == 0) {
- node_cpumask[nodenr] = 0;
- } else {
+ if (get_param_value(option, 128, "cpus", optarg) != 0) {
value = strtoull(option, &endptr, 10);
- if (value >= 64) {
- value = 63;
- fprintf(stderr, "only 64 CPUs in NUMA mode supported.\n");
+ if (*endptr == '-') {
+ endvalue = strtoull(endptr+1, &endptr, 10);
} else {
- if (*endptr == '-') {
- endvalue = strtoull(endptr+1, &endptr, 10);
- if (endvalue >= 63) {
- endvalue = 62;
- fprintf(stderr,
- "only 63 CPUs in NUMA mode supported.\n");
- }
- value = (2ULL << endvalue) - (1ULL << value);
- } else {
- value = 1ULL << value;
- }
+ endvalue = value;
}
- node_cpumask[nodenr] = value;
+
+ if (!(endvalue < MAX_CPUMASK_BITS)) {
+ endvalue = MAX_CPUMASK_BITS - 1;
+ fprintf(stderr,
+ "A max of %d CPUs are supported in a guest\n",
+ MAX_CPUMASK_BITS);
+ }
+
+ bitmap_set(node_cpumask[nodenr], value, endvalue-value+1);
}
nb_numa_nodes++;
}
@@ -1795,9 +1794,8 @@ char *qemu_find_file(int type, const char *name)
const char *subdir;
char *buf;
- /* If name contains path separators then try it as a straight path. */
- if ((strchr(name, '/') || strchr(name, '\\'))
- && access(name, R_OK) == 0) {
+ /* Try the name as a straight path first */
+ if (access(name, R_OK) == 0) {
return g_strdup(name);
}
switch (type) {
@@ -1985,8 +1983,8 @@ static int serial_parse(const char *devname)
snprintf(label, sizeof(label), "serial%d", index);
serial_hds[index] = qemu_chr_new(label, devname, NULL);
if (!serial_hds[index]) {
- fprintf(stderr, "qemu: could not open serial device '%s': %s\n",
- devname, strerror(errno));
+ fprintf(stderr, "qemu: could not connect serial device"
+ " to character backend '%s'\n", devname);
return -1;
}
index++;
@@ -2007,8 +2005,8 @@ static int parallel_parse(const char *devname)
snprintf(label, sizeof(label), "parallel%d", index);
parallel_hds[index] = qemu_chr_new(label, devname, NULL);
if (!parallel_hds[index]) {
- fprintf(stderr, "qemu: could not open parallel device '%s': %s\n",
- devname, strerror(errno));
+ fprintf(stderr, "qemu: could not connect parallel device"
+ " to character backend '%s'\n", devname);
return -1;
}
index++;
@@ -2042,8 +2040,8 @@ static int virtcon_parse(const char *devname)
snprintf(label, sizeof(label), "virtcon%d", index);
virtcon_hds[index] = qemu_chr_new(label, devname, NULL);
if (!virtcon_hds[index]) {
- fprintf(stderr, "qemu: could not open virtio console '%s': %s\n",
- devname, strerror(errno));
+ fprintf(stderr, "qemu: could not connect virtio console"
+ " to character backend '%s'\n", devname);
return -1;
}
qemu_opt_set(dev_opts, "chardev", label);
@@ -2087,7 +2085,7 @@ static QEMUMachine *machine_parse(const char *name)
printf("%-20s %s%s\n", m->name, m->desc,
m->is_default ? " (default)" : "");
}
- exit(!name || *name != '?');
+ exit(!name || !is_help_option(name));
}
static int tcg_init(void)
@@ -2331,7 +2329,7 @@ int main(int argc, char **argv, char **envp)
for (i = 0; i < MAX_NODES; i++) {
node_mem[i] = 0;
- node_cpumask[i] = 0;
+ node_cpumask[i] = bitmap_new(MAX_CPUMASK_BITS);
}
nb_numa_nodes = 0;
@@ -3217,7 +3215,7 @@ int main(int argc, char **argv, char **envp)
*/
cpudef_init();
- if (cpu_model && *cpu_model == '?') {
+ if (cpu_model && is_help_option(cpu_model)) {
list_cpus(stdout, &fprintf, cpu_model);
exit(0);
}
@@ -3438,8 +3436,7 @@ int main(int argc, char **argv, char **envp)
default_drive(default_sdcard, snapshot, machine->use_scsi,
IF_SD, 0, SD_OPTS);
- register_savevm_live(NULL, "ram", 0, 4, NULL, ram_save_live, NULL,
- ram_load, NULL);
+ register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
if (nb_numa_nodes > 0) {
int i;
@@ -3469,8 +3466,9 @@ int main(int argc, char **argv, char **envp)
}
for (i = 0; i < nb_numa_nodes; i++) {
- if (node_cpumask[i] != 0)
+ if (!bitmap_empty(node_cpumask[i], MAX_CPUMASK_BITS)) {
break;
+ }
}
/* assigning the VCPUs round-robin is easier to implement, guest OSes
* must cope with this anyway, because there are BIOSes out there in
@@ -3478,7 +3476,7 @@ int main(int argc, char **argv, char **envp)
*/
if (i == nb_numa_nodes) {
for (i = 0; i < max_cpus; i++) {
- node_cpumask[i % nb_numa_nodes] |= 1 << i;
+ set_bit(i, node_cpumask[i % nb_numa_nodes]);
}
}
}
@@ -3585,8 +3583,11 @@ int main(int argc, char **argv, char **envp)
/* init remote displays */
if (vnc_display) {
vnc_display_init(ds);
- if (vnc_display_open(ds, vnc_display) < 0)
+ if (vnc_display_open(ds, vnc_display) < 0) {
+ fprintf(stderr, "Failed to start VNC server on `%s'\n",
+ vnc_display);
exit(1);
+ }
if (show_vnc_port) {
printf("VNC server running on `%s'\n", vnc_display_local_addr(ds));
diff --git a/vmstate.h b/vmstate.h
index 82d97aead4..5bd2b762ab 100644
--- a/vmstate.h
+++ b/vmstate.h
@@ -26,11 +26,20 @@
#ifndef QEMU_VMSTATE_H
#define QEMU_VMSTATE_H 1
-typedef void SaveSetParamsHandler(int blk_enable, int shared, void * opaque);
typedef void SaveStateHandler(QEMUFile *f, void *opaque);
-typedef int SaveLiveStateHandler(QEMUFile *f, int stage, void *opaque);
typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id);
+typedef struct SaveVMHandlers {
+ void (*set_params)(const MigrationParams *params, void * opaque);
+ SaveStateHandler *save_state;
+ int (*save_live_setup)(QEMUFile *f, void *opaque);
+ int (*save_live_iterate)(QEMUFile *f, void *opaque);
+ int (*save_live_complete)(QEMUFile *f, void *opaque);
+ void (*cancel)(void *opaque);
+ LoadStateHandler *load_state;
+ bool (*is_active)(void *opaque);
+} SaveVMHandlers;
+
int register_savevm(DeviceState *dev,
const char *idstr,
int instance_id,
@@ -43,10 +52,7 @@ int register_savevm_live(DeviceState *dev,
const char *idstr,
int instance_id,
int version_id,
- SaveSetParamsHandler *set_params,
- SaveLiveStateHandler *save_live_state,
- SaveStateHandler *save_state,
- LoadStateHandler *load_state,
+ SaveVMHandlers *ops,
void *opaque);
void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque);
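
Callers now hand register_savevm_live() a single SaveVMHandlers structure instead of separate callbacks, as the vl.c hunk above does for RAM. A hedged sketch for a hypothetical "foo" device (all foo_* names and foo_state are placeholders; only the field names come from SaveVMHandlers):

static SaveVMHandlers savevm_foo_handlers = {
    .save_live_setup    = foo_save_setup,     /* called once before iterating */
    .save_live_iterate  = foo_save_iterate,   /* called repeatedly while live */
    .save_live_complete = foo_save_complete,  /* final stage, source stopped */
    .load_state         = foo_load,
    .cancel             = foo_cancel,
};

register_savevm_live(NULL, "foo", 0, 1, &savevm_foo_handlers, foo_state);
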
diff --git a/xen-all.c b/xen-all.c
index b5220cc6a3..61def2ec8f 100644
--- a/xen-all.c
+++ b/xen-all.c
@@ -560,13 +560,15 @@ static void xen_log_global_stop(MemoryListener *listener)
static void xen_eventfd_add(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data,
+ EventNotifier *e)
{
}
static void xen_eventfd_del(MemoryListener *listener,
MemoryRegionSection *section,
- bool match_data, uint64_t data, int fd)
+ bool match_data, uint64_t data,
+ EventNotifier *e)
{
}
@@ -1191,3 +1193,15 @@ void xen_register_framebuffer(MemoryRegion *mr)
{
framebuffer = mr;
}
+
+void xen_shutdown_fatal_error(const char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ fprintf(stderr, "Will destroy the domain.\n");
+ /* destroy the domain */
+ qemu_system_shutdown_request();
+}
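
xen_shutdown_fatal_error() takes a printf-style format string; a hypothetical call site (the error path and message are illustrative only) might look like:

if (rc < 0) {
    xen_shutdown_fatal_error("error mapping ioreq pages: %d\n", rc);
}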