From d8438bd24a3f2b6fd6d0e788c18483ca9e239b36 Mon Sep 17 00:00:00 2001
From: Raphael Norwitz
Date: Thu, 22 Aug 2019 11:34:24 -0700
Subject: vhost-user-blk: prevent using uninitialized vqs

Same rationale as: e6cc11d64fc998c11a4dfcde8fda3fc33a74d844

Of the 3 virtqueues, seabios only sets cmd, leaving ctrl and event without a physical address. This can cause vhost_verify_ring_part_mapping to return ENOMEM, causing the following logs:

qemu-system-x86_64: Unable to map available ring for ring 0
qemu-system-x86_64: Verify ring failure on region 0

This has already been fixed for vhost scsi devices and was recently fixed for vhost-user scsi devices. This commit fixes it for vhost-user-blk devices.

Suggested-by: Philippe Mathieu-Daudé
Signed-off-by: Raphael Norwitz
Message-Id: <1566498865-55506-1-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Stefan Hajnoczi
---
 hw/block/vhost-user-blk.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'hw')

diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index 0b8c5dfeab..63da9bb619 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -421,7 +421,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) } s->inflight = g_new0(struct vhost_inflight, 1); - s->vqs = g_new(struct vhost_virtqueue, s->num_queues); + s->vqs = g_new0(struct vhost_virtqueue, s->num_queues); s->watch = 0; s->connected = false;
-- cgit v1.2.3
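The fix works because g_new0() zero-fills the allocated virtqueue array, so any queue the guest firmware never configures keeps a 0/NULL ring address instead of heap garbage that vhost later tries to map and verify. Below is a minimal standalone sketch of the difference, using plain GLib; the struct and field names are invented for illustration and are not QEMU code:

    #include <glib.h>
    #include <stdio.h>

    /* Stand-in for a vhost virtqueue; only the ring address matters here. */
    struct vq {
        unsigned long desc_phys;
    };

    int main(void)
    {
        unsigned num_queues = 3;
        /* g_new() would leave this memory uninitialized; g_new0() zeroes
         * it, so unconfigured queues are reliably detectable. */
        struct vq *vqs = g_new0(struct vq, num_queues);

        vqs[0].desc_phys = 0x1000;  /* firmware only sets up queue 0 */

        for (unsigned i = 0; i < num_queues; i++) {
            /* With g_new(), queues 1 and 2 could hold random non-zero
             * addresses here and would be treated as configured rings. */
            if (vqs[i].desc_phys) {
                printf("queue %u configured at 0x%lx\n", i, vqs[i].desc_phys);
            }
        }
        g_free(vqs);
        return 0;
    }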
From 28cf553afeb29b0c4f339c600171552a72a68cb7 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Mon, 16 Sep 2019 16:07:15 +0800
Subject: intel_iommu: Sanity check vfio-pci config on machine init done

This check previously only happened when the IOMMU was enabled in the guest. That was always too late, because the IOMMU is normally only enabled during guest OS boot. It meant that we could bail out and exit directly while the guest OS was booting if the device configuration was not supported. Or, if the guest didn't enable the vIOMMU at all, the user could use the guest normally, but as soon as they reconfigured the guest OS to enable the vIOMMU and rebooted, they would see the panic right after the reset, when the next boot started.

Let's make this failure happen even earlier, so that we force the user to use caching-mode for vfio-pci devices when using the vIOMMU. That way the user at least won't get surprised during execution of the guest, which seems a bit nicer.

This will affect users who didn't enable the vIOMMU in the guest OS but were using vfio-pci together with the vtd device in the past. However I hope they are not the majority, because not enabling the vIOMMU with the device attached is actually meaningless.

We still keep the old assertion for safety, because the hotplug path can still reach it so far.

Reviewed-by: Eric Auger
Signed-off-by: Peter Xu
Message-Id: <20190916080718.3299-2-peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/i386/intel_iommu.c | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)

(limited to 'hw')

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index 75ca6f9c70..bed8ffe446 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -35,6 +35,7 @@ #include "hw/i386/x86-iommu.h" #include "hw/pci-host/q35.h" #include "sysemu/kvm.h" +#include "sysemu/sysemu.h" #include "hw/i386/apic_internal.h" #include "kvm_i386.h" #include "migration/vmstate.h" @@ -64,6 +65,13 @@ static void vtd_address_space_refresh_all(IntelIOMMUState *s); static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); +static void vtd_panic_require_caching_mode(void) +{ + error_report("We need to set caching-mode=on for intel-iommu to enable " + "device assignment with IOMMU protection."); + exit(1); +} + static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val, uint64_t wmask, uint64_t w1cmask) { @@ -2929,9 +2937,7 @@ static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, IntelIOMMUState *s = vtd_as->iommu_state; if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { - error_report("We need to set caching-mode=on for intel-iommu to enable " - "device assignment with IOMMU protection."); - exit(1); + vtd_panic_require_caching_mode(); } /* Update per-address-space notifier flags */ @@ -3699,6 +3705,32 @@ static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) return true; } +static int vtd_machine_done_notify_one(Object *child, void *unused) +{ + IntelIOMMUState *iommu = INTEL_IOMMU_DEVICE(x86_iommu_get_default()); + + /* + * We hard-coded here because vfio-pci is the only special case + * here. Let's be more elegant in the future when we can, but so + * far there seems to be no better way. + */ + if (object_dynamic_cast(child, "vfio-pci") && !iommu->caching_mode) { + vtd_panic_require_caching_mode(); + } + + return 0; +} + +static void vtd_machine_done_hook(Notifier *notifier, void *unused) +{ + object_child_foreach_recursive(object_get_root(), + vtd_machine_done_notify_one, NULL); +} + +static Notifier vtd_machine_done_notify = { + .notify = vtd_machine_done_hook, +}; + static void vtd_realize(DeviceState *dev, Error **errp) { MachineState *ms = MACHINE(qdev_get_machine()); @@ -3744,6 +3776,7 @@ static void vtd_realize(DeviceState *dev, Error **errp) pci_setup_iommu(bus, vtd_host_dma_iommu, dev); /* Pseudo address space under root PCI bus. */ pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); + qemu_add_machine_init_done_notifier(&vtd_machine_done_notify); } static void vtd_class_init(ObjectClass *klass, void *data)
-- cgit v1.2.3
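For readers unfamiliar with the mechanism the patch hooks into: QEMU keeps a list of Notifier callbacks that fire once, after machine initialization completes, which is what lets the vtd check above run only when every cold-plugged device already exists. A simplified, self-contained model of that pattern follows; it is a sketch, not the actual QEMU implementation, and the names are condensed for illustration:

    #include <stdio.h>
    #include <stddef.h>

    typedef struct Notifier Notifier;
    struct Notifier {
        void (*notify)(Notifier *n, void *data);
        Notifier *next;
    };

    static Notifier *machine_done_list;

    static void add_machine_init_done_notifier(Notifier *n)
    {
        n->next = machine_done_list;
        machine_done_list = n;
    }

    static void machine_init_done(void)
    {
        /* In QEMU this runs after all cold-plugged devices are realized;
         * vtd_machine_done_hook() would be invoked from here. */
        for (Notifier *n = machine_done_list; n; n = n->next) {
            n->notify(n, NULL);
        }
    }

    static void vtd_hook(Notifier *n, void *data)
    {
        /* The real hook walks the QOM tree looking for "vfio-pci"
         * children and exits unless caching-mode is enabled. */
        printf("scan devices, enforce caching-mode for vfio-pci\n");
    }

    static Notifier vtd_notifier = { .notify = vtd_hook };

    int main(void)
    {
        add_machine_init_done_notifier(&vtd_notifier);
        machine_init_done();
        return 0;
    }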
From d2321d31ff98b75b652c2b1594f00a4cfd48102a Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Mon, 16 Sep 2019 16:07:16 +0800
Subject: qdev/machine: Introduce hotplug_allowed hook

Introduce this new per-machine hook to give any machine class a chance to sanity-check a device before it is hotplugged. This will be used on x86 to detect some illegal device configurations, e.g., possible conflicts between vfio-pci and the x86 vIOMMU.

Reviewed-by: Eric Auger
Signed-off-by: Peter Xu
Message-Id: <20190916080718.3299-3-peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/core/qdev.c | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

(limited to 'hw')

diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 60d66c2f39..cbad6c1d55 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -237,6 +237,23 @@ HotplugHandler *qdev_get_machine_hotplug_handler(DeviceState *dev) return NULL; } +bool qdev_hotplug_allowed(DeviceState *dev, Error **errp) +{ + MachineState *machine; + MachineClass *mc; + Object *m_obj = qdev_get_machine(); + + if (object_dynamic_cast(m_obj, TYPE_MACHINE)) { + machine = MACHINE(m_obj); + mc = MACHINE_GET_CLASS(machine); + if (mc->hotplug_allowed) { + return mc->hotplug_allowed(machine, dev, errp); + } + } + + return true; +} + HotplugHandler *qdev_get_bus_hotplug_handler(DeviceState *dev) { if (dev->parent_bus) {
-- cgit v1.2.3

From c6cbc29d36fe8df078776ed715c37cebac582238 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Mon, 16 Sep 2019 16:07:17 +0800
Subject: pc/q35: Disallow vfio-pci hotplug without VT-d caching mode

Instead of bailing out when trying to hotplug a vfio-pci device with the below configuration:

  -device intel-iommu,caching-mode=off

we can now return an error message to the user via QMP/HMP and let the VM continue to work after the failed hotplug:

  (qemu) device_add vfio-pci,bus=root.3,host=05:00.0,id=vfio1
  Error: Device assignment is not allowed without enabling caching-mode=on for Intel IOMMU.

Reviewed-by: Eric Auger
Signed-off-by: Peter Xu
Message-Id: <20190916080718.3299-4-peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/i386/pc.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)

(limited to 'hw')

diff --git a/hw/i386/pc.c b/hw/i386/pc.c index bad866fe44..0a6fa6e549 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -2944,6 +2944,26 @@ static void x86_nmi(NMIState *n, int cpu_index, Error **errp) } } + +static bool pc_hotplug_allowed(MachineState *ms, DeviceState *dev, Error **errp) +{ + X86IOMMUState *iommu = x86_iommu_get_default(); + IntelIOMMUState *intel_iommu; + + if (iommu && + object_dynamic_cast((Object *)iommu, TYPE_INTEL_IOMMU_DEVICE) && + object_dynamic_cast((Object *)dev, "vfio-pci")) { + intel_iommu = INTEL_IOMMU_DEVICE(iommu); + if (!intel_iommu->caching_mode) { + error_setg(errp, "Device assignment is not allowed without " + "enabling caching-mode=on for Intel IOMMU."); + return false; + } + } + + return true; +} + static void pc_machine_class_init(ObjectClass *oc, void *data) { MachineClass *mc = MACHINE_CLASS(oc); @@ -2968,6 +2988,7 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) pcmc->pvh_enabled = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = pc_get_hotplug_handler; + mc->hotplug_allowed = pc_hotplug_allowed; mc->cpu_index_to_instance_props = pc_cpu_index_to_props; mc->get_default_cpu_node_id = pc_get_default_cpu_node_id; mc->possible_cpu_arch_ids = pc_possible_cpu_arch_ids;
-- cgit v1.2.3
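Taken together, the two patches above follow a common default-allow hook pattern: the core helper (qdev_hotplug_allowed) consults an optional per-machine-class callback and permits the hotplug whenever no callback is registered, while the pc machine installs a callback that vetoes the one known-bad combination. A condensed standalone sketch of that dispatch logic, with hypothetical types standing in for QEMU's QOM:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    typedef struct Device  { const char *type; } Device;
    typedef struct Machine Machine;

    typedef struct MachineClass {
        /* Optional veto hook; NULL means "always allowed". */
        bool (*hotplug_allowed)(Machine *m, Device *dev, const char **errp);
    } MachineClass;

    struct Machine { MachineClass *klass; };

    static bool hotplug_allowed(Machine *m, Device *dev, const char **errp)
    {
        if (m->klass->hotplug_allowed) {
            return m->klass->hotplug_allowed(m, dev, errp);
        }
        return true;  /* no hook registered: allow */
    }

    /* A pc-like veto, mirroring what pc_hotplug_allowed() checks above. */
    static bool pc_like_veto(Machine *m, Device *dev, const char **errp)
    {
        bool caching_mode = false;  /* as if intel-iommu,caching-mode=off */

        if (!caching_mode && strcmp(dev->type, "vfio-pci") == 0) {
            *errp = "Device assignment is not allowed without enabling "
                    "caching-mode=on for Intel IOMMU.";
            return false;
        }
        return true;
    }

    int main(void)
    {
        MachineClass mc = { .hotplug_allowed = pc_like_veto };
        Machine m = { .klass = &mc };
        Device vfio = { .type = "vfio-pci" };
        const char *err = NULL;

        if (!hotplug_allowed(&m, &vfio, &err)) {
            printf("hotplug rejected: %s\n", err);
        }
        return 0;
    }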
From e7df189e19e86bf9f4d7aea4c6cf50ac0ebfce46 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Mon, 16 Sep 2019 16:07:18 +0800
Subject: intel_iommu: Remove the caching-mode check during flag change

That's never a good place to stop the QEMU process... Now that we have both the machine-init-done sanity check and the hotplug handler, we can safely remove this check to avoid that.

Reviewed-by: Eric Auger
Signed-off-by: Peter Xu
Message-Id: <20190916080718.3299-5-peterx@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/i386/intel_iommu.c | 4 ----
 1 file changed, 4 deletions(-)

(limited to 'hw')

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index bed8ffe446..f1de8fdb75 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -2936,10 +2936,6 @@ static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); IntelIOMMUState *s = vtd_as->iommu_state; - if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { - vtd_panic_require_caching_mode(); - } - /* Update per-address-space notifier flags */ vtd_as->notifier_flags = new;
-- cgit v1.2.3

From 44e687a4d9ab327761e221844ced7dc9c23350a5 Mon Sep 17 00:00:00 2001
From: Sergio Lopez
Date: Fri, 13 Sep 2019 14:06:01 +0200
Subject: virtio-mmio: implement modern (v2) personality (virtio-1)

Implement the modern (v2) personality, according to the VirtIO 1.0 specification.

Support for v2 among guests is not as widespread as one would like. While the Linux driver has had it for a while, support is missing, at least, from Tianocore EDK II, NetBSD and FreeBSD. For this reason, the v2 personality is disabled, keeping the legacy behavior as the default. Machine types willing to use v2 can enable it using MachineClass's compat_props.

Signed-off-by: Sergio Lopez
Message-Id: <20190913120559.40835-1-slp@redhat.com>
Reviewed-by: Cornelia Huck
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
---
 hw/virtio/virtio-mmio.c | 342 ++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 319 insertions(+), 23 deletions(-)

(limited to 'hw')

diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index eccc795f28..3d5ca0f667 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -50,14 +50,24 @@ OBJECT_CHECK(VirtIOMMIOProxy, (obj), TYPE_VIRTIO_MMIO) #define VIRT_MAGIC 0x74726976 /* 'virt' */ -#define VIRT_VERSION 1 +#define VIRT_VERSION 2 +#define VIRT_VERSION_LEGACY 1 #define VIRT_VENDOR 0x554D4551 /* 'QEMU' */ +typedef struct VirtIOMMIOQueue { + uint16_t num; + bool enabled; + uint32_t desc[2]; + uint32_t avail[2]; + uint32_t used[2]; +} VirtIOMMIOQueue; + typedef struct { /* Generic */ SysBusDevice parent_obj; MemoryRegion iomem; qemu_irq irq; + bool legacy; /* Guest accessible state needing migration and reset */ uint32_t host_features_sel; uint32_t guest_features_sel; @@ -65,6 +75,9 @@ typedef struct { /* virtio-bus */ VirtioBusState bus; bool format_transport_address; + /* Fields only used for non-legacy (v2) devices */ + uint32_t guest_features[2]; + VirtIOMMIOQueue vqs[VIRTIO_QUEUE_MAX]; } VirtIOMMIOProxy; static bool virtio_mmio_ioeventfd_enabled(DeviceState *d) @@ -118,7 +131,11 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) case VIRTIO_MMIO_MAGIC_VALUE: return VIRT_MAGIC; case VIRTIO_MMIO_VERSION: - return VIRT_VERSION; + if (proxy->legacy) { + return VIRT_VERSION_LEGACY; + } else { + return VIRT_VERSION; + } case VIRTIO_MMIO_VENDOR_ID: return VIRT_VENDOR; default: @@ -149,28 +166,64 @@ case VIRTIO_MMIO_MAGIC_VALUE: return VIRT_MAGIC; case VIRTIO_MMIO_VERSION: - return VIRT_VERSION; + if (proxy->legacy) { + return VIRT_VERSION_LEGACY; + } else { + return VIRT_VERSION; + } case VIRTIO_MMIO_DEVICE_ID: return vdev->device_id; case VIRTIO_MMIO_VENDOR_ID: return VIRT_VENDOR; case VIRTIO_MMIO_DEVICE_FEATURES: - if (proxy->host_features_sel) { - return 0; + if (proxy->legacy) { + if (proxy->host_features_sel) { + return 0; + }
else { + return vdev->host_features; + } + } else { + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + return (vdev->host_features & ~vdc->legacy_features) + >> (32 * proxy->host_features_sel); } - return vdev->host_features; case VIRTIO_MMIO_QUEUE_NUM_MAX: if (!virtio_queue_get_num(vdev, vdev->queue_sel)) { return 0; } return VIRTQUEUE_MAX_SIZE; case VIRTIO_MMIO_QUEUE_PFN: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return 0; + } return virtio_queue_get_addr(vdev, vdev->queue_sel) >> proxy->guest_page_shift; + case VIRTIO_MMIO_QUEUE_READY: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return 0; + } + return proxy->vqs[vdev->queue_sel].enabled; case VIRTIO_MMIO_INTERRUPT_STATUS: return atomic_read(&vdev->isr); case VIRTIO_MMIO_STATUS: return vdev->status; + case VIRTIO_MMIO_CONFIG_GENERATION: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: read from non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return 0; + } + return vdev->generation; case VIRTIO_MMIO_DEVICE_FEATURES_SEL: case VIRTIO_MMIO_DRIVER_FEATURES: case VIRTIO_MMIO_DRIVER_FEATURES_SEL: @@ -180,12 +233,20 @@ static uint64_t virtio_mmio_read(void *opaque, hwaddr offset, unsigned size) case VIRTIO_MMIO_QUEUE_ALIGN: case VIRTIO_MMIO_QUEUE_NOTIFY: case VIRTIO_MMIO_INTERRUPT_ACK: + case VIRTIO_MMIO_QUEUE_DESC_LOW: + case VIRTIO_MMIO_QUEUE_DESC_HIGH: + case VIRTIO_MMIO_QUEUE_AVAIL_LOW: + case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: + case VIRTIO_MMIO_QUEUE_USED_LOW: + case VIRTIO_MMIO_QUEUE_USED_HIGH: qemu_log_mask(LOG_GUEST_ERROR, - "%s: read of write-only register\n", - __func__); + "%s: read of write-only register (0x%" HWADDR_PRIx ")\n", + __func__, offset); return 0; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad register offset (0x%" HWADDR_PRIx ")\n", + __func__, offset); return 0; } return 0; @@ -232,17 +293,41 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, } switch (offset) { case VIRTIO_MMIO_DEVICE_FEATURES_SEL: - proxy->host_features_sel = value; + if (value) { + proxy->host_features_sel = 1; + } else { + proxy->host_features_sel = 0; + } break; case VIRTIO_MMIO_DRIVER_FEATURES: - if (!proxy->guest_features_sel) { - virtio_set_features(vdev, value); + if (proxy->legacy) { + if (proxy->guest_features_sel) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: attempt to write guest features with " + "guest_features_sel > 0 in legacy mode\n", + __func__); + } else { + virtio_set_features(vdev, value); + } + } else { + proxy->guest_features[proxy->guest_features_sel] = value; } break; case VIRTIO_MMIO_DRIVER_FEATURES_SEL: - proxy->guest_features_sel = value; + if (value) { + proxy->guest_features_sel = 1; + } else { + proxy->guest_features_sel = 0; + } break; case VIRTIO_MMIO_GUEST_PAGE_SIZE: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } proxy->guest_page_shift = ctz32(value); if (proxy->guest_page_shift > 31) { proxy->guest_page_shift = 0; @@ -256,15 +341,31 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, break; case VIRTIO_MMIO_QUEUE_NUM: trace_virtio_mmio_queue_write(value, VIRTQUEUE_MAX_SIZE); - 
virtio_queue_set_num(vdev, vdev->queue_sel, value); - /* Note: only call this function for legacy devices */ - virtio_queue_update_rings(vdev, vdev->queue_sel); + if (proxy->legacy) { + virtio_queue_set_num(vdev, vdev->queue_sel, value); + virtio_queue_update_rings(vdev, vdev->queue_sel); + } else { + proxy->vqs[vdev->queue_sel].num = value; + } break; case VIRTIO_MMIO_QUEUE_ALIGN: - /* Note: this is only valid for legacy devices */ + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } virtio_queue_set_align(vdev, vdev->queue_sel, value); break; case VIRTIO_MMIO_QUEUE_PFN: + if (!proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to legacy register (0x%" + HWADDR_PRIx ") in non-legacy mode\n", + __func__, offset); + return; + } if (value == 0) { virtio_reset(vdev); } else { @@ -272,6 +373,29 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, value << proxy->guest_page_shift); } break; + case VIRTIO_MMIO_QUEUE_READY: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + if (value) { + virtio_queue_set_num(vdev, vdev->queue_sel, + proxy->vqs[vdev->queue_sel].num); + virtio_queue_set_rings(vdev, vdev->queue_sel, + ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | + proxy->vqs[vdev->queue_sel].desc[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | + proxy->vqs[vdev->queue_sel].avail[0], + ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | + proxy->vqs[vdev->queue_sel].used[0]); + proxy->vqs[vdev->queue_sel].enabled = 1; + } else { + proxy->vqs[vdev->queue_sel].enabled = 0; + } + break; case VIRTIO_MMIO_QUEUE_NOTIFY: if (value < VIRTIO_QUEUE_MAX) { virtio_queue_notify(vdev, value); @@ -286,6 +410,12 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, virtio_mmio_stop_ioeventfd(proxy); } + if (!proxy->legacy && (value & VIRTIO_CONFIG_S_FEATURES_OK)) { + virtio_set_features(vdev, + ((uint64_t)proxy->guest_features[1]) << 32 | + proxy->guest_features[0]); + } + virtio_set_status(vdev, value & 0xff); if (value & VIRTIO_CONFIG_S_DRIVER_OK) { @@ -296,6 +426,66 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, virtio_reset(vdev); } break; + case VIRTIO_MMIO_QUEUE_DESC_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].desc[0] = value; + break; + case VIRTIO_MMIO_QUEUE_DESC_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].desc[1] = value; + break; + case VIRTIO_MMIO_QUEUE_AVAIL_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].avail[0] = value; + break; + case VIRTIO_MMIO_QUEUE_AVAIL_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].avail[1] = value; + break; + case VIRTIO_MMIO_QUEUE_USED_LOW: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + 
"%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].used[0] = value; + break; + case VIRTIO_MMIO_QUEUE_USED_HIGH: + if (proxy->legacy) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to non-legacy register (0x%" + HWADDR_PRIx ") in legacy mode\n", + __func__, offset); + return; + } + proxy->vqs[vdev->queue_sel].used[1] = value; + break; case VIRTIO_MMIO_MAGIC_VALUE: case VIRTIO_MMIO_VERSION: case VIRTIO_MMIO_DEVICE_ID: @@ -303,22 +493,31 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value, case VIRTIO_MMIO_DEVICE_FEATURES: case VIRTIO_MMIO_QUEUE_NUM_MAX: case VIRTIO_MMIO_INTERRUPT_STATUS: + case VIRTIO_MMIO_CONFIG_GENERATION: qemu_log_mask(LOG_GUEST_ERROR, - "%s: write to readonly register\n", - __func__); + "%s: write to read-only register (0x%" HWADDR_PRIx ")\n", + __func__, offset); break; default: - qemu_log_mask(LOG_GUEST_ERROR, "%s: bad register offset\n", __func__); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bad register offset (0x%" HWADDR_PRIx ")\n", + __func__, offset); } } -static const MemoryRegionOps virtio_mem_ops = { +static const MemoryRegionOps virtio_legacy_mem_ops = { .read = virtio_mmio_read, .write = virtio_mmio_write, .endianness = DEVICE_NATIVE_ENDIAN, }; +static const MemoryRegionOps virtio_mem_ops = { + .read = virtio_mmio_read, + .write = virtio_mmio_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + static void virtio_mmio_update_irq(DeviceState *opaque, uint16_t vector) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); @@ -352,15 +551,90 @@ static void virtio_mmio_save_config(DeviceState *opaque, QEMUFile *f) qemu_put_be32(f, proxy->guest_page_shift); } +static const VMStateDescription vmstate_virtio_mmio_queue_state = { + .name = "virtio_mmio/queue_state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT16(num, VirtIOMMIOQueue), + VMSTATE_BOOL(enabled, VirtIOMMIOQueue), + VMSTATE_UINT32_ARRAY(desc, VirtIOMMIOQueue, 2), + VMSTATE_UINT32_ARRAY(avail, VirtIOMMIOQueue, 2), + VMSTATE_UINT32_ARRAY(used, VirtIOMMIOQueue, 2), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_mmio_state_sub = { + .name = "virtio_mmio/state", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32_ARRAY(guest_features, VirtIOMMIOProxy, 2), + VMSTATE_STRUCT_ARRAY(vqs, VirtIOMMIOProxy, VIRTIO_QUEUE_MAX, 0, + vmstate_virtio_mmio_queue_state, + VirtIOMMIOQueue), + VMSTATE_END_OF_LIST() + } +}; + +static const VMStateDescription vmstate_virtio_mmio = { + .name = "virtio_mmio", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = (VMStateField[]) { + VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_virtio_mmio_state_sub, + NULL + } +}; + +static void virtio_mmio_save_extra_state(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + vmstate_save_state(f, &vmstate_virtio_mmio, proxy, NULL); +} + +static int virtio_mmio_load_extra_state(DeviceState *opaque, QEMUFile *f) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + return vmstate_load_state(f, &vmstate_virtio_mmio, proxy, 1); +} + +static bool virtio_mmio_has_extra_state(DeviceState *opaque) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(opaque); + + return !proxy->legacy; +} + static void virtio_mmio_reset(DeviceState *d) { VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + int i; virtio_mmio_stop_ioeventfd(proxy); 
virtio_bus_reset(&proxy->bus); proxy->host_features_sel = 0; proxy->guest_features_sel = 0; proxy->guest_page_shift = 0; + + if (!proxy->legacy) { + proxy->guest_features[0] = proxy->guest_features[1] = 0; + + for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { + proxy->vqs[i].enabled = 0; + proxy->vqs[i].num = 0; + proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; + proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; + proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; + } + } } static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign, @@ -423,11 +697,22 @@ assign_error: return r; } +static void virtio_mmio_pre_plugged(DeviceState *d, Error **errp) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + + if (!proxy->legacy) { + virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); + } +} + /* virtio-mmio device */ static Property virtio_mmio_properties[] = { DEFINE_PROP_BOOL("format_transport_address", VirtIOMMIOProxy, format_transport_address, true), + DEFINE_PROP_BOOL("force-legacy", VirtIOMMIOProxy, legacy, true), DEFINE_PROP_END_OF_LIST(), }; @@ -439,8 +724,15 @@ static void virtio_mmio_realizefn(DeviceState *d, Error **errp) qbus_create_inplace(&proxy->bus, sizeof(proxy->bus), TYPE_VIRTIO_MMIO_BUS, d, NULL); sysbus_init_irq(sbd, &proxy->irq); - memory_region_init_io(&proxy->iomem, OBJECT(d), &virtio_mem_ops, proxy, - TYPE_VIRTIO_MMIO, 0x200); + if (proxy->legacy) { + memory_region_init_io(&proxy->iomem, OBJECT(d), + &virtio_legacy_mem_ops, proxy, + TYPE_VIRTIO_MMIO, 0x200); + } else { + memory_region_init_io(&proxy->iomem, OBJECT(d), + &virtio_mem_ops, proxy, + TYPE_VIRTIO_MMIO, 0x200); + } sysbus_init_mmio(sbd, &proxy->iomem); } @@ -511,9 +803,13 @@ static void virtio_mmio_bus_class_init(ObjectClass *klass, void *data) k->notify = virtio_mmio_update_irq; k->save_config = virtio_mmio_save_config; k->load_config = virtio_mmio_load_config; + k->save_extra_state = virtio_mmio_save_extra_state; + k->load_extra_state = virtio_mmio_load_extra_state; + k->has_extra_state = virtio_mmio_has_extra_state; k->set_guest_notifiers = virtio_mmio_set_guest_notifiers; k->ioeventfd_enabled = virtio_mmio_ioeventfd_enabled; k->ioeventfd_assign = virtio_mmio_ioeventfd_assign; + k->pre_plugged = virtio_mmio_pre_plugged; k->has_variable_vring_alignment = true; bus_class->max_dev = 1; bus_class->get_dev_path = virtio_mmio_bus_get_dev_path; -- cgit v1.2.3
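One detail of the v2 register layout implemented above is worth spelling out: ring addresses are 64-bit, but virtio-mmio registers are only 32 bits wide, so the guest programs each address as a LOW/HIGH pair and the device recombines the halves when the queue is marked ready. A small standalone illustration of the recombination expression used in the patch (the values are arbitrary examples):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        /* Device-side latches, as in VirtIOMMIOQueue.desc[2]. */
        uint32_t desc[2] = { 0 };

        /* Guest MMIO writes, one 32-bit half at a time. */
        desc[0] = 0x89abc000u;  /* VIRTIO_MMIO_QUEUE_DESC_LOW  */
        desc[1] = 0x00000001u;  /* VIRTIO_MMIO_QUEUE_DESC_HIGH */

        /* On VIRTIO_MMIO_QUEUE_READY, the device rebuilds the 64-bit
         * address with the same expression the patch uses. */
        uint64_t desc_addr = ((uint64_t)desc[1]) << 32 | desc[0];
        printf("desc ring at 0x%" PRIx64 "\n", desc_addr); /* 0x189abc000 */
        return 0;
    }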