Diffstat (limited to 'hw/virtio')
 hw/virtio/vhost.c             | 94
 hw/virtio/virtio-balloon.c    |  2
 hw/virtio/virtio-crypto-pci.c |  2
 hw/virtio/virtio-crypto.c     |  1
 hw/virtio/virtio-pci.c        |  4
 hw/virtio/virtio.c            | 23
 6 files changed, 74 insertions(+), 52 deletions(-)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 131f1643b2..30aee88a3e 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -421,32 +421,73 @@ static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size)
dev->log_size = size;
}
+
+static int vhost_verify_ring_part_mapping(void *part,
+ uint64_t part_addr,
+ uint64_t part_size,
+ uint64_t start_addr,
+ uint64_t size)
+{
+ hwaddr l;
+ void *p;
+ int r = 0;
+
+ if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
+ return 0;
+ }
+ l = part_size;
+ p = cpu_physical_memory_map(part_addr, &l, 1);
+ if (!p || l != part_size) {
+ r = -ENOMEM;
+ }
+ if (p != part) {
+ r = -EBUSY;
+ }
+ cpu_physical_memory_unmap(p, l, 0, 0);
+ return r;
+}
+
static int vhost_verify_ring_mappings(struct vhost_dev *dev,
uint64_t start_addr,
uint64_t size)
{
- int i;
+ int i, j;
int r = 0;
+ const char *part_name[] = {
+ "descriptor table",
+ "available ring",
+ "used ring"
+ };
- for (i = 0; !r && i < dev->nvqs; ++i) {
+ for (i = 0; i < dev->nvqs; ++i) {
struct vhost_virtqueue *vq = dev->vqs + i;
- hwaddr l;
- void *p;
- if (!ranges_overlap(start_addr, size, vq->ring_phys, vq->ring_size)) {
- continue;
+ j = 0;
+ r = vhost_verify_ring_part_mapping(vq->desc, vq->desc_phys,
+ vq->desc_size, start_addr, size);
+ if (r) {
+ break;
}
- l = vq->ring_size;
- p = cpu_physical_memory_map(vq->ring_phys, &l, 1);
- if (!p || l != vq->ring_size) {
- error_report("Unable to map ring buffer for ring %d", i);
- r = -ENOMEM;
+
+ j++;
+ r = vhost_verify_ring_part_mapping(vq->avail, vq->avail_phys,
+ vq->avail_size, start_addr, size);
+ if (r) {
+ break;
}
- if (p != vq->ring) {
- error_report("Ring buffer relocated for ring %d", i);
- r = -EBUSY;
+
+ j++;
+ r = vhost_verify_ring_part_mapping(vq->used, vq->used_phys,
+ vq->used_size, start_addr, size);
+ if (r) {
+ break;
}
- cpu_physical_memory_unmap(p, l, 0, 0);
+ }
+
+ if (r == -ENOMEM) {
+ error_report("Unable to map %s for ring %d", part_name[j], i);
+ } else if (r == -EBUSY) {
+ error_report("%s relocated for ring %d", part_name[j], i);
}
return r;
}
@@ -860,15 +901,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
}
}
- s = l = virtio_queue_get_desc_size(vdev, idx);
- a = virtio_queue_get_desc_addr(vdev, idx);
+ vq->desc_size = s = l = virtio_queue_get_desc_size(vdev, idx);
+ vq->desc_phys = a = virtio_queue_get_desc_addr(vdev, idx);
vq->desc = cpu_physical_memory_map(a, &l, 0);
if (!vq->desc || l != s) {
r = -ENOMEM;
goto fail_alloc_desc;
}
- s = l = virtio_queue_get_avail_size(vdev, idx);
- a = virtio_queue_get_avail_addr(vdev, idx);
+ vq->avail_size = s = l = virtio_queue_get_avail_size(vdev, idx);
+ vq->avail_phys = a = virtio_queue_get_avail_addr(vdev, idx);
vq->avail = cpu_physical_memory_map(a, &l, 0);
if (!vq->avail || l != s) {
r = -ENOMEM;
@@ -882,14 +923,6 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
goto fail_alloc_used;
}
- vq->ring_size = s = l = virtio_queue_get_ring_size(vdev, idx);
- vq->ring_phys = a = virtio_queue_get_ring_addr(vdev, idx);
- vq->ring = cpu_physical_memory_map(a, &l, 1);
- if (!vq->ring || l != s) {
- r = -ENOMEM;
- goto fail_alloc_ring;
- }
-
r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled);
if (r < 0) {
r = -errno;
@@ -930,9 +963,6 @@ static int vhost_virtqueue_start(struct vhost_dev *dev,
fail_vector:
fail_kick:
fail_alloc:
- cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
- 0, 0);
-fail_alloc_ring:
cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
0, 0);
fail_alloc_used:
@@ -973,8 +1003,6 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev,
vhost_vq_index);
}
- cpu_physical_memory_unmap(vq->ring, virtio_queue_get_ring_size(vdev, idx),
- 0, virtio_queue_get_ring_size(vdev, idx));
cpu_physical_memory_unmap(vq->used, virtio_queue_get_used_size(vdev, idx),
1, virtio_queue_get_used_size(vdev, idx));
cpu_physical_memory_unmap(vq->avail, virtio_queue_get_avail_size(vdev, idx),
@@ -1122,7 +1150,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
- } else if (!qemu_memfd_check()) {
+ } else if (vhost_dev_log_is_shared(hdev) && !qemu_memfd_check()) {
error_setg(&hdev->migration_blocker,
"Migration disabled: failed to allocate shared memory");
}
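
Note on the vhost.c hunks above: the single contiguous "ring" mapping is replaced by separate mappings for the descriptor table, available ring and used ring, since a virtio 1.0 driver may place the three parts at independent guest addresses; and the memfd migration blocker is now only raised when the backend actually needs a shared dirty log. As a rough sketch (not part of this diff, exact implementation may differ), vhost_dev_log_is_shared() is expected to ask the backend through its vhost_ops:

    /* Sketch only: does this backend (e.g. vhost-user) require the dirty
     * log to live in shared memory? */
    static bool vhost_dev_log_is_shared(struct vhost_dev *dev)
    {
        return dev->vhost_ops->vhost_requires_shm_log &&
               dev->vhost_ops->vhost_requires_shm_log(dev);
    }
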
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index cfba053280..884570a57d 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -456,7 +456,7 @@ static void virtio_balloon_device_reset(VirtIODevice *vdev)
VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
if (s->stats_vq_elem != NULL) {
- virtqueue_discard(s->svq, s->stats_vq_elem, 0);
+ virtqueue_unpop(s->svq, s->stats_vq_elem, 0);
g_free(s->stats_vq_elem);
s->stats_vq_elem = NULL;
}
diff --git a/hw/virtio/virtio-crypto-pci.c b/hw/virtio/virtio-crypto-pci.c
index 21d998401a..a1b09064c0 100644
--- a/hw/virtio/virtio-crypto-pci.c
+++ b/hw/virtio/virtio-crypto-pci.c
@@ -48,7 +48,7 @@ static void virtio_crypto_pci_class_init(ObjectClass *klass, void *data)
k->realize = virtio_crypto_pci_realize;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
dc->props = virtio_crypto_pci_properties;
-
+ dc->hotpluggable = false;
pcidev_k->class_id = PCI_CLASS_OTHERS;
}
diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c
index 170114f52b..32938433b7 100644
--- a/hw/virtio/virtio-crypto.c
+++ b/hw/virtio/virtio-crypto.c
@@ -813,6 +813,7 @@ static void virtio_crypto_device_unrealize(DeviceState *dev, Error **errp)
static const VMStateDescription vmstate_virtio_crypto = {
.name = "virtio-crypto",
+ .unmigratable = 1,
.minimum_version_id = VIRTIO_CRYPTO_VM_VERSION,
.version_id = VIRTIO_CRYPTO_VM_VERSION,
.fields = (VMStateField[]) {
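
Note: setting .unmigratable = 1 on the VMStateDescription is enough to block migration for as long as a virtio-crypto device is instantiated; the field list is ignored while the flag is set. A minimal sketch of the same pattern for a hypothetical device:

    /* Sketch: a device whose vmstate is marked unmigratable adds a
     * migration blocker while it exists. Name below is made up. */
    static const VMStateDescription vmstate_foo_device = {
        .name = "foo-device",
        .unmigratable = 1,
    };
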
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 62001b46d7..97b32febaf 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -1175,7 +1175,9 @@ static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
break;
case VIRTIO_PCI_COMMON_DF:
if (proxy->dfselect <= 1) {
- val = (vdev->host_features & ~VIRTIO_LEGACY_FEATURES) >>
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ val = (vdev->host_features & ~vdc->legacy_features) >>
(32 * proxy->dfselect);
}
break;
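
Note: the device-feature window now masks with the per-class legacy_features instead of the global VIRTIO_LEGACY_FEATURES constant, so a transitional device class can declare additional legacy-only bits. A hedged sketch of how a class_init could extend the mask (MY_DEVICE_F_LEGACY_ONLY is a made-up feature bit):

    /* Sketch: extend the legacy-only feature mask for one device class. */
    static void my_device_class_init(ObjectClass *klass, void *data)
    {
        VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

        vdc->legacy_features |= 1ULL << MY_DEVICE_F_LEGACY_ONLY;
    }
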
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index bcbcfe063c..55a00cdf9e 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -279,7 +279,7 @@ void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
virtqueue_unmap_sg(vq, elem, len);
}
-/* virtqueue_discard:
+/* virtqueue_unpop:
* @vq: The #VirtQueue
* @elem: The #VirtQueueElement
* @len: number of bytes written
@@ -287,8 +287,8 @@ void virtqueue_detach_element(VirtQueue *vq, const VirtQueueElement *elem,
* Pretend the most recent element wasn't popped from the virtqueue. The next
* call to virtqueue_pop() will refetch the element.
*/
-void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
- unsigned int len)
+void virtqueue_unpop(VirtQueue *vq, const VirtQueueElement *elem,
+ unsigned int len)
{
vq->last_avail_idx--;
virtqueue_detach_element(vq, elem, len);
@@ -301,7 +301,7 @@ void virtqueue_discard(VirtQueue *vq, const VirtQueueElement *elem,
* Pretend that elements weren't popped from the virtqueue. The next
* virtqueue_pop() will refetch the oldest element.
*
- * Use virtqueue_discard() instead if you have a VirtQueueElement.
+ * Use virtqueue_unpop() instead if you have a VirtQueueElement.
*
* Returns: true on success, false if @num is greater than the number of in use
* elements.
@@ -632,7 +632,7 @@ void virtqueue_map(VirtQueueElement *elem)
VIRTQUEUE_MAX_SIZE, 0);
}
-void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
+static void *virtqueue_alloc_element(size_t sz, unsigned out_num, unsigned in_num)
{
VirtQueueElement *elem;
size_t in_addr_ofs = QEMU_ALIGN_UP(sz, __alignof__(elem->in_addr[0]));
@@ -1935,11 +1935,6 @@ hwaddr virtio_queue_get_used_addr(VirtIODevice *vdev, int n)
return vdev->vq[n].vring.used;
}
-hwaddr virtio_queue_get_ring_addr(VirtIODevice *vdev, int n)
-{
- return vdev->vq[n].vring.desc;
-}
-
hwaddr virtio_queue_get_desc_size(VirtIODevice *vdev, int n)
{
return sizeof(VRingDesc) * vdev->vq[n].vring.num;
@@ -1957,12 +1952,6 @@ hwaddr virtio_queue_get_used_size(VirtIODevice *vdev, int n)
sizeof(VRingUsedElem) * vdev->vq[n].vring.num;
}
-hwaddr virtio_queue_get_ring_size(VirtIODevice *vdev, int n)
-{
- return vdev->vq[n].vring.used - vdev->vq[n].vring.desc +
- virtio_queue_get_used_size(vdev, n);
-}
-
uint16_t virtio_queue_get_last_avail_idx(VirtIODevice *vdev, int n)
{
return vdev->vq[n].last_avail_idx;
@@ -2214,6 +2203,8 @@ static void virtio_device_class_init(ObjectClass *klass, void *data)
dc->props = virtio_properties;
vdc->start_ioeventfd = virtio_device_start_ioeventfd_impl;
vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
+
+ vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
}
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
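
Note: virtqueue_unpop() is the renamed virtqueue_discard(); it decrements last_avail_idx and detaches the element's mappings so the next virtqueue_pop() refetches it. An illustrative usage pattern (not from this diff; backend_ready() is a hypothetical helper) for a device that cannot service a request yet:

    VirtQueueElement *elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
    if (elem && !backend_ready()) {
        /* Put the element back: the next virtqueue_pop() will see it again. */
        virtqueue_unpop(vq, elem, 0);
        g_free(elem);
        return;
    }
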