author    | Eugenio Pérez <eperezma@redhat.com> | 2022-03-14 18:34:47 +0100
committer | Jason Wang <jasowang@redhat.com> | 2022-03-15 13:57:44 +0800
commit    | 100890f7cad502b6fa49465ffff287ba79fcf94b (patch)
tree      | 9b50bf6cd426488ef70076dab1e349ece5018bbc /hw/virtio/vhost-vdpa.c
parent    | d96be4c8942a9f983f8a1642b397150d0d80c6ea (diff)
vhost: Shadow virtqueue buffers forwarding
Initial version of the shadow virtqueue that actually forwards buffers. There
is no IOMMU support at the moment; that will be addressed in future
patches of this series. Since all vhost-vdpa devices use a forced IOMMU,
SVQ is not usable on any device at this point of the series.
For simplicity it only supports modern devices, which expect the vring
in little endian, with a split ring and no event idx or indirect
descriptors. Support for those features will not be added in this series.
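For illustration, those restrictions correspond to well-known virtio feature bits. A minimal sketch, assuming the Linux uapi virtio headers; the helper name is hypothetical and not part of this patch:

```c
#include <stdint.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

/* Hypothetical helper: mask out the ring features SVQ does not
 * forward in this series. VIRTIO_F_VERSION_1 (modern, little-endian
 * vrings) remains required. */
static uint64_t svq_strip_unsupported_features(uint64_t features)
{
    features &= ~(1ULL << VIRTIO_RING_F_EVENT_IDX);     /* no event idx */
    features &= ~(1ULL << VIRTIO_RING_F_INDIRECT_DESC); /* no indirect */
    features &= ~(1ULL << VIRTIO_F_RING_PACKED);        /* split ring only */
    return features;
}
```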
It reuses the VirtQueue code for the device part. The driver part is
based on Linux's virtio_ring driver, but with functionality and
optimizations stripped out so it's easier to review.
However, buffer forwarding has some peculiarities. One of the most
unexpected is that a guest buffer can span more than one descriptor in
SVQ. While QEMU's emulated virtio devices handle this gracefully, it may
cause an unexpected SVQ queue-full condition. This patch also solves that
by checking for the condition at both guest kicks and device calls. The
code may become more elegant in the future if the SVQ code runs in its
own iocontext.
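A minimal sketch of that queue-full check, under illustrative names (the real logic lives in hw/virtio/vhost-shadow-virtqueue.c; the SVQState type and both svq_* helpers here are hypothetical):

```c
#include <stdint.h>

/* Illustrative SVQ state: num_free tracks shadow descriptors that are
 * not currently exposed to the device. */
typedef struct SVQState {
    uint16_t num_free;
} SVQState;

/* Stubs standing in for the real guest-ring accessors (hypothetical). */
static unsigned svq_next_chain_length(SVQState *svq) { (void)svq; return 0; }
static void svq_forward_chain(SVQState *svq, unsigned n) { (void)svq; (void)n; }

/*
 * Run on a guest kick, and again from the device call path once used
 * buffers have returned descriptors: forward pending guest chains while
 * the shadow ring has room, and stop cleanly when one would not fit.
 */
static void svq_flush_pending(SVQState *svq)
{
    for (;;) {
        unsigned needed = svq_next_chain_length(svq);

        if (needed == 0) {
            return; /* no pending guest buffers */
        }
        if (needed > svq->num_free) {
            /*
             * Queue full: one guest buffer can span several shadow
             * descriptors, so it may not fit even though the guest
             * ring still had room. Retry on the next device call.
             */
            return;
        }
        svq_forward_chain(svq, needed);
        svq->num_free -= needed;
    }
}
```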
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
Acked-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Diffstat (limited to 'hw/virtio/vhost-vdpa.c')
-rw-r--r-- | hw/virtio/vhost-vdpa.c | 155
1 file changed, 150 insertions(+), 5 deletions(-)
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
index 3e2c181d2b..297505f331 100644
--- a/hw/virtio/vhost-vdpa.c
+++ b/hw/virtio/vhost-vdpa.c
@@ -777,9 +777,9 @@ static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
  * Note that this function does not rewind kick file descriptor if cannot set
  * call one.
  */
-static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
-                                 VhostShadowVirtqueue *svq, unsigned idx,
-                                 Error **errp)
+static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
+                                  VhostShadowVirtqueue *svq, unsigned idx,
+                                  Error **errp)
 {
     struct vhost_vring_file file = {
         .index = dev->vq_index + idx,
@@ -791,7 +791,7 @@ static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
     if (unlikely(r != 0)) {
         error_setg_errno(errp, -r, "Can't set device kick fd");
-        return false;
+        return r;
     }
 
     event_notifier = &svq->hdev_call;
@@ -801,6 +801,95 @@ static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
         error_setg_errno(errp, -r, "Can't set device call fd");
     }
 
+    return r;
+}
+
+/**
+ * Unmap a SVQ area in the device
+ */
+static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr iova,
+                                      hwaddr size)
+{
+    int r;
+
+    size = ROUND_UP(size, qemu_real_host_page_size);
+    r = vhost_vdpa_dma_unmap(v, iova, size);
+    return r == 0;
+}
+
+static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
+                                       const VhostShadowVirtqueue *svq)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    struct vhost_vring_addr svq_addr;
+    size_t device_size = vhost_svq_device_area_size(svq);
+    size_t driver_size = vhost_svq_driver_area_size(svq);
+    bool ok;
+
+    vhost_svq_get_vring_addr(svq, &svq_addr);
+
+    ok = vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr, driver_size);
+    if (unlikely(!ok)) {
+        return false;
+    }
+
+    return vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr, device_size);
+}
+
+/**
+ * Map the shadow virtqueue rings in the device
+ *
+ * @dev: The vhost device
+ * @svq: The shadow virtqueue
+ * @addr: Assigned IOVA addresses
+ * @errp: Error pointer
+ */
+static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
+                                     const VhostShadowVirtqueue *svq,
+                                     struct vhost_vring_addr *addr,
+                                     Error **errp)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    size_t device_size = vhost_svq_device_area_size(svq);
+    size_t driver_size = vhost_svq_driver_area_size(svq);
+    int r;
+
+    ERRP_GUARD();
+    vhost_svq_get_vring_addr(svq, addr);
+
+    r = vhost_vdpa_dma_map(v, addr->desc_user_addr, driver_size,
+                           (void *)(uintptr_t)addr->desc_user_addr, true);
+    if (unlikely(r != 0)) {
+        error_setg_errno(errp, -r, "Cannot create vq driver region: ");
+        return false;
+    }
+
+    r = vhost_vdpa_dma_map(v, addr->used_user_addr, device_size,
+                           (void *)(intptr_t)addr->used_user_addr, false);
+    if (unlikely(r != 0)) {
+        error_setg_errno(errp, -r, "Cannot create vq device region: ");
+    }
+
+    return r == 0;
+}
+
+static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
+                                 VhostShadowVirtqueue *svq, unsigned idx,
+                                 Error **errp)
+{
+    uint16_t vq_index = dev->vq_index + idx;
+    struct vhost_vring_state s = {
+        .index = vq_index,
+    };
+    int r;
+
+    r = vhost_vdpa_set_dev_vring_base(dev, &s);
+    if (unlikely(r)) {
+        error_setg_errno(errp, -r, "Cannot set vring base");
+        return false;
+    }
+
+    r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
     return r == 0;
 }
 
@@ -815,10 +904,62 @@ static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
     }
 
     for (i = 0; i < v->shadow_vqs->len; ++i) {
+        VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+        struct vhost_vring_addr addr = {
+            .index = i,
+        };
+        int r;
         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
         if (unlikely(!ok)) {
-            error_reportf_err(err, "Cannot setup SVQ %u: ", i);
+            goto err;
+        }
+
+        vhost_svq_start(svq, dev->vdev, vq);
+        ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
+        if (unlikely(!ok)) {
+            goto err_map;
+        }
+
+        /* Override vring GPA set by vhost subsystem */
+        r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
+        if (unlikely(r != 0)) {
+            error_setg_errno(&err, -r, "Cannot set device address");
+            goto err_set_addr;
+        }
+    }
+
+    return true;
+
+err_set_addr:
+    vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
+
+err_map:
+    vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
+
+err:
+    error_reportf_err(err, "Cannot setup SVQ %u: ", i);
+    for (unsigned j = 0; j < i; ++j) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
+        vhost_vdpa_svq_unmap_rings(dev, svq);
+        vhost_svq_stop(svq);
+    }
+
+    return false;
+}
+
+static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v = dev->opaque;
+
+    if (!v->shadow_vqs) {
+        return true;
+    }
+
+    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
+        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
+        bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
+        if (unlikely(!ok)) {
             return false;
         }
     }
@@ -840,6 +981,10 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
         }
         vhost_vdpa_set_vring_ready(dev);
     } else {
+        ok = vhost_vdpa_svqs_stop(dev);
+        if (unlikely(!ok)) {
+            return -1;
+        }
         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
     }
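The diff calls vhost_svq_driver_area_size() and vhost_svq_device_area_size() without showing them. Per the virtio 1.0 split-ring layout they amount to roughly the following; a sketch with illustrative names, assuming the Linux uapi vring structs rather than QEMU's own typedefs:

```c
#include <stddef.h>
#include <stdint.h>
#include <linux/virtio_ring.h>

/* Round n up to a multiple of align. */
#define AREA_ROUND_UP(n, align) ((((n) + (align) - 1) / (align)) * (align))

/* Driver-writable area: descriptor table plus avail ring, page aligned. */
static size_t svq_driver_area_size(unsigned num, size_t host_page_size)
{
    size_t desc_size = sizeof(struct vring_desc) * num;
    size_t avail_size = offsetof(struct vring_avail, ring) +
                        sizeof(uint16_t) * num;

    return AREA_ROUND_UP(desc_size + avail_size, host_page_size);
}

/* Device-writable area: the used ring, page aligned. */
static size_t svq_device_area_size(unsigned num, size_t host_page_size)
{
    size_t used_size = offsetof(struct vring_used, ring) +
                       sizeof(struct vring_used_elem) * num;

    return AREA_ROUND_UP(used_size, host_page_size);
}
```

Rounding each area up to the host page size here matches the ROUND_UP() applied again in vhost_vdpa_svq_unmap_ring(), so map and unmap sizes agree.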