author     Peter Maydell <peter.maydell@linaro.org>   2018-02-13 16:33:31 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2018-02-13 16:33:31 +0000
commit     b734ed9de10dbf10a873ae4b44cb1c13f59213d0
tree       cff6967d9c144377ab741ba27927170fa5552674 /contrib
parent     fb68096da3d35e64c88cd610c1fa42766c58e92a
parent     bf1e7140ef0b3a149860ab9f05b36665133238f6
Merge remote-tracking branch 'remotes/mst/tags/for_upstream' into staging
virtio,vhost,pci,pc: features, fixes and cleanups

- new stats in virtio balloon
- virtio eventfd rework for boot speedup
- vhost memory rework for boot speedup
- fixes and cleanups all over the place

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>

# gpg: Signature made Tue 13 Feb 2018 16:29:55 GMT
# gpg:                using RSA key 281F0DB8D28D5469
# gpg: Good signature from "Michael S. Tsirkin <mst@kernel.org>"
# gpg:                 aka "Michael S. Tsirkin <mst@redhat.com>"
# Primary key fingerprint: 0270 606B 6F3C DF3D 0B17 0970 C350 3912 AFBE 8E67
#      Subkey fingerprint: 5D09 FD08 71C8 F85B 94CA 8A0D 281F 0DB8 D28D 5469

* remotes/mst/tags/for_upstream: (22 commits)
  virtio-balloon: include statistics of disk/file caches
  acpi-test: update FADT
  lpc: drop pcie host dependency
  tests: acpi: fix FADT not being compared to reference table
  hw/pci-bridge: fix pcie root port's IO hints capability
  libvhost-user: Support across-memory-boundary access
  libvhost-user: Fix resource leak
  virtio-balloon: unref the memory region before continuing
  pci: removed the is_express field since a uniform interface was inserted
  virtio-blk: enable multiple vectors when using multiple I/O queues
  pci/bus: let it has higher migration priority
  pci-bridge/i82801b11: clear bridge registers on platform reset
  vhost: Move log_dirty check
  vhost: Merge and delete unused callbacks
  vhost: Clean out old vhost_set_memory and friends
  vhost: Regenerate region list from changed sections list
  vhost: Merge sections added to temporary list
  vhost: Simplify ring verification checks
  vhost: Build temporary section list and deref after commit
  virtio: improve virtio devices initialization time
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'contrib')
-rw-r--r--  contrib/libvhost-user/libvhost-user.c  | 147
-rw-r--r--  contrib/libvhost-user/libvhost-user.h  |   3
2 files changed, 136 insertions(+), 14 deletions(-)
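
The libvhost-user change below reworks vu_gpa_to_va() so that a translation can stop at a memory-region boundary: the caller passes the desired length in *plen and gets back the number of bytes that are actually mapped contiguously. As a minimal sketch of how a caller might drive that contract, consider the following; copy_from_guest() is a hypothetical helper invented here for illustration, and the patch itself applies the same loop shape in virtqueue_read_indirect_desc() and virtqueue_map_desc():

    #include <stdint.h>
    #include <string.h>
    #include "libvhost-user.h"   /* VuDev, vu_gpa_to_va() */

    /* Hypothetical helper: copy `len` bytes starting at guest physical
     * address `gpa` into `dst`, restarting the translation whenever the
     * buffer crosses into another memory region. */
    static int copy_from_guest(VuDev *dev, void *dst, uint64_t gpa, uint64_t len)
    {
        while (len) {
            uint64_t chunk = len;                        /* in: bytes still wanted         */
            void *src = vu_gpa_to_va(dev, &chunk, gpa);  /* out: bytes mapped contiguously */

            if (!src) {
                return -1;   /* gpa not covered by any region */
            }

            memcpy(dst, src, chunk);
            dst = (uint8_t *)dst + chunk;
            gpa += chunk;
            len -= chunk;
        }
        return 0;
    }
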
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 27cc59791b..2e358b5bce 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -118,15 +118,22 @@ vu_panic(VuDev *dev, const char *msg, ...)
/* Translate guest physical address to our virtual address. */
void *
-vu_gpa_to_va(VuDev *dev, uint64_t guest_addr)
+vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr)
{
int i;
+ if (*plen == 0) {
+ return NULL;
+ }
+
/* Find matching memory region. */
for (i = 0; i < dev->nregions; i++) {
VuDevRegion *r = &dev->regions[i];
if ((guest_addr >= r->gpa) && (guest_addr < (r->gpa + r->size))) {
+ if ((guest_addr + *plen) > (r->gpa + r->size)) {
+ *plen = r->gpa + r->size - guest_addr;
+ }
return (void *)(uintptr_t)
guest_addr - r->gpa + r->mmap_addr + r->mmap_offset;
}
@@ -407,6 +414,15 @@ vu_set_mem_table_exec(VuDev *dev, VhostUserMsg *vmsg)
{
int i;
VhostUserMemory *memory = &vmsg->payload.memory;
+
+ for (i = 0; i < dev->nregions; i++) {
+ VuDevRegion *r = &dev->regions[i];
+ void *m = (void *) (uintptr_t) r->mmap_addr;
+
+ if (m) {
+ munmap(m, r->size + r->mmap_offset);
+ }
+ }
dev->nregions = memory->nregions;
DPRINT("Nregions: %d\n", memory->nregions);
@@ -472,9 +488,14 @@ vu_set_log_base_exec(VuDev *dev, VhostUserMsg *vmsg)
rc = mmap(0, log_mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd,
log_mmap_offset);
+ close(fd);
if (rc == MAP_FAILED) {
perror("log mmap error");
}
+
+ if (dev->log_table) {
+ munmap(dev->log_table, dev->log_size);
+ }
dev->log_table = rc;
dev->log_size = log_mmap_size;
@@ -1102,6 +1123,37 @@ virtqueue_get_head(VuDev *dev, VuVirtq *vq,
return true;
}
+static int
+virtqueue_read_indirect_desc(VuDev *dev, struct vring_desc *desc,
+ uint64_t addr, size_t len)
+{
+ struct vring_desc *ori_desc;
+ uint64_t read_len;
+
+ if (len > (VIRTQUEUE_MAX_SIZE * sizeof(struct vring_desc))) {
+ return -1;
+ }
+
+ if (len == 0) {
+ return -1;
+ }
+
+ while (len) {
+ read_len = len;
+ ori_desc = vu_gpa_to_va(dev, &read_len, addr);
+ if (!ori_desc) {
+ return -1;
+ }
+
+ memcpy(desc, ori_desc, read_len);
+ len -= read_len;
+ addr += read_len;
+ desc += read_len;
+ }
+
+ return 0;
+}
+
enum {
VIRTQUEUE_READ_DESC_ERROR = -1,
VIRTQUEUE_READ_DESC_DONE = 0, /* end of chain */
@@ -1148,8 +1200,10 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
}
while ((rc = virtqueue_num_heads(dev, vq, idx)) > 0) {
- unsigned int max, num_bufs, indirect = 0;
+ unsigned int max, desc_len, num_bufs, indirect = 0;
+ uint64_t desc_addr, read_len;
struct vring_desc *desc;
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
unsigned int i;
max = vq->vring.num;
@@ -1173,8 +1227,24 @@ vu_queue_get_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int *in_bytes,
/* loop over the indirect descriptor table */
indirect = 1;
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ goto err;
+ }
num_bufs = i = 0;
}
@@ -1372,9 +1442,24 @@ virtqueue_map_desc(VuDev *dev,
return;
}
- iov[num_sg].iov_base = vu_gpa_to_va(dev, pa);
- iov[num_sg].iov_len = sz;
- num_sg++;
+ while (sz) {
+ uint64_t len = sz;
+
+ if (num_sg == max_num_sg) {
+ vu_panic(dev, "virtio: too many descriptors in indirect table");
+ return;
+ }
+
+ iov[num_sg].iov_base = vu_gpa_to_va(dev, &len, pa);
+ if (iov[num_sg].iov_base == NULL) {
+ vu_panic(dev, "virtio: invalid address for buffers");
+ return;
+ }
+ iov[num_sg].iov_len = len;
+ num_sg++;
+ sz -= len;
+ pa += len;
+ }
*p_num_sg = num_sg;
}
@@ -1406,10 +1491,12 @@ virtqueue_alloc_element(size_t sz,
void *
vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
{
- unsigned int i, head, max;
+ unsigned int i, head, max, desc_len;
+ uint64_t desc_addr, read_len;
VuVirtqElement *elem;
unsigned out_num, in_num;
struct iovec iov[VIRTQUEUE_MAX_SIZE];
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
struct vring_desc *desc;
int rc;
@@ -1450,8 +1537,24 @@ vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz)
}
/* loop over the indirect descriptor table */
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ return NULL;
+ }
i = 0;
}
@@ -1527,7 +1630,9 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
unsigned int len)
{
struct vring_desc *desc = vq->vring.desc;
- unsigned int i, max, min;
+ unsigned int i, max, min, desc_len;
+ uint64_t desc_addr, read_len;
+ struct vring_desc desc_buf[VIRTQUEUE_MAX_SIZE];
unsigned num_bufs = 0;
max = vq->vring.num;
@@ -1539,8 +1644,24 @@ vu_log_queue_fill(VuDev *dev, VuVirtq *vq,
}
/* loop over the indirect descriptor table */
- max = desc[i].len / sizeof(struct vring_desc);
- desc = vu_gpa_to_va(dev, desc[i].addr);
+ desc_addr = desc[i].addr;
+ desc_len = desc[i].len;
+ max = desc_len / sizeof(struct vring_desc);
+ read_len = desc_len;
+ desc = vu_gpa_to_va(dev, &read_len, desc_addr);
+ if (unlikely(desc && read_len != desc_len)) {
+ /* Failed to use zero copy */
+ desc = NULL;
+ if (!virtqueue_read_indirect_desc(dev, desc_buf,
+ desc_addr,
+ desc_len)) {
+ desc = desc_buf;
+ }
+ }
+ if (!desc) {
+ vu_panic(dev, "Invalid indirect buffer table");
+ return;
+ }
i = 0;
}
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index f8a730b725..18f95f65d7 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -327,11 +327,12 @@ bool vu_dispatch(VuDev *dev);
/**
* vu_gpa_to_va:
* @dev: a VuDev context
+ * @plen: guest memory size
* @guest_addr: guest address
*
* Translate a guest address to a pointer. Returns NULL on failure.
*/
-void *vu_gpa_to_va(VuDev *dev, uint64_t guest_addr);
+void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
/**
* vu_get_queue:
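
For reference, a single-shot caller of the updated vu_gpa_to_va() documented above could look like the sketch below; `want`, `size` and `guest_addr` are illustrative local variables, not part of the API:

    uint64_t want = size;                            /* in: length we would like mapped  */
    void *va = vu_gpa_to_va(dev, &want, guest_addr); /* out: contiguously mapped length  */

    if (!va) {
        /* guest_addr is not backed by any mapped region */
    } else if (want < size) {
        /* only `want` bytes are contiguous here; translate the rest
         * starting at guest_addr + want */
    }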