author     Raphael Norwitz <raphael.norwitz@nutanix.com>  2020-05-21 05:00:47 +0000
committer  Michael S. Tsirkin <mst@redhat.com>  2020-06-12 10:17:06 -0400
commit     08fccf8f077397b47c740d4850cdeedc947f21ac
tree       321d05eeac14f18aeea84815522ac7a6fa622964
parent     27598393a23215cfbf92ad550b9541675b0b8f2b
Refactor out libvhost-user fault generation logic
In libvhost-user, the incoming postcopy migration path for setting the backend's memory tables has become convoluted. In particular, the logic which starts generating faults once the final ACK has been received from QEMU can be moved into a separate function. This simplifies the code substantially.

This logic will also be needed by the postcopy path once the VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS feature is supported.

Signed-off-by: Raphael Norwitz <raphael.norwitz@nutanix.com>
Message-Id: <1588533678-23450-7-git-send-email-raphael.norwitz@nutanix.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Marc-André Lureau <marcandre.lureau@redhat.com>
-rw-r--r--  contrib/libvhost-user/libvhost-user.c  |  147
1 file changed, 79 insertions(+), 68 deletions(-)
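
For orientation before the full diff, here is a condensed sketch of the structure the patch arrives at. The per-region body (madvise(DONTNEED), madvise(NOHUGEPAGE), UFFDIO_REGISTER on dev->postcopy_ufd, then mprotect() back to PROT_READ | PROT_WRITE) and the earlier region-mapping/ACK steps are elided here and shown in full in the hunks below; this outline only illustrates where the extracted helper ends up being called from, not the complete implementation.

    static bool
    generate_faults(VuDev *dev)
    {
        int i;

        for (i = 0; i < dev->nregions; i++) {
            /* For each region: madvise(DONTNEED), madvise(NOHUGEPAGE),
             * UFFDIO_REGISTER on dev->postcopy_ufd, then mprotect() the
             * mapping read/write -- full code in the diff below. */
        }

        return true;
    }

    static bool
    vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
    {
        /* ... map the regions and send the ACK back to QEMU, as before ... */

        /* OK, now we can go and register the memory and generate faults */
        (void)generate_faults(dev);

        return false;
    }
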
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index 3bca996c62..cccfa22209 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -584,6 +584,84 @@ map_ring(VuDev *dev, VuVirtq *vq)
}
static bool
+generate_faults(VuDev *dev) {
+ int i;
+ for (i = 0; i < dev->nregions; i++) {
+ VuDevRegion *dev_region = &dev->regions[i];
+ int ret;
+#ifdef UFFDIO_REGISTER
+ /*
+ * We should already have an open ufd. Mark each memory
+ * range as ufd.
+ * Discard any mapping we have here; note I can't use MADV_REMOVE
+ * or fallocate to make the hole since I don't want to lose
+ * data that's already arrived in the shared process.
+ * TODO: How to do hugepage
+ */
+ ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
+ dev_region->size + dev_region->mmap_offset,
+ MADV_DONTNEED);
+ if (ret) {
+ fprintf(stderr,
+ "%s: Failed to madvise(DONTNEED) region %d: %s\n",
+ __func__, i, strerror(errno));
+ }
+ /*
+ * Turn off transparent hugepages so we don't get lost wakeups
+ * in neighbouring pages.
+ * TODO: Turn this back on later.
+ */
+ ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
+ dev_region->size + dev_region->mmap_offset,
+ MADV_NOHUGEPAGE);
+ if (ret) {
+ /*
+ * Note: This can happen legally on kernels that are configured
+ * without madvise'able hugepages
+ */
+ fprintf(stderr,
+ "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
+ __func__, i, strerror(errno));
+ }
+ struct uffdio_register reg_struct;
+ reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
+ reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
+ reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
+
+ if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
+ vu_panic(dev, "%s: Failed to userfault region %d "
+ "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
+ __func__, i,
+ dev_region->mmap_addr,
+ dev_region->size, dev_region->mmap_offset,
+ dev->postcopy_ufd, strerror(errno));
+ return false;
+ }
+ if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
+ vu_panic(dev, "%s Region (%d) doesn't support COPY",
+ __func__, i);
+ return false;
+ }
+ DPRINT("%s: region %d: Registered userfault for %"
+ PRIx64 " + %" PRIx64 "\n", __func__, i,
+ (uint64_t)reg_struct.range.start,
+ (uint64_t)reg_struct.range.len);
+ /* Now it's registered we can let the client at it */
+ if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
+ dev_region->size + dev_region->mmap_offset,
+ PROT_READ | PROT_WRITE)) {
+ vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
+ i, strerror(errno));
+ return false;
+ }
+ /* TODO: Stash 'zero' support flags somewhere */
+#endif
+ }
+
+ return true;
+}
+
+static bool
vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
{
int i;
@@ -655,74 +733,7 @@ vu_set_mem_table_exec_postcopy(VuDev *dev, VhostUserMsg *vmsg)
}
/* OK, now we can go and register the memory and generate faults */
- for (i = 0; i < dev->nregions; i++) {
- VuDevRegion *dev_region = &dev->regions[i];
- int ret;
-#ifdef UFFDIO_REGISTER
- /* We should already have an open ufd. Mark each memory
- * range as ufd.
- * Discard any mapping we have here; note I can't use MADV_REMOVE
- * or fallocate to make the hole since I don't want to lose
- * data that's already arrived in the shared process.
- * TODO: How to do hugepage
- */
- ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
- dev_region->size + dev_region->mmap_offset,
- MADV_DONTNEED);
- if (ret) {
- fprintf(stderr,
- "%s: Failed to madvise(DONTNEED) region %d: %s\n",
- __func__, i, strerror(errno));
- }
- /* Turn off transparent hugepages so we dont get lose wakeups
- * in neighbouring pages.
- * TODO: Turn this backon later.
- */
- ret = madvise((void *)(uintptr_t)dev_region->mmap_addr,
- dev_region->size + dev_region->mmap_offset,
- MADV_NOHUGEPAGE);
- if (ret) {
- /* Note: This can happen legally on kernels that are configured
- * without madvise'able hugepages
- */
- fprintf(stderr,
- "%s: Failed to madvise(NOHUGEPAGE) region %d: %s\n",
- __func__, i, strerror(errno));
- }
- struct uffdio_register reg_struct;
- reg_struct.range.start = (uintptr_t)dev_region->mmap_addr;
- reg_struct.range.len = dev_region->size + dev_region->mmap_offset;
- reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;
-
- if (ioctl(dev->postcopy_ufd, UFFDIO_REGISTER, &reg_struct)) {
- vu_panic(dev, "%s: Failed to userfault region %d "
- "@%p + size:%zx offset: %zx: (ufd=%d)%s\n",
- __func__, i,
- dev_region->mmap_addr,
- dev_region->size, dev_region->mmap_offset,
- dev->postcopy_ufd, strerror(errno));
- return false;
- }
- if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
- vu_panic(dev, "%s Region (%d) doesn't support COPY",
- __func__, i);
- return false;
- }
- DPRINT("%s: region %d: Registered userfault for %"
- PRIx64 " + %" PRIx64 "\n", __func__, i,
- (uint64_t)reg_struct.range.start,
- (uint64_t)reg_struct.range.len);
- /* Now it's registered we can let the client at it */
- if (mprotect((void *)(uintptr_t)dev_region->mmap_addr,
- dev_region->size + dev_region->mmap_offset,
- PROT_READ | PROT_WRITE)) {
- vu_panic(dev, "failed to mprotect region %d for postcopy (%s)",
- i, strerror(errno));
- return false;
- }
- /* TODO: Stash 'zero' support flags somewhere */
-#endif
- }
+ (void)generate_faults(dev);
return false;
}