-rw-r--r--  include/migration/migration.h    |   1
-rw-r--r--  include/migration/postcopy-ram.h |  21
-rw-r--r--  migration/postcopy-ram.c         | 100
-rw-r--r--  trace-events                     |   2
4 files changed, 124 insertions, 0 deletions
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 1491bf32ed..a48471e026 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -96,6 +96,7 @@ struct MigrationIncomingState {
    int userfault_fd;
    QEMUFile *to_src_file;
    QemuMutex rp_mutex; /* We send replies from multiple threads */
+    void *postcopy_tmp_page;
    /* See savevm.c */
    LoadStateEntry_Head loadvm_handlers;
diff --git a/include/migration/postcopy-ram.h b/include/migration/postcopy-ram.h
index b10c03db6e..d7c292fffa 100644
--- a/include/migration/postcopy-ram.h
+++ b/include/migration/postcopy-ram.h
@@ -69,4 +69,25 @@ void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds,
void postcopy_discard_send_finish(MigrationState *ms,
                                  PostcopyDiscardState *pds);
+/*
+ * Place a page (from) at (host) efficiently.
+ * There are restrictions on how 'from' must be mapped; in general it is
+ * best to use other postcopy_ routines to allocate it.
+ * returns 0 on success
+ */
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from);
+
+/*
+ * Place a zero page at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host);
+
+/*
+ * Allocate a page of memory whose contents can later be placed into guest
+ * memory using postcopy_place_page.
+ * Returns: Pointer to the allocated page, or NULL on failure
+ */
+void *postcopy_get_tmp_page(MigrationIncomingState *mis);
+
#endif
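
Taken together, the three declarations above form the destination-side
placement API: postcopy_get_tmp_page() hands back a reusable staging page,
and postcopy_place_page()/postcopy_place_page_zero() atomically install data
(or zeros) at the faulting address. A rough sketch of how a receiving path
might use them follows; the wrapper function and its caller are hypothetical
and not part of this patch:

    /* Hypothetical destination-side helper (sketch only, not in this patch).
     * Stages incoming page data in the reusable temporary page and then
     * atomically places it at 'host_addr'; zero pages skip the staging step.
     */
    static int hypothetical_handle_page(MigrationIncomingState *mis,
                                        QEMUFile *f, void *host_addr,
                                        bool is_zero)
    {
        void *tmp;

        if (is_zero) {
            /* No payload on the wire: install a zero page atomically */
            return postcopy_place_page_zero(mis, host_addr);
        }

        tmp = postcopy_get_tmp_page(mis);     /* lazily mmap'd, then reused */
        if (!tmp) {
            return -ENOMEM;
        }
        qemu_get_buffer(f, (uint8_t *)tmp, getpagesize());

        /* UFFDIO_COPY under the hood: copies the data in and wakes any
         * thread that was stalled faulting on host_addr.
         */
        return postcopy_place_page(mis, host_addr, tmp);
    }

The temporary page is allocated lazily on first use and torn down in
postcopy_ram_incoming_cleanup(), so callers never free it themselves.
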
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index 3110b2ab56..58492c0c08 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -272,6 +272,10 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
        return -1;
    }
+    if (mis->postcopy_tmp_page) {
+        munmap(mis->postcopy_tmp_page, getpagesize());
+        mis->postcopy_tmp_page = NULL;
+    }
    return 0;
}
@@ -338,6 +342,83 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
    return 0;
}
+/*
+ * Place a host page (from) at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
+{
+    struct uffdio_copy copy_struct;
+
+    copy_struct.dst = (uint64_t)(uintptr_t)host;
+    copy_struct.src = (uint64_t)(uintptr_t)from;
+    copy_struct.len = getpagesize();
+    copy_struct.mode = 0;
+
+    /* The copy also acks to the kernel, waking up the stalled thread.
+     * TODO: We could inhibit that ack and only do it when it was requested,
+     * which would be slightly cheaper, but we'd have to be careful
+     * about the order of updating our page state.
+     */
+    if (ioctl(mis->userfault_fd, UFFDIO_COPY, &copy_struct)) {
+        int e = errno;
+        error_report("%s: %s copy host: %p from: %p",
+                     __func__, strerror(e), host, from);
+
+        return -e;
+    }
+
+    trace_postcopy_place_page(host);
+    return 0;
+}
+
+/*
+ * Place a zero page at (host) atomically
+ * returns 0 on success
+ */
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
+{
+    struct uffdio_zeropage zero_struct;
+
+    zero_struct.range.start = (uint64_t)(uintptr_t)host;
+    zero_struct.range.len = getpagesize();
+    zero_struct.mode = 0;
+
+    if (ioctl(mis->userfault_fd, UFFDIO_ZEROPAGE, &zero_struct)) {
+        int e = errno;
+        error_report("%s: %s zero host: %p",
+                     __func__, strerror(e), host);
+
+        return -e;
+    }
+
+    trace_postcopy_place_page_zero(host);
+    return 0;
+}
+
+/*
+ * Returns a target page of memory whose contents can later be placed into
+ * guest memory using postcopy_place_page.
+ * The same address is returned each time; postcopy_place_page just takes
+ * the backing page away.
+ * Returns: Pointer to the page, or NULL on failure
+ */
+void *postcopy_get_tmp_page(MigrationIncomingState *mis)
+{
+    if (!mis->postcopy_tmp_page) {
+        mis->postcopy_tmp_page = mmap(NULL, getpagesize(),
+                                      PROT_READ | PROT_WRITE,
+                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+        /* mmap reports failure with MAP_FAILED, not NULL */
+        if (mis->postcopy_tmp_page == MAP_FAILED) {
+            mis->postcopy_tmp_page = NULL;
+            error_report("%s: %s", __func__, strerror(errno));
+            return NULL;
+        }
+    }
+
+    return mis->postcopy_tmp_page;
+}
+
#else
/* No target OS support, stubs just fail */
bool postcopy_ram_supported_by_host(void)
@@ -370,6 +451,25 @@ int postcopy_ram_enable_notify(MigrationIncomingState *mis)
    assert(0);
    return -1;
}
+
+int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from)
+{
+    assert(0);
+    return -1;
+}
+
+int postcopy_place_page_zero(MigrationIncomingState *mis, void *host)
+{
+    assert(0);
+    return -1;
+}
+
+void *postcopy_get_tmp_page(MigrationIncomingState *mis)
+{
+    assert(0);
+    return NULL;
+}
+
#endif
/* ------------------------------------------------------------------------- */
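
The TODO in postcopy_place_page() refers to the fact that UFFDIO_COPY
implicitly wakes any thread stalled on the page. On kernels whose userfaultfd
supports it, the cheaper variant would pass UFFDIO_COPY_MODE_DONTWAKE and
issue an explicit UFFDIO_WAKE only when the page had actually been requested.
A sketch of that alternative (an illustration of the userfaultfd ioctls, not
code from this patch) might look like:

    /* Sketch only: place a page without the implicit wake, then wake
     * explicitly when we know a thread is waiting on it.
     */
    static int sketch_place_page_dontwake(int userfault_fd, void *host,
                                          void *from, bool was_requested)
    {
        struct uffdio_copy copy_struct;

        copy_struct.dst = (uint64_t)(uintptr_t)host;
        copy_struct.src = (uint64_t)(uintptr_t)from;
        copy_struct.len = getpagesize();
        copy_struct.mode = UFFDIO_COPY_MODE_DONTWAKE; /* inhibit the ack */

        if (ioctl(userfault_fd, UFFDIO_COPY, &copy_struct)) {
            return -errno;
        }

        if (was_requested) {
            struct uffdio_range range;

            range.start = (uint64_t)(uintptr_t)host;
            range.len = getpagesize();
            /* Explicit wake replacing the one UFFDIO_COPY would have done */
            if (ioctl(userfault_fd, UFFDIO_WAKE, &range)) {
                return -errno;
            }
        }
        return 0;
    }

The "careful about the order" caveat presumably means the receiver's record of
which pages are resident has to be updated before deciding whether to wake.
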
diff --git a/trace-events b/trace-events
index 3df3656f96..c493f5dc10 100644
--- a/trace-events
+++ b/trace-events
@@ -1551,6 +1551,8 @@ postcopy_discard_send_range(const char *ramblock, unsigned long start, unsigned
postcopy_ram_discard_range(void *start, size_t length) "%p,+%zx"
postcopy_cleanup_range(const char *ramblock, void *host_addr, size_t offset, size_t length) "%s: %p offset=%zx length=%zx"
postcopy_init_range(const char *ramblock, void *host_addr, size_t offset, size_t length) "%s: %p offset=%zx length=%zx"
+postcopy_place_page(void *host_addr) "host=%p"
+postcopy_place_page_zero(void *host_addr) "host=%p"
# kvm-all.c
kvm_ioctl(int type, void *arg) "type 0x%x, arg %p"