author     Dr. David Alan Gilbert <dgilbert@redhat.com>    2015-11-05 18:11:08 +0000
committer  Juan Quintela <quintela@redhat.com>              2015-11-10 15:00:27 +0100
commit     6c595cdee116dc46b0d4d7d632a426681ae66ad9 (patch)
tree       98026fbdc9644b65f56a4f62aae5271bb7178b42
parent     1e2d90ebc54531c416a6765849308c8476d98f2d (diff)
Page request: Process incoming page request
On receiving MIG_RPCOMM_REQ_PAGES look up the address and queue the page.

Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Amit Shah <amit.shah@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
-rw-r--r--  include/migration/migration.h | 22
-rw-r--r--  migration/migration.c         | 31
-rw-r--r--  migration/ram.c               | 85
-rw-r--r--  trace-events                  |  1
4 files changed, 138 insertions(+), 1 deletion(-)
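
The queue added by this patch is a plain mutex-protected QSIMPLEQ: the return-path thread is the producer (migrate_handle_rp_req_pages calling ram_save_queue_pages, see below), while draining the queue on the save path is not part of this patch. As a rough illustration of the pattern the new fields follow, here is a minimal sketch that only assumes QEMU's qemu/queue.h and qemu/thread.h helpers; the names PageRequest, queue_request and take_request are illustrative, not taken from the patch:

#include <glib.h>
#include <stdbool.h>
#include <stdint.h>
#include "qemu/queue.h"    /* QSIMPLEQ_* singly-linked queue macros */
#include "qemu/thread.h"   /* QemuMutex */

/* Simplified stand-in for MigrationSrcPageRequest (which additionally
 * keeps a RAMBlock pointer and takes a memory_region_ref on it). */
struct PageRequest {
    uint64_t offset;                       /* offset within the RAMBlock */
    uint64_t len;                          /* length in bytes */
    QSIMPLEQ_ENTRY(PageRequest) next_req;  /* queue linkage */
};

static QemuMutex req_mutex;                /* qemu_mutex_init() once, as the
                                            * patch does in migrate_get_current() */
static QSIMPLEQ_HEAD(, PageRequest) req_queue =
    QSIMPLEQ_HEAD_INITIALIZER(req_queue);

/* Producer: called for each incoming page request from the destination. */
static void queue_request(uint64_t offset, uint64_t len)
{
    struct PageRequest *req = g_malloc0(sizeof(*req));

    req->offset = offset;
    req->len = len;

    qemu_mutex_lock(&req_mutex);
    QSIMPLEQ_INSERT_TAIL(&req_queue, req, next_req);
    qemu_mutex_unlock(&req_mutex);
}

/* Consumer: pop one request, returning false if the queue is empty. */
static bool take_request(uint64_t *offset, uint64_t *len)
{
    struct PageRequest *req;
    bool found = false;

    qemu_mutex_lock(&req_mutex);
    req = QSIMPLEQ_FIRST(&req_queue);
    if (req) {
        *offset = req->offset;
        *len = req->len;
        QSIMPLEQ_REMOVE_HEAD(&req_queue, next_req);
        g_free(req);
        found = true;
    }
    qemu_mutex_unlock(&req_mutex);
    return found;
}

The mutex is what allows the return-path thread to add requests while the migration thread is busy sending pages; the RCU read lock taken in ram_save_queue_pages() protects the RAMBlock lookup, not the queue itself.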
diff --git a/include/migration/migration.h b/include/migration/migration.h
index 1046d4e370..1491bf32ed 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -105,6 +105,18 @@ MigrationIncomingState *migration_incoming_get_current(void);
MigrationIncomingState *migration_incoming_state_new(QEMUFile *f);
void migration_incoming_state_destroy(void);
+/*
+ * An outstanding page request, on the source, having been received
+ * and queued
+ */
+struct MigrationSrcPageRequest {
+ RAMBlock *rb;
+ hwaddr offset;
+ hwaddr len;
+
+ QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req;
+};
+
struct MigrationState
{
int64_t bandwidth_limit;
@@ -141,6 +153,12 @@ struct MigrationState
/* Flag set once the migration thread is running (and needs joining) */
bool migration_thread_running;
+
+ /* Queue of outstanding page requests from the destination */
+ QemuMutex src_page_req_mutex;
+ QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests;
+ /* The RAMBlock used in the last src_page_request */
+ RAMBlock *last_req_rb;
};
void process_incoming_migration(QEMUFile *f);
@@ -288,6 +306,10 @@ void savevm_skip_configuration(void);
int global_state_store(void);
void global_state_store_running(void);
+void flush_page_queue(MigrationState *ms);
+int ram_save_queue_pages(MigrationState *ms, const char *rbname,
+ ram_addr_t start, ram_addr_t len);
+
PostcopyState postcopy_state_get(void);
/* Set the state and return the old state */
PostcopyState postcopy_state_set(PostcopyState new_state);
diff --git a/migration/migration.c b/migration/migration.c
index 6ccdeb8023..7d64cd3240 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -21,6 +21,7 @@
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
+#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
@@ -28,9 +29,10 @@
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
-#include "qapi/util.h"
#include "qapi-event.h"
#include "qom/cpu.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
#define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
@@ -72,6 +74,7 @@ static PostcopyState incoming_postcopy_state;
/* For outgoing */
MigrationState *migrate_get_current(void)
{
+ static bool once;
static MigrationState current_migration = {
.state = MIGRATION_STATUS_NONE,
.bandwidth_limit = MAX_THROTTLE,
@@ -89,6 +92,10 @@ MigrationState *migrate_get_current(void)
DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
};
+ if (!once) {
+ qemu_mutex_init(&current_migration.src_page_req_mutex);
+ once = true;
+ }
return &current_migration;
}
@@ -771,6 +778,8 @@ static void migrate_fd_cleanup(void *opaque)
qemu_bh_delete(s->cleanup_bh);
s->cleanup_bh = NULL;
+ flush_page_queue(s);
+
if (s->file) {
trace_migrate_fd_cleanup();
qemu_mutex_unlock_iothread();
@@ -903,6 +912,8 @@ MigrationState *migrate_init(const MigrationParams *params)
s->bandwidth_limit = bandwidth_limit;
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
+ QSIMPLEQ_INIT(&s->src_page_requests);
+
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
@@ -1193,7 +1204,25 @@ static struct rp_cmd_args {
static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
ram_addr_t start, size_t len)
{
+ long our_host_ps = getpagesize();
+
trace_migrate_handle_rp_req_pages(rbname, start, len);
+
+ /*
+ * Since we currently insist on matching page sizes, just sanity check
+ * we're being asked for whole host pages.
+ */
+ if (start & (our_host_ps-1) ||
+ (len & (our_host_ps-1))) {
+ error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
+ " len: %zd", __func__, start, len);
+ mark_source_rp_bad(ms);
+ return;
+ }
+
+ if (ram_save_queue_pages(ms, rbname, start, len)) {
+ mark_source_rp_bad(ms);
+ }
}
/*
diff --git a/migration/ram.c b/migration/ram.c
index 2e27b26d4c..8302d097c5 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1016,6 +1016,91 @@ static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
}
/**
+ * flush_page_queue: Flush any remaining pages in the ram request queue
+ * it should be empty at the end anyway, but in error cases there may be
+ * some left.
+ *
+ * ms: MigrationState
+ */
+void flush_page_queue(MigrationState *ms)
+{
+ struct MigrationSrcPageRequest *mspr, *next_mspr;
+ /* This queue generally should be empty - but in the case of a failed
+ * migration might have some droppings in.
+ */
+ rcu_read_lock();
+ QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
+ memory_region_unref(mspr->rb->mr);
+ QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
+ g_free(mspr);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * Queue the pages for transmission, e.g. a request from postcopy destination
+ * ms: MigrationStatus in which the queue is held
+ * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
+ * start: Offset from the start of the RAMBlock
+ * len: Length (in bytes) to send
+ * Return: 0 on success
+ */
+int ram_save_queue_pages(MigrationState *ms, const char *rbname,
+ ram_addr_t start, ram_addr_t len)
+{
+ RAMBlock *ramblock;
+
+ rcu_read_lock();
+ if (!rbname) {
+ /* Reuse last RAMBlock */
+ ramblock = ms->last_req_rb;
+
+ if (!ramblock) {
+ /*
+ * Shouldn't happen, we can't reuse the last RAMBlock if
+ * it's the 1st request.
+ */
+ error_report("ram_save_queue_pages no previous block");
+ goto err;
+ }
+ } else {
+ ramblock = qemu_ram_block_by_name(rbname);
+
+ if (!ramblock) {
+ /* We shouldn't be asked for a non-existent RAMBlock */
+ error_report("ram_save_queue_pages no block '%s'", rbname);
+ goto err;
+ }
+ ms->last_req_rb = ramblock;
+ }
+ trace_ram_save_queue_pages(ramblock->idstr, start, len);
+ if (start+len > ramblock->used_length) {
+ error_report("%s request overrun start=%zx len=%zx blocklen=%zx",
+ __func__, start, len, ramblock->used_length);
+ goto err;
+ }
+
+ struct MigrationSrcPageRequest *new_entry =
+ g_malloc0(sizeof(struct MigrationSrcPageRequest));
+ new_entry->rb = ramblock;
+ new_entry->offset = start;
+ new_entry->len = len;
+
+ memory_region_ref(ramblock->mr);
+ qemu_mutex_lock(&ms->src_page_req_mutex);
+ QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
+ qemu_mutex_unlock(&ms->src_page_req_mutex);
+ rcu_read_unlock();
+
+ return 0;
+
+err:
+ rcu_read_unlock();
+ return -1;
+}
+
+
+/**
* ram_find_and_save_block: Finds a dirty page and sends it to f
*
* Called within an RCU critical section.
diff --git a/trace-events b/trace-events
index 2bafa12f52..0914e0a2a2 100644
--- a/trace-events
+++ b/trace-events
@@ -1256,6 +1256,7 @@ migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64""
migration_throttle(void) ""
ram_postcopy_send_discard_bitmap(void) ""
+ram_save_queue_pages(const char *rbname, size_t start, size_t len) "%s: start: %zx len: %zx"
# hw/display/qxl.c
disable qxl_interface_set_mm_time(int qid, uint32_t mm_time) "%d %d"