Diffstat (limited to 'migration/ram.c')
 -rw-r--r--  migration/ram.c  166
 1 file changed, 164 insertions(+), 2 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index bc38d98cc3..7e7deec4d8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3447,6 +3447,29 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
return block->host + offset;
}
+static inline void *colo_cache_from_block_offset(RAMBlock *block,
+ ram_addr_t offset)
+{
+ if (!offset_in_ramblock(block, offset)) {
+ return NULL;
+ }
+ if (!block->colo_cache) {
+        error_report("%s: colo_cache is NULL in block: %s",
+                     __func__, block->idstr);
+ return NULL;
+ }
+
+    /*
+     * During a COLO checkpoint, we need a bitmap of these migrated pages.
+     * It helps us decide which pages in the RAM cache should be flushed
+     * into the VM's RAM later.
+     */
+ if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
+ ram_state->migration_dirty_pages++;
+ }
+ return block->colo_cache + offset;
+}
+
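The test_and_set_bit() call above bumps migration_dirty_pages only on a 0 -> 1 transition, so a page received several times before the next flush is still counted once. A minimal standalone sketch of that accounting (hypothetical names; non-atomic, unlike QEMU's bitops):

    #include <limits.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Set bit 'nr' and report whether it was already set. */
    static bool test_and_set_bit_sketch(unsigned long *bmap, unsigned long nr)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);
        unsigned long old = bmap[nr / BITS_PER_LONG];

        bmap[nr / BITS_PER_LONG] = old | mask;
        return old & mask;
    }

    /* Count a page as dirty only the first time it is marked. */
    static void mark_page_dirty_sketch(unsigned long *bmap, unsigned long page,
                                       uint64_t *dirty_pages)
    {
        if (!test_and_set_bit_sketch(bmap, page)) {
            (*dirty_pages)++;
        }
    }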
/**
* ram_handle_compressed: handle the zero page case
*
@@ -3651,6 +3674,88 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
qemu_mutex_unlock(&decomp_done_lock);
}
+/*
+ * colo cache: this is for the secondary VM, where we cache the whole
+ * memory of the secondary VM. The global lock must be held when
+ * calling this helper.
+ */
+int colo_init_ram_cache(void)
+{
+ RAMBlock *block;
+
+ rcu_read_lock();
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ block->colo_cache = qemu_anon_ram_alloc(block->used_length,
+ NULL,
+ false);
+ if (!block->colo_cache) {
+            error_report("%s: Can't alloc memory for COLO cache of block %s,"
+                         " size 0x" RAM_ADDR_FMT, __func__, block->idstr,
+                         block->used_length);
+ goto out_locked;
+ }
+ memcpy(block->colo_cache, block->host, block->used_length);
+ }
+ rcu_read_unlock();
+    /*
+     * Record the dirty pages that are sent by the PVM; we use this dirty
+     * bitmap to decide which pages in the cache should be flushed into the
+     * SVM's RAM. Here we use the same name 'ram_bitmap' as for migration.
+     */
+ if (ram_bytes_total()) {
+ RAMBlock *block;
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+ block->bmap = bitmap_new(pages);
+ bitmap_set(block->bmap, 0, pages);
+ }
+ }
+ ram_state = g_new0(RAMState, 1);
+ ram_state->migration_dirty_pages = 0;
+ memory_global_dirty_log_start();
+
+ return 0;
+
+out_locked:
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+
+ rcu_read_unlock();
+ return -errno;
+}
+
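The out_locked path above works because every colo_cache pointer starts out NULL, so the rollback loop can walk all blocks and free only what was actually allocated. The same all-or-nothing idiom in condensed standalone form (a hypothetical sketch, not QEMU code):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    static int alloc_all_sketch(void **bufs, const size_t *sizes, int n)
    {
        memset(bufs, 0, n * sizeof(*bufs));   /* every slot starts NULL */

        for (int i = 0; i < n; i++) {
            bufs[i] = malloc(sizes[i]);
            if (!bufs[i]) {
                goto rollback;
            }
        }
        return 0;

    rollback:
        for (int i = 0; i < n; i++) {
            free(bufs[i]);                    /* free(NULL) is a no-op */
            bufs[i] = NULL;
        }
        return -ENOMEM;
    }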
+/* The global lock must be held when calling this helper */
+void colo_release_ram_cache(void)
+{
+ RAMBlock *block;
+
+ memory_global_dirty_log_stop();
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ g_free(block->bmap);
+ block->bmap = NULL;
+ }
+
+ rcu_read_lock();
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ if (block->colo_cache) {
+ qemu_anon_ram_free(block->colo_cache, block->used_length);
+ block->colo_cache = NULL;
+ }
+ }
+
+ rcu_read_unlock();
+ g_free(ram_state);
+ ram_state = NULL;
+}
+
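Since colo_init_ram_cache() initializes each block's bmap fully set, the first flush after entering COLO state treats every page as dirty and rewrites the entire cache into guest RAM. A hypothetical caller sketch of how the two helpers above pair up (the real call sites live in the COLO incoming path, outside this diff):

    static int colo_secondary_run_sketch(void)
    {
        int ret = colo_init_ram_cache();      /* global lock held */
        if (ret < 0) {
            return ret;                       /* init already rolled back */
        }

        /* ... receive checkpoints; ram_load() fills colo_cache ... */

        colo_release_ram_cache();             /* global lock held */
        return 0;
    }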
/**
* ram_load_setup: Setup RAM for migration incoming side
*
@@ -3667,6 +3772,7 @@ static int ram_load_setup(QEMUFile *f, void *opaque)
xbzrle_load_setup();
ramblock_recv_map_init();
+
return 0;
}
@@ -3687,6 +3793,7 @@ static int ram_load_cleanup(void *opaque)
g_free(rb->receivedmap);
rb->receivedmap = NULL;
}
+
return 0;
}
@@ -3869,6 +3976,46 @@ static bool postcopy_is_running(void)
return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}
+/*
+ * Flush the content of the RAM cache into the SVM's memory.
+ * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
+ */
+static void colo_flush_ram_cache(void)
+{
+ RAMBlock *block = NULL;
+ void *dst_host;
+ void *src_host;
+ unsigned long offset = 0;
+
+ memory_global_dirty_log_sync();
+ rcu_read_lock();
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ migration_bitmap_sync_range(ram_state, block, 0, block->used_length);
+ }
+ rcu_read_unlock();
+
+ trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
+ rcu_read_lock();
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
+
+ while (block) {
+ offset = migration_bitmap_find_dirty(ram_state, block, offset);
+
+ if (offset << TARGET_PAGE_BITS >= block->used_length) {
+ offset = 0;
+ block = QLIST_NEXT_RCU(block, next);
+ } else {
+ migration_bitmap_clear_dirty(ram_state, block, offset);
+ dst_host = block->host + (offset << TARGET_PAGE_BITS);
+ src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
+ memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
+ }
+ }
+
+ rcu_read_unlock();
+ trace_colo_flush_ram_cache_end();
+}
+
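The loop above repeatedly asks the bitmap for the next dirty page, clears that bit, and copies the page from colo_cache into guest RAM; once the offset runs past the block's used_length it resets the offset and moves on to the next block. A standalone single-region sketch of the same walk (hypothetical helpers, fixed 4 KiB pages):

    #include <stdint.h>
    #include <string.h>

    #define PAGE_BITS  12
    #define PAGE_SIZE  (1UL << PAGE_BITS)

    /* Return the next set bit at or after 'start', or 'npages' if none. */
    static unsigned long find_dirty_sketch(const uint8_t *bmap,
                                           unsigned long start,
                                           unsigned long npages)
    {
        for (unsigned long i = start; i < npages; i++) {
            if (bmap[i / 8] & (1u << (i % 8))) {
                return i;
            }
        }
        return npages;
    }

    /* Copy every dirty page from 'cache' into 'host', clearing bits as we go. */
    static void flush_cache_sketch(uint8_t *bmap, unsigned long npages,
                                   uint8_t *host, const uint8_t *cache)
    {
        unsigned long page = 0;

        while ((page = find_dirty_sketch(bmap, page, npages)) < npages) {
            bmap[page / 8] &= ~(1u << (page % 8));        /* clear dirty bit */
            memcpy(host + (page << PAGE_BITS),
                   cache + (page << PAGE_BITS), PAGE_SIZE);
        }
    }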
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
int flags = 0, ret = 0, invalid_flags = 0;
@@ -3924,13 +4071,24 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
- host = host_from_ram_block_offset(block, addr);
+            /*
+             * After going into COLO state, incoming pages should be loaded
+             * into the colo_cache instead of directly into guest RAM.
+             */
+ if (migration_incoming_in_colo_state()) {
+ host = colo_cache_from_block_offset(block, addr);
+ } else {
+ host = host_from_ram_block_offset(block, addr);
+ }
if (!host) {
error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
break;
}
- ramblock_recv_bitmap_set(block, host);
+
+ if (!migration_incoming_in_colo_state()) {
+ ramblock_recv_bitmap_set(block, host);
+ }
+
trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
}
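The net effect of the two COLO branches above: in COLO state a page is staged in colo_cache and deliberately skips the receivedmap bookkeeping, while normal incoming migration keeps writing straight into guest RAM. Condensed into a standalone helper (hypothetical, not part of the patch):

    static void *load_dest_sketch(RAMBlock *block, ram_addr_t addr)
    {
        if (migration_incoming_in_colo_state()) {
            /* COLO secondary: stage the page for the next checkpoint flush. */
            return colo_cache_from_block_offset(block, addr);
        }
        /* Normal incoming migration: write directly into guest RAM. */
        return host_from_ram_block_offset(block, addr);
    }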
@@ -4034,6 +4192,10 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
ret |= wait_for_decompress_done();
rcu_read_unlock();
trace_ram_load_complete(ret, seq_iter);
+
+ if (!ret && migration_incoming_in_colo_state()) {
+ colo_flush_ram_cache();
+ }
return ret;
}