author    Zhang Chen <zhangckid@gmail.com>    2018-09-03 12:38:49 +0800
committer Jason Wang <jasowang@redhat.com>    2018-10-19 11:15:03 +0800
commit    7d9acafa2cc094d03f46abc522786a1696983639 (patch)
tree      fde68fb48718623b06ae1cd62b8f02cf7ea54ad6 /migration
parent    13af18f2228892d19d40ff96672677d168da7e9e (diff)
ram/COLO: Record the dirty pages that SVM received
Record the addresses of the dirty pages that are received; this will help flush the pages cached for the SVM. The trick here is that we record the dirty pages by reusing the migration dirty bitmap. In a later patch we will start dirty logging for the SVM, just as migration does, so that we can record the dirty pages caused by both the PVM and the SVM; only those dirty pages are flushed from the RAM cache when a checkpoint is taken.

Signed-off-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Zhang Chen <zhangckid@gmail.com>
Signed-off-by: Zhang Chen <chen.zhang@intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c | 43
1 file changed, 40 insertions(+), 3 deletions(-)
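The idea is small enough to demonstrate outside QEMU. Below is a minimal standalone C sketch (not QEMU code; the names PAGE_BITS, bmap, cache, ram and record_received_page are made up for illustration) of the technique the hunks that follow apply to block->bmap, colo_cache and migration_dirty_pages: mark each received page in a bitmap, counting a page only the first time it is seen, then at checkpoint copy only the marked pages from the cache into RAM and clear their bits.

/*
 * Standalone sketch of "record received pages in a bitmap, flush only
 * those pages from the cache at checkpoint". Hypothetical names; this
 * is not the QEMU implementation.
 */
#include <stdio.h>
#include <string.h>
#include <limits.h>

#define PAGE_BITS     12                     /* 4 KiB pages, like TARGET_PAGE_BITS */
#define PAGE_SIZE     (1UL << PAGE_BITS)
#define NR_PAGES      8
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

static unsigned long bmap[(NR_PAGES + BITS_PER_LONG - 1) / BITS_PER_LONG];
static unsigned char ram[NR_PAGES][PAGE_SIZE];    /* stands in for SVM RAM    */
static unsigned char cache[NR_PAGES][PAGE_SIZE];  /* stands in for colo_cache */
static unsigned long dirty_pages;

/* Mark a received page dirty; count it only on the first arrival. */
static void record_received_page(unsigned long offset)
{
    unsigned long page = offset >> PAGE_BITS;
    unsigned long mask = 1UL << (page % BITS_PER_LONG);

    if (!(bmap[page / BITS_PER_LONG] & mask)) {
        bmap[page / BITS_PER_LONG] |= mask;
        dirty_pages++;
    }
}

/* At checkpoint, copy only the recorded pages from the cache into RAM. */
static void flush_cached_pages(void)
{
    for (unsigned long page = 0; page < NR_PAGES; page++) {
        unsigned long mask = 1UL << (page % BITS_PER_LONG);

        if (bmap[page / BITS_PER_LONG] & mask) {
            memcpy(ram[page], cache[page], PAGE_SIZE);
            bmap[page / BITS_PER_LONG] &= ~mask;
            dirty_pages--;
        }
    }
}

int main(void)
{
    memset(cache[3], 0xab, PAGE_SIZE);       /* pretend page 3 arrived from the PVM */
    record_received_page(3UL << PAGE_BITS);
    record_received_page(3UL << PAGE_BITS);  /* a duplicate arrival is counted once */

    printf("dirty pages before flush: %lu\n", dirty_pages);
    flush_cached_pages();
    printf("dirty pages after flush:  %lu, ram[3][0]=0x%02x\n",
           dirty_pages, ram[3][0]);
    return 0;
}

In the actual patch, test_and_set_bit() plays the role of record_received_page()'s check-then-set, and the flush side runs later over the RAM cache when a checkpoint is taken, as the commit message describes.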
diff --git a/migration/ram.c b/migration/ram.c
index cd7a446c95..404c8f0853 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3458,6 +3458,15 @@ static inline void *colo_cache_from_block_offset(RAMBlock *block,
__func__, block->idstr);
return NULL;
}
+
+ /*
+ * During the COLO checkpoint, we need a bitmap of these migrated pages.
+ * It helps us decide which pages in the RAM cache should be flushed
+ * into the VM's RAM later.
+ */
+ if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
+ ram_state->migration_dirty_pages++;
+ }
return block->colo_cache + offset;
}
@@ -3675,7 +3684,7 @@ int colo_init_ram_cache(void)
RAMBlock *block;
rcu_read_lock();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
block->colo_cache = qemu_anon_ram_alloc(block->used_length,
NULL,
false);
@@ -3688,10 +3697,29 @@ int colo_init_ram_cache(void)
memcpy(block->colo_cache, block->host, block->used_length);
}
rcu_read_unlock();
+ /*
+ * Record the dirty pages that are sent by the PVM; we use this dirty
+ * bitmap to decide which pages in the RAM cache should be flushed into
+ * the SVM's RAM. Here we use the same name 'ram_bitmap' as migration does.
+ */
+ if (ram_bytes_total()) {
+ RAMBlock *block;
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
+
+ block->bmap = bitmap_new(pages);
+ bitmap_set(block->bmap, 0, pages);
+ }
+ }
+ ram_state = g_new0(RAMState, 1);
+ ram_state->migration_dirty_pages = 0;
+
return 0;
out_locked:
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
@@ -3707,14 +3735,23 @@ void colo_release_ram_cache(void)
{
RAMBlock *block;
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
+ g_free(block->bmap);
+ block->bmap = NULL;
+ }
+
rcu_read_lock();
- QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+
+ RAMBLOCK_FOREACH_MIGRATABLE(block) {
if (block->colo_cache) {
qemu_anon_ram_free(block->colo_cache, block->used_length);
block->colo_cache = NULL;
}
}
+
rcu_read_unlock();
+ g_free(ram_state);
+ ram_state = NULL;
}
/**