author    Wei Wang <wei.w.wang@intel.com>    2018-12-11 16:24:52 +0800
committer Dr. David Alan Gilbert <dgilbert@redhat.com>    2019-03-06 10:49:18 +0000
commit    6eeb63f740874150d7b5921541948c29b920a21d (patch)
tree      c33cce71b9ab41956cbdc2cde176e7b0bd20f651 /migration
parent    bd2270608fa0112108aafcba89b87282c68db741 (diff)
migration/ram.c: add the free page optimization enable flag
This patch adds the free page optimization enable flag, and a function to
set this flag. When the free page optimization is enabled, not all of the
pages need to be sent in the bulk stage.

Why use a new flag, instead of directly disabling ram_bulk_stage when the
optimization is running? Thanks to Peter Xu's reminder that disabling
ram_bulk_stage will affect the use of compression. Please see
save_page_use_compression. When xbzrle and compression are used, if the
free page optimization causes ram_bulk_stage to be disabled,
save_page_use_compression will return false, which disables the use of
compression. That is, if the free page optimization avoids sending half
of the guest pages, the other half of the pages loses the benefit of
compression in the meantime. Using a new flag to let
migration_bitmap_find_dirty skip the free pages in the bulk stage avoids
the above issue.

Signed-off-by: Wei Wang <wei.w.wang@intel.com>
CC: Dr. David Alan Gilbert <dgilbert@redhat.com>
CC: Juan Quintela <quintela@redhat.com>
CC: Michael S. Tsirkin <mst@redhat.com>
CC: Peter Xu <peterx@redhat.com>
Message-Id: <1544516693-5395-7-git-send-email-wei.w.wang@intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
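For context, the compression interaction described above can be sketched
in C. This is a simplified paraphrase of the save_page_use_compression()
logic the message refers to, not verbatim QEMU code; the migrate_use_*()
accessor names are recalled from the QEMU tree of this era and should be
treated as assumptions.

/*
 * Simplified sketch (not verbatim): why clearing ram_bulk_stage
 * would have disabled compression when xbzrle is also in use.
 */
static bool save_page_use_compression(RAMState *rs)
{
    /* Compression must be enabled at all. */
    if (!migrate_use_compression()) {
        return false;
    }

    /*
     * With xbzrle also enabled, compression is only used while
     * ram_bulk_stage is true; xbzrle takes over afterwards. If the
     * free page optimization simply cleared ram_bulk_stage, this
     * would start returning false and the remaining bulk-stage
     * pages would lose compression.
     */
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return true;
    }

    return false;
}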
Diffstat (limited to 'migration')
-rw-r--r--  migration/ram.c | 18 +++++++++++++++++-
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/migration/ram.c b/migration/ram.c
index bee4fb3fd4..35bd6213e9 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -316,6 +316,8 @@ struct RAMState {
     uint32_t last_version;
     /* We are in the first round */
     bool ram_bulk_stage;
+    /* The free page optimization is enabled */
+    bool fpo_enabled;
     /* How many times we have dirty too many pages */
     int dirty_rate_high_cnt;
     /* these variables are used for bitmap sync */
@@ -380,6 +382,15 @@ int precopy_notify(PrecopyNotifyReason reason, Error **errp)
     return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
 }
 
+void precopy_enable_free_page_optimization(void)
+{
+    if (!ram_state) {
+        return;
+    }
+
+    ram_state->fpo_enabled = true;
+}
+
 uint64_t ram_bytes_remaining(void)
 {
     return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
@@ -1601,7 +1612,11 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
         return size;
     }
 
-    if (rs->ram_bulk_stage && start > 0) {
+    /*
+     * When the free page optimization is enabled, we need to check the bitmap
+     * to send the non-free pages rather than all the pages in the bulk stage.
+     */
+    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
         next = start + 1;
     } else {
         next = find_next_bit(bitmap, size, start);
@@ -2651,6 +2666,7 @@ static void ram_state_reset(RAMState *rs)
     rs->last_page = 0;
    rs->last_version = ram_list.version;
     rs->ram_bulk_stage = true;
+    rs->fpo_enabled = false;
 }
 
 #define MAX_WAIT 50 /* ms, half buffered_file limit */
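For illustration, a hypothetical consumer of the new hook is sketched
below. In the broader patch series the flag is expected to be flipped
from a precopy notifier once free page hinting (e.g. virtio-balloon) is
ready, but this callback name, and the choice of PRECOPY_NOTIFY_SETUP as
the trigger, are assumptions for illustration rather than part of this
patch.

/*
 * Hypothetical sketch (not part of this patch): a precopy notifier
 * callback that enables the optimization when migration setup runs.
 */
static int free_page_hint_notify(NotifierWithReturn *n, void *data)
{
    PrecopyNotifyData *pnd = data;

    if (pnd->reason == PRECOPY_NOTIFY_SETUP) {
        /*
         * After this, migration_bitmap_find_dirty() consults the
         * dirty bitmap even during the bulk stage, so pages cleared
         * by free page hints are skipped instead of sent.
         */
        precopy_enable_free_page_optimization();
    }

    return 0;
}

Note that precopy_enable_free_page_optimization() itself is deliberately
a no-op before ram_state is allocated, so callers do not need to order
themselves against migration setup.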