 migration/ram.c | 41 ++++++++++++++++++-----------------------
 1 file changed, 18 insertions(+), 23 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 57efa67f20..7223b0d8ca 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -325,7 +325,8 @@ struct RAMState {
     uint64_t xbzrle_bytes_prev;
     /* Start using XBZRLE (e.g., after the first round). */
     bool xbzrle_enabled;
-
+    /* Are we at the last stage of migration? */
+    bool last_stage;
     /* compression statistics since the beginning of the period */
     /* count of times there was no free thread to compress data */
     uint64_t compress_thread_busy_prev;
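
This hunk is the heart of the patch: the completion flag moves from a parameter that every function in the page-saving call chain had to forward into a field on the long-lived RAMState. A minimal sketch of the pattern, using hypothetical names rather than QEMU's actual API:

    #include <stdbool.h>

    /* Hypothetical stand-in for RAMState; not QEMU's actual definition. */
    struct State {
        bool last_stage;    /* set once per migration, read everywhere */
    };

    /* Before: the flag is threaded through every call. */
    static int save_page_old(struct State *s, int page, bool last_stage)
    {
        (void)s;
        return last_stage ? 0 : page;   /* skip cache work on the last pass */
    }

    /* After: callees read the flag from the state they already receive,
     * so intermediate functions no longer need to forward it. */
    static int save_page_new(struct State *s, int page)
    {
        return s->last_stage ? 0 : page;
    }
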
@@ -683,11 +684,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
  * @current_addr: addr of the page
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
  */
 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                             ram_addr_t current_addr, RAMBlock *block,
-                            ram_addr_t offset, bool last_stage)
+                            ram_addr_t offset)
 {
     int encoded_len = 0, bytes_xbzrle;
     uint8_t *prev_cached_page;
@@ -695,7 +695,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
     if (!cache_is_cached(XBZRLE.cache, current_addr,
                          ram_counters.dirty_sync_count)) {
         xbzrle_counters.cache_miss++;
-        if (!last_stage) {
+        if (!rs->last_stage) {
             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                              ram_counters.dirty_sync_count) == -1) {
                 return -1;
@@ -734,7 +734,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
      * Update the cache contents, so that it corresponds to the data
      * sent, in all cases except where we skip the page.
      */
-    if (!last_stage && encoded_len != 0) {
+    if (!rs->last_stage && encoded_len != 0) {
         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
         /*
          * In the case where we couldn't compress, ensure that the caller
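
The rewritten guard preserves the existing rule: the XBZRLE cache is only refreshed while a later migration round can still read it; during the last stage no further pass will happen, so the memcpy would be wasted work. A sketch of the rule, with hypothetical names rather than QEMU's code:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* Keep the cached copy equal to the data just sent, but only when a
     * later round can benefit from it; hypothetical helper, not QEMU's. */
    static void maybe_update_cache(bool last_stage, uint8_t *cached_page,
                                   const uint8_t *current_page,
                                   size_t page_size, int encoded_len)
    {
        if (!last_stage && encoded_len != 0) {
            memcpy(cached_page, current_page, page_size);
        }
    }
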
@@ -1290,9 +1290,8 @@ static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
  * @rs: current RAM state
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
- * @last_stage: if we are at the completion stage
  */
-static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
+static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
 {
     int pages = -1;
     uint8_t *p;
@@ -1307,8 +1306,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
     XBZRLE_cache_lock();
     if (rs->xbzrle_enabled && !migration_in_postcopy()) {
         pages = save_xbzrle_page(rs, &p, current_addr, block,
-                                 offset, last_stage);
-        if (!last_stage) {
+                                 offset);
+        if (!rs->last_stage) {
             /* Can't send this cached data async, since the cache page
              * might get updated before it gets to the wire
              */
@@ -2129,10 +2128,8 @@ static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
  *
  * @rs: current RAM state
  * @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
  */
-static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
-                                bool last_stage)
+static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss)
 {
     RAMBlock *block = pss->block;
     ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
@@ -2171,7 +2168,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
         return ram_save_multifd_page(rs, block, offset);
     }
 
-    return ram_save_page(rs, pss, last_stage);
+    return ram_save_page(rs, pss);
 }
 
 /**
@@ -2190,10 +2187,8 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
  * @rs: current RAM state
  * @ms: current migration state
  * @pss: data about the page we want to send
- * @last_stage: if we are at the completion stage
  */
-static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
-                              bool last_stage)
+static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
 {
     int tmppages, pages = 0;
     size_t pagesize_bits =
@@ -2211,7 +2206,7 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
     do {
         /* Check if the page is dirty and, if so, send it */
         if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
-            tmppages = ram_save_target_page(rs, pss, last_stage);
+            tmppages = ram_save_target_page(rs, pss);
             if (tmppages < 0) {
                 return tmppages;
             }
@@ -2245,13 +2240,11 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
  *           or negative on error
  *
  * @rs: current RAM state
- * @last_stage: if we are at the completion stage
  *
  * On systems where host-page-size > target-page-size it will send all the
  * pages in a host page that are dirty.
  */
-
-static int ram_find_and_save_block(RAMState *rs, bool last_stage)
+static int ram_find_and_save_block(RAMState *rs)
 {
     PageSearchStatus pss;
     int pages = 0;
@@ -2280,7 +2273,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage)
         }
 
         if (found) {
-            pages = ram_save_host_page(rs, &pss, last_stage);
+            pages = ram_save_host_page(rs, &pss);
         }
     } while (!pages && again);
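
For context, this loop keeps scanning until either a page was actually sent or the dirty bitmap has nothing left; dropping the parameter leaves the loop shape untouched. A sketch of that shape, with hypothetical helpers standing in for find_dirty_block() and ram_save_host_page() (signatures are assumptions, not QEMU's):

    #include <stdbool.h>

    struct State { bool last_stage; };
    struct Search { int block; int page; };

    /* Hypothetical helpers; declared only, to keep the sketch short. */
    bool find_dirty_block(struct State *s, struct Search *pss, bool *again);
    int save_host_page(struct State *s, struct Search *pss);

    static int find_and_save_block(struct State *s)
    {
        struct Search pss = {0};
        int pages = 0;
        bool again;

        do {
            again = false;
            if (find_dirty_block(s, &pss, &again)) {
                pages = save_host_page(s, &pss); /* reads s->last_stage itself */
            }
        } while (!pages && again);  /* stop once something was sent, or done */

        return pages;
    }
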
@@ -3080,7 +3073,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
                 break;
             }
 
-            pages = ram_find_and_save_block(rs, false);
+            pages = ram_find_and_save_block(rs);
             /* no more pages to send */
             if (pages == 0) {
                 done = 1;
@@ -3160,6 +3153,8 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
     RAMState *rs = *temp;
     int ret = 0;
 
+    rs->last_stage = !migration_in_colo_state();
+
     WITH_RCU_READ_LOCK_GUARD() {
         if (!migration_in_postcopy()) {
             migration_bitmap_sync_precopy(rs);
@@ -3173,7 +3168,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         while (true) {
             int pages;
 
-            pages = ram_find_and_save_block(rs, !migration_in_colo_state());
+            pages = ram_find_and_save_block(rs);
             /* no more blocks to send */
             if (pages == 0) {
                 break;
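
The two hunks above in ram_save_complete() are where the flag is now computed, exactly once: completion is the last stage except under COLO, which keeps taking checkpoints afterwards, so the per-call !migration_in_colo_state() argument collapses into a single assignment. Roughly, with hypothetical names standing in for the QEMU functions:

    #include <stdbool.h>

    struct State { bool last_stage; };

    bool in_colo_state(void);                 /* stand-in for migration_in_colo_state() */
    int find_and_save_block(struct State *s); /* stand-in for ram_find_and_save_block() */

    /* Sketch of the completion path: set the flag once, then drain. */
    static void save_complete(struct State *rs)
    {
        /* COLO keeps checkpointing after "completion", so it is the one
         * case that is not a true last stage. */
        rs->last_stage = !in_colo_state();

        while (find_and_save_block(rs) > 0) {
            /* every page-saving callee now reads rs->last_stage */
        }
    }
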