Diffstat (limited to 'migration')
-rw-r--r--   migration/ram.c   95
1 files changed, 76 insertions, 19 deletions
diff --git a/migration/ram.c b/migration/ram.c
index f4cd9038f4..4d7b50ef79 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -481,6 +481,11 @@ struct PageSearchStatus {
      * postcopy pages via postcopy preempt channel.
      */
     bool postcopy_target_channel;
+    /* Whether we're sending a host page */
+    bool host_page_sending;
+    /* The start/end of current host page.  Only valid if host_page_sending==true */
+    unsigned long host_page_start;
+    unsigned long host_page_end;
 };
 typedef struct PageSearchStatus PageSearchStatus;
 
@@ -858,26 +863,38 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
 }
 
 /**
- * migration_bitmap_find_dirty: find the next dirty page from start
+ * pss_find_next_dirty: find the next dirty page of current ramblock
  *
- * Returns the page offset within memory region of the start of a dirty page
+ * This function updates pss->page to point to the next dirty page index
+ * within the ramblock to migrate, or the end of ramblock when nothing
+ * found.  Note that when pss->host_page_sending==true it means we're
+ * during sending a host page, so we won't look for dirty page that is
+ * outside the host page boundary.
  *
- * @rs: current RAM state
- * @rb: RAMBlock where to search for dirty pages
- * @start: page where we start the search
+ * @pss: the current page search status
  */
-static inline
-unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
-                                          unsigned long start)
+static void pss_find_next_dirty(PageSearchStatus *pss)
 {
+    RAMBlock *rb = pss->block;
     unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
     unsigned long *bitmap = rb->bmap;
 
     if (ramblock_is_ignored(rb)) {
-        return size;
+        /* Points directly to the end, so we know no dirty page */
+        pss->page = size;
+        return;
+    }
+
+    /*
+     * If during sending a host page, only look for dirty pages within the
+     * current host page being send.
+     */
+    if (pss->host_page_sending) {
+        assert(pss->host_page_end);
+        size = MIN(size, pss->host_page_end);
     }
 
-    return find_next_bit(bitmap, size, start);
+    pss->page = find_next_bit(bitmap, size, pss->page);
 }
 
 static void migration_clear_memory_region_dirty_bitmap(RAMBlock *rb,
@@ -1563,7 +1580,9 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
     pss->postcopy_requested = false;
     pss->postcopy_target_channel = RAM_CHANNEL_PRECOPY;
 
-    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
+    /* Update pss->page for the next dirty bit in ramblock */
+    pss_find_next_dirty(pss);
+
     if (pss->complete_round && pss->block == rs->last_seen_block &&
         pss->page >= rs->last_page) {
         /*
@@ -2452,6 +2471,44 @@ static void postcopy_preempt_reset_channel(RAMState *rs)
     }
 }
 
+/* Should be called before sending a host page */
+static void pss_host_page_prepare(PageSearchStatus *pss)
+{
+    /* How many guest pages are there in one host page? */
+    size_t guest_pfns = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
+
+    pss->host_page_sending = true;
+    pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
+    pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
+}
+
+/*
+ * Whether the page pointed by PSS is within the host page being sent.
+ * Must be called after a previous pss_host_page_prepare().
+ */
+static bool pss_within_range(PageSearchStatus *pss)
+{
+    ram_addr_t ram_addr;
+
+    assert(pss->host_page_sending);
+
+    /* Over host-page boundary? */
+    if (pss->page >= pss->host_page_end) {
+        return false;
+    }
+
+    ram_addr = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
+
+    return offset_in_ramblock(pss->block, ram_addr);
+}
+
+static void pss_host_page_finish(PageSearchStatus *pss)
+{
+    pss->host_page_sending = false;
+    /* This is not needed, but just to reset it */
+    pss->host_page_start = pss->host_page_end = 0;
+}
+
 /**
  * ram_save_host_page: save a whole host page
  *
@@ -2479,8 +2536,6 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
     int tmppages, pages = 0;
     size_t pagesize_bits =
         qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
-    unsigned long hostpage_boundary =
-        QEMU_ALIGN_UP(pss->page + 1, pagesize_bits);
     unsigned long start_page = pss->page;
     int res;
 
@@ -2493,6 +2548,9 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
         postcopy_preempt_choose_channel(rs, pss);
     }
 
+    /* Update host page boundary information */
+    pss_host_page_prepare(pss);
+
     do {
         if (postcopy_needs_preempt(rs, pss)) {
             postcopy_do_preempt(rs, pss);
@@ -2530,15 +2588,14 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss)
         }
 
        if (tmppages < 0) {
+            pss_host_page_finish(pss);
            return tmppages;
        }
 
-        pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
-    } while ((pss->page < hostpage_boundary) &&
-             offset_in_ramblock(pss->block,
-                                ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
-    /* The offset we leave with is the min boundary of host page and block */
-    pss->page = MIN(pss->page, hostpage_boundary);
+        pss_find_next_dirty(pss);
+    } while (pss_within_range(pss));
+
+    pss_host_page_finish(pss);
 
     /*
      * When with postcopy preempt mode, flush the data as soon as possible for
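
To make the boundary arithmetic above easier to follow: qemu_ram_pagesize() returns the host backing page size (for example 2MB with hugetlbfs), while TARGET_PAGE_BITS describes the guest page size (typically 4KB), so one host page can span many guest pages and ram_save_host_page() has to send every dirty guest page inside that span before moving on. Below is a minimal standalone sketch of that logic, not QEMU code: the page sizes, the toy bitmap and all helper names apart from the two rounding macros are invented for illustration.

    /*
     * Standalone sketch (not QEMU code): illustrates the host-page boundary
     * arithmetic of pss_host_page_prepare() and the clamped dirty search of
     * pss_find_next_dirty().  Everything here is a toy stand-in.
     */
    #include <assert.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS  12                   /* assume 4K guest pages */
    #define HOST_PAGE_SIZE    (2UL * 1024 * 1024)  /* assume a 2M hugepage backend */
    #define NR_PAGES          4096                 /* toy ramblock of 4096 guest pages */

    /* Plain-arithmetic stand-ins for QEMU's ROUND_DOWN/ROUND_UP helpers */
    #define ROUND_DOWN(n, m)  ((n) / (m) * (m))
    #define ROUND_UP(n, m)    ROUND_DOWN((n) + (m) - 1, (m))

    static bool dirty[NR_PAGES];                   /* toy dirty bitmap */

    struct toy_pss {                               /* trimmed-down PageSearchStatus */
        unsigned long page;
        bool host_page_sending;
        unsigned long host_page_start, host_page_end;
    };

    /* Mirrors pss_host_page_prepare(): remember the host-page range being sent */
    static void host_page_prepare(struct toy_pss *pss)
    {
        unsigned long guest_pfns = HOST_PAGE_SIZE >> TARGET_PAGE_BITS;   /* 512 */

        pss->host_page_sending = true;
        pss->host_page_start = ROUND_DOWN(pss->page, guest_pfns);
        pss->host_page_end = ROUND_UP(pss->page + 1, guest_pfns);
    }

    /* Mirrors pss_find_next_dirty(): linear scan clamped to the host page */
    static void find_next_dirty(struct toy_pss *pss)
    {
        unsigned long limit = NR_PAGES;

        if (pss->host_page_sending) {
            assert(pss->host_page_end);
            if (pss->host_page_end < limit) {
                limit = pss->host_page_end;
            }
        }
        while (pss->page < limit && !dirty[pss->page]) {
            pss->page++;
        }
    }

    int main(void)
    {
        struct toy_pss pss = { .page = 700 };      /* first dirty page to send */

        dirty[700] = dirty[800] = dirty[1500] = true;

        host_page_prepare(&pss);
        printf("host page range: [%lu, %lu)\n",
               pss.host_page_start, pss.host_page_end);  /* prints [512, 1024) */

        /* Send every dirty guest page of this host page; 1500 is left for later */
        while (pss.page < pss.host_page_end) {     /* roughly what pss_within_range() checks */
            printf("send guest page %lu\n", pss.page);
            dirty[pss.page] = false;               /* sending clears the dirty bit */
            find_next_dirty(&pss);
        }

        pss.host_page_sending = false;             /* pss_host_page_finish() equivalent */
        return 0;
    }

With pss.page == 700 the prepared range is [512, 1024): pages 700 and 800 are sent, while page 1500 sits past host_page_end and is deliberately skipped until the next host page is prepared, which is exactly why the clamped search can replace the old hostpage_boundary/MIN() bookkeeping in ram_save_host_page().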