From 3d0684b2ad82a5dde68e3f08b0d7786dccaf619c Mon Sep 17 00:00:00 2001
From: Juan Quintela
Date: Thu, 23 Mar 2017 15:06:39 +0100
Subject: ram: Update all functions comments

Add doc comments for the existing functions and rewrite them in a
common style.

Signed-off-by: Juan Quintela
Reviewed-by: Peter Xu

--

Fix Peter Xu comments
Improve postcopy comments as per reviews.
---
 migration/ram.c | 348 ++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 227 insertions(+), 121 deletions(-)

diff --git a/migration/ram.c b/migration/ram.c
index de1e0a3b18..652abe4dff 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -96,11 +96,17 @@ static void XBZRLE_cache_unlock(void)
     qemu_mutex_unlock(&XBZRLE.lock);
 }
 
-/*
- * called from qmp_migrate_set_cache_size in main thread, possibly while
- * a migration is in progress.
- * A running migration maybe using the cache and might finish during this
- * call, hence changes to the cache are protected by XBZRLE.lock().
+/**
+ * xbzrle_cache_resize: resize the xbzrle cache
+ *
+ * This function is called from qmp_migrate_set_cache_size in main
+ * thread, possibly while a migration is in progress. A running
+ * migration may be using the cache and might finish during this call,
+ * hence changes to the cache are protected by XBZRLE.lock().
+ *
+ * Returns the new_size or negative in case of error.
+ *
+ * @new_size: new cache size
  */
 int64_t xbzrle_cache_resize(int64_t new_size)
 {
@@ -323,6 +329,7 @@ static inline void terminate_compression_threads(void)
     int idx, thread_count;
 
     thread_count = migrate_compress_threads();
+
     for (idx = 0; idx < thread_count; idx++) {
         qemu_mutex_lock(&comp_param[idx].mutex);
         comp_param[idx].quit = true;
@@ -383,11 +390,11 @@ void migrate_compress_threads_create(void)
 }
 
 /**
- * save_page_header: Write page header to wire
+ * save_page_header: write page header to wire
  *
  * If this is the 1st block, it also writes the block identification
  *
- * Returns: Number of bytes written
+ * Returns the number of bytes written
  *
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
@@ -410,11 +417,14 @@ static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
     return size;
 }
 
-/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
- * If guest dirty memory rate is reduced below the rate at which we can
- * transfer pages to the destination then we should be able to complete
- * migration. Some workloads dirty memory way too fast and will not effectively
- * converge, even with auto-converge.
+/**
+ * mig_throttle_guest_down: throttle down the guest
+ *
+ * Reduce amount of guest cpu execution to hopefully slow down memory
+ * writes. If guest dirty memory rate is reduced below the rate at
+ * which we can transfer pages to the destination then we should be
+ * able to complete migration. Some workloads dirty memory way too
+ * fast and will not effectively converge, even with auto-converge.
  */
 static void mig_throttle_guest_down(void)
 {
@@ -431,11 +441,16 @@ static void mig_throttle_guest_down(void)
     }
 }
 
-/* Update the xbzrle cache to reflect a page that's been sent as all 0.
+/**
+ * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
+ *
+ * @current_addr: address for the zero page
+ *
+ * Update the xbzrle cache to reflect a page that's been sent as all 0.
  * The important thing is that a stale (not-yet-0'd) page be replaced
  * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
- * when a small write is made into the 0'd page it gets XBZRLE sent
+ * when a small write is made into the 0'd page it gets XBZRLE sent.
  */
 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
 {
@@ -459,8 +474,8 @@ static void xbzrle_cache_zero_page(ram_addr_t current_addr)
  *          -1 means that xbzrle would be longer than normal
  *
  * @f: QEMUFile where to send the data
- * @current_data:
- * @current_addr:
+ * @current_data: pointer to the address of the page contents
+ * @current_addr: addr of the page
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
  * @last_stage: if we are at the completion stage
@@ -530,13 +545,17 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
     return 1;
 }
 
-/* Called with rcu_read_lock() to protect migration_bitmap
- * rb: The RAMBlock to search for dirty pages in
- * start: Start address (typically so we can continue from previous page)
- * ram_addr_abs: Pointer into which to store the address of the dirty page
- *               within the global ram_addr space
  *
- * Returns: byte offset within memory region of the start of a dirty page
+/**
+ * migration_bitmap_find_dirty: find the next dirty page from start
+ *
+ * Called with rcu_read_lock() to protect migration_bitmap
+ *
+ * Returns the byte offset within memory region of the start of a dirty page
+ *
+ * @rb: RAMBlock where to search for dirty pages
+ * @start: starting address (typically so we can continue from previous page)
+ * @ram_addr_abs: pointer into which to store the address of the dirty page
+ *                within the global ram_addr space
  */
 static inline
 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
                                        ram_addr_t start,
                                        ram_addr_t *ram_addr_abs)
@@ -600,10 +619,14 @@ static void migration_bitmap_sync_init(void)
     iterations_prev = 0;
 }
 
-/* Returns a summary bitmap of the page sizes of all RAMBlocks;
- * for VMs with just normal pages this is equivalent to the
- * host page size. If it's got some huge pages then it's the OR
- * of all the different page sizes.
+/**
+ * ram_pagesize_summary: calculate all the pagesizes of a VM
+ *
+ * Returns a summary bitmap of the page sizes of all RAMBlocks
+ *
+ * For VMs with just normal pages this is equivalent to the host page
+ * size. If it's got some huge pages then it's the OR of all the
+ * different page sizes.
  */
 uint64_t ram_pagesize_summary(void)
 {
@@ -693,9 +716,9 @@ static void migration_bitmap_sync(void)
 }
 
 /**
- * save_zero_page: Send the zero page to the stream
+ * save_zero_page: send the zero page to the stream
  *
- * Returns: Number of pages written.
+ * Returns the number of pages written.
  *
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
@@ -731,14 +754,14 @@ static void ram_release_pages(MigrationState *ms, const char *block_name,
 }
 
 /**
- * ram_save_page: Send the given page to the stream
+ * ram_save_page: send the given page to the stream
  *
- * Returns: Number of pages written.
+ * Returns the number of pages written.
  *          < 0 - error
  *          >=0 - Number of pages written - this might legally be 0
  *                if xbzrle noticed the page was the same.
  *
- * @ms: The current migration state.
+ * @ms: current migration state
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
@@ -921,9 +944,9 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
 /**
  * ram_save_compressed_page: compress the given page and send it to the stream
  *
- * Returns: Number of pages written.
+ * Returns the number of pages written.
  *
- * @ms: The current migration state.
+ * @ms: current migration state
  * @f: QEMUFile where to send the data
  * @block: block that contains the page we want to send
  * @offset: offset inside the block for the page
@@ -1000,17 +1023,17 @@ static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f,
     return pages;
 }
 
-/*
- * Find the next dirty page and update any state associated with
- * the search process.
- *
- * Returns: True if a page is found
- *
- * @f: Current migration stream.
- * @pss: Data about the state of the current dirty page scan.
- * @*again: Set to false if the search has scanned the whole of RAM
- * *ram_addr_abs: Pointer into which to store the address of the dirty page
- *               within the global ram_addr space
+/**
+ * find_dirty_block: find the next dirty page and update any state
+ *                   associated with the search process.
+ *
+ * Returns if a page is found
+ *
+ * @f: QEMUFile where to send the data
+ * @pss: data about the state of the current dirty page scan
+ * @again: set to false if the search has scanned the whole of RAM
+ * @ram_addr_abs: pointer into which to store the address of the dirty page
+ *                within the global ram_addr space
  */
 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
                              bool *again, ram_addr_t *ram_addr_abs)
@@ -1055,13 +1078,17 @@ static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
     }
 }
 
-/*
+/**
+ * unqueue_page: gets a page off the queue
+ *
  * Helper for 'get_queued_page' - gets a page off the queue
- * ms: MigrationState in
- * *offset: Used to return the offset within the RAMBlock
- * ram_addr_abs: global offset in the dirty/sent bitmaps
  *
- * Returns: block (or NULL if none available)
+ * Returns the block of the page (or NULL if none available)
+ *
+ * @ms: current migration state
+ * @offset: used to return the offset within the RAMBlock
+ * @ram_addr_abs: pointer into which to store the address of the dirty page
+ *                within the global ram_addr space
  */
 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                               ram_addr_t *ram_addr_abs)
@@ -1091,15 +1118,17 @@ static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
     return block;
 }
 
-/*
- * Unqueue a page from the queue fed by postcopy page requests; skips pages
- * that are already sent (!dirty)
  *
- * ms: MigrationState in
- * pss: PageSearchStatus structure updated with found block/offset
- * ram_addr_abs: global offset in the dirty/sent bitmaps
  *
- * Returns: true if a queued page is found
+/**
+ * get_queued_page: unqueue a page from the postcopy requests
+ *
+ * Skips pages that are already sent (!dirty)
+ *
+ * Returns if a queued page is found
+ *
+ * @ms: current migration state
+ * @pss: data about the state of the current dirty page scan
+ * @ram_addr_abs: pointer into which to store the address of the dirty page
+ *                within the global ram_addr space
  */
 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
                             ram_addr_t *ram_addr_abs)
@@ -1157,11 +1186,12 @@ static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
 }
 
 /**
- * flush_page_queue: Flush any remaining pages in the ram request queue
- *    it should be empty at the end anyway, but in error cases there may be
- *    some left.
+ * flush_page_queue: flush any remaining pages in the ram request queue
  *
- * ms: MigrationState
+ * It should be empty at the end anyway, but in error cases there may
+ * be some left. in case that there is any page left, we drop it.
+ *
+ * @ms: current migration state
  */
 void flush_page_queue(MigrationState *ms)
 {
@@ -1179,12 +1209,17 @@ void flush_page_queue(MigrationState *ms)
 }
 
 /**
- * Queue the pages for transmission, e.g. a request from postcopy destination
- * ms: MigrationStatus in which the queue is held
- * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
- * start: Offset from the start of the RAMBlock
- * len: Length (in bytes) to send
- * Return: 0 on success
+ * ram_save_queue_pages: queue the page for transmission
+ *
+ * A request from postcopy destination for example.
+ *
+ * Returns zero on success or negative on error
+ *
+ * @ms: current migration state
+ * @rbname: Name of the RAMBlock of the request. NULL means the
+ *          same that last one.
+ * @start: starting address from the start of the RAMBlock
+ * @len: length (in bytes) to send
  */
 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                          ram_addr_t start, ram_addr_t len)
@@ -1243,17 +1278,16 @@ err:
 }
 
 /**
- * ram_save_target_page: Save one target page
+ * ram_save_target_page: save one target page
  *
+ * Returns the number of pages written
+ *
+ * @ms: current migration state
  * @f: QEMUFile where to send the data
- * @block: pointer to block that contains the page we want to send
- * @offset: offset inside the block for the page;
+ * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
- * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
- *
- * Returns: Number of pages written.
+ * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
  */
 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
                                 PageSearchStatus *pss,
@@ -1295,20 +1329,19 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
 }
 
 /**
- * ram_save_host_page: Starting at *offset send pages up to the end
- *                     of the current host page. It's valid for the initial
- *                     offset to point into the middle of a host page
- *                     in which case the remainder of the hostpage is sent.
- *                     Only dirty target pages are sent.
- *                     Note that the host page size may be a huge page for this
- *                     block.
+ * ram_save_host_page: save a whole host page
  *
- * Returns: Number of pages written.
+ * Starting at *offset send pages up to the end of the current host
+ * page. It's valid for the initial offset to point into the middle of
+ * a host page in which case the remainder of the hostpage is sent.
+ * Only dirty target pages are sent. Note that the host page size may
+ * be a huge page for this block.
+ *
+ * Returns the number of pages written or negative on error
+ *
+ * @ms: current migration state
  * @f: QEMUFile where to send the data
- * @block: pointer to block that contains the page we want to send
- * @offset: offset inside the block for the page; updated to last target page
- *          sent
+ * @pss: data about the page we want to send
  * @last_stage: if we are at the completion stage
  * @bytes_transferred: increase it with the number of transferred bytes
  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
@@ -1340,12 +1373,11 @@ static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
 }
 
 /**
- * ram_find_and_save_block: Finds a dirty page and sends it to f
+ * ram_find_and_save_block: finds a dirty page and sends it to f
  *
  * Called within an RCU critical section.
  *
- * Returns: The number of pages written
- *          0 means no dirty pages
+ * Returns the number of pages written where zero means no dirty pages
  *
  * @f: QEMUFile where to send the data
  * @last_stage: if we are at the completion stage
@@ -1580,12 +1612,19 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
     }
 }
 
-/*
+/**
+ * postcopy_send_discard_bm_ram: discard a RAMBlock
+ *
+ * Returns zero on success
+ *
  * Callback from postcopy_each_ram_send_discard for each RAMBlock
  * Note: At this point the 'unsentmap' is the processed bitmap combined
  *       with the dirtymap; so a '1' means it's either dirty or unsent.
- * start,length: Indexes into the bitmap for the first bit
- *               representing the named block and length in target-pages
+ *
+ * @ms: current migration state
+ * @pds: state for postcopy
+ * @start: RAMBlock starting page
+ * @length: RAMBlock size
  */
 static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                         PostcopyDiscardState *pds,
@@ -1621,13 +1660,18 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms,
     return 0;
 }
 
-/*
+/**
+ * postcopy_each_ram_send_discard: discard all RAMBlocks
+ *
+ * Returns 0 for success or negative for error
+ *
  * Utility for the outgoing postcopy code.
  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
  *   passing it bitmap indexes and name.
- * Returns: 0 on success
  * (qemu_ram_foreach_block ends up passing unscaled lengths
  *  which would mean postcopy code would have to deal with target page)
+ *
+ * @ms: current migration state
  */
 static int postcopy_each_ram_send_discard(MigrationState *ms)
 {
@@ -1656,17 +1700,21 @@ static int postcopy_each_ram_send_discard(MigrationState *ms)
     return 0;
 }
 
-/*
- * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
- * the two bitmaps, that are similar, but one is inverted.
+/**
+ * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
+ *
+ * Helper for postcopy_chunk_hostpages; it's called twice to
+ * canonicalize the two bitmaps, that are similar, but one is
+ * inverted.
  *
- * We search for runs of target-pages that don't start or end on a
- * host page boundary;
- * unsent_pass=true: Cleans up partially unsent host pages by searching
- *                   the unsentmap
- * unsent_pass=false: Cleans up partially dirty host pages by searching
- *                    the main migration bitmap
+ * Postcopy requires that all target pages in a hostpage are dirty or
+ * clean, not a mix. This function canonicalizes the bitmaps.
  *
+ * @ms: current migration state
+ * @unsent_pass: if true we need to canonicalize partially unsent host pages
+ *               otherwise we need to canonicalize partially dirty host pages
+ * @block: block that contains the page we want to canonicalize
+ * @pds: state for postcopy
  */
 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                           RAMBlock *block,
@@ -1784,14 +1832,18 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
     }
 }
 
-/*
+/**
+ * postcopy_chunk_hostpages: discard any partially sent host page
+ *
  * Utility for the outgoing postcopy code.
  *
  * Discard any partially sent host-page size chunks, mark any partially
  * dirty host-page size chunks as all dirty.  In this case the host-page
  * is the host-page for the particular RAMBlock, i.e. it might be a huge page
  *
- * Returns: 0 on success
+ * Returns zero on success
+ *
+ * @ms: current migration state
  */
 static int postcopy_chunk_hostpages(MigrationState *ms)
 {
@@ -1822,7 +1874,11 @@ static int postcopy_chunk_hostpages(MigrationState *ms)
     return 0;
 }
 
-/*
+/**
+ * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
+ *
+ * Returns zero on success
+ *
  * Transmit the set of pages to be discarded after precopy to the target
  * these are pages that:
  *     a) Have been previously transmitted but are now dirty again
  *     b) Pages that have never been transmitted, this ensures that
  *        any pages on the destination that have been mapped by background
  *        tasks get discarded (transparent huge pages is the specific concern)
  * Hopefully this is pretty sparse
+ *
+ * @ms: current migration state
  */
 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
 {
@@ -1878,13 +1936,16 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms)
     return ret;
 }
 
-/*
- * At the start of the postcopy phase of migration, any now-dirty
- * precopied pages are discarded.
  *
- * start, length describe a byte address range within the RAMBlock
  *
- * Returns 0 on success.
+/**
+ * ram_discard_range: discard dirtied pages at the beginning of postcopy
+ *
+ * Returns zero on success
+ *
+ * @mis: current migration incoming state
+ * @block_name: Name of the RAMBlock of the request. NULL means the
+ *              same that last one.
+ * @start: RAMBlock starting page
+ * @length: RAMBlock size
  */
 int ram_discard_range(MigrationIncomingState *mis,
                       const char *block_name,
@@ -1987,12 +2048,21 @@ static int ram_save_init_globals(void)
     return 0;
 }
 
-/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
+/*
+ * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
  * long-running RCU critical section. When rcu-reclaims in the code
  * start to become numerous it will be necessary to reduce the
  * granularity of these critical sections.
  */
+/**
+ * ram_save_setup: Setup RAM for migration
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: RAMState pointer
+ */
 static int ram_save_setup(QEMUFile *f, void *opaque)
 {
     RAMBlock *block;
@@ -2027,6 +2097,14 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     return 0;
 }
 
+/**
+ * ram_save_iterate: iterative stage for migration
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: RAMState pointer
+ */
 static int ram_save_iterate(QEMUFile *f, void *opaque)
 {
     int ret;
@@ -2091,7 +2169,16 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
     return done;
 }
 
-/* Called with iothread lock */
+/**
+ * ram_save_complete: function called to send the remaining amount of ram
+ *
+ * Returns zero to indicate success
+ *
+ * Called with iothread lock
+ *
+ * @f: QEMUFile where to send the data
+ * @opaque: RAMState pointer
+ */
 static int ram_save_complete(QEMUFile *f, void *opaque)
 {
     rcu_read_lock();
@@ -2185,17 +2272,17 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     return 0;
 }
 
-/* Must be called from within a rcu critical section.
+/**
+ * ram_block_from_stream: read a RAMBlock id from the migration stream
+ *
+ * Must be called from within a rcu critical section.
+ *
  * Returns a pointer from within the RCU-protected ram_list.
- */
-/*
- * Read a RAMBlock ID from the stream f.
  *
- * f: Stream to read from
- * flags: Page flags (mostly to see if it's a continuation of previous block)
+ * @f: QEMUFile where to read the data from
+ * @flags: Page flags (mostly to see if it's a continuation of previous block)
  */
-static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
-                                              int flags)
+static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
 {
     static RAMBlock *block = NULL;
     char id[256];
@@ -2232,9 +2319,15 @@ static inline void *host_from_ram_block_offset(RAMBlock *block,
     return block->host + offset;
 }
 
-/*
+/**
+ * ram_handle_compressed: handle the zero page case
+ *
  * If a page (or a whole RDMA chunk) has been
  * determined to be zero, then zap it.
+ *
+ * @host: host address for the zero page
+ * @ch: what the page is filled from. We only support zero
+ * @size: size of the zero page
  */
 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
 {
@@ -2373,9 +2466,16 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
-/*
- * Allocate data structures etc needed by incoming migration with postcopy-ram
- * postcopy-ram's similarly names postcopy_ram_incoming_init does the work
+/**
+ * ram_postcopy_incoming_init: allocate postcopy data structures
+ *
+ * Returns 0 for success and negative if there was an error
+ *
+ * @mis: current migration incoming state
+ *
+ * Allocate data structures etc needed by incoming migration with
+ * postcopy-ram. postcopy-ram's similarly named
+ * postcopy_ram_incoming_init does the work.
  */
 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
 {
@@ -2384,9 +2484,15 @@ int ram_postcopy_incoming_init(MigrationIncomingState *mis)
     return postcopy_ram_incoming_init(mis, ram_pages);
 }
 
-/*
+/**
+ * ram_load_postcopy: load a page in postcopy case
+ *
+ * Returns 0 for success or -errno in case of error
+ *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
+ * + * @f: QEMUFile where to send the data */ static int ram_load_postcopy(QEMUFile *f) { -- cgit v1.2.3 From 5e58f968f432cc079e2c53c07fdf0d4801d2bde5 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 3 Apr 2017 22:06:54 +0200 Subject: ram: Rename flush_page_queue() to migration_page_queue_free() It reflects better what it does. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- include/migration/migration.h | 2 +- migration/migration.c | 2 +- migration/ram.c | 5 +++-- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 5720c884f4..24487be295 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -377,7 +377,7 @@ void savevm_skip_configuration(void); int global_state_store(void); void global_state_store_running(void); -void flush_page_queue(MigrationState *ms); +void migration_page_queue_free(MigrationState *ms); int ram_save_queue_pages(MigrationState *ms, const char *rbname, ram_addr_t start, ram_addr_t len); uint64_t ram_pagesize_summary(void); diff --git a/migration/migration.c b/migration/migration.c index ad4036fdb1..4bee05de9e 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -955,7 +955,7 @@ static void migrate_fd_cleanup(void *opaque) qemu_bh_delete(s->cleanup_bh); s->cleanup_bh = NULL; - flush_page_queue(s); + migration_page_queue_free(s); if (s->to_dst_file) { trace_migrate_fd_cleanup(); diff --git a/migration/ram.c b/migration/ram.c index 652abe4dff..356f8ce11b 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1186,14 +1186,15 @@ static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss, } /** - * flush_page_queue: flush any remaining pages in the ram request queue + * migration_page_queue_free: drop any remaining pages in the ram + * request queue * * It should be empty at the end anyway, but in error cases there may * be some left. in case that there is any page left, we drop it. * * @ms: current migration state */ -void flush_page_queue(MigrationState *ms) +void migration_page_queue_free(MigrationState *ms) { struct MigrationSrcPageRequest *mspr, *next_mspr; /* This queue generally should be empty - but in the case of a failed -- cgit v1.2.3 From 36449157267e7269bed698c0590a3e6feb1edba1 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Thu, 23 Mar 2017 15:11:59 +0100 Subject: ram: Rename block_name to rbname So all places are consistent on the naming of a block name parameter. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- migration/ram.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 356f8ce11b..776f8a79df 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -743,14 +743,14 @@ static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, return pages; } -static void ram_release_pages(MigrationState *ms, const char *block_name, +static void ram_release_pages(MigrationState *ms, const char *rbname, uint64_t offset, int pages) { if (!migrate_release_ram() || !migration_in_postcopy(ms)) { return; } - ram_discard_range(NULL, block_name, offset, pages << TARGET_PAGE_BITS); + ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS); } /** @@ -1943,25 +1943,24 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) * Returns zero on success * * @mis: current migration incoming state - * @block_name: Name of the RAMBlock of the request. NULL means the - * same that last one. 
+ * @rbname: name of the RAMBlock of the request. NULL means the + * same that last one. * @start: RAMBlock starting page * @length: RAMBlock size */ int ram_discard_range(MigrationIncomingState *mis, - const char *block_name, + const char *rbname, uint64_t start, size_t length) { int ret = -1; - trace_ram_discard_range(block_name, start, length); + trace_ram_discard_range(rbname, start, length); rcu_read_lock(); - RAMBlock *rb = qemu_ram_block_by_name(block_name); + RAMBlock *rb = qemu_ram_block_by_name(rbname); if (!rb) { - error_report("ram_discard_range: Failed to find block '%s'", - block_name); + error_report("ram_discard_range: Failed to find block '%s'", rbname); goto err; } -- cgit v1.2.3 From 6f37bb8bf31e98be5cbe536aec881e65ec985526 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:26:29 +0100 Subject: ram: Create RAMState We create a struct where to put all the ram state Start with the following fields: last_seen_block, last_sent_block, last_offset, last_version and ram_bulk_stage are globals that are really related together. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu -- Fix typo and warnings --- migration/ram.c | 140 +++++++++++++++++++++++++++++++++----------------------- 1 file changed, 83 insertions(+), 57 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 776f8a79df..3eb4430ce5 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -142,6 +142,23 @@ out: return ret; } +/* State of RAM for migration */ +struct RAMState { + /* Last block that we have visited searching for dirty pages */ + RAMBlock *last_seen_block; + /* Last block from where we have sent data */ + RAMBlock *last_sent_block; + /* Last offset we have sent data from */ + ram_addr_t last_offset; + /* last ram version we have seen */ + uint32_t last_version; + /* We are in the first round */ + bool ram_bulk_stage; +}; +typedef struct RAMState RAMState; + +static RAMState ram_state; + /* accounting for migration statistics */ typedef struct AccountingInfo { uint64_t dup_pages; @@ -217,16 +234,8 @@ uint64_t xbzrle_mig_pages_overflow(void) return acct_info.xbzrle_overflows; } -/* This is the last block that we have visited serching for dirty pages - */ -static RAMBlock *last_seen_block; -/* This is the last block from where we have sent data */ -static RAMBlock *last_sent_block; -static ram_addr_t last_offset; static QemuMutex migration_bitmap_mutex; static uint64_t migration_dirty_pages; -static uint32_t last_version; -static bool ram_bulk_stage; /* used by the search for pages to send */ struct PageSearchStatus { @@ -444,6 +453,7 @@ static void mig_throttle_guest_down(void) /** * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache * + * @rs: current RAM state * @current_addr: address for the zero page * * Update the xbzrle cache to reflect a page that's been sent as all 0. @@ -452,9 +462,9 @@ static void mig_throttle_guest_down(void) * As a bonus, if the page wasn't in the cache it gets added so that * when a small write is made into the 0'd page it gets XBZRLE sent. 
*/ -static void xbzrle_cache_zero_page(ram_addr_t current_addr) +static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) { - if (ram_bulk_stage || !migrate_use_xbzrle()) { + if (rs->ram_bulk_stage || !migrate_use_xbzrle()) { return; } @@ -552,13 +562,14 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, * * Returns the byte offset within memory region of the start of a dirty page * + * @rs: current RAM state * @rb: RAMBlock where to search for dirty pages * @start: starting address (typically so we can continue from previous page) * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ static inline -ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb, +ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, ram_addr_t start, ram_addr_t *ram_addr_abs) { @@ -571,7 +582,7 @@ ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb, unsigned long next; bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; - if (ram_bulk_stage && nr > base) { + if (rs->ram_bulk_stage && nr > base) { next = nr + 1; } else { next = find_next_bit(bitmap, size, nr); @@ -761,6 +772,7 @@ static void ram_release_pages(MigrationState *ms, const char *rbname, * >=0 - Number of pages written - this might legally be 0 * if xbzrle noticed the page was the same. * + * @rs: current RAM state * @ms: current migration state * @f: QEMUFile where to send the data * @block: block that contains the page we want to send @@ -768,8 +780,9 @@ static void ram_release_pages(MigrationState *ms, const char *rbname, * @last_stage: if we are at the completion stage * @bytes_transferred: increase it with the number of transferred bytes */ -static int ram_save_page(MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, - bool last_stage, uint64_t *bytes_transferred) +static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, + PageSearchStatus *pss, bool last_stage, + uint64_t *bytes_transferred) { int pages = -1; uint64_t bytes_xmit; @@ -795,7 +808,7 @@ static int ram_save_page(MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, current_addr = block->offset + offset; - if (block == last_sent_block) { + if (block == rs->last_sent_block) { offset |= RAM_SAVE_FLAG_CONTINUE; } if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { @@ -812,9 +825,9 @@ static int ram_save_page(MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale */ - xbzrle_cache_zero_page(current_addr); + xbzrle_cache_zero_page(rs, current_addr); ram_release_pages(ms, block->idstr, pss->offset, pages); - } else if (!ram_bulk_stage && + } else if (!rs->ram_bulk_stage && !migration_in_postcopy(ms) && migrate_use_xbzrle()) { pages = save_xbzrle_page(f, &p, current_addr, block, offset, last_stage, bytes_transferred); @@ -946,6 +959,7 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, * * Returns the number of pages written. 
* + * @rs: current RAM state * @ms: current migration state * @f: QEMUFile where to send the data * @block: block that contains the page we want to send @@ -953,7 +967,8 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, * @last_stage: if we are at the completion stage * @bytes_transferred: increase it with the number of transferred bytes */ -static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f, +static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, + QEMUFile *f, PageSearchStatus *pss, bool last_stage, uint64_t *bytes_transferred) { @@ -987,7 +1002,7 @@ static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f, * out, keeping this order is important, because the 'cont' flag * is used to avoid resending the block name. */ - if (block != last_sent_block) { + if (block != rs->last_sent_block) { flush_compressed_data(f); pages = save_zero_page(f, block, offset, p, bytes_transferred); if (pages == -1) { @@ -1029,19 +1044,20 @@ static int ram_save_compressed_page(MigrationState *ms, QEMUFile *f, * * Returns if a page is found * + * @rs: current RAM state * @f: QEMUFile where to send the data * @pss: data about the state of the current dirty page scan * @again: set to false if the search has scanned the whole of RAM * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ -static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss, +static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss, bool *again, ram_addr_t *ram_addr_abs) { - pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset, + pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, ram_addr_abs); - if (pss->complete_round && pss->block == last_seen_block && - pss->offset >= last_offset) { + if (pss->complete_round && pss->block == rs->last_seen_block && + pss->offset >= rs->last_offset) { /* * We've been once around the RAM and haven't found anything. * Give up. @@ -1058,7 +1074,7 @@ static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss, pss->block = QLIST_FIRST_RCU(&ram_list.blocks); /* Flag that we've looped */ pss->complete_round = true; - ram_bulk_stage = false; + rs->ram_bulk_stage = false; if (migrate_use_xbzrle()) { /* If xbzrle is on, stop using the data compression at this * point. In theory, xbzrle can do better than compression. @@ -1125,12 +1141,14 @@ static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, * * Returns if a queued page is found * + * @rs: current RAM state * @ms: current migration state * @pss: data about the state of the current dirty page scan * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ -static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss, +static bool get_queued_page(RAMState *rs, MigrationState *ms, + PageSearchStatus *pss, ram_addr_t *ram_addr_abs) { RAMBlock *block; @@ -1171,7 +1189,7 @@ static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss, * in (migration_bitmap_find_and_reset_dirty) that every page is * dirty, that's no longer true. 
*/ - ram_bulk_stage = false; + rs->ram_bulk_stage = false; /* * We want the background search to continue from the queued page @@ -1283,6 +1301,7 @@ err: * * Returns the number of pages written * + * @rs: current RAM state * @ms: current migration state * @f: QEMUFile where to send the data * @pss: data about the page we want to send @@ -1290,7 +1309,7 @@ err: * @bytes_transferred: increase it with the number of transferred bytes * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space */ -static int ram_save_target_page(MigrationState *ms, QEMUFile *f, +static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, bool last_stage, uint64_t *bytes_transferred, @@ -1302,11 +1321,11 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f, if (migration_bitmap_clear_dirty(dirty_ram_abs)) { unsigned long *unsentmap; if (compression_switch && migrate_use_compression()) { - res = ram_save_compressed_page(ms, f, pss, + res = ram_save_compressed_page(rs, ms, f, pss, last_stage, bytes_transferred); } else { - res = ram_save_page(ms, f, pss, last_stage, + res = ram_save_page(rs, ms, f, pss, last_stage, bytes_transferred); } @@ -1322,7 +1341,7 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f, * to the stream. */ if (res > 0) { - last_sent_block = pss->block; + rs->last_sent_block = pss->block; } } @@ -1340,6 +1359,7 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f, * * Returns the number of pages written or negative on error * + * @rs: current RAM state * @ms: current migration state * @f: QEMUFile where to send the data * @pss: data about the page we want to send @@ -1347,7 +1367,7 @@ static int ram_save_target_page(MigrationState *ms, QEMUFile *f, * @bytes_transferred: increase it with the number of transferred bytes * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space */ -static int ram_save_host_page(MigrationState *ms, QEMUFile *f, +static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, bool last_stage, uint64_t *bytes_transferred, @@ -1357,7 +1377,7 @@ static int ram_save_host_page(MigrationState *ms, QEMUFile *f, size_t pagesize = qemu_ram_pagesize(pss->block); do { - tmppages = ram_save_target_page(ms, f, pss, last_stage, + tmppages = ram_save_target_page(rs, ms, f, pss, last_stage, bytes_transferred, dirty_ram_abs); if (tmppages < 0) { return tmppages; @@ -1380,6 +1400,7 @@ static int ram_save_host_page(MigrationState *ms, QEMUFile *f, * * Returns the number of pages written where zero means no dirty pages * + * @rs: current RAM state * @f: QEMUFile where to send the data * @last_stage: if we are at the completion stage * @bytes_transferred: increase it with the number of transferred bytes @@ -1388,7 +1409,7 @@ static int ram_save_host_page(MigrationState *ms, QEMUFile *f, * pages in a host page that are dirty. 
*/ -static int ram_find_and_save_block(QEMUFile *f, bool last_stage, +static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage, uint64_t *bytes_transferred) { PageSearchStatus pss; @@ -1403,8 +1424,8 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage, return pages; } - pss.block = last_seen_block; - pss.offset = last_offset; + pss.block = rs->last_seen_block; + pss.offset = rs->last_offset; pss.complete_round = false; if (!pss.block) { @@ -1413,22 +1434,22 @@ static int ram_find_and_save_block(QEMUFile *f, bool last_stage, do { again = true; - found = get_queued_page(ms, &pss, &dirty_ram_abs); + found = get_queued_page(rs, ms, &pss, &dirty_ram_abs); if (!found) { /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(f, &pss, &again, &dirty_ram_abs); + found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs); } if (found) { - pages = ram_save_host_page(ms, f, &pss, + pages = ram_save_host_page(rs, ms, f, &pss, last_stage, bytes_transferred, dirty_ram_abs); } } while (!pages && again); - last_seen_block = pss.block; - last_offset = pss.offset; + rs->last_seen_block = pss.block; + rs->last_offset = pss.offset; return pages; } @@ -1510,13 +1531,13 @@ static void ram_migration_cleanup(void *opaque) XBZRLE_cache_unlock(); } -static void reset_ram_globals(void) +static void ram_state_reset(RAMState *rs) { - last_seen_block = NULL; - last_sent_block = NULL; - last_offset = 0; - last_version = ram_list.version; - ram_bulk_stage = true; + rs->last_seen_block = NULL; + rs->last_sent_block = NULL; + rs->last_offset = 0; + rs->last_version = ram_list.version; + rs->ram_bulk_stage = true; } #define MAX_WAIT 50 /* ms, half buffered_file limit */ @@ -1848,12 +1869,13 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, */ static int postcopy_chunk_hostpages(MigrationState *ms) { + RAMState *rs = &ram_state; struct RAMBlock *block; /* Easiest way to make sure we don't resume in the middle of a host-page */ - last_seen_block = NULL; - last_sent_block = NULL; - last_offset = 0; + rs->last_seen_block = NULL; + rs->last_sent_block = NULL; + rs->last_offset = 0; QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { unsigned long first = block->offset >> TARGET_PAGE_BITS; @@ -1972,7 +1994,7 @@ err: return ret; } -static int ram_save_init_globals(void) +static int ram_save_init_globals(RAMState *rs) { int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ @@ -2018,7 +2040,7 @@ static int ram_save_init_globals(void) qemu_mutex_lock_ramlist(); rcu_read_lock(); bytes_transferred = 0; - reset_ram_globals(); + ram_state_reset(rs); migration_bitmap_rcu = g_new0(struct BitmapRcu, 1); /* Skip setting bitmap if there is no RAM */ @@ -2065,11 +2087,12 @@ static int ram_save_init_globals(void) */ static int ram_save_setup(QEMUFile *f, void *opaque) { + RAMState *rs = opaque; RAMBlock *block; /* migration has already setup the bitmap, reuse it. 
*/ if (!migration_in_colo_state()) { - if (ram_save_init_globals() < 0) { + if (ram_save_init_globals(rs) < 0) { return -1; } } @@ -2107,14 +2130,15 @@ static int ram_save_setup(QEMUFile *f, void *opaque) */ static int ram_save_iterate(QEMUFile *f, void *opaque) { + RAMState *rs = opaque; int ret; int i; int64_t t0; int done = 0; rcu_read_lock(); - if (ram_list.version != last_version) { - reset_ram_globals(); + if (ram_list.version != rs->last_version) { + ram_state_reset(rs); } /* Read version before ram_list.blocks */ @@ -2127,7 +2151,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) while ((ret = qemu_file_rate_limit(f)) == 0) { int pages; - pages = ram_find_and_save_block(f, false, &bytes_transferred); + pages = ram_find_and_save_block(rs, f, false, &bytes_transferred); /* no more pages to sent */ if (pages == 0) { done = 1; @@ -2181,6 +2205,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) */ static int ram_save_complete(QEMUFile *f, void *opaque) { + RAMState *rs = opaque; + rcu_read_lock(); if (!migration_in_postcopy(migrate_get_current())) { @@ -2195,7 +2221,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) while (true) { int pages; - pages = ram_find_and_save_block(f, !migration_in_colo_state(), + pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(), &bytes_transferred); /* no more blocks to sent */ if (pages == 0) { @@ -2779,5 +2805,5 @@ static SaveVMHandlers savevm_ram_handlers = { void ram_mig_init(void) { qemu_mutex_init(&XBZRLE.lock); - register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL); + register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state); } -- cgit v1.2.3 From 8d820d6f675fe4c9391da4eb55561402c75fb05e Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:35:50 +0100 Subject: ram: Add dirty_rate_high_cnt to RAMState We need to add a parameter to several functions to make this work. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 3eb4430ce5..a59140bc0f 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -45,8 +45,6 @@ #include "qemu/rcu_queue.h" #include "migration/colo.h" -static int dirty_rate_high_cnt; - static uint64_t bitmap_sync_count; /***********************************************************/ @@ -154,6 +152,8 @@ struct RAMState { uint32_t last_version; /* We are in the first round */ bool ram_bulk_stage; + /* How many times we have dirty too many pages */ + int dirty_rate_high_cnt; }; typedef struct RAMState RAMState; @@ -651,7 +651,7 @@ uint64_t ram_pagesize_summary(void) return summary; } -static void migration_bitmap_sync(void) +static void migration_bitmap_sync(RAMState *rs) { RAMBlock *block; MigrationState *s = migrate_get_current(); @@ -696,9 +696,9 @@ static void migration_bitmap_sync(void) if (s->dirty_pages_rate && (num_dirty_pages_period * TARGET_PAGE_SIZE > (bytes_xfer_now - bytes_xfer_prev)/2) && - (dirty_rate_high_cnt++ >= 2)) { + (rs->dirty_rate_high_cnt++ >= 2)) { trace_migration_throttle(); - dirty_rate_high_cnt = 0; + rs->dirty_rate_high_cnt = 0; mig_throttle_guest_down(); } bytes_xfer_prev = bytes_xfer_now; @@ -1920,7 +1920,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) rcu_read_lock(); /* This should be our last sync, the src is now paused */ - migration_bitmap_sync(); + migration_bitmap_sync(&ram_state); unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; if (!unsentmap) { @@ -1998,7 +1998,7 @@ static int ram_save_init_globals(RAMState *rs) { int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ - dirty_rate_high_cnt = 0; + rs->dirty_rate_high_cnt = 0; bitmap_sync_count = 0; migration_bitmap_sync_init(); qemu_mutex_init(&migration_bitmap_mutex); @@ -2062,7 +2062,7 @@ static int ram_save_init_globals(RAMState *rs) migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; memory_global_dirty_log_start(); - migration_bitmap_sync(); + migration_bitmap_sync(rs); qemu_mutex_unlock_ramlist(); qemu_mutex_unlock_iothread(); rcu_read_unlock(); @@ -2210,7 +2210,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) rcu_read_lock(); if (!migration_in_postcopy(migrate_get_current())) { - migration_bitmap_sync(); + migration_bitmap_sync(rs); } ram_control_before_iterate(f, RAM_CONTROL_FINISH); @@ -2243,6 +2243,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, uint64_t *non_postcopiable_pending, uint64_t *postcopiable_pending) { + RAMState *rs = opaque; uint64_t remaining_size; remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; @@ -2251,7 +2252,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, remaining_size < max_size) { qemu_mutex_lock_iothread(); rcu_read_lock(); - migration_bitmap_sync(); + migration_bitmap_sync(rs); rcu_read_unlock(); qemu_mutex_unlock_iothread(); remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; -- cgit v1.2.3 From 5a987738965d2f1d02b6f4e34e9ec64f2b6c79d8 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:39:02 +0100 Subject: ram: Move bitmap_sync_count into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index a59140bc0f..935311d412 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -45,8 +45,6 @@ #include "qemu/rcu_queue.h" #include "migration/colo.h" -static uint64_t bitmap_sync_count; - /***********************************************************/ /* ram save/restore */ @@ -154,6 +152,8 @@ struct RAMState { bool ram_bulk_stage; /* How many times we have dirty too many pages */ int dirty_rate_high_cnt; + /* How many times we have synchronized the bitmap */ + uint64_t bitmap_sync_count; }; typedef struct RAMState RAMState; @@ -471,7 +471,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) /* We don't care if this fails to allocate a new cache page * as long as it updated an old one */ cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE, - bitmap_sync_count); + rs->bitmap_sync_count); } #define ENCODING_FLAG_XBZRLE 0x1 @@ -483,6 +483,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * 0 means that page is identical to the one already sent * -1 means that xbzrle would be longer than normal * + * @rs: current RAM state * @f: QEMUFile where to send the data * @current_data: pointer to the address of the page contents * @current_addr: addr of the page @@ -491,7 +492,7 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * @last_stage: if we are at the completion stage * @bytes_transferred: increase it with the number of transferred bytes */ -static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, +static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, ram_addr_t offset, bool last_stage, uint64_t *bytes_transferred) @@ -499,11 +500,11 @@ static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data, int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; - if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) { + if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) { acct_info.xbzrle_cache_miss++; if (!last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data, - bitmap_sync_count) == -1) { + rs->bitmap_sync_count) == -1) { return -1; } else { /* update *current_data when the page has been @@ -658,7 +659,7 @@ static void migration_bitmap_sync(RAMState *rs) int64_t end_time; int64_t bytes_xfer_now; - bitmap_sync_count++; + rs->bitmap_sync_count++; if (!bytes_xfer_prev) { bytes_xfer_prev = ram_bytes_transferred(); @@ -720,9 +721,9 @@ static void migration_bitmap_sync(RAMState *rs) start_time = end_time; num_dirty_pages_period = 0; } - s->dirty_sync_count = bitmap_sync_count; + s->dirty_sync_count = rs->bitmap_sync_count; if (migrate_use_events()) { - qapi_event_send_migration_pass(bitmap_sync_count, NULL); + qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL); } } @@ -829,7 +830,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, ram_release_pages(ms, block->idstr, pss->offset, pages); } else if (!rs->ram_bulk_stage && !migration_in_postcopy(ms) && migrate_use_xbzrle()) { - pages = save_xbzrle_page(f, &p, current_addr, block, + pages = save_xbzrle_page(rs, f, &p, current_addr, block, offset, last_stage, bytes_transferred); if (!last_stage) { /* Can't send this cached data async, since the cache page @@ -1999,7 +2000,7 @@ static int ram_save_init_globals(RAMState 
*rs) int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ rs->dirty_rate_high_cnt = 0; - bitmap_sync_count = 0; + rs->bitmap_sync_count = 0; migration_bitmap_sync_init(); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From f664da80fcc9120faecc1afd4b2af31019f0aff9 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:44:57 +0100 Subject: ram: Move start time into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu -- Renamed start_time to time_last_bitmap_sync(peterx suggestion) --- migration/ram.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 935311d412..53a547cd94 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -154,6 +154,9 @@ struct RAMState { int dirty_rate_high_cnt; /* How many times we have synchronized the bitmap */ uint64_t bitmap_sync_count; + /* these variables are used for bitmap sync */ + /* last time we did a full bitmap_sync */ + int64_t time_last_bitmap_sync; }; typedef struct RAMState RAMState; @@ -617,14 +620,13 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) } /* Fix me: there are too many global variables used in migration process. */ -static int64_t start_time; static int64_t bytes_xfer_prev; static uint64_t xbzrle_cache_miss_prev; static uint64_t iterations_prev; -static void migration_bitmap_sync_init(void) +static void migration_bitmap_sync_init(RAMState *rs) { - start_time = 0; + rs->time_last_bitmap_sync = 0; bytes_xfer_prev = 0; num_dirty_pages_period = 0; xbzrle_cache_miss_prev = 0; @@ -665,8 +667,8 @@ static void migration_bitmap_sync(RAMState *rs) bytes_xfer_prev = ram_bytes_transferred(); } - if (!start_time) { - start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + if (!rs->time_last_bitmap_sync) { + rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); } trace_migration_bitmap_sync_start(); @@ -685,7 +687,7 @@ static void migration_bitmap_sync(RAMState *rs) end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); /* more than 1 second = 1000 millisecons */ - if (end_time > start_time + 1000) { + if (end_time > rs->time_last_bitmap_sync + 1000) { if (migrate_auto_converge()) { /* The following detection logic can be refined later. For now: Check to see if the dirtied bytes is 50% more than the approx. @@ -716,9 +718,9 @@ static void migration_bitmap_sync(RAMState *rs) xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } s->dirty_pages_rate = num_dirty_pages_period * 1000 - / (end_time - start_time); + / (end_time - rs->time_last_bitmap_sync); s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; - start_time = end_time; + rs->time_last_bitmap_sync = end_time; num_dirty_pages_period = 0; } s->dirty_sync_count = rs->bitmap_sync_count; @@ -2001,7 +2003,7 @@ static int ram_save_init_globals(RAMState *rs) rs->dirty_rate_high_cnt = 0; rs->bitmap_sync_count = 0; - migration_bitmap_sync_init(); + migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); if (migrate_use_xbzrle()) { -- cgit v1.2.3 From eac741595890ac3dec3c8addd02bf34cbf8fec25 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 14:59:01 +0200 Subject: ram: Move bytes_xfer_prev into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 53a547cd94..aeef5637a5 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -157,6 +157,8 @@ struct RAMState { /* these variables are used for bitmap sync */ /* last time we did a full bitmap_sync */ int64_t time_last_bitmap_sync; + /* bytes transferred at start_time */ + int64_t bytes_xfer_prev; }; typedef struct RAMState RAMState; @@ -620,14 +622,13 @@ static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) } /* Fix me: there are too many global variables used in migration process. */ -static int64_t bytes_xfer_prev; static uint64_t xbzrle_cache_miss_prev; static uint64_t iterations_prev; static void migration_bitmap_sync_init(RAMState *rs) { rs->time_last_bitmap_sync = 0; - bytes_xfer_prev = 0; + rs->bytes_xfer_prev = 0; num_dirty_pages_period = 0; xbzrle_cache_miss_prev = 0; iterations_prev = 0; @@ -663,8 +664,8 @@ static void migration_bitmap_sync(RAMState *rs) rs->bitmap_sync_count++; - if (!bytes_xfer_prev) { - bytes_xfer_prev = ram_bytes_transferred(); + if (!rs->bytes_xfer_prev) { + rs->bytes_xfer_prev = ram_bytes_transferred(); } if (!rs->time_last_bitmap_sync) { @@ -698,13 +699,13 @@ static void migration_bitmap_sync(RAMState *rs) if (s->dirty_pages_rate && (num_dirty_pages_period * TARGET_PAGE_SIZE > - (bytes_xfer_now - bytes_xfer_prev)/2) && + (bytes_xfer_now - rs->bytes_xfer_prev) / 2) && (rs->dirty_rate_high_cnt++ >= 2)) { trace_migration_throttle(); rs->dirty_rate_high_cnt = 0; mig_throttle_guest_down(); } - bytes_xfer_prev = bytes_xfer_now; + rs->bytes_xfer_prev = bytes_xfer_now; } if (migrate_use_xbzrle()) { -- cgit v1.2.3 From c4bdf0cf4b62a90b07f27f55d4650dc1cb3f8d7e Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 14:59:54 +0200 Subject: ram: Change byte_xfer_{prev,now} type to uint64_t Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index aeef5637a5..d13674f4b4 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -158,7 +158,7 @@ struct RAMState { /* last time we did a full bitmap_sync */ int64_t time_last_bitmap_sync; /* bytes transferred at start_time */ - int64_t bytes_xfer_prev; + uint64_t bytes_xfer_prev; }; typedef struct RAMState RAMState; @@ -660,7 +660,7 @@ static void migration_bitmap_sync(RAMState *rs) RAMBlock *block; MigrationState *s = migrate_get_current(); int64_t end_time; - int64_t bytes_xfer_now; + uint64_t bytes_xfer_now; rs->bitmap_sync_count++; -- cgit v1.2.3 From a66cd90c74f2a521d2441ba591f822cc93291cab Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 15:02:43 +0200 Subject: ram: Move num_dirty_pages_period into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index d13674f4b4..d6cf032342 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -159,6 +159,8 @@ struct RAMState { int64_t time_last_bitmap_sync; /* bytes transferred at start_time */ uint64_t bytes_xfer_prev; + /* number of dirty pages since start_time */ + int64_t num_dirty_pages_period; }; typedef struct RAMState RAMState; @@ -612,13 +614,13 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr) return ret; } -static int64_t num_dirty_pages_period; -static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length) +static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, + ram_addr_t length) { unsigned long *bitmap; bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap, - start, length, &num_dirty_pages_period); + start, length, &rs->num_dirty_pages_period); } /* Fix me: there are too many global variables used in migration process. */ @@ -629,7 +631,7 @@ static void migration_bitmap_sync_init(RAMState *rs) { rs->time_last_bitmap_sync = 0; rs->bytes_xfer_prev = 0; - num_dirty_pages_period = 0; + rs->num_dirty_pages_period = 0; xbzrle_cache_miss_prev = 0; iterations_prev = 0; } @@ -678,12 +680,12 @@ static void migration_bitmap_sync(RAMState *rs) qemu_mutex_lock(&migration_bitmap_mutex); rcu_read_lock(); QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { - migration_bitmap_sync_range(block->offset, block->used_length); + migration_bitmap_sync_range(rs, block->offset, block->used_length); } rcu_read_unlock(); qemu_mutex_unlock(&migration_bitmap_mutex); - trace_migration_bitmap_sync_end(num_dirty_pages_period); + trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); @@ -698,7 +700,7 @@ static void migration_bitmap_sync(RAMState *rs) bytes_xfer_now = ram_bytes_transferred(); if (s->dirty_pages_rate && - (num_dirty_pages_period * TARGET_PAGE_SIZE > + (rs->num_dirty_pages_period * TARGET_PAGE_SIZE > (bytes_xfer_now - rs->bytes_xfer_prev) / 2) && (rs->dirty_rate_high_cnt++ >= 2)) { trace_migration_throttle(); @@ -718,11 +720,11 @@ static void migration_bitmap_sync(RAMState *rs) iterations_prev = acct_info.iterations; xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } - s->dirty_pages_rate = num_dirty_pages_period * 1000 + s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 / (end_time - rs->time_last_bitmap_sync); s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; rs->time_last_bitmap_sync = end_time; - num_dirty_pages_period = 0; + rs->num_dirty_pages_period = 0; } s->dirty_sync_count = rs->bitmap_sync_count; if (migrate_use_events()) { -- cgit v1.2.3 From 68908ed665f4d97afb271c5d5b564d951951538c Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 15:05:53 +0200 Subject: ram: Change num_dirty_pages_period type to uint64_t Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- include/exec/ram_addr.h | 2 +- migration/ram.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index b05dc84ab9..9aadc5c830 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -356,7 +356,7 @@ static inline uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest, ram_addr_t start, ram_addr_t length, - int64_t *real_dirty_pages) + uint64_t *real_dirty_pages) { ram_addr_t addr; unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); diff --git a/migration/ram.c b/migration/ram.c index d6cf032342..9a823558db 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -160,7 +160,7 @@ struct RAMState { /* bytes transferred at start_time */ uint64_t bytes_xfer_prev; /* number of dirty pages since start_time */ - int64_t num_dirty_pages_period; + uint64_t num_dirty_pages_period; }; typedef struct RAMState RAMState; -- cgit v1.2.3 From b5833fde40b1c32eb80887cf55e971973856a412 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:49:19 +0100 Subject: ram: Move xbzrle_cache_miss_prev into RAMState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Philippe Mathieu-Daudé --- migration/ram.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 9a823558db..32b1cbb0cd 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -161,6 +161,8 @@ struct RAMState { uint64_t bytes_xfer_prev; /* number of dirty pages since start_time */ uint64_t num_dirty_pages_period; + /* xbzrle misses since the beginning of the period */ + uint64_t xbzrle_cache_miss_prev; }; typedef struct RAMState RAMState; @@ -624,7 +626,6 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, } /* Fix me: there are too many global variables used in migration process. */ -static uint64_t xbzrle_cache_miss_prev; static uint64_t iterations_prev; static void migration_bitmap_sync_init(RAMState *rs) @@ -632,7 +633,7 @@ static void migration_bitmap_sync_init(RAMState *rs) rs->time_last_bitmap_sync = 0; rs->bytes_xfer_prev = 0; rs->num_dirty_pages_period = 0; - xbzrle_cache_miss_prev = 0; + rs->xbzrle_cache_miss_prev = 0; iterations_prev = 0; } @@ -714,11 +715,11 @@ static void migration_bitmap_sync(RAMState *rs) if (iterations_prev != acct_info.iterations) { acct_info.xbzrle_cache_miss_rate = (double)(acct_info.xbzrle_cache_miss - - xbzrle_cache_miss_prev) / + rs->xbzrle_cache_miss_prev) / (acct_info.iterations - iterations_prev); } iterations_prev = acct_info.iterations; - xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; + rs->xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 / (end_time - rs->time_last_bitmap_sync); -- cgit v1.2.3 From 36040d9cb25454d21a04d322423e36a0bd3975c5 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 19:51:13 +0100 Subject: ram: Move iterations_prev into RAMState MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Philippe Mathieu-Daudé --- migration/ram.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 32b1cbb0cd..5b6fef8575 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -163,6 +163,8 @@ struct RAMState { uint64_t num_dirty_pages_period; /* xbzrle misses since the beginning of the period */ uint64_t xbzrle_cache_miss_prev; + /* number of iterations at the beginning of period */ + uint64_t iterations_prev; }; typedef struct RAMState RAMState; @@ -625,16 +627,13 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, start, length, &rs->num_dirty_pages_period); } -/* Fix me: there are too many global variables used in migration process. */ -static uint64_t iterations_prev; - static void migration_bitmap_sync_init(RAMState *rs) { rs->time_last_bitmap_sync = 0; rs->bytes_xfer_prev = 0; rs->num_dirty_pages_period = 0; rs->xbzrle_cache_miss_prev = 0; - iterations_prev = 0; + rs->iterations_prev = 0; } /** @@ -712,13 +711,13 @@ static void migration_bitmap_sync(RAMState *rs) } if (migrate_use_xbzrle()) { - if (iterations_prev != acct_info.iterations) { + if (rs->iterations_prev != acct_info.iterations) { acct_info.xbzrle_cache_miss_rate = (double)(acct_info.xbzrle_cache_miss - rs->xbzrle_cache_miss_prev) / - (acct_info.iterations - iterations_prev); + (acct_info.iterations - rs->iterations_prev); } - iterations_prev = acct_info.iterations; + rs->iterations_prev = acct_info.iterations; rs->xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 -- cgit v1.2.3 From f7ccd61b4ceee7c8bfaa78c943a179765ea745a6 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:30:21 +0100 Subject: ram: Move dup_pages into RAMState Once there rename it to its actual meaning, zero_pages. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 5b6fef8575..cdd56b7c33 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -165,6 +165,9 @@ struct RAMState { uint64_t xbzrle_cache_miss_prev; /* number of iterations at the beginning of period */ uint64_t iterations_prev; + /* Accounting fields */ + /* number of zero pages. It used to be pages filled by the same char. */ + uint64_t zero_pages; }; typedef struct RAMState RAMState; @@ -172,7 +175,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t dup_pages; uint64_t skipped_pages; uint64_t norm_pages; uint64_t iterations; @@ -192,12 +194,12 @@ static void acct_clear(void) uint64_t dup_mig_bytes_transferred(void) { - return acct_info.dup_pages * TARGET_PAGE_SIZE; + return ram_state.zero_pages * TARGET_PAGE_SIZE; } uint64_t dup_mig_pages_transferred(void) { - return acct_info.dup_pages; + return ram_state.zero_pages; } uint64_t skipped_mig_bytes_transferred(void) @@ -737,19 +739,21 @@ static void migration_bitmap_sync(RAMState *rs) * * Returns the number of pages written. 
* + * @rs: current RAM state * @f: QEMUFile where to send the data * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @p: pointer to the page * @bytes_transferred: increase it with the number of transferred bytes */ -static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset, +static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block, + ram_addr_t offset, uint8_t *p, uint64_t *bytes_transferred) { int pages = -1; if (is_zero_range(p, TARGET_PAGE_SIZE)) { - acct_info.dup_pages++; + rs->zero_pages++; *bytes_transferred += save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(f, 0); @@ -822,11 +826,11 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, if (bytes_xmit > 0) { acct_info.norm_pages++; } else if (bytes_xmit == 0) { - acct_info.dup_pages++; + rs->zero_pages++; } } } else { - pages = save_zero_page(f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); if (pages > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale @@ -998,7 +1002,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, if (bytes_xmit > 0) { acct_info.norm_pages++; } else if (bytes_xmit == 0) { - acct_info.dup_pages++; + rs->zero_pages++; } } } else { @@ -1010,7 +1014,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, */ if (block != rs->last_sent_block) { flush_compressed_data(f); - pages = save_zero_page(f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); if (pages == -1) { /* Make sure the first page is sent out before other pages */ bytes_xmit = save_page_header(f, block, offset | @@ -1031,7 +1035,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, } } else { offset |= RAM_SAVE_FLAG_CONTINUE; - pages = save_zero_page(f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); if (pages == -1) { pages = compress_page_with_multi_thread(f, block, offset, bytes_transferred); @@ -1463,8 +1467,10 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage, void acct_update_position(QEMUFile *f, size_t size, bool zero) { uint64_t pages = size / TARGET_PAGE_SIZE; + RAMState *rs = &ram_state; + if (zero) { - acct_info.dup_pages += pages; + rs->zero_pages += pages; } else { acct_info.norm_pages += pages; bytes_transferred += size; @@ -2006,6 +2012,7 @@ static int ram_save_init_globals(RAMState *rs) rs->dirty_rate_high_cnt = 0; rs->bitmap_sync_count = 0; + rs->zero_pages = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From 5bb1272c38dea94fdded3048d7efac42a72f97f0 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:33:01 +0100 Subject: ram: Remove unused dup_mig_bytes_transferred() Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- include/migration/migration.h | 1 - migration/ram.c | 5 ----- 2 files changed, 6 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 24487be295..f07700f3ef 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -276,7 +276,6 @@ void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); -uint64_t dup_mig_bytes_transferred(void); uint64_t dup_mig_pages_transferred(void); uint64_t skipped_mig_bytes_transferred(void); uint64_t skipped_mig_pages_transferred(void); diff --git a/migration/ram.c b/migration/ram.c index cdd56b7c33..1398597202 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -192,11 +192,6 @@ static void acct_clear(void) memset(&acct_info, 0, sizeof(acct_info)); } -uint64_t dup_mig_bytes_transferred(void) -{ - return ram_state.zero_pages * TARGET_PAGE_SIZE; -} - uint64_t dup_mig_pages_transferred(void) { return ram_state.zero_pages; -- cgit v1.2.3 From bedf53c14c35f6121c11f81b6c72714804177550 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:35:54 +0100 Subject: ram: Remove unused pages_skipped variable For compatibility, we need to still send a value, but just specify it and comment the fact. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- include/migration/migration.h | 2 -- migration/migration.c | 3 ++- migration/ram.c | 11 ----------- 3 files changed, 2 insertions(+), 14 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index f07700f3ef..bd0b60bbb4 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -277,8 +277,6 @@ void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); uint64_t dup_mig_pages_transferred(void); -uint64_t skipped_mig_bytes_transferred(void); -uint64_t skipped_mig_pages_transferred(void); uint64_t norm_mig_bytes_transferred(void); uint64_t norm_mig_pages_transferred(void); uint64_t xbzrle_mig_bytes_transferred(void); diff --git a/migration/migration.c b/migration/migration.c index 4bee05de9e..34c1a83ad8 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -651,7 +651,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->transferred = ram_bytes_transferred(); info->ram->total = ram_bytes_total(); info->ram->duplicate = dup_mig_pages_transferred(); - info->ram->skipped = skipped_mig_pages_transferred(); + /* legacy value. 
It is not used anymore */ + info->ram->skipped = 0; info->ram->normal = norm_mig_pages_transferred(); info->ram->normal_bytes = norm_mig_bytes_transferred(); info->ram->mbps = s->mbps; diff --git a/migration/ram.c b/migration/ram.c index 1398597202..78c7f3d6aa 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -175,7 +175,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t skipped_pages; uint64_t norm_pages; uint64_t iterations; uint64_t xbzrle_bytes; @@ -197,16 +196,6 @@ uint64_t dup_mig_pages_transferred(void) return ram_state.zero_pages; } -uint64_t skipped_mig_bytes_transferred(void) -{ - return acct_info.skipped_pages * TARGET_PAGE_SIZE; -} - -uint64_t skipped_mig_pages_transferred(void) -{ - return acct_info.skipped_pages; -} - uint64_t norm_mig_bytes_transferred(void) { return acct_info.norm_pages * TARGET_PAGE_SIZE; -- cgit v1.2.3 From b4d1c6e722b0b754de08e72125ef50b37b4a1b6e Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:40:53 +0100 Subject: ram: Move norm_pages to RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 78c7f3d6aa..587e2c7265 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -168,6 +168,8 @@ struct RAMState { /* Accounting fields */ /* number of zero pages. It used to be pages filled by the same char. */ uint64_t zero_pages; + /* number of normal transferred pages */ + uint64_t norm_pages; }; typedef struct RAMState RAMState; @@ -175,7 +177,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t norm_pages; uint64_t iterations; uint64_t xbzrle_bytes; uint64_t xbzrle_pages; @@ -198,12 +199,12 @@ uint64_t dup_mig_pages_transferred(void) uint64_t norm_mig_bytes_transferred(void) { - return acct_info.norm_pages * TARGET_PAGE_SIZE; + return ram_state.norm_pages * TARGET_PAGE_SIZE; } uint64_t norm_mig_pages_transferred(void) { - return acct_info.norm_pages; + return ram_state.norm_pages; } uint64_t xbzrle_mig_bytes_transferred(void) @@ -808,7 +809,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_xmit > 0) { - acct_info.norm_pages++; + rs->norm_pages++; } else if (bytes_xmit == 0) { rs->zero_pages++; } @@ -847,7 +848,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, } *bytes_transferred += TARGET_PAGE_SIZE; pages = 1; - acct_info.norm_pages++; + rs->norm_pages++; } XBZRLE_cache_unlock(); @@ -914,8 +915,8 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block, param->offset = offset; } -static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, - ram_addr_t offset, +static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, + RAMBlock *block, ram_addr_t offset, uint64_t *bytes_transferred) { int idx, thread_count, bytes_xmit = -1, pages = -1; @@ -932,7 +933,7 @@ static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block, qemu_cond_signal(&comp_param[idx].cond); qemu_mutex_unlock(&comp_param[idx].mutex); pages = 1; - acct_info.norm_pages++; + rs->norm_pages++; *bytes_transferred += bytes_xmit; break; } @@ -984,7 +985,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, if (ret != 
RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_xmit > 0) { - acct_info.norm_pages++; + rs->norm_pages++; } else if (bytes_xmit == 0) { rs->zero_pages++; } @@ -1007,7 +1008,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, migrate_compress_level()); if (blen > 0) { *bytes_transferred += bytes_xmit + blen; - acct_info.norm_pages++; + rs->norm_pages++; pages = 1; } else { qemu_file_set_error(f, blen); @@ -1021,7 +1022,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, offset |= RAM_SAVE_FLAG_CONTINUE; pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); if (pages == -1) { - pages = compress_page_with_multi_thread(f, block, offset, + pages = compress_page_with_multi_thread(rs, f, block, offset, bytes_transferred); } else { ram_release_pages(ms, block->idstr, pss->offset, pages); @@ -1456,7 +1457,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) if (zero) { rs->zero_pages += pages; } else { - acct_info.norm_pages += pages; + rs->norm_pages += pages; bytes_transferred += size; qemu_update_position(f, size); } @@ -1997,6 +1998,7 @@ static int ram_save_init_globals(RAMState *rs) rs->dirty_rate_high_cnt = 0; rs->bitmap_sync_count = 0; rs->zero_pages = 0; + rs->norm_pages = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From 29cc3d8a9b595dfcd0c6fd75b8fd490b88fbb184 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:43:34 +0100 Subject: ram: Remove norm_mig_bytes_transferred Its value can be calculated by other exported. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- include/migration/migration.h | 1 - migration/migration.c | 3 ++- migration/ram.c | 5 ----- 3 files changed, 2 insertions(+), 7 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index bd0b60bbb4..87d0cc51e9 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -277,7 +277,6 @@ void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); uint64_t dup_mig_pages_transferred(void); -uint64_t norm_mig_bytes_transferred(void); uint64_t norm_mig_pages_transferred(void); uint64_t xbzrle_mig_bytes_transferred(void); uint64_t xbzrle_mig_pages_transferred(void); diff --git a/migration/migration.c b/migration/migration.c index 34c1a83ad8..7f7bab9523 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -654,7 +654,8 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) /* legacy value. 
It is not used anymore */ info->ram->skipped = 0; info->ram->normal = norm_mig_pages_transferred(); - info->ram->normal_bytes = norm_mig_bytes_transferred(); + info->ram->normal_bytes = norm_mig_pages_transferred() * + (1ul << qemu_target_page_bits()); info->ram->mbps = s->mbps; info->ram->dirty_sync_count = s->dirty_sync_count; info->ram->postcopy_requests = s->postcopy_requests; diff --git a/migration/ram.c b/migration/ram.c index 587e2c7265..85d66ea08e 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -197,11 +197,6 @@ uint64_t dup_mig_pages_transferred(void) return ram_state.zero_pages; } -uint64_t norm_mig_bytes_transferred(void) -{ - return ram_state.norm_pages * TARGET_PAGE_SIZE; -} - uint64_t norm_mig_pages_transferred(void) { return ram_state.norm_pages; -- cgit v1.2.3 From 23b28c3c62fc95a9652a76d2257cce21b9b0b391 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:51:34 +0100 Subject: ram: Move iterations into RAMState Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- migration/ram.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 85d66ea08e..3e500ae417 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -170,6 +170,8 @@ struct RAMState { uint64_t zero_pages; /* number of normal transferred pages */ uint64_t norm_pages; + /* Iterations since start */ + uint64_t iterations; }; typedef struct RAMState RAMState; @@ -177,7 +179,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t iterations; uint64_t xbzrle_bytes; uint64_t xbzrle_pages; uint64_t xbzrle_cache_miss; @@ -693,13 +694,13 @@ static void migration_bitmap_sync(RAMState *rs) } if (migrate_use_xbzrle()) { - if (rs->iterations_prev != acct_info.iterations) { + if (rs->iterations_prev != rs->iterations) { acct_info.xbzrle_cache_miss_rate = (double)(acct_info.xbzrle_cache_miss - rs->xbzrle_cache_miss_prev) / - (acct_info.iterations - rs->iterations_prev); + (rs->iterations - rs->iterations_prev); } - rs->iterations_prev = acct_info.iterations; + rs->iterations_prev = rs->iterations; rs->xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; } s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 @@ -1994,6 +1995,7 @@ static int ram_save_init_globals(RAMState *rs) rs->bitmap_sync_count = 0; rs->zero_pages = 0; rs->norm_pages = 0; + rs->iterations = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); @@ -2151,7 +2153,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) done = 1; break; } - acct_info.iterations++; + rs->iterations++; /* we want to check in the 1st loop, just in case it was the 1st time and we had to sync the dirty bitmap. -- cgit v1.2.3 From 07ed50a2bb66a4d5ab2d7e54a5dfc4b618cccacc Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:57:27 +0100 Subject: ram: Move xbzrle_bytes into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert --- migration/ram.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 3e500ae417..4dc786921e 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -172,6 +172,8 @@ struct RAMState { uint64_t norm_pages; /* Iterations since start */ uint64_t iterations; + /* xbzrle transmitted bytes */ + uint64_t xbzrle_bytes; }; typedef struct RAMState RAMState; @@ -179,7 +181,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t xbzrle_bytes; uint64_t xbzrle_pages; uint64_t xbzrle_cache_miss; double xbzrle_cache_miss_rate; @@ -205,7 +206,7 @@ uint64_t norm_mig_pages_transferred(void) uint64_t xbzrle_mig_bytes_transferred(void) { - return acct_info.xbzrle_bytes; + return ram_state.xbzrle_bytes; } uint64_t xbzrle_mig_pages_transferred(void) @@ -544,7 +545,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); bytes_xbzrle += encoded_len + 1 + 2; acct_info.xbzrle_pages++; - acct_info.xbzrle_bytes += bytes_xbzrle; + rs->xbzrle_bytes += bytes_xbzrle; *bytes_transferred += bytes_xbzrle; return 1; @@ -1996,6 +1997,7 @@ static int ram_save_init_globals(RAMState *rs) rs->zero_pages = 0; rs->norm_pages = 0; rs->iterations = 0; + rs->xbzrle_bytes = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From f36ada95dedefaf2a47f19ae3214913506716b90 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 20:59:32 +0100 Subject: ram: Move xbzrle_pages into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu -- Comment why we need bytes and pages --- migration/ram.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 4dc786921e..cf6681c467 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -172,8 +172,11 @@ struct RAMState { uint64_t norm_pages; /* Iterations since start */ uint64_t iterations; - /* xbzrle transmitted bytes */ + /* xbzrle transmitted bytes. 
Notice that this is with + * compression, they can't be calculated from the pages */ uint64_t xbzrle_bytes; + /* xbzrle transmmited pages */ + uint64_t xbzrle_pages; }; typedef struct RAMState RAMState; @@ -181,7 +184,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t xbzrle_pages; uint64_t xbzrle_cache_miss; double xbzrle_cache_miss_rate; uint64_t xbzrle_overflows; @@ -211,7 +213,7 @@ uint64_t xbzrle_mig_bytes_transferred(void) uint64_t xbzrle_mig_pages_transferred(void) { - return acct_info.xbzrle_pages; + return ram_state.xbzrle_pages; } uint64_t xbzrle_mig_pages_cache_miss(void) @@ -544,7 +546,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, qemu_put_be16(f, encoded_len); qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); bytes_xbzrle += encoded_len + 1 + 2; - acct_info.xbzrle_pages++; + rs->xbzrle_pages++; rs->xbzrle_bytes += bytes_xbzrle; *bytes_transferred += bytes_xbzrle; @@ -1998,6 +2000,7 @@ static int ram_save_init_globals(RAMState *rs) rs->norm_pages = 0; rs->iterations = 0; rs->xbzrle_bytes = 0; + rs->xbzrle_pages = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From 544c36f188cb10f0c07d31c135f863972799e25c Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:02:08 +0100 Subject: ram: Move xbzrle_cache_miss into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index cf6681c467..b70db68b8f 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -177,6 +177,8 @@ struct RAMState { uint64_t xbzrle_bytes; /* xbzrle transmmited pages */ uint64_t xbzrle_pages; + /* xbzrle number of cache miss */ + uint64_t xbzrle_cache_miss; }; typedef struct RAMState RAMState; @@ -184,7 +186,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - uint64_t xbzrle_cache_miss; double xbzrle_cache_miss_rate; uint64_t xbzrle_overflows; } AccountingInfo; @@ -218,7 +219,7 @@ uint64_t xbzrle_mig_pages_transferred(void) uint64_t xbzrle_mig_pages_cache_miss(void) { - return acct_info.xbzrle_cache_miss; + return ram_state.xbzrle_cache_miss; } double xbzrle_mig_cache_miss_rate(void) @@ -498,7 +499,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, uint8_t *prev_cached_page; if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) { - acct_info.xbzrle_cache_miss++; + rs->xbzrle_cache_miss++; if (!last_stage) { if (cache_insert(XBZRLE.cache, current_addr, *current_data, rs->bitmap_sync_count) == -1) { @@ -699,12 +700,12 @@ static void migration_bitmap_sync(RAMState *rs) if (migrate_use_xbzrle()) { if (rs->iterations_prev != rs->iterations) { acct_info.xbzrle_cache_miss_rate = - (double)(acct_info.xbzrle_cache_miss - + (double)(rs->xbzrle_cache_miss - rs->xbzrle_cache_miss_prev) / (rs->iterations - rs->iterations_prev); } rs->iterations_prev = rs->iterations; - rs->xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss; + rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss; } s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 / (end_time - rs->time_last_bitmap_sync); @@ -2001,6 +2002,7 @@ static int ram_save_init_globals(RAMState *rs) rs->iterations = 0; rs->xbzrle_bytes = 0; rs->xbzrle_pages = 0; + rs->xbzrle_cache_miss = 0; migration_bitmap_sync_init(rs); 
qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From b07016b674cb3e5393cb6365dd5628b57f7c03e6 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:04:16 +0100 Subject: ram: Move xbzrle_cache_miss_rate into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index b70db68b8f..09f9effe91 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -179,6 +179,8 @@ struct RAMState { uint64_t xbzrle_pages; /* xbzrle number of cache miss */ uint64_t xbzrle_cache_miss; + /* xbzrle miss rate */ + double xbzrle_cache_miss_rate; }; typedef struct RAMState RAMState; @@ -186,7 +188,6 @@ static RAMState ram_state; /* accounting for migration statistics */ typedef struct AccountingInfo { - double xbzrle_cache_miss_rate; uint64_t xbzrle_overflows; } AccountingInfo; @@ -224,7 +225,7 @@ uint64_t xbzrle_mig_pages_cache_miss(void) double xbzrle_mig_cache_miss_rate(void) { - return acct_info.xbzrle_cache_miss_rate; + return ram_state.xbzrle_cache_miss_rate; } uint64_t xbzrle_mig_pages_overflow(void) @@ -699,7 +700,7 @@ static void migration_bitmap_sync(RAMState *rs) if (migrate_use_xbzrle()) { if (rs->iterations_prev != rs->iterations) { - acct_info.xbzrle_cache_miss_rate = + rs->xbzrle_cache_miss_rate = (double)(rs->xbzrle_cache_miss - rs->xbzrle_cache_miss_prev) / (rs->iterations - rs->iterations_prev); @@ -2003,6 +2004,7 @@ static int ram_save_init_globals(RAMState *rs) rs->xbzrle_bytes = 0; rs->xbzrle_pages = 0; rs->xbzrle_cache_miss = 0; + rs->xbzrle_cache_miss_rate = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); -- cgit v1.2.3 From 180f61f75af4963e08e2204b207c74e4144ba150 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:07:03 +0100 Subject: ram: Move xbzrle_overflows into RAMState Once there, remove the now unused AccountingInfo struct and var. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 21 +++++---------------- 1 file changed, 5 insertions(+), 16 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 09f9effe91..88392e4c71 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -181,23 +181,13 @@ struct RAMState { uint64_t xbzrle_cache_miss; /* xbzrle miss rate */ double xbzrle_cache_miss_rate; + /* xbzrle number of overflows */ + uint64_t xbzrle_overflows; }; typedef struct RAMState RAMState; static RAMState ram_state; -/* accounting for migration statistics */ -typedef struct AccountingInfo { - uint64_t xbzrle_overflows; -} AccountingInfo; - -static AccountingInfo acct_info; - -static void acct_clear(void) -{ - memset(&acct_info, 0, sizeof(acct_info)); -} - uint64_t dup_mig_pages_transferred(void) { return ram_state.zero_pages; @@ -230,7 +220,7 @@ double xbzrle_mig_cache_miss_rate(void) uint64_t xbzrle_mig_pages_overflow(void) { - return acct_info.xbzrle_overflows; + return ram_state.xbzrle_overflows; } static QemuMutex migration_bitmap_mutex; @@ -528,7 +518,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, return 0; } else if (encoded_len == -1) { trace_save_xbzrle_page_overflow(); - acct_info.xbzrle_overflows++; + rs->xbzrle_overflows++; /* update data in the cache */ if (!last_stage) { memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE); @@ -2005,6 +1995,7 @@ static int ram_save_init_globals(RAMState *rs) rs->xbzrle_pages = 0; rs->xbzrle_cache_miss = 0; rs->xbzrle_cache_miss_rate = 0; + rs->xbzrle_overflows = 0; migration_bitmap_sync_init(rs); qemu_mutex_init(&migration_bitmap_mutex); @@ -2035,8 +2026,6 @@ static int ram_save_init_globals(RAMState *rs) XBZRLE.encoded_buf = NULL; return -1; } - - acct_clear(); } /* For memory_global_dirty_log_start below. 
*/ -- cgit v1.2.3 From 0d8ec885ed899d20b14eb8a19544ad66f04937bc Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:21:41 +0100 Subject: ram: Move migration_dirty_pages to RAMState Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- migration/ram.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 88392e4c71..71f8ef3224 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -183,6 +183,8 @@ struct RAMState { double xbzrle_cache_miss_rate; /* xbzrle number of overflows */ uint64_t xbzrle_overflows; + /* number of dirty bits in the bitmap */ + uint64_t migration_dirty_pages; }; typedef struct RAMState RAMState; @@ -223,8 +225,12 @@ uint64_t xbzrle_mig_pages_overflow(void) return ram_state.xbzrle_overflows; } +static ram_addr_t ram_save_remaining(void) +{ + return ram_state.migration_dirty_pages; +} + static QemuMutex migration_bitmap_mutex; -static uint64_t migration_dirty_pages; /* used by the search for pages to send */ struct PageSearchStatus { @@ -582,7 +588,7 @@ ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, return (next - base) << TARGET_PAGE_BITS; } -static inline bool migration_bitmap_clear_dirty(ram_addr_t addr) +static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr) { bool ret; int nr = addr >> TARGET_PAGE_BITS; @@ -591,7 +597,7 @@ static inline bool migration_bitmap_clear_dirty(ram_addr_t addr) ret = test_and_clear_bit(nr, bitmap); if (ret) { - migration_dirty_pages--; + rs->migration_dirty_pages--; } return ret; } @@ -601,8 +607,9 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, { unsigned long *bitmap; bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; - migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap, - start, length, &rs->num_dirty_pages_period); + rs->migration_dirty_pages += + cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length, + &rs->num_dirty_pages_period); } static void migration_bitmap_sync_init(RAMState *rs) @@ -1304,7 +1311,7 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, int res = 0; /* Check the pages is dirty and if it is send it */ - if (migration_bitmap_clear_dirty(dirty_ram_abs)) { + if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) { unsigned long *unsentmap; if (compression_switch && migrate_use_compression()) { res = ram_save_compressed_page(rs, ms, f, pss, @@ -1454,11 +1461,6 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) } } -static ram_addr_t ram_save_remaining(void) -{ - return migration_dirty_pages; -} - uint64_t ram_bytes_remaining(void) { return ram_save_remaining() * TARGET_PAGE_SIZE; @@ -1532,6 +1534,7 @@ static void ram_state_reset(RAMState *rs) void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) { + RAMState *rs = &ram_state; /* called in qemu main thread, so there is * no writing race against this migration_bitmap */ @@ -1557,7 +1560,7 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) atomic_rcu_set(&migration_bitmap_rcu, bitmap); qemu_mutex_unlock(&migration_bitmap_mutex); - migration_dirty_pages += new - old; + rs->migration_dirty_pages += new - old; call_rcu(old_bitmap, migration_bitmap_free, rcu); } } @@ -1730,6 +1733,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, RAMBlock *block, PostcopyDiscardState *pds) { + RAMState *rs = &ram_state; unsigned long *bitmap; unsigned long *unsentmap; unsigned int host_ratio = 
block->page_size / TARGET_PAGE_SIZE; @@ -1827,7 +1831,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, * Remark them as dirty, updating the count for any pages * that weren't previously dirty. */ - migration_dirty_pages += !test_and_set_bit(page, bitmap); + rs->migration_dirty_pages += !test_and_set_bit(page, bitmap); } } @@ -2053,7 +2057,7 @@ static int ram_save_init_globals(RAMState *rs) * Count the total number of pages used by ram blocks not including any * gaps due to alignment or unplugs. */ - migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; + rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS; memory_global_dirty_log_start(); migration_bitmap_sync(rs); -- cgit v1.2.3 From ceb4d16898549e5b7d9123b830907de8908fca9d Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:29:54 +0100 Subject: ram: Everything was init to zero, so use memset And then init only things that are not zero by default. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 25 +++---------------------- 1 file changed, 3 insertions(+), 22 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 71f8ef3224..42bb82f8ba 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -612,15 +612,6 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, &rs->num_dirty_pages_period); } -static void migration_bitmap_sync_init(RAMState *rs) -{ - rs->time_last_bitmap_sync = 0; - rs->bytes_xfer_prev = 0; - rs->num_dirty_pages_period = 0; - rs->xbzrle_cache_miss_prev = 0; - rs->iterations_prev = 0; -} - /** * ram_pagesize_summary: calculate all the pagesizes of a VM * @@ -1986,21 +1977,11 @@ err: return ret; } -static int ram_save_init_globals(RAMState *rs) +static int ram_state_init(RAMState *rs) { int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ - rs->dirty_rate_high_cnt = 0; - rs->bitmap_sync_count = 0; - rs->zero_pages = 0; - rs->norm_pages = 0; - rs->iterations = 0; - rs->xbzrle_bytes = 0; - rs->xbzrle_pages = 0; - rs->xbzrle_cache_miss = 0; - rs->xbzrle_cache_miss_rate = 0; - rs->xbzrle_overflows = 0; - migration_bitmap_sync_init(rs); + memset(rs, 0, sizeof(*rs)); qemu_mutex_init(&migration_bitmap_mutex); if (migrate_use_xbzrle()) { @@ -2090,7 +2071,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque) /* migration has already setup the bitmap, reuse it. */ if (!migration_in_colo_state()) { - if (ram_save_init_globals(rs) < 0) { + if (ram_state_init(rs) < 0) { return -1; } } -- cgit v1.2.3 From 108cfae0195fc51c007017a3a5ab49f4e3c3f781 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:38:09 +0100 Subject: ram: Move migration_bitmap_mutex into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 42bb82f8ba..23819cda16 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -185,6 +185,8 @@ struct RAMState { uint64_t xbzrle_overflows; /* number of dirty bits in the bitmap */ uint64_t migration_dirty_pages; + /* protects modification of the bitmap */ + QemuMutex bitmap_mutex; }; typedef struct RAMState RAMState; @@ -230,8 +232,6 @@ static ram_addr_t ram_save_remaining(void) return ram_state.migration_dirty_pages; } -static QemuMutex migration_bitmap_mutex; - /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -653,13 +653,13 @@ static void migration_bitmap_sync(RAMState *rs) trace_migration_bitmap_sync_start(); memory_global_dirty_log_sync(); - qemu_mutex_lock(&migration_bitmap_mutex); + qemu_mutex_lock(&rs->bitmap_mutex); rcu_read_lock(); QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { migration_bitmap_sync_range(rs, block->offset, block->used_length); } rcu_read_unlock(); - qemu_mutex_unlock(&migration_bitmap_mutex); + qemu_mutex_unlock(&rs->bitmap_mutex); trace_migration_bitmap_sync_end(rs->num_dirty_pages_period); @@ -1526,6 +1526,7 @@ static void ram_state_reset(RAMState *rs) void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) { RAMState *rs = &ram_state; + /* called in qemu main thread, so there is * no writing race against this migration_bitmap */ @@ -1539,7 +1540,7 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) * it is safe to migration if migration_bitmap is cleared bit * at the same time. */ - qemu_mutex_lock(&migration_bitmap_mutex); + qemu_mutex_lock(&rs->bitmap_mutex); bitmap_copy(bitmap->bmap, old_bitmap->bmap, old); bitmap_set(bitmap->bmap, old, new - old); @@ -1550,7 +1551,7 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) bitmap->unsentmap = NULL; atomic_rcu_set(&migration_bitmap_rcu, bitmap); - qemu_mutex_unlock(&migration_bitmap_mutex); + qemu_mutex_unlock(&rs->bitmap_mutex); rs->migration_dirty_pages += new - old; call_rcu(old_bitmap, migration_bitmap_free, rcu); } @@ -1982,7 +1983,7 @@ static int ram_state_init(RAMState *rs) int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ memset(rs, 0, sizeof(*rs)); - qemu_mutex_init(&migration_bitmap_mutex); + qemu_mutex_init(&rs->bitmap_mutex); if (migrate_use_xbzrle()) { XBZRLE_cache_lock(); -- cgit v1.2.3 From eb859c53dd23deae527b3c259b4c1b639f879c4d Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:51:55 +0100 Subject: ram: Move migration_bitmap_rcu into RAMState Once there, rename the type to be shorter. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 86 +++++++++++++++++++++++++++++++-------------------------- 1 file changed, 47 insertions(+), 39 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 23819cda16..f6ae17f1a5 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -138,6 +138,19 @@ out: return ret; } +struct RAMBitmap { + struct rcu_head rcu; + /* Main migration bitmap */ + unsigned long *bmap; + /* bitmap of pages that haven't been sent even once + * only maintained and used in postcopy at the moment + * where it's used to send the dirtymap at the start + * of the postcopy phase + */ + unsigned long *unsentmap; +}; +typedef struct RAMBitmap RAMBitmap; + /* State of RAM for migration */ struct RAMState { /* Last block that we have visited searching for dirty pages */ @@ -187,6 +200,8 @@ struct RAMState { uint64_t migration_dirty_pages; /* protects modification of the bitmap */ QemuMutex bitmap_mutex; + /* Ram Bitmap protected by RCU */ + RAMBitmap *ram_bitmap; }; typedef struct RAMState RAMState; @@ -243,18 +258,6 @@ struct PageSearchStatus { }; typedef struct PageSearchStatus PageSearchStatus; -static struct BitmapRcu { - struct rcu_head rcu; - /* Main migration bitmap */ - unsigned long *bmap; - /* bitmap of pages that haven't been sent even once - * only maintained and used in postcopy at the moment - * where it's used to send the dirtymap at the start - * of the postcopy phase - */ - unsigned long *unsentmap; -} *migration_bitmap_rcu; - struct CompressParam { bool done; bool quit; @@ -577,7 +580,7 @@ ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, unsigned long next; - bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; if (rs->ram_bulk_stage && nr > base) { next = nr + 1; } else { @@ -592,7 +595,7 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr) { bool ret; int nr = addr >> TARGET_PAGE_BITS; - unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; ret = test_and_clear_bit(nr, bitmap); @@ -606,7 +609,7 @@ static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, ram_addr_t length) { unsigned long *bitmap; - bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; rs->migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length, &rs->num_dirty_pages_period); @@ -1149,14 +1152,14 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms, */ if (block) { unsigned long *bitmap; - bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap); if (!dirty) { trace_get_queued_page_not_dirty( block->idstr, (uint64_t)offset, (uint64_t)*ram_addr_abs, test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, - atomic_rcu_read(&migration_bitmap_rcu)->unsentmap)); + atomic_rcu_read(&rs->ram_bitmap)->unsentmap)); } else { trace_get_queued_page(block->idstr, (uint64_t)offset, @@ -1316,7 +1319,7 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, if (res < 0) { return res; } - unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; + unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; if (unsentmap) { clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap); } @@ -1480,7 +1483,7 @@ void free_xbzrle_decoded_buf(void) xbzrle_decoded_buf = NULL; } -static void 
migration_bitmap_free(struct BitmapRcu *bmap) +static void migration_bitmap_free(struct RAMBitmap *bmap) { g_free(bmap->bmap); g_free(bmap->unsentmap); @@ -1489,11 +1492,13 @@ static void migration_bitmap_free(struct BitmapRcu *bmap) static void ram_migration_cleanup(void *opaque) { + RAMState *rs = opaque; + /* caller have hold iothread lock or is in a bh, so there is * no writing race against this migration_bitmap */ - struct BitmapRcu *bitmap = migration_bitmap_rcu; - atomic_rcu_set(&migration_bitmap_rcu, NULL); + struct RAMBitmap *bitmap = rs->ram_bitmap; + atomic_rcu_set(&rs->ram_bitmap, NULL); if (bitmap) { memory_global_dirty_log_stop(); call_rcu(bitmap, migration_bitmap_free, rcu); @@ -1530,9 +1535,9 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) /* called in qemu main thread, so there is * no writing race against this migration_bitmap */ - if (migration_bitmap_rcu) { - struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap; - bitmap = g_new(struct BitmapRcu, 1); + if (rs->ram_bitmap) { + struct RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap; + bitmap = g_new(struct RAMBitmap, 1); bitmap->bmap = bitmap_new(new); /* prevent migration_bitmap content from being set bit @@ -1550,7 +1555,7 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) */ bitmap->unsentmap = NULL; - atomic_rcu_set(&migration_bitmap_rcu, bitmap); + atomic_rcu_set(&rs->ram_bitmap, bitmap); qemu_mutex_unlock(&rs->bitmap_mutex); rs->migration_dirty_pages += new - old; call_rcu(old_bitmap, migration_bitmap_free, rcu); @@ -1565,13 +1570,13 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) void ram_debug_dump_bitmap(unsigned long *todump, bool expected) { int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; - + RAMState *rs = &ram_state; int64_t cur; int64_t linelen = 128; char linebuf[129]; if (!todump) { - todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + todump = atomic_rcu_read(&rs->ram_bitmap)->bmap; } for (cur = 0; cur < ram_pages; cur += linelen) { @@ -1600,8 +1605,9 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected) void ram_postcopy_migrated_memory_release(MigrationState *ms) { + RAMState *rs = &ram_state; struct RAMBlock *block; - unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { unsigned long first = block->offset >> TARGET_PAGE_BITS; @@ -1636,11 +1642,12 @@ static int postcopy_send_discard_bm_ram(MigrationState *ms, unsigned long start, unsigned long length) { + RAMState *rs = &ram_state; unsigned long end = start + length; /* one after the end */ unsigned long current; unsigned long *unsentmap; - unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; + unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; for (current = start; current < end; ) { unsigned long one = find_next_bit(unsentmap, end, current); @@ -1739,8 +1746,8 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass, return; } - bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; - unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; + unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; if (unsent_pass) { /* Find a sent page */ @@ -1898,15 +1905,16 @@ static int postcopy_chunk_hostpages(MigrationState *ms) */ int ram_postcopy_send_discard_bitmap(MigrationState *ms) { + RAMState *rs = &ram_state; int ret; unsigned long *bitmap, 
*unsentmap; rcu_read_lock(); /* This should be our last sync, the src is now paused */ - migration_bitmap_sync(&ram_state); + migration_bitmap_sync(rs); - unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap; + unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; if (!unsentmap) { /* We don't have a safe way to resize the sentmap, so * if the bitmap was resized it will be NULL at this @@ -1927,7 +1935,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) /* * Update the unsentmap to be unsentmap = unsentmap | dirty */ - bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; bitmap_or(unsentmap, unsentmap, bitmap, last_ram_offset() >> TARGET_PAGE_BITS); @@ -2022,16 +2030,16 @@ static int ram_state_init(RAMState *rs) bytes_transferred = 0; ram_state_reset(rs); - migration_bitmap_rcu = g_new0(struct BitmapRcu, 1); + rs->ram_bitmap = g_new0(struct RAMBitmap, 1); /* Skip setting bitmap if there is no RAM */ if (ram_bytes_total()) { ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; - migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages); - bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages); + rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages); + bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages); if (migrate_postcopy_ram()) { - migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages); - bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages); + rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages); + bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages); } } -- cgit v1.2.3 From 2f4fde9352de8b43c4c9831f2cf58114e2b4000e Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 13 Mar 2017 21:58:11 +0100 Subject: ram: Move bytes_transferred into RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 35 +++++++++++++++++------------------ 1 file changed, 17 insertions(+), 18 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index f6ae17f1a5..7fbaa52092 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -198,6 +198,8 @@ struct RAMState { uint64_t xbzrle_overflows; /* number of dirty bits in the bitmap */ uint64_t migration_dirty_pages; + /* total number of bytes transferred */ + uint64_t bytes_transferred; /* protects modification of the bitmap */ QemuMutex bitmap_mutex; /* Ram Bitmap protected by RCU */ @@ -247,6 +249,11 @@ static ram_addr_t ram_save_remaining(void) return ram_state.migration_dirty_pages; } +uint64_t ram_bytes_transferred(void) +{ + return ram_state.bytes_transferred; +} + /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -871,9 +878,7 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block, return bytes_sent; } -static uint64_t bytes_transferred; - -static void flush_compressed_data(QEMUFile *f) +static void flush_compressed_data(RAMState *rs, QEMUFile *f) { int idx, len, thread_count; @@ -894,7 +899,7 @@ static void flush_compressed_data(QEMUFile *f) qemu_mutex_lock(&comp_param[idx].mutex); if (!comp_param[idx].quit) { len = qemu_put_qemu_file(f, comp_param[idx].file); - bytes_transferred += len; + rs->bytes_transferred += len; } qemu_mutex_unlock(&comp_param[idx].mutex); } @@ -990,7 +995,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, * is used to avoid resending the block name. 
*/ if (block != rs->last_sent_block) { - flush_compressed_data(f); + flush_compressed_data(rs, f); pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); if (pages == -1) { /* Make sure the first page is sent out before other pages */ @@ -1066,7 +1071,7 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss, /* If xbzrle is on, stop using the data compression at this * point. In theory, xbzrle can do better than compression. */ - flush_compressed_data(f); + flush_compressed_data(rs, f); compression_switch = false; } } @@ -1450,7 +1455,7 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) rs->zero_pages += pages; } else { rs->norm_pages += pages; - bytes_transferred += size; + rs->bytes_transferred += size; qemu_update_position(f, size); } } @@ -1460,11 +1465,6 @@ uint64_t ram_bytes_remaining(void) return ram_save_remaining() * TARGET_PAGE_SIZE; } -uint64_t ram_bytes_transferred(void) -{ - return bytes_transferred; -} - uint64_t ram_bytes_total(void) { RAMBlock *block; @@ -2027,7 +2027,6 @@ static int ram_state_init(RAMState *rs) qemu_mutex_lock_ramlist(); rcu_read_lock(); - bytes_transferred = 0; ram_state_reset(rs); rs->ram_bitmap = g_new0(struct RAMBitmap, 1); @@ -2139,7 +2138,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) while ((ret = qemu_file_rate_limit(f)) == 0) { int pages; - pages = ram_find_and_save_block(rs, f, false, &bytes_transferred); + pages = ram_find_and_save_block(rs, f, false, &rs->bytes_transferred); /* no more pages to sent */ if (pages == 0) { done = 1; @@ -2161,7 +2160,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } i++; } - flush_compressed_data(f); + flush_compressed_data(rs, f); rcu_read_unlock(); /* @@ -2171,7 +2170,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) ram_control_after_iterate(f, RAM_CONTROL_ROUND); qemu_put_be64(f, RAM_SAVE_FLAG_EOS); - bytes_transferred += 8; + rs->bytes_transferred += 8; ret = qemu_file_get_error(f); if (ret < 0) { @@ -2210,14 +2209,14 @@ static int ram_save_complete(QEMUFile *f, void *opaque) int pages; pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(), - &bytes_transferred); + &rs->bytes_transferred); /* no more blocks to sent */ if (pages == 0) { break; } } - flush_compressed_data(f); + flush_compressed_data(rs, f); ram_control_after_iterate(f, RAM_CONTROL_FINISH); rcu_read_unlock(); -- cgit v1.2.3 From 072c251157d0903f895bd05280867eb869d14854 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 10:27:31 +0100 Subject: ram: Use the RAMState bytes_transferred parameter Somewhere it was passed by reference, just use it from RAMState. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 75 +++++++++++++++++++++------------------------------------ 1 file changed, 27 insertions(+), 48 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 7fbaa52092..cd7226ee4b 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -495,12 +495,10 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes */ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, - ram_addr_t offset, bool last_stage, - uint64_t *bytes_transferred) + ram_addr_t offset, bool last_stage) { int encoded_len = 0, bytes_xbzrle; uint8_t *prev_cached_page; @@ -556,7 +554,7 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, bytes_xbzrle += encoded_len + 1 + 2; rs->xbzrle_pages++; rs->xbzrle_bytes += bytes_xbzrle; - *bytes_transferred += bytes_xbzrle; + rs->bytes_transferred += bytes_xbzrle; return 1; } @@ -728,20 +726,18 @@ static void migration_bitmap_sync(RAMState *rs) * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @p: pointer to the page - * @bytes_transferred: increase it with the number of transferred bytes */ static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block, - ram_addr_t offset, - uint8_t *p, uint64_t *bytes_transferred) + ram_addr_t offset, uint8_t *p) { int pages = -1; if (is_zero_range(p, TARGET_PAGE_SIZE)) { rs->zero_pages++; - *bytes_transferred += save_page_header(f, block, - offset | RAM_SAVE_FLAG_COMPRESS); + rs->bytes_transferred += + save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(f, 0); - *bytes_transferred += 1; + rs->bytes_transferred += 1; pages = 1; } @@ -772,11 +768,9 @@ static void ram_release_pages(MigrationState *ms, const char *rbname, * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes */ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, - PageSearchStatus *pss, bool last_stage, - uint64_t *bytes_transferred) + PageSearchStatus *pss, bool last_stage) { int pages = -1; uint64_t bytes_xmit; @@ -794,7 +788,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, ret = ram_control_save_page(f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { - *bytes_transferred += bytes_xmit; + rs->bytes_transferred += bytes_xmit; pages = 1; } @@ -814,7 +808,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, } } } else { - pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p); if (pages > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale @@ -824,7 +818,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, } else if (!rs->ram_bulk_stage && !migration_in_postcopy(ms) && migrate_use_xbzrle()) { pages = save_xbzrle_page(rs, f, &p, current_addr, block, - offset, last_stage, bytes_transferred); + offset, last_stage); if (!last_stage) { /* Can't send this cached data async, since the cache page * might get updated before it 
gets to the wire @@ -836,7 +830,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, /* XBZRLE overflow or normal page */ if (pages == -1) { - *bytes_transferred += save_page_header(f, block, + rs->bytes_transferred += save_page_header(f, block, offset | RAM_SAVE_FLAG_PAGE); if (send_async) { qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE, @@ -845,7 +839,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, } else { qemu_put_buffer(f, p, TARGET_PAGE_SIZE); } - *bytes_transferred += TARGET_PAGE_SIZE; + rs->bytes_transferred += TARGET_PAGE_SIZE; pages = 1; rs->norm_pages++; } @@ -913,8 +907,7 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block, } static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, - RAMBlock *block, ram_addr_t offset, - uint64_t *bytes_transferred) + RAMBlock *block, ram_addr_t offset) { int idx, thread_count, bytes_xmit = -1, pages = -1; @@ -931,7 +924,7 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, qemu_mutex_unlock(&comp_param[idx].mutex); pages = 1; rs->norm_pages++; - *bytes_transferred += bytes_xmit; + rs->bytes_transferred += bytes_xmit; break; } } @@ -957,12 +950,10 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes */ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, QEMUFile *f, - PageSearchStatus *pss, bool last_stage, - uint64_t *bytes_transferred) + PageSearchStatus *pss, bool last_stage) { int pages = -1; uint64_t bytes_xmit = 0; @@ -976,7 +967,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, ret = ram_control_save_page(f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { - *bytes_transferred += bytes_xmit; + rs->bytes_transferred += bytes_xmit; pages = 1; } if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { @@ -996,7 +987,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, */ if (block != rs->last_sent_block) { flush_compressed_data(rs, f); - pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p); if (pages == -1) { /* Make sure the first page is sent out before other pages */ bytes_xmit = save_page_header(f, block, offset | @@ -1004,7 +995,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE, migrate_compress_level()); if (blen > 0) { - *bytes_transferred += bytes_xmit + blen; + rs->bytes_transferred += bytes_xmit + blen; rs->norm_pages++; pages = 1; } else { @@ -1017,10 +1008,9 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, } } else { offset |= RAM_SAVE_FLAG_CONTINUE; - pages = save_zero_page(rs, f, block, offset, p, bytes_transferred); + pages = save_zero_page(rs, f, block, offset, p); if (pages == -1) { - pages = compress_page_with_multi_thread(rs, f, block, offset, - bytes_transferred); + pages = compress_page_with_multi_thread(rs, f, block, offset); } else { ram_release_pages(ms, block->idstr, pss->offset, pages); } @@ -1298,13 +1288,11 @@ err: * @f: QEMUFile where to send the data * @pss: data about the page we want to send * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes * 
@dirty_ram_abs: address of the start of the dirty page in ram_addr_t space */ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, bool last_stage, - uint64_t *bytes_transferred, ram_addr_t dirty_ram_abs) { int res = 0; @@ -1313,12 +1301,9 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) { unsigned long *unsentmap; if (compression_switch && migrate_use_compression()) { - res = ram_save_compressed_page(rs, ms, f, pss, - last_stage, - bytes_transferred); + res = ram_save_compressed_page(rs, ms, f, pss, last_stage); } else { - res = ram_save_page(rs, ms, f, pss, last_stage, - bytes_transferred); + res = ram_save_page(rs, ms, f, pss, last_stage); } if (res < 0) { @@ -1356,13 +1341,11 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, * @f: QEMUFile where to send the data * @pss: data about the page we want to send * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space */ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, PageSearchStatus *pss, bool last_stage, - uint64_t *bytes_transferred, ram_addr_t dirty_ram_abs) { int tmppages, pages = 0; @@ -1370,7 +1353,7 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, do { tmppages = ram_save_target_page(rs, ms, f, pss, last_stage, - bytes_transferred, dirty_ram_abs); + dirty_ram_abs); if (tmppages < 0) { return tmppages; } @@ -1395,14 +1378,12 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, * @rs: current RAM state * @f: QEMUFile where to send the data * @last_stage: if we are at the completion stage - * @bytes_transferred: increase it with the number of transferred bytes * * On systems where host-page-size > target-page-size it will send all the * pages in a host page that are dirty. */ -static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage, - uint64_t *bytes_transferred) +static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage) { PageSearchStatus pss; MigrationState *ms = migrate_get_current(); @@ -1434,8 +1415,7 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage, } if (found) { - pages = ram_save_host_page(rs, ms, f, &pss, - last_stage, bytes_transferred, + pages = ram_save_host_page(rs, ms, f, &pss, last_stage, dirty_ram_abs); } } while (!pages && again); @@ -2138,7 +2118,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) while ((ret = qemu_file_rate_limit(f)) == 0) { int pages; - pages = ram_find_and_save_block(rs, f, false, &rs->bytes_transferred); + pages = ram_find_and_save_block(rs, f, false); /* no more pages to sent */ if (pages == 0) { done = 1; @@ -2208,8 +2188,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) while (true) { int pages; - pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(), - &rs->bytes_transferred); + pages = ram_find_and_save_block(rs, f, !migration_in_colo_state()); /* no more blocks to sent */ if (pages == 0) { break; -- cgit v1.2.3 From 9edabd4de6babf36f110c99a05b1558834535ec8 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 12:02:16 +0100 Subject: ram: Remove ram_save_remaining Just unfold it. Move ram_bytes_remaining() with the rest of exported functions. 
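The accessor pattern this patch settles on is easy to see in isolation. Below is a stand-alone sketch, not QEMU code: the state struct is reduced to two fields, the page size is a hard-coded constant, and the function names only mirror ram_bytes_transferred()/ram_bytes_remaining() from the diff that follows.

#include <inttypes.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096

/* Cut-down stand-in for migration/ram.c's RAMState: every counter the
 * series moves lives in one file-scope struct. */
typedef struct {
    uint64_t bytes_transferred;      /* bytes already sent */
    uint64_t migration_dirty_pages;  /* pages still left to send */
} RAMStateSketch;

static RAMStateSketch ram_state_sketch;

/* Exported accessors: callers never touch the struct, they only read
 * derived values.  "Remaining" is unfolded the same way the patch does
 * it, as dirty pages times the page size. */
static uint64_t sketch_bytes_transferred(void)
{
    return ram_state_sketch.bytes_transferred;
}

static uint64_t sketch_bytes_remaining(void)
{
    return ram_state_sketch.migration_dirty_pages * SKETCH_PAGE_SIZE;
}

int main(void)
{
    ram_state_sketch.migration_dirty_pages = 1024;
    ram_state_sketch.bytes_transferred = 3 * SKETCH_PAGE_SIZE;
    printf("transferred=%" PRIu64 " remaining=%" PRIu64 "\n",
           sketch_bytes_transferred(), sketch_bytes_remaining());
    return 0;
}

Keeping the struct static to one file means these accessors are the only coupling surface, which is what lets later patches in the series drop MigrationState parameters from the internal helpers.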
Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index cd7226ee4b..2f5cbd1727 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -244,14 +244,14 @@ uint64_t xbzrle_mig_pages_overflow(void) return ram_state.xbzrle_overflows; } -static ram_addr_t ram_save_remaining(void) +uint64_t ram_bytes_transferred(void) { - return ram_state.migration_dirty_pages; + return ram_state.bytes_transferred; } -uint64_t ram_bytes_transferred(void) +uint64_t ram_bytes_remaining(void) { - return ram_state.bytes_transferred; + return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE; } /* used by the search for pages to send */ @@ -1440,11 +1440,6 @@ void acct_update_position(QEMUFile *f, size_t size, bool zero) } } -uint64_t ram_bytes_remaining(void) -{ - return ram_save_remaining() * TARGET_PAGE_SIZE; -} - uint64_t ram_bytes_total(void) { RAMBlock *block; @@ -2212,7 +2207,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, RAMState *rs = opaque; uint64_t remaining_size; - remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; + remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; if (!migration_in_postcopy(migrate_get_current()) && remaining_size < max_size) { @@ -2221,7 +2216,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, migration_bitmap_sync(rs); rcu_read_unlock(); qemu_mutex_unlock_iothread(); - remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE; + remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; } /* We can do postcopy, and all the data is postcopiable */ -- cgit v1.2.3 From 68a098f38652ad7f4883de7beeba0c3dae9b2a0b Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 13:48:42 +0100 Subject: ram: Move last_req_rb to RAMState It was on MigrationState when it is only used inside ram.c for postcopy. Problem is that we need to access it without being able to pass it RAMState directly. 
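ram_save_queue_pages() is called from generic migration code, so its signature cannot grow a RAMState argument; instead the function reaches the file-scope ram_state itself and caches the last requested block in last_req_rb. The sketch below shows only that idea; the block table, the lookup helper and all names are invented for the example, and locking and reference counting are left out.

#include <stdio.h>
#include <string.h>

/* Invented stand-ins for RAMBlock and RAMState; only the fields the
 * sketch needs. */
typedef struct {
    const char *idstr;
} BlockSketch;

typedef struct {
    BlockSketch *last_req_rb;   /* block used by the last request */
} RAMStateSketch;

static BlockSketch blocks[] = { { "pc.ram" }, { "vga.vram" } };
static RAMStateSketch ram_state_sketch;

static BlockSketch *find_block(const char *name)
{
    for (size_t i = 0; i < sizeof(blocks) / sizeof(blocks[0]); i++) {
        if (strcmp(blocks[i].idstr, name) == 0) {
            return &blocks[i];
        }
    }
    return NULL;
}

/* Fixed external signature with no state parameter: the function grabs
 * the file-scope state itself.  A NULL name means "same block as the
 * previous request", which is what last_req_rb remembers. */
static int queue_page(const char *rbname)
{
    RAMStateSketch *rs = &ram_state_sketch;
    BlockSketch *block = rbname ? find_block(rbname) : rs->last_req_rb;

    if (!block) {
        fprintf(stderr, "no such block: %s\n", rbname ? rbname : "(none)");
        return -1;
    }
    rs->last_req_rb = block;
    printf("queued a page from %s\n", block->idstr);
    return 0;
}

int main(void)
{
    queue_page("pc.ram");   /* names the block explicitly */
    queue_page(NULL);       /* reuses pc.ram via last_req_rb */
    return 0;
}

Letting a NULL name mean "same block as before" saves repeating the block name for consecutive requests against the same RAMBlock, as the comments in the real code note.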
Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- include/migration/migration.h | 2 -- migration/migration.c | 1 - migration/ram.c | 7 +++++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 87d0cc51e9..0f7557383b 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -189,8 +189,6 @@ struct MigrationState /* Queue of outstanding page requests from the destination */ QemuMutex src_page_req_mutex; QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests; - /* The RAMBlock used in the last src_page_request */ - RAMBlock *last_req_rb; /* The semaphore is used to notify COLO thread that failover is finished */ QemuSemaphore colo_exit_sem; diff --git a/migration/migration.c b/migration/migration.c index 7f7bab9523..c1730a0cad 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1126,7 +1126,6 @@ MigrationState *migrate_init(const MigrationParams *params) s->postcopy_after_devices = false; s->postcopy_requests = 0; s->migration_thread_running = false; - s->last_req_rb = NULL; error_free(s->error); s->error = NULL; diff --git a/migration/ram.c b/migration/ram.c index 2f5cbd1727..cec51b4bdc 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -204,6 +204,8 @@ struct RAMState { QemuMutex bitmap_mutex; /* Ram Bitmap protected by RCU */ RAMBitmap *ram_bitmap; + /* The RAMBlock used in the last src_page_requests */ + RAMBlock *last_req_rb; }; typedef struct RAMState RAMState; @@ -1226,12 +1228,13 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname, ram_addr_t start, ram_addr_t len) { RAMBlock *ramblock; + RAMState *rs = &ram_state; ms->postcopy_requests++; rcu_read_lock(); if (!rbname) { /* Reuse last RAMBlock */ - ramblock = ms->last_req_rb; + ramblock = rs->last_req_rb; if (!ramblock) { /* @@ -1249,7 +1252,7 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname, error_report("ram_save_queue_pages no block '%s'", rbname); goto err; } - ms->last_req_rb = ramblock; + rs->last_req_rb = ramblock; } trace_ram_save_queue_pages(ramblock->idstr, start, len); if (start+len > ramblock->used_length) { -- cgit v1.2.3 From ec481c6c576fbac6c4b5a7a5874e313882a49a09 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 20 Mar 2017 22:12:40 +0100 Subject: ram: Move src_page_req* to RAMState This are the last postcopy fields still at MigrationState. Once there Move MigrationSrcPageRequest to ram.c and remove MigrationState parameters where appropiate. 
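What actually moves in this patch is a small producer/consumer FIFO: the return path queues (block, offset, length) requests and the migration thread drains them one target page at a time, with a mutex around every access. The self-contained sketch below keeps only that shape; it uses pthread_mutex_t and a hand-rolled list where the real code uses QemuMutex and QSIMPLEQ, drops the RAMBlock pointer and memory-region reference counting, and elides error handling. The one-page-at-a-time draining, with a multi-page request kept at the head of the queue and its offset advanced, is how the real unqueue_page() in this patch behaves.

#include <inttypes.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_PAGE_SIZE 4096

/* One outstanding page request, in the spirit of RAMSrcPageRequest:
 * just an offset and a length in this sketch. */
typedef struct ReqSketch {
    uint64_t offset;
    uint64_t len;
    struct ReqSketch *next;
} ReqSketch;

static struct {
    pthread_mutex_t lock;
    ReqSketch *head, *tail;
} req_queue = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Producer side: the destination asked for this range. */
static void queue_request(uint64_t offset, uint64_t len)
{
    ReqSketch *r = calloc(1, sizeof(*r));
    r->offset = offset;
    r->len = len;

    pthread_mutex_lock(&req_queue.lock);
    if (req_queue.tail) {
        req_queue.tail->next = r;
    } else {
        req_queue.head = r;
    }
    req_queue.tail = r;
    pthread_mutex_unlock(&req_queue.lock);
}

/* Consumer side: hand back one page worth of the request at the head.
 * A request longer than a page stays queued with its offset advanced. */
static int unqueue_one_page(uint64_t *offset)
{
    int found = 0;

    pthread_mutex_lock(&req_queue.lock);
    if (req_queue.head) {
        ReqSketch *r = req_queue.head;
        *offset = r->offset;
        found = 1;
        if (r->len > SKETCH_PAGE_SIZE) {
            r->offset += SKETCH_PAGE_SIZE;
            r->len -= SKETCH_PAGE_SIZE;
        } else {
            req_queue.head = r->next;
            if (!req_queue.head) {
                req_queue.tail = NULL;
            }
            free(r);
        }
    }
    pthread_mutex_unlock(&req_queue.lock);
    return found;
}

int main(void)
{
    uint64_t offset;

    queue_request(0x8000, 3 * SKETCH_PAGE_SIZE);   /* a three-page request */
    while (unqueue_one_page(&offset)) {
        printf("send page at offset 0x%" PRIx64 "\n", offset);
    }
    return 0;
}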
Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- include/migration/migration.h | 17 +----------- migration/migration.c | 5 +--- migration/ram.c | 61 +++++++++++++++++++++++++++---------------- 3 files changed, 40 insertions(+), 43 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 0f7557383b..084d195125 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -128,18 +128,6 @@ struct MigrationIncomingState { MigrationIncomingState *migration_incoming_get_current(void); void migration_incoming_state_destroy(void); -/* - * An outstanding page request, on the source, having been received - * and queued - */ -struct MigrationSrcPageRequest { - RAMBlock *rb; - hwaddr offset; - hwaddr len; - - QSIMPLEQ_ENTRY(MigrationSrcPageRequest) next_req; -}; - struct MigrationState { size_t bytes_xfer; @@ -186,9 +174,6 @@ struct MigrationState /* Flag set once the migration thread called bdrv_inactivate_all */ bool block_inactive; - /* Queue of outstanding page requests from the destination */ - QemuMutex src_page_req_mutex; - QSIMPLEQ_HEAD(src_page_requests, MigrationSrcPageRequest) src_page_requests; /* The semaphore is used to notify COLO thread that failover is finished */ QemuSemaphore colo_exit_sem; @@ -371,7 +356,7 @@ void savevm_skip_configuration(void); int global_state_store(void); void global_state_store_running(void); -void migration_page_queue_free(MigrationState *ms); +void migration_page_queue_free(void); int ram_save_queue_pages(MigrationState *ms, const char *rbname, ram_addr_t start, ram_addr_t len); uint64_t ram_pagesize_summary(void); diff --git a/migration/migration.c b/migration/migration.c index c1730a0cad..5918b21a5c 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -109,7 +109,6 @@ MigrationState *migrate_get_current(void) }; if (!once) { - qemu_mutex_init(¤t_migration.src_page_req_mutex); current_migration.parameters.tls_creds = g_strdup(""); current_migration.parameters.tls_hostname = g_strdup(""); once = true; @@ -957,7 +956,7 @@ static void migrate_fd_cleanup(void *opaque) qemu_bh_delete(s->cleanup_bh); s->cleanup_bh = NULL; - migration_page_queue_free(s); + migration_page_queue_free(); if (s->to_dst_file) { trace_migrate_fd_cleanup(); @@ -1131,8 +1130,6 @@ MigrationState *migrate_init(const MigrationParams *params) migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); - QSIMPLEQ_INIT(&s->src_page_requests); - s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); return s; } diff --git a/migration/ram.c b/migration/ram.c index cec51b4bdc..be26d0c1af 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -151,6 +151,18 @@ struct RAMBitmap { }; typedef struct RAMBitmap RAMBitmap; +/* + * An outstanding page request, on the source, having been received + * and queued + */ +struct RAMSrcPageRequest { + RAMBlock *rb; + hwaddr offset; + hwaddr len; + + QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req; +}; + /* State of RAM for migration */ struct RAMState { /* Last block that we have visited searching for dirty pages */ @@ -206,6 +218,9 @@ struct RAMState { RAMBitmap *ram_bitmap; /* The RAMBlock used in the last src_page_requests */ RAMBlock *last_req_rb; + /* Queue of outstanding page requests from the destination */ + QemuMutex src_page_req_mutex; + QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests; }; typedef struct RAMState RAMState; @@ -1085,20 +1100,20 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss, * * Returns the 
block of the page (or NULL if none available) * - * @ms: current migration state + * @rs: current RAM state * @offset: used to return the offset within the RAMBlock * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ -static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, +static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, ram_addr_t *ram_addr_abs) { RAMBlock *block = NULL; - qemu_mutex_lock(&ms->src_page_req_mutex); - if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) { - struct MigrationSrcPageRequest *entry = - QSIMPLEQ_FIRST(&ms->src_page_requests); + qemu_mutex_lock(&rs->src_page_req_mutex); + if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) { + struct RAMSrcPageRequest *entry = + QSIMPLEQ_FIRST(&rs->src_page_requests); block = entry->rb; *offset = entry->offset; *ram_addr_abs = (entry->offset + entry->rb->offset) & @@ -1109,11 +1124,11 @@ static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, entry->offset += TARGET_PAGE_SIZE; } else { memory_region_unref(block->mr); - QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); + QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); g_free(entry); } } - qemu_mutex_unlock(&ms->src_page_req_mutex); + qemu_mutex_unlock(&rs->src_page_req_mutex); return block; } @@ -1126,13 +1141,11 @@ static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset, * Returns if a queued page is found * * @rs: current RAM state - * @ms: current migration state * @pss: data about the state of the current dirty page scan * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ -static bool get_queued_page(RAMState *rs, MigrationState *ms, - PageSearchStatus *pss, +static bool get_queued_page(RAMState *rs, PageSearchStatus *pss, ram_addr_t *ram_addr_abs) { RAMBlock *block; @@ -1140,7 +1153,7 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms, bool dirty; do { - block = unqueue_page(ms, &offset, ram_addr_abs); + block = unqueue_page(rs, &offset, ram_addr_abs); /* * We're sending this page, and since it's postcopy nothing else * will dirty it, and we must make sure it doesn't get sent again @@ -1194,18 +1207,18 @@ static bool get_queued_page(RAMState *rs, MigrationState *ms, * It should be empty at the end anyway, but in error cases there may * be some left. in case that there is any page left, we drop it. * - * @ms: current migration state */ -void migration_page_queue_free(MigrationState *ms) +void migration_page_queue_free(void) { - struct MigrationSrcPageRequest *mspr, *next_mspr; + struct RAMSrcPageRequest *mspr, *next_mspr; + RAMState *rs = &ram_state; /* This queue generally should be empty - but in the case of a failed * migration might have some droppings in. 
*/ rcu_read_lock(); - QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) { + QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) { memory_region_unref(mspr->rb->mr); - QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req); + QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req); g_free(mspr); } rcu_read_unlock(); @@ -1262,16 +1275,16 @@ int ram_save_queue_pages(MigrationState *ms, const char *rbname, goto err; } - struct MigrationSrcPageRequest *new_entry = - g_malloc0(sizeof(struct MigrationSrcPageRequest)); + struct RAMSrcPageRequest *new_entry = + g_malloc0(sizeof(struct RAMSrcPageRequest)); new_entry->rb = ramblock; new_entry->offset = start; new_entry->len = len; memory_region_ref(ramblock->mr); - qemu_mutex_lock(&ms->src_page_req_mutex); - QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req); - qemu_mutex_unlock(&ms->src_page_req_mutex); + qemu_mutex_lock(&rs->src_page_req_mutex); + QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req); + qemu_mutex_unlock(&rs->src_page_req_mutex); rcu_read_unlock(); return 0; @@ -1410,7 +1423,7 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage) do { again = true; - found = get_queued_page(rs, ms, &pss, &dirty_ram_abs); + found = get_queued_page(rs, &pss, &dirty_ram_abs); if (!found) { /* priority queue empty, so just search for something dirty */ @@ -1970,6 +1983,8 @@ static int ram_state_init(RAMState *rs) memset(rs, 0, sizeof(*rs)); qemu_mutex_init(&rs->bitmap_mutex); + qemu_mutex_init(&rs->src_page_req_mutex); + QSIMPLEQ_INIT(&rs->src_page_requests); if (migrate_use_xbzrle()) { XBZRLE_cache_lock(); -- cgit v1.2.3 From 42d219d3b0211c00432c04658dc22da66785f062 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 18:01:38 +0100 Subject: ram: Create ram_dirty_sync_count() This is a ram field that was inside MigrationState. Move it to RAMState and make it the same that the other ram stats. 
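The dirty-rate cleanups that follow ("ram: Remove dirty_bytes_rate", "ram: Move dirty_pages_rate to RAMState") rest on one observation: the byte rate is always the page rate scaled by the page size, so only the page rate needs to be stored. The arithmetic, pulled out into a tiny stand-alone program with made-up sample numbers, a hard-coded 4 KiB page and bandwidth simplified to bytes per second:

#include <inttypes.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096

int main(void)
{
    /* Made-up sample numbers, not measurements. */
    uint64_t dirty_pages_in_period = 25000;   /* pages dirtied since the last sync */
    uint64_t period_ms = 500;                 /* time between bitmap syncs */
    uint64_t bandwidth_bytes_s = 120000000;   /* what the stream is achieving */

    /* Page rate, normalised to one second of dirtying. */
    uint64_t dirty_pages_rate = dirty_pages_in_period * 1000 / period_ms;

    /* The byte rate is redundant: it is just the page rate scaled by the
     * page size, which is why the separate field can be deleted. */
    uint64_t dirty_bytes_rate = dirty_pages_rate * SKETCH_PAGE_SIZE;

    /* Rough downtime heuristic: about one second's worth of newly
     * dirtied data divided by the achievable bandwidth. */
    double expected_downtime_s = (double)dirty_bytes_rate / bandwidth_bytes_s;

    printf("dirty_pages_rate  = %" PRIu64 " pages/s\n", dirty_pages_rate);
    printf("dirty_bytes_rate  = %" PRIu64 " bytes/s\n", dirty_bytes_rate);
    printf("expected downtime = %.3f s\n", expected_downtime_s);
    return 0;
}

A later patch in this series renames qemu_target_page_bits() to qemu_target_page_size(), so the scaling by page size can be done at the point of use instead of being cached as a byte rate.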
Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- include/migration/migration.h | 2 +- migration/migration.c | 3 +-- migration/ram.c | 6 +++++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 084d195125..2f9c9736f9 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -159,7 +159,6 @@ struct MigrationState bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; int64_t xbzrle_cache_size; int64_t setup_time; - int64_t dirty_sync_count; /* Count of requests incoming from destination */ int64_t postcopy_requests; @@ -255,6 +254,7 @@ void migrate_decompress_threads_join(void); uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_transferred(void); uint64_t ram_bytes_total(void); +uint64_t ram_dirty_sync_count(void); void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); diff --git a/migration/migration.c b/migration/migration.c index 5918b21a5c..94647a81e3 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -656,7 +656,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->normal_bytes = norm_mig_pages_transferred() * (1ul << qemu_target_page_bits()); info->ram->mbps = s->mbps; - info->ram->dirty_sync_count = s->dirty_sync_count; + info->ram->dirty_sync_count = ram_dirty_sync_count(); info->ram->postcopy_requests = s->postcopy_requests; if (s->state != MIGRATION_STATUS_COMPLETED) { @@ -1120,7 +1120,6 @@ MigrationState *migrate_init(const MigrationParams *params) s->dirty_pages_rate = 0; s->dirty_bytes_rate = 0; s->setup_time = 0; - s->dirty_sync_count = 0; s->start_postcopy = false; s->postcopy_after_devices = false; s->postcopy_requests = 0; diff --git a/migration/ram.c b/migration/ram.c index be26d0c1af..1580617b38 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -271,6 +271,11 @@ uint64_t ram_bytes_remaining(void) return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE; } +uint64_t ram_dirty_sync_count(void) +{ + return ram_state.bitmap_sync_count; +} + /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -727,7 +732,6 @@ static void migration_bitmap_sync(RAMState *rs) rs->time_last_bitmap_sync = end_time; rs->num_dirty_pages_period = 0; } - s->dirty_sync_count = rs->bitmap_sync_count; if (migrate_use_events()) { qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL); } -- cgit v1.2.3 From abbf1d7f9bb43ba5fac7a9c27b67d9a78749e67a Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 18:10:16 +0100 Subject: ram: Remove dirty_bytes_rate It can be recalculated from dirty_pages_rate. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu -- Dave was the one that reviewed it O:-) --- include/migration/migration.h | 1 - migration/migration.c | 6 +++--- migration/ram.c | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 2f9c9736f9..e185d13c9c 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -155,7 +155,6 @@ struct MigrationState int64_t downtime; int64_t expected_downtime; int64_t dirty_pages_rate; - int64_t dirty_bytes_rate; bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; int64_t xbzrle_cache_size; int64_t setup_time; diff --git a/migration/migration.c b/migration/migration.c index 94647a81e3..dcf9b944cc 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1118,7 +1118,6 @@ MigrationState *migrate_init(const MigrationParams *params) s->downtime = 0; s->expected_downtime = 0; s->dirty_pages_rate = 0; - s->dirty_bytes_rate = 0; s->setup_time = 0; s->start_postcopy = false; s->postcopy_after_devices = false; @@ -2008,8 +2007,9 @@ static void *migration_thread(void *opaque) bandwidth, max_size); /* if we haven't sent anything, we don't want to recalculate 10000 is a small enough number for our purposes */ - if (s->dirty_bytes_rate && transferred_bytes > 10000) { - s->expected_downtime = s->dirty_bytes_rate / bandwidth; + if (s->dirty_pages_rate && transferred_bytes > 10000) { + s->expected_downtime = s->dirty_pages_rate * + (1ul << qemu_target_page_bits()) / bandwidth; } qemu_file_reset_rate_limit(s->to_dst_file); diff --git a/migration/ram.c b/migration/ram.c index 1580617b38..ef4f508b6c 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -728,7 +728,6 @@ static void migration_bitmap_sync(RAMState *rs) } s->dirty_pages_rate = rs->num_dirty_pages_period * 1000 / (end_time - rs->time_last_bitmap_sync); - s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE; rs->time_last_bitmap_sync = end_time; rs->num_dirty_pages_period = 0; } -- cgit v1.2.3 From 47ad861976d8490047c53d5a80b562776f3c951b Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 18:20:30 +0100 Subject: ram: Move dirty_pages_rate to RAMState Treat it like the rest of ram stats counters. Export its value the same way. As an added bonus, no more MigrationState used in migration_bitmap_sync(); Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu -- Again, dave was the one reviewing it --- include/migration/migration.h | 2 +- migration/migration.c | 7 +++---- migration/ram.c | 12 +++++++++--- 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index e185d13c9c..c4e3d4274f 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -154,7 +154,6 @@ struct MigrationState int64_t total_time; int64_t downtime; int64_t expected_downtime; - int64_t dirty_pages_rate; bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; int64_t xbzrle_cache_size; int64_t setup_time; @@ -254,6 +253,7 @@ uint64_t ram_bytes_remaining(void); uint64_t ram_bytes_transferred(void); uint64_t ram_bytes_total(void); uint64_t ram_dirty_sync_count(void); +uint64_t ram_dirty_pages_rate(void); void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); diff --git a/migration/migration.c b/migration/migration.c index dcf9b944cc..e430aec80e 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -661,7 +661,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) if (s->state != MIGRATION_STATUS_COMPLETED) { info->ram->remaining = ram_bytes_remaining(); - info->ram->dirty_pages_rate = s->dirty_pages_rate; + info->ram->dirty_pages_rate = ram_dirty_pages_rate(); } } @@ -1117,7 +1117,6 @@ MigrationState *migrate_init(const MigrationParams *params) s->mbps = 0.0; s->downtime = 0; s->expected_downtime = 0; - s->dirty_pages_rate = 0; s->setup_time = 0; s->start_postcopy = false; s->postcopy_after_devices = false; @@ -2007,8 +2006,8 @@ static void *migration_thread(void *opaque) bandwidth, max_size); /* if we haven't sent anything, we don't want to recalculate 10000 is a small enough number for our purposes */ - if (s->dirty_pages_rate && transferred_bytes > 10000) { - s->expected_downtime = s->dirty_pages_rate * + if (ram_dirty_pages_rate() && transferred_bytes > 10000) { + s->expected_downtime = ram_dirty_pages_rate() * (1ul << qemu_target_page_bits()) / bandwidth; } diff --git a/migration/ram.c b/migration/ram.c index ef4f508b6c..c5240eb39d 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -212,6 +212,8 @@ struct RAMState { uint64_t migration_dirty_pages; /* total number of bytes transferred */ uint64_t bytes_transferred; + /* number of dirtied pages in the last second */ + uint64_t dirty_pages_rate; /* protects modification of the bitmap */ QemuMutex bitmap_mutex; /* Ram Bitmap protected by RCU */ @@ -276,6 +278,11 @@ uint64_t ram_dirty_sync_count(void) return ram_state.bitmap_sync_count; } +uint64_t ram_dirty_pages_rate(void) +{ + return ram_state.dirty_pages_rate; +} + /* used by the search for pages to send */ struct PageSearchStatus { /* Current block being searched */ @@ -666,7 +673,6 @@ uint64_t ram_pagesize_summary(void) static void migration_bitmap_sync(RAMState *rs) { RAMBlock *block; - MigrationState *s = migrate_get_current(); int64_t end_time; uint64_t bytes_xfer_now; @@ -705,7 +711,7 @@ static void migration_bitmap_sync(RAMState *rs) throttling */ bytes_xfer_now = ram_bytes_transferred(); - if (s->dirty_pages_rate && + if (rs->dirty_pages_rate && (rs->num_dirty_pages_period * TARGET_PAGE_SIZE > (bytes_xfer_now - rs->bytes_xfer_prev) / 2) && (rs->dirty_rate_high_cnt++ >= 2)) { @@ -726,7 +732,7 @@ static void migration_bitmap_sync(RAMState *rs) rs->iterations_prev = rs->iterations; rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss; } - s->dirty_pages_rate = 
rs->num_dirty_pages_period * 1000 + rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000 / (end_time - rs->time_last_bitmap_sync); rs->time_last_bitmap_sync = end_time; rs->num_dirty_pages_period = 0; -- cgit v1.2.3 From 96506894a316bdb86ce9b78e906bc8ffb9697d5e Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 14 Mar 2017 18:41:03 +0100 Subject: ram: Move postcopy_requests into RAMState Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- include/migration/migration.h | 6 ++---- migration/migration.c | 5 ++--- migration/ram.c | 13 +++++++++---- 3 files changed, 13 insertions(+), 11 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index c4e3d4274f..357eaa0786 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -157,8 +157,6 @@ struct MigrationState bool enabled_capabilities[MIGRATION_CAPABILITY__MAX]; int64_t xbzrle_cache_size; int64_t setup_time; - /* Count of requests incoming from destination */ - int64_t postcopy_requests; /* Flag set once the migration has been asked to enter postcopy */ bool start_postcopy; @@ -254,6 +252,7 @@ uint64_t ram_bytes_transferred(void); uint64_t ram_bytes_total(void); uint64_t ram_dirty_sync_count(void); uint64_t ram_dirty_pages_rate(void); +uint64_t ram_postcopy_requests(void); void free_xbzrle_decoded_buf(void); void acct_update_position(QEMUFile *f, size_t size, bool zero); @@ -356,8 +355,7 @@ int global_state_store(void); void global_state_store_running(void); void migration_page_queue_free(void); -int ram_save_queue_pages(MigrationState *ms, const char *rbname, - ram_addr_t start, ram_addr_t len); +int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len); uint64_t ram_pagesize_summary(void); PostcopyState postcopy_state_get(void); diff --git a/migration/migration.c b/migration/migration.c index e430aec80e..2990c4069c 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -657,7 +657,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) (1ul << qemu_target_page_bits()); info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_dirty_sync_count(); - info->ram->postcopy_requests = s->postcopy_requests; + info->ram->postcopy_requests = ram_postcopy_requests(); if (s->state != MIGRATION_STATUS_COMPLETED) { info->ram->remaining = ram_bytes_remaining(); @@ -1120,7 +1120,6 @@ MigrationState *migrate_init(const MigrationParams *params) s->setup_time = 0; s->start_postcopy = false; s->postcopy_after_devices = false; - s->postcopy_requests = 0; s->migration_thread_running = false; error_free(s->error); s->error = NULL; @@ -1480,7 +1479,7 @@ static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, return; } - if (ram_save_queue_pages(ms, rbname, start, len)) { + if (ram_save_queue_pages(rbname, start, len)) { mark_source_rp_bad(ms); } } diff --git a/migration/ram.c b/migration/ram.c index c5240eb39d..b9f415c09d 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -214,6 +214,8 @@ struct RAMState { uint64_t bytes_transferred; /* number of dirtied pages in the last second */ uint64_t dirty_pages_rate; + /* Count of requests incoming from destination */ + uint64_t postcopy_requests; /* protects modification of the bitmap */ QemuMutex bitmap_mutex; /* Ram Bitmap protected by RCU */ @@ -283,6 +285,11 @@ uint64_t ram_dirty_pages_rate(void) return ram_state.dirty_pages_rate; } +uint64_t ram_postcopy_requests(void) +{ + return ram_state.postcopy_requests; +} + /* used by the search for pages to send */ struct 
PageSearchStatus { /* Current block being searched */ @@ -1240,19 +1247,17 @@ void migration_page_queue_free(void) * * Returns zero on success or negative on error * - * @ms: current migration state * @rbname: Name of the RAMBLock of the request. NULL means the * same that last one. * @start: starting address from the start of the RAMBlock * @len: length (in bytes) to send */ -int ram_save_queue_pages(MigrationState *ms, const char *rbname, - ram_addr_t start, ram_addr_t len) +int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len) { RAMBlock *ramblock; RAMState *rs = &ram_state; - ms->postcopy_requests++; + rs->postcopy_requests++; rcu_read_lock(); if (!rbname) { /* Reuse last RAMBlock */ -- cgit v1.2.3 From 204b88b869f9bf2b0e565650d51d664244d33a3d Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 15 Mar 2017 09:16:57 +0100 Subject: ram: Add QEMUFile to RAMState Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- migration/ram.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index b9f415c09d..62b44cf5a8 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -165,6 +165,8 @@ struct RAMSrcPageRequest { /* State of RAM for migration */ struct RAMState { + /* QEMUFile used for this migration */ + QEMUFile *f; /* Last block that we have visited searching for dirty pages */ RAMBlock *last_seen_block; /* Last block from where we have sent data */ @@ -525,14 +527,13 @@ static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr) * -1 means that xbzrle would be longer than normal * * @rs: current RAM state - * @f: QEMUFile where to send the data * @current_data: pointer to the address of the page contents * @current_addr: addr of the page * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage */ -static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, +static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, ram_addr_t current_addr, RAMBlock *block, ram_addr_t offset, bool last_stage) { @@ -583,10 +584,11 @@ static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data, } /* Send XBZRLE based compressed page */ - bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE); - qemu_put_byte(f, ENCODING_FLAG_XBZRLE); - qemu_put_be16(f, encoded_len); - qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len); + bytes_xbzrle = save_page_header(rs->f, block, + offset | RAM_SAVE_FLAG_XBZRLE); + qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE); + qemu_put_be16(rs->f, encoded_len); + qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len); bytes_xbzrle += encoded_len + 1 + 2; rs->xbzrle_pages++; rs->xbzrle_bytes += bytes_xbzrle; @@ -850,7 +852,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, ram_release_pages(ms, block->idstr, pss->offset, pages); } else if (!rs->ram_bulk_stage && !migration_in_postcopy(ms) && migrate_use_xbzrle()) { - pages = save_xbzrle_page(rs, f, &p, current_addr, block, + pages = save_xbzrle_page(rs, &p, current_addr, block, offset, last_stage); if (!last_stage) { /* Can't send this cached data async, since the cache page @@ -2090,6 +2092,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque) return -1; } } + rs->f = f; rcu_read_lock(); -- cgit v1.2.3 From ce25d33781c6c1d679ff6fd06b43188f4adb9c41 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 15 Mar 2017 11:00:51 +0100 
Subject: ram: Move QEMUFile into RAMState We receive the file from save_live operations and we don't use it until 3 or 4 levels of calls down. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- migration/ram.c | 84 +++++++++++++++++++++++++-------------------------------- 1 file changed, 37 insertions(+), 47 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 62b44cf5a8..14d54fd052 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -757,21 +757,20 @@ static void migration_bitmap_sync(RAMState *rs) * Returns the number of pages written. * * @rs: current RAM state - * @f: QEMUFile where to send the data * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @p: pointer to the page */ -static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block, - ram_addr_t offset, uint8_t *p) +static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, + uint8_t *p) { int pages = -1; if (is_zero_range(p, TARGET_PAGE_SIZE)) { rs->zero_pages++; rs->bytes_transferred += - save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS); - qemu_put_byte(f, 0); + save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS); + qemu_put_byte(rs->f, 0); rs->bytes_transferred += 1; pages = 1; } @@ -799,12 +798,11 @@ static void ram_release_pages(MigrationState *ms, const char *rbname, * * @rs: current RAM state * @ms: current migration state - * @f: QEMUFile where to send the data * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage */ -static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, +static int ram_save_page(RAMState *rs, MigrationState *ms, PageSearchStatus *pss, bool last_stage) { int pages = -1; @@ -820,7 +818,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, /* In doubt sent page as normal */ bytes_xmit = 0; - ret = ram_control_save_page(f, block->offset, + ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { rs->bytes_transferred += bytes_xmit; @@ -843,7 +841,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, } } } else { - pages = save_zero_page(rs, f, block, offset, p); + pages = save_zero_page(rs, block, offset, p); if (pages > 0) { /* Must let xbzrle know, otherwise a previous (now 0'd) cached * page would be stale @@ -865,14 +863,14 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f, /* XBZRLE overflow or normal page */ if (pages == -1) { - rs->bytes_transferred += save_page_header(f, block, + rs->bytes_transferred += save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_PAGE); if (send_async) { - qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE, + qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, migrate_release_ram() & migration_in_postcopy(ms)); } else { - qemu_put_buffer(f, p, TARGET_PAGE_SIZE); + qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE); } rs->bytes_transferred += TARGET_PAGE_SIZE; pages = 1; @@ -907,7 +905,7 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block, return bytes_sent; } -static void flush_compressed_data(RAMState *rs, QEMUFile *f) +static void flush_compressed_data(RAMState *rs) { int idx, len, thread_count; @@ -927,7 +925,7 @@ static void flush_compressed_data(RAMState *rs, QEMUFile *f) for (idx = 0; idx < thread_count; idx++) { qemu_mutex_lock(&comp_param[idx].mutex); if (!comp_param[idx].quit) { - len = 
qemu_put_qemu_file(f, comp_param[idx].file); + len = qemu_put_qemu_file(rs->f, comp_param[idx].file); rs->bytes_transferred += len; } qemu_mutex_unlock(&comp_param[idx].mutex); @@ -941,8 +939,8 @@ static inline void set_compress_params(CompressParam *param, RAMBlock *block, param->offset = offset; } -static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, - RAMBlock *block, ram_addr_t offset) +static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block, + ram_addr_t offset) { int idx, thread_count, bytes_xmit = -1, pages = -1; @@ -952,7 +950,7 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, for (idx = 0; idx < thread_count; idx++) { if (comp_param[idx].done) { comp_param[idx].done = false; - bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file); + bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file); qemu_mutex_lock(&comp_param[idx].mutex); set_compress_params(&comp_param[idx], block, offset); qemu_cond_signal(&comp_param[idx].cond); @@ -981,13 +979,11 @@ static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f, * * @rs: current RAM state * @ms: current migration state - * @f: QEMUFile where to send the data * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage */ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, - QEMUFile *f, PageSearchStatus *pss, bool last_stage) { int pages = -1; @@ -999,7 +995,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, p = block->host + offset; - ret = ram_control_save_page(f, block->offset, + ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE, &bytes_xmit); if (bytes_xmit) { rs->bytes_transferred += bytes_xmit; @@ -1021,20 +1017,20 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, * is used to avoid resending the block name. 
*/ if (block != rs->last_sent_block) { - flush_compressed_data(rs, f); - pages = save_zero_page(rs, f, block, offset, p); + flush_compressed_data(rs); + pages = save_zero_page(rs, block, offset, p); if (pages == -1) { /* Make sure the first page is sent out before other pages */ - bytes_xmit = save_page_header(f, block, offset | + bytes_xmit = save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); - blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE, + blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE, migrate_compress_level()); if (blen > 0) { rs->bytes_transferred += bytes_xmit + blen; rs->norm_pages++; pages = 1; } else { - qemu_file_set_error(f, blen); + qemu_file_set_error(rs->f, blen); error_report("compressed data failed!"); } } @@ -1043,9 +1039,9 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, } } else { offset |= RAM_SAVE_FLAG_CONTINUE; - pages = save_zero_page(rs, f, block, offset, p); + pages = save_zero_page(rs, block, offset, p); if (pages == -1) { - pages = compress_page_with_multi_thread(rs, f, block, offset); + pages = compress_page_with_multi_thread(rs, block, offset); } else { ram_release_pages(ms, block->idstr, pss->offset, pages); } @@ -1062,13 +1058,12 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, * Returns if a page is found * * @rs: current RAM state - * @f: QEMUFile where to send the data * @pss: data about the state of the current dirty page scan * @again: set to false if the search has scanned the whole of RAM * @ram_addr_abs: pointer into which to store the address of the dirty page * within the global ram_addr space */ -static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss, +static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again, ram_addr_t *ram_addr_abs) { pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, @@ -1096,7 +1091,7 @@ static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss, /* If xbzrle is on, stop using the data compression at this * point. In theory, xbzrle can do better than compression. 
*/ - flush_compressed_data(rs, f); + flush_compressed_data(rs); compression_switch = false; } } @@ -1317,12 +1312,11 @@ err: * * @rs: current RAM state * @ms: current migration state - * @f: QEMUFile where to send the data * @pss: data about the page we want to send * @last_stage: if we are at the completion stage * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space */ -static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, +static int ram_save_target_page(RAMState *rs, MigrationState *ms, PageSearchStatus *pss, bool last_stage, ram_addr_t dirty_ram_abs) @@ -1333,9 +1327,9 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) { unsigned long *unsentmap; if (compression_switch && migrate_use_compression()) { - res = ram_save_compressed_page(rs, ms, f, pss, last_stage); + res = ram_save_compressed_page(rs, ms, pss, last_stage); } else { - res = ram_save_page(rs, ms, f, pss, last_stage); + res = ram_save_page(rs, ms, pss, last_stage); } if (res < 0) { @@ -1370,12 +1364,11 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f, * * @rs: current RAM state * @ms: current migration state - * @f: QEMUFile where to send the data * @pss: data about the page we want to send * @last_stage: if we are at the completion stage * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space */ -static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, +static int ram_save_host_page(RAMState *rs, MigrationState *ms, PageSearchStatus *pss, bool last_stage, ram_addr_t dirty_ram_abs) @@ -1384,8 +1377,7 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, size_t pagesize = qemu_ram_pagesize(pss->block); do { - tmppages = ram_save_target_page(rs, ms, f, pss, last_stage, - dirty_ram_abs); + tmppages = ram_save_target_page(rs, ms, pss, last_stage, dirty_ram_abs); if (tmppages < 0) { return tmppages; } @@ -1408,14 +1400,13 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f, * Returns the number of pages written where zero means no dirty pages * * @rs: current RAM state - * @f: QEMUFile where to send the data * @last_stage: if we are at the completion stage * * On systems where host-page-size > target-page-size it will send all the * pages in a host page that are dirty. 
*/ -static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage) +static int ram_find_and_save_block(RAMState *rs, bool last_stage) { PageSearchStatus pss; MigrationState *ms = migrate_get_current(); @@ -1443,12 +1434,11 @@ static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage) if (!found) { /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs); + found = find_dirty_block(rs, &pss, &again, &dirty_ram_abs); } if (found) { - pages = ram_save_host_page(rs, ms, f, &pss, last_stage, - dirty_ram_abs); + pages = ram_save_host_page(rs, ms, &pss, last_stage, dirty_ram_abs); } } while (!pages && again); @@ -2148,7 +2138,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) while ((ret = qemu_file_rate_limit(f)) == 0) { int pages; - pages = ram_find_and_save_block(rs, f, false); + pages = ram_find_and_save_block(rs, false); /* no more pages to sent */ if (pages == 0) { done = 1; @@ -2170,7 +2160,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque) } i++; } - flush_compressed_data(rs, f); + flush_compressed_data(rs); rcu_read_unlock(); /* @@ -2218,14 +2208,14 @@ static int ram_save_complete(QEMUFile *f, void *opaque) while (true) { int pages; - pages = ram_find_and_save_block(rs, f, !migration_in_colo_state()); + pages = ram_find_and_save_block(rs, !migration_in_colo_state()); /* no more blocks to sent */ if (pages == 0) { break; } } - flush_compressed_data(rs, f); + flush_compressed_data(rs); ram_control_after_iterate(f, RAM_CONTROL_FINISH); rcu_read_unlock(); -- cgit v1.2.3 From 6d358d94943076846328cc292e4f45c31e1d5d96 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Thu, 16 Mar 2017 21:29:34 +0100 Subject: ram: Remove compression_switch and inline its logic We can calculate its value, so we don't create a variable for it. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert -- After Peter and Dave review, I dropped the variable and just inlined the condition. Fix typo --- migration/ram.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 14d54fd052..fa5a2901dc 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -336,7 +336,6 @@ static QemuCond comp_done_cond; /* The empty QEMUFileOps will be used by file in CompressParam */ static const QEMUFileOps empty_ops = { }; -static bool compression_switch; static DecompressParam *decomp_param; static QemuThread *decompress_threads; static QemuMutex decomp_done_lock; @@ -420,7 +419,6 @@ void migrate_compress_threads_create(void) if (!migrate_use_compression()) { return; } - compression_switch = true; thread_count = migrate_compress_threads(); compress_threads = g_new0(QemuThread, thread_count); comp_param = g_new0(CompressParam, thread_count); @@ -1092,7 +1090,6 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, * point. In theory, xbzrle can do better than compression. */ flush_compressed_data(rs); - compression_switch = false; } } /* Didn't find anything this time, but try again on the new block */ @@ -1326,7 +1323,14 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, /* Check the pages is dirty and if it is send it */ if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) { unsigned long *unsentmap; - if (compression_switch && migrate_use_compression()) { + /* + * If xbzrle is on, stop using the data compression after first + * round of migration even if compression is enabled. 
In theory, + * xbzrle can do better than compression. + */ + + if (migrate_use_compression() + && (rs->ram_bulk_stage || !migrate_use_xbzrle())) { res = ram_save_compressed_page(rs, ms, pss, last_stage); } else { res = ram_save_page(rs, ms, pss, last_stage); -- cgit v1.2.3 From 5727309d254b4942645888574a16d36942ec439c Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 20 Mar 2017 22:25:28 +0100 Subject: migration: Remove MigrationState from migration_in_postcopy We need to call for the migrate_get_current() in more that half of the uses, so call that inside. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- include/migration/migration.h | 2 +- migration/migration.c | 6 ++++-- migration/ram.c | 22 ++++++++++------------ migration/savevm.c | 4 ++-- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 357eaa0786..2cd20077ac 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -238,7 +238,7 @@ bool migration_is_idle(MigrationState *s); bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* True if outgoing migration has entered postcopy phase */ -bool migration_in_postcopy(MigrationState *); +bool migration_in_postcopy(void); /* ...and after the device transmission */ bool migration_in_postcopy_after_devices(MigrationState *); MigrationState *migrate_get_current(void); diff --git a/migration/migration.c b/migration/migration.c index 2990c4069c..7da16cf44e 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1062,14 +1062,16 @@ bool migration_has_failed(MigrationState *s) s->state == MIGRATION_STATUS_FAILED); } -bool migration_in_postcopy(MigrationState *s) +bool migration_in_postcopy(void) { + MigrationState *s = migrate_get_current(); + return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); } bool migration_in_postcopy_after_devices(MigrationState *s) { - return migration_in_postcopy(s) && s->postcopy_after_devices; + return migration_in_postcopy() && s->postcopy_after_devices; } bool migration_is_idle(MigrationState *s) diff --git a/migration/ram.c b/migration/ram.c index fa5a2901dc..289d7eb5c9 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -776,10 +776,9 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, return pages; } -static void ram_release_pages(MigrationState *ms, const char *rbname, - uint64_t offset, int pages) +static void ram_release_pages(const char *rbname, uint64_t offset, int pages) { - if (!migrate_release_ram() || !migration_in_postcopy(ms)) { + if (!migrate_release_ram() || !migration_in_postcopy()) { return; } @@ -845,9 +844,9 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, * page would be stale */ xbzrle_cache_zero_page(rs, current_addr); - ram_release_pages(ms, block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, pss->offset, pages); } else if (!rs->ram_bulk_stage && - !migration_in_postcopy(ms) && migrate_use_xbzrle()) { + !migration_in_postcopy() && migrate_use_xbzrle()) { pages = save_xbzrle_page(rs, &p, current_addr, block, offset, last_stage); if (!last_stage) { @@ -866,7 +865,7 @@ static int ram_save_page(RAMState *rs, MigrationState *ms, if (send_async) { qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, migrate_release_ram() & - migration_in_postcopy(ms)); + migration_in_postcopy()); } else { qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE); } @@ -896,8 +895,7 @@ static int 
do_compress_ram_page(QEMUFile *f, RAMBlock *block, error_report("compressed data failed!"); } else { bytes_sent += blen; - ram_release_pages(migrate_get_current(), block->idstr, - offset & TARGET_PAGE_MASK, 1); + ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1); } return bytes_sent; @@ -1033,7 +1031,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, } } if (pages > 0) { - ram_release_pages(ms, block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, pss->offset, pages); } } else { offset |= RAM_SAVE_FLAG_CONTINUE; @@ -1041,7 +1039,7 @@ static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, if (pages == -1) { pages = compress_page_with_multi_thread(rs, block, offset); } else { - ram_release_pages(ms, block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, pss->offset, pages); } } } @@ -2200,7 +2198,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque) rcu_read_lock(); - if (!migration_in_postcopy(migrate_get_current())) { + if (!migration_in_postcopy()) { migration_bitmap_sync(rs); } @@ -2238,7 +2236,7 @@ static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size, remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE; - if (!migration_in_postcopy(migrate_get_current()) && + if (!migration_in_postcopy() && remaining_size < max_size) { qemu_mutex_lock_iothread(); rcu_read_lock(); diff --git a/migration/savevm.c b/migration/savevm.c index 3b19a4a274..853a81a080 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -1062,7 +1062,7 @@ int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy) static bool should_send_vmdesc(void) { MachineState *machine = MACHINE(qdev_get_machine()); - bool in_postcopy = migration_in_postcopy(migrate_get_current()); + bool in_postcopy = migration_in_postcopy(); return !machine->suppress_vmdesc && !in_postcopy; } @@ -1111,7 +1111,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) int vmdesc_len; SaveStateEntry *se; int ret; - bool in_postcopy = migration_in_postcopy(migrate_get_current()); + bool in_postcopy = migration_in_postcopy(); trace_savevm_state_complete_precopy(); -- cgit v1.2.3 From a0a8aa147aaa81176f550fbd556594434a345c08 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Mon, 20 Mar 2017 22:29:07 +0100 Subject: ram: We don't need MigrationState parameter anymore Remove it from callers and callees. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 27 ++++++++++----------------- 1 file changed, 10 insertions(+), 17 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 289d7eb5c9..fe01e7ba26 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -794,13 +794,11 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages) * if xbzrle noticed the page was the same. * * @rs: current RAM state - * @ms: current migration state * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage */ -static int ram_save_page(RAMState *rs, MigrationState *ms, - PageSearchStatus *pss, bool last_stage) +static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) { int pages = -1; uint64_t bytes_xmit; @@ -974,13 +972,12 @@ static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block, * Returns the number of pages written. 
* * @rs: current RAM state - * @ms: current migration state * @block: block that contains the page we want to send * @offset: offset inside the block for the page * @last_stage: if we are at the completion stage */ -static int ram_save_compressed_page(RAMState *rs, MigrationState *ms, - PageSearchStatus *pss, bool last_stage) +static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, + bool last_stage) { int pages = -1; uint64_t bytes_xmit = 0; @@ -1311,10 +1308,8 @@ err: * @last_stage: if we are at the completion stage * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space */ -static int ram_save_target_page(RAMState *rs, MigrationState *ms, - PageSearchStatus *pss, - bool last_stage, - ram_addr_t dirty_ram_abs) +static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, + bool last_stage, ram_addr_t dirty_ram_abs) { int res = 0; @@ -1329,9 +1324,9 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, if (migrate_use_compression() && (rs->ram_bulk_stage || !migrate_use_xbzrle())) { - res = ram_save_compressed_page(rs, ms, pss, last_stage); + res = ram_save_compressed_page(rs, pss, last_stage); } else { - res = ram_save_page(rs, ms, pss, last_stage); + res = ram_save_page(rs, pss, last_stage); } if (res < 0) { @@ -1370,8 +1365,7 @@ static int ram_save_target_page(RAMState *rs, MigrationState *ms, * @last_stage: if we are at the completion stage * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space */ -static int ram_save_host_page(RAMState *rs, MigrationState *ms, - PageSearchStatus *pss, +static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, bool last_stage, ram_addr_t dirty_ram_abs) { @@ -1379,7 +1373,7 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, size_t pagesize = qemu_ram_pagesize(pss->block); do { - tmppages = ram_save_target_page(rs, ms, pss, last_stage, dirty_ram_abs); + tmppages = ram_save_target_page(rs, pss, last_stage, dirty_ram_abs); if (tmppages < 0) { return tmppages; } @@ -1411,7 +1405,6 @@ static int ram_save_host_page(RAMState *rs, MigrationState *ms, static int ram_find_and_save_block(RAMState *rs, bool last_stage) { PageSearchStatus pss; - MigrationState *ms = migrate_get_current(); int pages = 0; bool again, found; ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in @@ -1440,7 +1433,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) } if (found) { - pages = ram_save_host_page(rs, ms, &pss, last_stage, dirty_ram_abs); + pages = ram_save_host_page(rs, &pss, last_stage, dirty_ram_abs); } } while (!pages && again); -- cgit v1.2.3 From 20afaed98b7b1796f2e67213e4118bc64b50b03a Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 09:09:14 +0100 Subject: ram: Rename qemu_target_page_bits() to qemu_target_page_size() It was used as a size in all cases except one. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- exec.c | 4 ++-- include/sysemu/sysemu.h | 2 +- migration/migration.c | 4 ++-- migration/postcopy-ram.c | 8 ++++---- migration/savevm.c | 8 ++++---- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/exec.c b/exec.c index c97ef4a8da..7a640c76b6 100644 --- a/exec.c +++ b/exec.c @@ -3307,9 +3307,9 @@ int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, * Allows code that needs to deal with migration bitmaps etc to still be built * target independent. 
*/ -size_t qemu_target_page_bits(void) +size_t qemu_target_page_size(void) { - return TARGET_PAGE_BITS; + return TARGET_PAGE_SIZE; } #endif diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 576c7ce640..16175f7295 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -67,7 +67,7 @@ int qemu_reset_requested_get(void); void qemu_system_killed(int signal, pid_t pid); void qemu_system_reset(bool report); void qemu_system_guest_panicked(GuestPanicInformation *info); -size_t qemu_target_page_bits(void); +size_t qemu_target_page_size(void); void qemu_add_exit_notifier(Notifier *notify); void qemu_remove_exit_notifier(Notifier *notify); diff --git a/migration/migration.c b/migration/migration.c index 7da16cf44e..92a3754923 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -654,7 +654,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->skipped = 0; info->ram->normal = norm_mig_pages_transferred(); info->ram->normal_bytes = norm_mig_pages_transferred() * - (1ul << qemu_target_page_bits()); + qemu_target_page_size(); info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_dirty_sync_count(); info->ram->postcopy_requests = ram_postcopy_requests(); @@ -2009,7 +2009,7 @@ static void *migration_thread(void *opaque) 10000 is a small enough number for our purposes */ if (ram_dirty_pages_rate() && transferred_bytes > 10000) { s->expected_downtime = ram_dirty_pages_rate() * - (1ul << qemu_target_page_bits()) / bandwidth; + qemu_target_page_size() / bandwidth; } qemu_file_reset_rate_limit(s->to_dst_file); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index dc80dbb67f..8756364051 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -123,7 +123,7 @@ bool postcopy_ram_supported_by_host(void) struct uffdio_range range_struct; uint64_t feature_mask; - if ((1ul << qemu_target_page_bits()) > pagesize) { + if (qemu_target_page_size() > pagesize) { error_report("Target page size bigger than host page size"); goto out; } @@ -745,10 +745,10 @@ PostcopyDiscardState *postcopy_discard_send_init(MigrationState *ms, void postcopy_discard_send_range(MigrationState *ms, PostcopyDiscardState *pds, unsigned long start, unsigned long length) { - size_t tp_bits = qemu_target_page_bits(); + size_t tp_size = qemu_target_page_size(); /* Convert to byte offsets within the RAM block */ - pds->start_list[pds->cur_entry] = (start - pds->offset) << tp_bits; - pds->length_list[pds->cur_entry] = length << tp_bits; + pds->start_list[pds->cur_entry] = (start - pds->offset) * tp_size; + pds->length_list[pds->cur_entry] = length * tp_size; trace_postcopy_discard_send_range(pds->ramblock_name, start, length); pds->cur_entry++; pds->nsentwords++; diff --git a/migration/savevm.c b/migration/savevm.c index 853a81a080..bbf055db75 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -871,7 +871,7 @@ void qemu_savevm_send_postcopy_advise(QEMUFile *f) { uint64_t tmp[2]; tmp[0] = cpu_to_be64(ram_pagesize_summary()); - tmp[1] = cpu_to_be64(1ul << qemu_target_page_bits()); + tmp[1] = cpu_to_be64(qemu_target_page_size()); trace_qemu_savevm_send_postcopy_advise(); qemu_savevm_command_send(f, MIG_CMD_POSTCOPY_ADVISE, 16, (uint8_t *)tmp); @@ -1390,13 +1390,13 @@ static int loadvm_postcopy_handle_advise(MigrationIncomingState *mis) } remote_tps = qemu_get_be64(mis->from_src_file); - if (remote_tps != (1ul << qemu_target_page_bits())) { + if (remote_tps != qemu_target_page_size()) { /* * Again, some differences could be dealt 
with, but for now keep it * simple. */ - error_report("Postcopy needs matching target page sizes (s=%d d=%d)", - (int)remote_tps, 1 << qemu_target_page_bits()); + error_report("Postcopy needs matching target page sizes (s=%d d=%zd)", + (int)remote_tps, qemu_target_page_size()); return -1; } -- cgit v1.2.3 From 030ce1f8612215fcbe9d353dfeaeb2937f8e3f94 Mon Sep 17 00:00:00 2001 From: Chao Fan Date: Tue, 21 Mar 2017 10:22:43 +0800 Subject: ram: Add page-size to output in 'info migrate' The number of dirty pages is output in 'pages' in the command 'info migrate', so add page-size to calculate the number of dirty pages in bytes. Signed-off-by: Chao Fan Signed-off-by: Li Zhijian Reviewed-by: Eric Blake Signed-off-by: Juan Quintela --- hmp.c | 3 +++ migration/migration.c | 1 + qapi-schema.json | 5 ++++- 3 files changed, 8 insertions(+), 1 deletion(-) diff --git a/hmp.c b/hmp.c index edb8970461..be75e7118c 100644 --- a/hmp.c +++ b/hmp.c @@ -215,6 +215,9 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) info->ram->normal_bytes >> 10); monitor_printf(mon, "dirty sync count: %" PRIu64 "\n", info->ram->dirty_sync_count); + monitor_printf(mon, "page size: %" PRIu64 " kbytes\n", + info->ram->page_size >> 10); + if (info->ram->dirty_pages_rate) { monitor_printf(mon, "dirty pages rate: %" PRIu64 " pages\n", info->ram->dirty_pages_rate); diff --git a/migration/migration.c b/migration/migration.c index 92a3754923..156daf976f 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -658,6 +658,7 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s) info->ram->mbps = s->mbps; info->ram->dirty_sync_count = ram_dirty_sync_count(); info->ram->postcopy_requests = ram_postcopy_requests(); + info->ram->page_size = qemu_target_page_size(); if (s->state != MIGRATION_STATUS_COMPLETED) { info->ram->remaining = ram_bytes_remaining(); diff --git a/qapi-schema.json b/qapi-schema.json index 250e4dc49b..01b087fa16 100644 --- a/qapi-schema.json +++ b/qapi-schema.json @@ -598,6 +598,9 @@ # @postcopy-requests: The number of page requests received from the destination # (since 2.7) # +# @page-size: The number of bytes per page for the various page-based +# statistics (since 2.10) +# # Since: 0.14.0 ## { 'struct': 'MigrationStats', @@ -605,7 +608,7 @@ 'duplicate': 'int', 'skipped': 'int', 'normal': 'int', 'normal-bytes': 'int', 'dirty-pages-rate' : 'int', 'mbps' : 'number', 'dirty-sync-count' : 'int', - 'postcopy-requests' : 'int' } } + 'postcopy-requests' : 'int', 'page-size' : 'int' } } ## # @XBZRLECacheStats: -- cgit v1.2.3 From 15440dd5a08a6b10538b9e62c6d95ac8a838d78a Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 09:35:04 +0100 Subject: ram: Pass RAMBlock to bitmap_sync We change the meaning of start to be the offset from the beggining of the block. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert --- include/exec/ram_addr.h | 2 ++ migration/ram.c | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 9aadc5c830..8a1372c66d 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -354,11 +354,13 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start, static inline uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest, + RAMBlock *rb, ram_addr_t start, ram_addr_t length, uint64_t *real_dirty_pages) { ram_addr_t addr; + start = rb->offset + start; unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS); uint64_t num_dirty = 0; diff --git a/migration/ram.c b/migration/ram.c index fe01e7ba26..3f3842a38b 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -646,13 +646,13 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr) return ret; } -static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start, - ram_addr_t length) +static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb, + ram_addr_t start, ram_addr_t length) { unsigned long *bitmap; bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; rs->migration_dirty_pages += - cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length, + cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length, &rs->num_dirty_pages_period); } @@ -699,7 +699,7 @@ static void migration_bitmap_sync(RAMState *rs) qemu_mutex_lock(&rs->bitmap_mutex); rcu_read_lock(); QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { - migration_bitmap_sync_range(rs, block->offset, block->used_length); + migration_bitmap_sync_range(rs, block, 0, block->used_length); } rcu_read_unlock(); qemu_mutex_unlock(&rs->bitmap_mutex); -- cgit v1.2.3 From aaa2064c2ae25ef833b3324aedeba9a6363bd5ec Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 11:35:24 +0100 Subject: ram: ram_discard_range() don't use the mis parameter Signed-off-by: Juan Quintela Reviewed-by: Peter Xu --- include/migration/migration.h | 3 +-- migration/postcopy-ram.c | 6 ++---- migration/ram.c | 9 +++------ migration/savevm.c | 3 +-- 4 files changed, 7 insertions(+), 14 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index 2cd20077ac..b84ec3de32 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -270,8 +270,7 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected); /* For outgoing discard bitmap */ int ram_postcopy_send_discard_bitmap(MigrationState *ms); /* For incoming postcopy discard */ -int ram_discard_range(MigrationIncomingState *mis, const char *block_name, - uint64_t start, size_t length); +int ram_discard_range(const char *block_name, uint64_t start, size_t length); int ram_postcopy_incoming_init(MigrationIncomingState *mis); void ram_postcopy_migrated_memory_release(MigrationState *ms); diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 8756364051..85fd8d72b3 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -213,8 +213,6 @@ out: static int init_range(const char *block_name, void *host_addr, ram_addr_t offset, ram_addr_t length, void *opaque) { - MigrationIncomingState *mis = opaque; - trace_postcopy_init_range(block_name, host_addr, offset, length); /* @@ -223,7 +221,7 @@ static int init_range(const char *block_name, void *host_addr, * - we're going to get the copy from the source anyway. 
* (Precopy will just overwrite this data, so doesn't need the discard) */ - if (ram_discard_range(mis, block_name, 0, length)) { + if (ram_discard_range(block_name, 0, length)) { return -1; } @@ -271,7 +269,7 @@ static int cleanup_range(const char *block_name, void *host_addr, */ int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages) { - if (qemu_ram_foreach_block(init_range, mis)) { + if (qemu_ram_foreach_block(init_range, NULL)) { return -1; } diff --git a/migration/ram.c b/migration/ram.c index 3f3842a38b..9ed91e51b3 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -782,7 +782,7 @@ static void ram_release_pages(const char *rbname, uint64_t offset, int pages) return; } - ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS); + ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS); } /** @@ -1608,7 +1608,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms) while (run_start < range) { unsigned long run_end = find_next_bit(bitmap, range, run_start + 1); - ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS, + ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS, (run_end - run_start) << TARGET_PAGE_BITS); run_start = find_next_zero_bit(bitmap, range, run_end + 1); } @@ -1948,15 +1948,12 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) * * Returns zero on success * - * @mis: current migration incoming state * @rbname: name of the RAMBlock of the request. NULL means the * same that last one. * @start: RAMBlock starting page * @length: RAMBlock size */ -int ram_discard_range(MigrationIncomingState *mis, - const char *rbname, - uint64_t start, size_t length) +int ram_discard_range(const char *rbname, uint64_t start, size_t length) { int ret = -1; diff --git a/migration/savevm.c b/migration/savevm.c index bbf055db75..7cf387fd04 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -1479,8 +1479,7 @@ static int loadvm_postcopy_ram_handle_discard(MigrationIncomingState *mis, block_length = qemu_get_be64(mis->from_src_file); len -= 16; - int ret = ram_discard_range(mis, ramid, start_addr, - block_length); + int ret = ram_discard_range(ramid, start_addr, block_length); if (ret) { return ret; } -- cgit v1.2.3 From 247956946651ae0280f7b1ea88bb6237dd01c231 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 11:45:01 +0100 Subject: ram: reorganize last_sent_block We were setting it far away from where we changed it. Now everything is done inside save_page_header. Once there, reorganize the code to pass RAMState. We also set the CONTINUE flag in a single place. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Peter Xu --- migration/ram.c | 36 +++++++++++++++--------------------- 1 file changed, 15 insertions(+), 21 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 9ed91e51b3..1ef142fac1 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -451,18 +451,22 @@ void migrate_compress_threads_create(void) * @offset: offset inside the block for the page * in the lower bits, it contains flags */ -static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset) +static size_t save_page_header(RAMState *rs, RAMBlock *block, ram_addr_t offset) { size_t size, len; - qemu_put_be64(f, offset); + if (block == rs->last_sent_block) { + offset |= RAM_SAVE_FLAG_CONTINUE; + } + qemu_put_be64(rs->f, offset); size = 8; if (!(offset & RAM_SAVE_FLAG_CONTINUE)) { len = strlen(block->idstr); - qemu_put_byte(f, len); - qemu_put_buffer(f, (uint8_t *)block->idstr, len); + qemu_put_byte(rs->f, len); + qemu_put_buffer(rs->f, (uint8_t *)block->idstr, len); size += 1 + len; + rs->last_sent_block = block; } return size; } @@ -582,7 +586,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, } /* Send XBZRLE based compressed page */ - bytes_xbzrle = save_page_header(rs->f, block, + bytes_xbzrle = save_page_header(rs, block, offset | RAM_SAVE_FLAG_XBZRLE); qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE); qemu_put_be16(rs->f, encoded_len); @@ -767,7 +771,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset, if (is_zero_range(p, TARGET_PAGE_SIZE)) { rs->zero_pages++; rs->bytes_transferred += - save_page_header(rs->f, block, offset | RAM_SAVE_FLAG_COMPRESS); + save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS); qemu_put_byte(rs->f, 0); rs->bytes_transferred += 1; pages = 1; @@ -824,9 +828,6 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) current_addr = block->offset + offset; - if (block == rs->last_sent_block) { - offset |= RAM_SAVE_FLAG_CONTINUE; - } if (ret != RAM_SAVE_CONTROL_NOT_SUPP) { if (ret != RAM_SAVE_CONTROL_DELAYED) { if (bytes_xmit > 0) { @@ -858,8 +859,8 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) /* XBZRLE overflow or normal page */ if (pages == -1) { - rs->bytes_transferred += save_page_header(rs->f, block, - offset | RAM_SAVE_FLAG_PAGE); + rs->bytes_transferred += save_page_header(rs, block, + offset | RAM_SAVE_FLAG_PAGE); if (send_async) { qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE, migrate_release_ram() & @@ -880,10 +881,11 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) static int do_compress_ram_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset) { + RAMState *rs = &ram_state; int bytes_sent, blen; uint8_t *p = block->host + (offset & TARGET_PAGE_MASK); - bytes_sent = save_page_header(f, block, offset | + bytes_sent = save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE, migrate_compress_level()); @@ -1014,7 +1016,7 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, pages = save_zero_page(rs, block, offset, p); if (pages == -1) { /* Make sure the first page is sent out before other pages */ - bytes_xmit = save_page_header(rs->f, block, offset | + bytes_xmit = save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE); blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE, migrate_compress_level()); @@ -1031,7 +1033,6 @@ static int ram_save_compressed_page(RAMState *rs, 
PageSearchStatus *pss, ram_release_pages(block->idstr, pss->offset, pages); } } else { - offset |= RAM_SAVE_FLAG_CONTINUE; pages = save_zero_page(rs, block, offset, p); if (pages == -1) { pages = compress_page_with_multi_thread(rs, block, offset); @@ -1336,13 +1337,6 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, if (unsentmap) { clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap); } - /* Only update last_sent_block if a block was actually sent; xbzrle - * might have decided the page was identical so didn't bother writing - * to the stream. - */ - if (res > 0) { - rs->last_sent_block = pss->block; - } } return res; -- cgit v1.2.3 From 06b106889a09277617fc8c542397a9f595ee605a Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 15:18:05 +0100 Subject: ram: Use page number instead of an address for the bitmap operations We use an unsigned long for the page number. Notice that our bitmaps already got that for the index, so we have that limit. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert -- rename page to page_abs everywhere. fix trace types for pages --- migration/ram.c | 76 ++++++++++++++++++++++---------------------------- migration/trace-events | 4 +-- 2 files changed, 36 insertions(+), 44 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 1ef142fac1..52ab14b62f 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -609,13 +609,12 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, * @rs: current RAM state * @rb: RAMBlock where to search for dirty pages * @start: starting address (typically so we can continue from previous page) - * @ram_addr_abs: pointer into which to store the address of the dirty page - * within the global ram_addr space + * @page_abs: pointer into where to store the dirty page */ static inline ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, ram_addr_t start, - ram_addr_t *ram_addr_abs) + unsigned long *page_abs) { unsigned long base = rb->offset >> TARGET_PAGE_BITS; unsigned long nr = base + (start >> TARGET_PAGE_BITS); @@ -632,17 +631,17 @@ ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, next = find_next_bit(bitmap, size, nr); } - *ram_addr_abs = next << TARGET_PAGE_BITS; + *page_abs = next; return (next - base) << TARGET_PAGE_BITS; } -static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr) +static inline bool migration_bitmap_clear_dirty(RAMState *rs, + unsigned long page_abs) { bool ret; - int nr = addr >> TARGET_PAGE_BITS; unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; - ret = test_and_clear_bit(nr, bitmap); + ret = test_and_clear_bit(page_abs, bitmap); if (ret) { rs->migration_dirty_pages--; @@ -1054,14 +1053,13 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, * @rs: current RAM state * @pss: data about the state of the current dirty page scan * @again: set to false if the search has scanned the whole of RAM - * @ram_addr_abs: pointer into which to store the address of the dirty page - * within the global ram_addr space + * @page_abs: pointer into where to store the dirty page */ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, - bool *again, ram_addr_t *ram_addr_abs) + bool *again, unsigned long *page_abs) { pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, - ram_addr_abs); + page_abs); if (pss->complete_round && pss->block == rs->last_seen_block && pss->offset >= rs->last_offset) { /* @@ -1108,11 +1106,10 @@ static bool 
find_dirty_block(RAMState *rs, PageSearchStatus *pss, * * @rs: current RAM state * @offset: used to return the offset within the RAMBlock - * @ram_addr_abs: pointer into which to store the address of the dirty page - * within the global ram_addr space + * @page_abs: pointer into where to store the dirty page */ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, - ram_addr_t *ram_addr_abs) + unsigned long *page_abs) { RAMBlock *block = NULL; @@ -1122,8 +1119,7 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, QSIMPLEQ_FIRST(&rs->src_page_requests); block = entry->rb; *offset = entry->offset; - *ram_addr_abs = (entry->offset + entry->rb->offset) & - TARGET_PAGE_MASK; + *page_abs = (entry->offset + entry->rb->offset) >> TARGET_PAGE_BITS; if (entry->len > TARGET_PAGE_SIZE) { entry->len -= TARGET_PAGE_SIZE; @@ -1148,18 +1144,17 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, * * @rs: current RAM state * @pss: data about the state of the current dirty page scan - * @ram_addr_abs: pointer into which to store the address of the dirty page - * within the global ram_addr space + * @page_abs: pointer into where to store the dirty page */ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss, - ram_addr_t *ram_addr_abs) + unsigned long *page_abs) { RAMBlock *block; ram_addr_t offset; bool dirty; do { - block = unqueue_page(rs, &offset, ram_addr_abs); + block = unqueue_page(rs, &offset, page_abs); /* * We're sending this page, and since it's postcopy nothing else * will dirty it, and we must make sure it doesn't get sent again @@ -1169,17 +1164,15 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss, if (block) { unsigned long *bitmap; bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; - dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap); + dirty = test_bit(*page_abs, bitmap); if (!dirty) { - trace_get_queued_page_not_dirty( - block->idstr, (uint64_t)offset, - (uint64_t)*ram_addr_abs, - test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, - atomic_rcu_read(&rs->ram_bitmap)->unsentmap)); + trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, + *page_abs, + test_bit(*page_abs, + atomic_rcu_read(&rs->ram_bitmap)->unsentmap)); } else { - trace_get_queued_page(block->idstr, - (uint64_t)offset, - (uint64_t)*ram_addr_abs); + trace_get_queued_page(block->idstr, (uint64_t)offset, + *page_abs); } } @@ -1307,15 +1300,15 @@ err: * @ms: current migration state * @pss: data about the page we want to send * @last_stage: if we are at the completion stage - * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space + * @page_abs: page number of the dirty page */ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, - bool last_stage, ram_addr_t dirty_ram_abs) + bool last_stage, unsigned long page_abs) { int res = 0; /* Check the pages is dirty and if it is send it */ - if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) { + if (migration_bitmap_clear_dirty(rs, page_abs)) { unsigned long *unsentmap; /* * If xbzrle is on, stop using the data compression after first @@ -1335,7 +1328,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, } unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; if (unsentmap) { - clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap); + clear_bit(page_abs, unsentmap); } } @@ -1357,24 +1350,24 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, * @ms: current migration state * @pss: data about the page we want to send * @last_stage: if 
we are at the completion stage - * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space + * @page_abs: Page number of the dirty page */ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, bool last_stage, - ram_addr_t dirty_ram_abs) + unsigned long page_abs) { int tmppages, pages = 0; size_t pagesize = qemu_ram_pagesize(pss->block); do { - tmppages = ram_save_target_page(rs, pss, last_stage, dirty_ram_abs); + tmppages = ram_save_target_page(rs, pss, last_stage, page_abs); if (tmppages < 0) { return tmppages; } pages += tmppages; pss->offset += TARGET_PAGE_SIZE; - dirty_ram_abs += TARGET_PAGE_SIZE; + page_abs++; } while (pss->offset & (pagesize - 1)); /* The offset we leave with is the last one we looked at */ @@ -1401,8 +1394,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) PageSearchStatus pss; int pages = 0; bool again, found; - ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in - ram_addr_t space */ + unsigned long page_abs; /* Page number of the dirty page */ /* No dirty page as there is zero RAM */ if (!ram_bytes_total()) { @@ -1419,15 +1411,15 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) do { again = true; - found = get_queued_page(rs, &pss, &dirty_ram_abs); + found = get_queued_page(rs, &pss, &page_abs); if (!found) { /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, &pss, &again, &dirty_ram_abs); + found = find_dirty_block(rs, &pss, &again, &page_abs); } if (found) { - pages = ram_save_host_page(rs, &pss, last_stage, dirty_ram_abs); + pages = ram_save_host_page(rs, &pss, last_stage, page_abs); } } while (!pages && again); diff --git a/migration/trace-events b/migration/trace-events index 7372ce2a51..b8f01a218c 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -63,8 +63,8 @@ put_qtailq_end(const char *name, const char *reason) "%s %s" qemu_file_fclose(void) "" # migration/ram.c -get_queued_page(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr) "%s/%" PRIx64 " ram_addr=%" PRIx64 -get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, uint64_t ram_addr, int sent) "%s/%" PRIx64 " ram_addr=%" PRIx64 " (sent=%d)" +get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_abs) "%s/%" PRIx64 " page_abs=%lx" +get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs, int sent) "%s/%" PRIx64 " page_abs=%lx (sent=%d)" migration_bitmap_sync_start(void) "" migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64 migration_throttle(void) "" -- cgit v1.2.3 From 269ace2951f14c230c36d7fcb5062594569a73e3 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 15:23:31 +0100 Subject: ram: Remember last_page instead of last_offset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Philippe Mathieu-Daudé -- Improve comment Fix typo --- migration/ram.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index 52ab14b62f..d501040bf1 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -171,8 +171,8 @@ struct RAMState { RAMBlock *last_seen_block; /* Last block from where we have sent data */ RAMBlock *last_sent_block; - /* Last offset we have sent data from */ - ram_addr_t last_offset; + /* Last dirty target page we have sent */ + ram_addr_t last_page; /* last ram version we have seen */ uint32_t last_version; /* We are in the first round */ @@ -1061,7 +1061,7 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, page_abs); if (pss->complete_round && pss->block == rs->last_seen_block && - pss->offset >= rs->last_offset) { + (pss->offset >> TARGET_PAGE_BITS) >= rs->last_page) { /* * We've been once around the RAM and haven't found anything. * Give up. @@ -1402,7 +1402,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) } pss.block = rs->last_seen_block; - pss.offset = rs->last_offset; + pss.offset = rs->last_page << TARGET_PAGE_BITS; pss.complete_round = false; if (!pss.block) { @@ -1424,7 +1424,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) } while (!pages && again); rs->last_seen_block = pss.block; - rs->last_offset = pss.offset; + rs->last_page = pss.offset >> TARGET_PAGE_BITS; return pages; } @@ -1499,7 +1499,7 @@ static void ram_state_reset(RAMState *rs) { rs->last_seen_block = NULL; rs->last_sent_block = NULL; - rs->last_offset = 0; + rs->last_page = 0; rs->last_version = ram_list.version; rs->ram_bulk_stage = true; } @@ -1844,7 +1844,7 @@ static int postcopy_chunk_hostpages(MigrationState *ms) /* Easiest way to make sure we don't resume in the middle of a host-page */ rs->last_seen_block = NULL; rs->last_sent_block = NULL; - rs->last_offset = 0; + rs->last_page = 0; QLIST_FOREACH_RCU(block, &ram_list.blocks, next) { unsigned long first = block->offset >> TARGET_PAGE_BITS; -- cgit v1.2.3 From a935e30fbb46fe20459149a3be95eda54114fb85 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 15:36:51 +0100 Subject: ram: Change offset field in PageSearchStatus to page We are moving everything to work on pages, not addresses. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert --- migration/ram.c | 51 ++++++++++++++++++++++++++------------------------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index d501040bf1..eec398f833 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -296,8 +296,8 @@ uint64_t ram_postcopy_requests(void) struct PageSearchStatus { /* Current block being searched */ RAMBlock *block; - /* Current offset to search from */ - ram_addr_t offset; + /* Current page to search from */ + unsigned long page; /* Set once we wrap around */ bool complete_round; }; @@ -608,16 +608,16 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, * * @rs: current RAM state * @rb: RAMBlock where to search for dirty pages - * @start: starting address (typically so we can continue from previous page) + * @start: page where we start the search * @page_abs: pointer into where to store the dirty page */ static inline -ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, - ram_addr_t start, - unsigned long *page_abs) +unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, + unsigned long start, + unsigned long *page_abs) { unsigned long base = rb->offset >> TARGET_PAGE_BITS; - unsigned long nr = base + (start >> TARGET_PAGE_BITS); + unsigned long nr = base + start; uint64_t rb_size = rb->used_length; unsigned long size = base + (rb_size >> TARGET_PAGE_BITS); unsigned long *bitmap; @@ -632,7 +632,7 @@ ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, } *page_abs = next; - return (next - base) << TARGET_PAGE_BITS; + return next - base; } static inline bool migration_bitmap_clear_dirty(RAMState *rs, @@ -810,7 +810,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) int ret; bool send_async = true; RAMBlock *block = pss->block; - ram_addr_t offset = pss->offset; + ram_addr_t offset = pss->page << TARGET_PAGE_BITS; p = block->host + offset; @@ -842,7 +842,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage) * page would be stale */ xbzrle_cache_zero_page(rs, current_addr); - ram_release_pages(block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, offset, pages); } else if (!rs->ram_bulk_stage && !migration_in_postcopy() && migrate_use_xbzrle()) { pages = save_xbzrle_page(rs, &p, current_addr, block, @@ -985,7 +985,7 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, uint8_t *p; int ret, blen; RAMBlock *block = pss->block; - ram_addr_t offset = pss->offset; + ram_addr_t offset = pss->page << TARGET_PAGE_BITS; p = block->host + offset; @@ -1029,14 +1029,14 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, } } if (pages > 0) { - ram_release_pages(block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, offset, pages); } } else { pages = save_zero_page(rs, block, offset, p); if (pages == -1) { pages = compress_page_with_multi_thread(rs, block, offset); } else { - ram_release_pages(block->idstr, pss->offset, pages); + ram_release_pages(block->idstr, offset, pages); } } } @@ -1058,10 +1058,10 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again, unsigned long *page_abs) { - pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset, - page_abs); + pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page, + page_abs); if (pss->complete_round && pss->block == rs->last_seen_block && - (pss->offset 
>> TARGET_PAGE_BITS) >= rs->last_page) { /* * We've been once around the RAM and haven't found anything. * Give up. @@ -1069,9 +1069,9 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, *again = false; return false; } - if (pss->offset >= pss->block->used_length) { + if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) { /* Didn't find anything in this RAM Block */ - pss->offset = 0; + pss->page = 0; pss->block = QLIST_NEXT_RCU(pss->block, next); if (!pss->block) { /* Hit the end of the list */ @@ -1193,7 +1193,7 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss, * it just requested. */ pss->block = block; - pss->offset = offset; + pss->page = offset >> TARGET_PAGE_BITS; } return !!block; @@ -1357,7 +1357,8 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, unsigned long page_abs) { int tmppages, pages = 0; - size_t pagesize = qemu_ram_pagesize(pss->block); + size_t pagesize_bits = + qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; do { tmppages = ram_save_target_page(rs, pss, last_stage, page_abs); @@ -1366,12 +1367,12 @@ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, } pages += tmppages; - pss->offset += TARGET_PAGE_SIZE; + pss->page++; page_abs++; - } while (pss->offset & (pagesize - 1)); + } while (pss->page & (pagesize_bits - 1)); /* The offset we leave with is the last one we looked at */ - pss->offset -= TARGET_PAGE_SIZE; + pss->page--; return pages; } @@ -1402,7 +1403,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) } pss.block = rs->last_seen_block; - pss.offset = rs->last_page << TARGET_PAGE_BITS; + pss.page = rs->last_page; pss.complete_round = false; if (!pss.block) { @@ -1424,7 +1425,7 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) } while (!pages && again); rs->last_seen_block = pss.block; - rs->last_page = pss.offset >> TARGET_PAGE_BITS; + rs->last_page = pss.page; return pages; } -- cgit v1.2.3 From f20e2865167d2c147ccb42a445764e1ec99483e0 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 16:19:05 +0100 Subject: ram: Use ramblock and page offset instead of absolute offset This removes the need to also pass the absolute offset. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert --- migration/ram.c | 65 ++++++++++++++++++++++++--------------------------------- 1 file changed, 27 insertions(+), 38 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index eec398f833..932a96e510 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -609,12 +609,10 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data, * @rs: current RAM state * @rb: RAMBlock where to search for dirty pages * @start: page where we start the search - * @page_abs: pointer into where to store the dirty page */ static inline unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, - unsigned long start, - unsigned long *page_abs) + unsigned long start) { unsigned long base = rb->offset >> TARGET_PAGE_BITS; unsigned long nr = base + start; @@ -631,17 +629,18 @@ unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb, next = find_next_bit(bitmap, size, nr); } - *page_abs = next; return next - base; } static inline bool migration_bitmap_clear_dirty(RAMState *rs, - unsigned long page_abs) + RAMBlock *rb, + unsigned long page) { bool ret; unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; + unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page; - ret = test_and_clear_bit(page_abs, bitmap); + ret = test_and_clear_bit(nr, bitmap); if (ret) { rs->migration_dirty_pages--; @@ -1053,13 +1052,10 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss, * @rs: current RAM state * @pss: data about the state of the current dirty page scan * @again: set to false if the search has scanned the whole of RAM - * @page_abs: pointer into where to store the dirty page */ -static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, - bool *again, unsigned long *page_abs) +static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again) { - pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page, - page_abs); + pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page); if (pss->complete_round && pss->block == rs->last_seen_block && pss->page >= rs->last_page) { /* @@ -1106,10 +1102,8 @@ static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, * * @rs: current RAM state * @offset: used to return the offset within the RAMBlock - * @page_abs: pointer into where to store the dirty page */ -static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, - unsigned long *page_abs) +static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset) { RAMBlock *block = NULL; @@ -1119,7 +1113,6 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, QSIMPLEQ_FIRST(&rs->src_page_requests); block = entry->rb; *offset = entry->offset; - *page_abs = (entry->offset + entry->rb->offset) >> TARGET_PAGE_BITS; if (entry->len > TARGET_PAGE_SIZE) { entry->len -= TARGET_PAGE_SIZE; @@ -1144,17 +1137,15 @@ static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset, * * @rs: current RAM state * @pss: data about the state of the current dirty page scan - * @page_abs: pointer into where to store the dirty page */ -static bool get_queued_page(RAMState *rs, PageSearchStatus *pss, - unsigned long *page_abs) +static bool get_queued_page(RAMState *rs, PageSearchStatus *pss) { RAMBlock *block; ram_addr_t offset; bool dirty; do { - block = unqueue_page(rs, &offset, page_abs); + block = unqueue_page(rs, &offset); /* * We're sending this page, and since it's postcopy nothing else * will dirty it, and we must make sure it doesn't get sent again @@ -1163,16 +1154,18 @@ static bool get_queued_page(RAMState 
*rs, PageSearchStatus *pss, */ if (block) { unsigned long *bitmap; + unsigned long page; + bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; - dirty = test_bit(*page_abs, bitmap); + page = (block->offset + offset) >> TARGET_PAGE_BITS; + dirty = test_bit(page, bitmap); if (!dirty) { trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset, - *page_abs, - test_bit(*page_abs, + page, + test_bit(page, atomic_rcu_read(&rs->ram_bitmap)->unsentmap)); } else { - trace_get_queued_page(block->idstr, (uint64_t)offset, - *page_abs); + trace_get_queued_page(block->idstr, (uint64_t)offset, page); } } @@ -1300,22 +1293,22 @@ err: * @ms: current migration state * @pss: data about the page we want to send * @last_stage: if we are at the completion stage - * @page_abs: page number of the dirty page */ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, - bool last_stage, unsigned long page_abs) + bool last_stage) { int res = 0; /* Check the pages is dirty and if it is send it */ - if (migration_bitmap_clear_dirty(rs, page_abs)) { + if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) { unsigned long *unsentmap; /* * If xbzrle is on, stop using the data compression after first * round of migration even if compression is enabled. In theory, * xbzrle can do better than compression. */ - + unsigned long page = + (pss->block->offset >> TARGET_PAGE_BITS) + pss->page; if (migrate_use_compression() && (rs->ram_bulk_stage || !migrate_use_xbzrle())) { res = ram_save_compressed_page(rs, pss, last_stage); @@ -1328,7 +1321,7 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, } unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap; if (unsentmap) { - clear_bit(page_abs, unsentmap); + clear_bit(page, unsentmap); } } @@ -1350,25 +1343,22 @@ static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss, * @ms: current migration state * @pss: data about the page we want to send * @last_stage: if we are at the completion stage - * @page_abs: Page number of the dirty page */ static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss, - bool last_stage, - unsigned long page_abs) + bool last_stage) { int tmppages, pages = 0; size_t pagesize_bits = qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS; do { - tmppages = ram_save_target_page(rs, pss, last_stage, page_abs); + tmppages = ram_save_target_page(rs, pss, last_stage); if (tmppages < 0) { return tmppages; } pages += tmppages; pss->page++; - page_abs++; } while (pss->page & (pagesize_bits - 1)); /* The offset we leave with is the last one we looked at */ @@ -1395,7 +1385,6 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) PageSearchStatus pss; int pages = 0; bool again, found; - unsigned long page_abs; /* Page number of the dirty page */ /* No dirty page as there is zero RAM */ if (!ram_bytes_total()) { @@ -1412,15 +1401,15 @@ static int ram_find_and_save_block(RAMState *rs, bool last_stage) do { again = true; - found = get_queued_page(rs, &pss, &page_abs); + found = get_queued_page(rs, &pss); if (!found) { /* priority queue empty, so just search for something dirty */ - found = find_dirty_block(rs, &pss, &again, &page_abs); + found = find_dirty_block(rs, &pss, &again); } if (found) { - pages = ram_save_host_page(rs, &pss, last_stage, page_abs); + pages = ram_save_host_page(rs, &pss, last_stage); } } while (!pages && again); -- cgit v1.2.3 From b8c4899398126a1dd80377e14d4ba471018293d3 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 17:44:30 +0100 Subject: ram: rename 
last_ram_offset() to last_ram_page() We always use it as pages anyway. Signed-off-by: Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- exec.c | 6 +++--- include/exec/ram_addr.h | 2 +- migration/ram.c | 11 +++++------ 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/exec.c b/exec.c index 7a640c76b6..de843f4de2 100644 --- a/exec.c +++ b/exec.c @@ -1528,7 +1528,7 @@ static ram_addr_t find_ram_offset(ram_addr_t size) return offset; } -ram_addr_t last_ram_offset(void) +unsigned long last_ram_page(void) { RAMBlock *block; ram_addr_t last = 0; @@ -1538,7 +1538,7 @@ ram_addr_t last_ram_offset(void) last = MAX(last, block->offset + block->max_length); } rcu_read_unlock(); - return last; + return last >> TARGET_PAGE_BITS; } static void qemu_ram_setup_dump(void *addr, ram_addr_t size) @@ -1727,7 +1727,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) ram_addr_t old_ram_size, new_ram_size; Error *err = NULL; - old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS; + old_ram_size = last_ram_page(); qemu_mutex_lock_ramlist(); new_block->offset = find_ram_offset(new_block->max_length); diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index 8a1372c66d..a8411c7821 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -53,7 +53,7 @@ static inline void *ramblock_ptr(RAMBlock *block, ram_addr_t offset) } long qemu_getrampagesize(void); -ram_addr_t last_ram_offset(void); +unsigned long last_ram_page(void); RAMBlock *qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr, bool share, const char *mem_path, Error **errp); diff --git a/migration/ram.c b/migration/ram.c index 932a96e510..f972882dea 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1537,7 +1537,7 @@ void ram_debug_dump_bitmap(unsigned long *todump, bool expected) { - int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; + unsigned long ram_pages = last_ram_page(); RAMState *rs = &ram_state; int64_t cur; int64_t linelen = 128; @@ -1904,8 -1904,7 @@ int ram_postcopy_send_discard_bitmap(MigrationState *ms) * Update the unsentmap to be unsentmap = unsentmap | dirty */ bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap; - bitmap_or(unsentmap, unsentmap, bitmap, - last_ram_offset() >> TARGET_PAGE_BITS); + bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page()); trace_ram_postcopy_send_discard_bitmap(); @@ -1953,7 +1952,7 @@ err: static int ram_state_init(RAMState *rs) { - int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */ + unsigned long ram_bitmap_pages; memset(rs, 0, sizeof(*rs)); qemu_mutex_init(&rs->bitmap_mutex); @@ -1999,7 +1998,7 @@ static int ram_state_init(RAMState *rs) rs->ram_bitmap = g_new0(struct RAMBitmap, 1); /* Skip setting bitmap if there is no RAM */ if (ram_bytes_total()) { - ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS; + ram_bitmap_pages = last_ram_page(); rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages); bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages); @@ -2460,7 +2459,7 @@ static void decompress_data_with_multi_threads(QEMUFile *f, */ int ram_postcopy_incoming_init(MigrationIncomingState *mis) { - size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS; + unsigned long ram_pages = last_ram_page(); return postcopy_ram_incoming_init(mis, ram_pages); } -- cgit v1.2.3 From 352b0de982f9d764661b5ab9c57951537ae7ac5c Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 21 Mar 2017 18:03:49 +0100 Subject: ram: Use RAMBitmap type for coherence Signed-off-by: 
Juan Quintela Reviewed-by: Dr. David Alan Gilbert --- migration/ram.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/migration/ram.c b/migration/ram.c index f972882dea..2af8080cbb 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1451,7 +1451,7 @@ void free_xbzrle_decoded_buf(void) xbzrle_decoded_buf = NULL; } -static void migration_bitmap_free(struct RAMBitmap *bmap) +static void migration_bitmap_free(RAMBitmap *bmap) { g_free(bmap->bmap); g_free(bmap->unsentmap); @@ -1465,7 +1465,7 @@ static void ram_migration_cleanup(void *opaque) /* caller have hold iothread lock or is in a bh, so there is * no writing race against this migration_bitmap */ - struct RAMBitmap *bitmap = rs->ram_bitmap; + RAMBitmap *bitmap = rs->ram_bitmap; atomic_rcu_set(&rs->ram_bitmap, NULL); if (bitmap) { memory_global_dirty_log_stop(); @@ -1504,8 +1504,8 @@ void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) * no writing race against this migration_bitmap */ if (rs->ram_bitmap) { - struct RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap; - bitmap = g_new(struct RAMBitmap, 1); + RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap; + bitmap = g_new(RAMBitmap, 1); bitmap->bmap = bitmap_new(new); /* prevent migration_bitmap content from being set bit @@ -1995,7 +1995,7 @@ static int ram_state_init(RAMState *rs) rcu_read_lock(); ram_state_reset(rs); - rs->ram_bitmap = g_new0(struct RAMBitmap, 1); + rs->ram_bitmap = g_new0(RAMBitmap, 1); /* Skip setting bitmap if there is no RAM */ if (ram_bytes_total()) { ram_bitmap_pages = last_ram_page(); -- cgit v1.2.3 From fab350052698e27a1011d0d6f4257d8fecb84867 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 22 Mar 2017 17:36:57 +0100 Subject: migration: Remove MigrationState parameter from migration_is_idle() The only user doesn't have a MigrationState handy. Signed-off-by: Juan Quintela Reviewed-by: Dr. 
David Alan Gilbert --- include/migration/migration.h | 2 +- migration/migration.c | 8 +++----- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/include/migration/migration.h b/include/migration/migration.h index b84ec3de32..ba1a16cbc1 100644 --- a/include/migration/migration.h +++ b/include/migration/migration.h @@ -234,7 +234,7 @@ void remove_migration_state_change_notifier(Notifier *notify); MigrationState *migrate_init(const MigrationParams *params); bool migration_is_blocked(Error **errp); bool migration_in_setup(MigrationState *); -bool migration_is_idle(MigrationState *s); +bool migration_is_idle(void); bool migration_has_finished(MigrationState *); bool migration_has_failed(MigrationState *); /* True if outgoing migration has entered postcopy phase */ diff --git a/migration/migration.c b/migration/migration.c index 156daf976f..a92d7f7ce2 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1075,11 +1075,9 @@ bool migration_in_postcopy_after_devices(MigrationState *s) return migration_in_postcopy() && s->postcopy_after_devices; } -bool migration_is_idle(MigrationState *s) +bool migration_is_idle(void) { - if (!s) { - s = migrate_get_current(); - } + MigrationState *s = migrate_get_current(); switch (s->state) { case MIGRATION_STATUS_NONE: @@ -1144,7 +1142,7 @@ int migrate_add_blocker(Error *reason, Error **errp) return -EACCES; } - if (migration_is_idle(NULL)) { + if (migration_is_idle()) { migration_blockers = g_slist_prepend(migration_blockers, reason); return 0; } -- cgit v1.2.3 From 9bed84c19138bd161e9a6157a93ae0b25b5f7a71 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 11:08:52 +0200 Subject: qdev: qdev_hotplug is really a bool MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Juan Quintela Reviewed-by: zhanghailiang Reviewed-by: Eric Blake Reviewed-by: Philippe Mathieu-Daudé --- hw/core/qdev.c | 4 ++-- include/hw/qdev-core.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 1e7fb33246..6fa46b5245 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -39,7 +39,7 @@ #include "qapi-event.h" #include "migration/migration.h" -int qdev_hotplug = 0; +bool qdev_hotplug = false; static bool qdev_hot_added = false; static bool qdev_hot_removed = false; @@ -385,7 +385,7 @@ void qdev_machine_creation_done(void) * ok, initial machine setup is done, starting from now we can * only create hotpluggable devices */ - qdev_hotplug = 1; + qdev_hotplug = true; } bool qdev_machine_modified(void) diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index b44b476765..a96a91379f 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -386,7 +386,7 @@ Object *qdev_get_machine(void); /* FIXME: make this a link<> */ void qdev_set_parent_bus(DeviceState *dev, BusState *bus); -extern int qdev_hotplug; +extern bool qdev_hotplug; char *qdev_get_dev_path(DeviceState *dev); -- cgit v1.2.3 From 21def24a5a58ad0ea9f7b02c084387e71b11ff1b Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 11:22:10 +0200 Subject: qdev: Export qdev_hot_removed I need to move qdev_unplug to qdev-monitor in the following patch, and it needs access to this variable. 
Signed-off-by: Juan Quintela Reviewed-by: Eric Blake Reviewed-by: zhanghailiang --- hw/core/qdev.c | 2 +- include/hw/qdev-core.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hw/core/qdev.c b/hw/core/qdev.c index 6fa46b5245..c26cf84cd3 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -41,7 +41,7 @@ bool qdev_hotplug = false; static bool qdev_hot_added = false; -static bool qdev_hot_removed = false; +bool qdev_hot_removed = false; const VMStateDescription *qdev_get_vmsd(DeviceState *dev) { diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index a96a91379f..f09b6b78f6 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -387,6 +387,7 @@ Object *qdev_get_machine(void); void qdev_set_parent_bus(DeviceState *dev, BusState *bus); extern bool qdev_hotplug; +extern bool qdev_hot_removed; char *qdev_get_dev_path(DeviceState *dev); -- cgit v1.2.3 From 329006799f93265bff5e10a4e1dd50b66fe09e6b Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Tue, 28 Mar 2017 11:22:51 +0200 Subject: qdev: Move qdev_unplug() to qdev-monitor.c It is not used by linux-user; otherwise I would need to create a stub for migration_is_idle() in the following patch. Signed-off-by: Juan Quintela Reviewed-by: Eric Blake Reviewed-by: zhanghailiang Reviewed-by: Markus Armbruster --- hw/core/qdev.c | 34 ---------------------------------- qdev-monitor.c | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/hw/core/qdev.c b/hw/core/qdev.c index c26cf84cd3..0df00500e0 100644 --- a/hw/core/qdev.c +++ b/hw/core/qdev.c @@ -271,40 +271,6 @@ HotplugHandler *qdev_get_hotplug_handler(DeviceState *dev) return hotplug_ctrl; } -void qdev_unplug(DeviceState *dev, Error **errp) -{ - DeviceClass *dc = DEVICE_GET_CLASS(dev); - HotplugHandler *hotplug_ctrl; - HotplugHandlerClass *hdc; - - if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) { - error_setg(errp, QERR_BUS_NO_HOTPLUG, dev->parent_bus->name); - return; - } - - if (!dc->hotpluggable) { - error_setg(errp, QERR_DEVICE_NO_HOTPLUG, - object_get_typename(OBJECT(dev))); - return; - } - - qdev_hot_removed = true; - - hotplug_ctrl = qdev_get_hotplug_handler(dev); - /* hotpluggable device MUST have HotplugHandler, if it doesn't - * then something is very wrong with it */ - g_assert(hotplug_ctrl); - - /* If device supports async unplug just request it to be done, - * otherwise just remove it synchronously */ - hdc = HOTPLUG_HANDLER_GET_CLASS(hotplug_ctrl); - if (hdc->unplug_request) { - hotplug_handler_unplug_request(hotplug_ctrl, dev, errp); - } else { - hotplug_handler_unplug(hotplug_ctrl, dev, errp); - } -} - static int qdev_reset_one(DeviceState *dev, void *opaque) { device_reset(dev); diff --git a/qdev-monitor.c b/qdev-monitor.c index 5f2fcdfc45..bb3d8ba360 100644 --- a/qdev-monitor.c +++ b/qdev-monitor.c @@ -836,6 +836,40 @@ static DeviceState *find_device_state(const char *id, Error **errp) return DEVICE(obj); } +void qdev_unplug(DeviceState *dev, Error **errp) +{ + DeviceClass *dc = DEVICE_GET_CLASS(dev); + HotplugHandler *hotplug_ctrl; + HotplugHandlerClass *hdc; + + if (dev->parent_bus && !qbus_is_hotpluggable(dev->parent_bus)) { + error_setg(errp, QERR_BUS_NO_HOTPLUG, dev->parent_bus->name); + return; + } + + if (!dc->hotpluggable) { + error_setg(errp, QERR_DEVICE_NO_HOTPLUG, + object_get_typename(OBJECT(dev))); + return; + } + + qdev_hot_removed = true; + + hotplug_ctrl = qdev_get_hotplug_handler(dev); + /* hotpluggable device MUST have HotplugHandler, if it doesn't + * then 
something is very wrong with it */ + g_assert(hotplug_ctrl); + + /* If device supports async unplug just request it to be done, + * otherwise just remove it synchronously */ + hdc = HOTPLUG_HANDLER_GET_CLASS(hotplug_ctrl); + if (hdc->unplug_request) { + hotplug_handler_unplug_request(hotplug_ctrl, dev, errp); + } else { + hotplug_handler_unplug(hotplug_ctrl, dev, errp); + } +} + void qmp_device_del(const char *id, Error **errp) { DeviceState *dev = find_device_state(id, errp); -- cgit v1.2.3 From b06424de62b362034e41ddf77eb8cfaa641d9009 Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 22 Mar 2017 17:34:27 +0100 Subject: migration: Disable hotplug/unplug during migration Until we have reviewed what can/can't be hotplugged during migration, disable it. We can enable it later for the things that we know work. For instance, memory hotplug during postcopy doesn't work currently. Signed-off-by: Juan Quintela Reviewed-by: zhanghailiang -- - Fix typo. Thanks Thomas. - Delay migration check after we have checked that we can hotplug that device. - more typos --- qdev-monitor.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/qdev-monitor.c b/qdev-monitor.c index bb3d8ba360..e61d596ef7 100644 --- a/qdev-monitor.c +++ b/qdev-monitor.c @@ -29,6 +29,7 @@ #include "qemu/error-report.h" #include "qemu/help_option.h" #include "sysemu/block-backend.h" +#include "migration/migration.h" /* * Aliases were a bad idea from the start. Let's keep them @@ -603,6 +604,11 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) return NULL; } + if (!migration_is_idle()) { + error_setg(errp, "device_add not allowed while migrating"); + return NULL; + } + /* create device */ dev = DEVICE(object_new(driver)); @@ -853,6 +859,11 @@ void qdev_unplug(DeviceState *dev, Error **errp) return; } + if (!migration_is_idle()) { + error_setg(errp, "device_del not allowed while migrating"); + return; + } + qdev_hot_removed = true; hotplug_ctrl = qdev_get_hotplug_handler(dev); -- cgit v1.2.3 From 66103a5796d0003cb198c25d783dcdc1596aef1f Mon Sep 17 00:00:00 2001 From: Juan Quintela Date: Wed, 22 Mar 2017 17:42:01 +0100 Subject: ram: Remove migration_bitmap_extend() We have disabled memory hotplug, so we don't need to handle migration_bitmap there. Signed-off-by: Juan Quintela Reviewed-by: Eric Blake Reviewed-by: zhanghailiang --- exec.c | 1 - include/exec/ram_addr.h | 2 -- migration/ram.c | 34 ---------------------------------- 3 files changed, 37 deletions(-) diff --git a/exec.c b/exec.c index de843f4de2..c2def9ecf2 100644 --- a/exec.c +++ b/exec.c @@ -1758,7 +1758,6 @@ static void ram_block_add(RAMBlock *new_block, Error **errp) new_ram_size = MAX(old_ram_size, (new_block->offset + new_block->max_length) >> TARGET_PAGE_BITS); if (new_ram_size > old_ram_size) { - migration_bitmap_extend(old_ram_size, new_ram_size); dirty_memory_extend(old_ram_size, new_ram_size); } /* Keep the list sorted from biggest to smallest block. 
Unlike QTAILQ, diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h index a8411c7821..c9ddcd0880 100644 --- a/include/exec/ram_addr.h +++ b/include/exec/ram_addr.h @@ -413,7 +413,5 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest, return num_dirty; } - -void migration_bitmap_extend(ram_addr_t old, ram_addr_t new); #endif #endif diff --git a/migration/ram.c b/migration/ram.c index 2af8080cbb..f48664ec62 100644 --- a/migration/ram.c +++ b/migration/ram.c @@ -1496,40 +1496,6 @@ static void ram_state_reset(RAMState *rs) #define MAX_WAIT 50 /* ms, half buffered_file limit */ -void migration_bitmap_extend(ram_addr_t old, ram_addr_t new) -{ - RAMState *rs = &ram_state; - - /* called in qemu main thread, so there is - * no writing race against this migration_bitmap - */ - if (rs->ram_bitmap) { - RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap; - bitmap = g_new(RAMBitmap, 1); - bitmap->bmap = bitmap_new(new); - - /* prevent migration_bitmap content from being set bit - * by migration_bitmap_sync_range() at the same time. - * it is safe to migration if migration_bitmap is cleared bit - * at the same time. - */ - qemu_mutex_lock(&rs->bitmap_mutex); - bitmap_copy(bitmap->bmap, old_bitmap->bmap, old); - bitmap_set(bitmap->bmap, old, new - old); - - /* We don't have a way to safely extend the sentmap - * with RCU; so mark it as missing, entry to postcopy - * will fail. - */ - bitmap->unsentmap = NULL; - - atomic_rcu_set(&rs->ram_bitmap, bitmap); - qemu_mutex_unlock(&rs->bitmap_mutex); - rs->migration_dirty_pages += new - old; - call_rcu(old_bitmap, migration_bitmap_free, rcu); - } -} - /* * 'expected' is the value you expect the bitmap mostly to be full * of; it won't bother printing lines that are all this value. -- cgit v1.2.3 From e8199e4895d34136735dea7e628d0de1a5afb630 Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Wed, 12 Apr 2017 15:53:11 +0200 Subject: migration: don't close a file descriptor while it can be in use If we close the QEMUFile descriptor in process_incoming_migration_co() while it has been stopped by an error, the postcopy_ram_listen_thread() can try to continue to use it. And as the memory has been freed it is working with an invalid pointer and crashes. Fix this by releasing the memory after having managed the error case (which, in fact, calls exit()) Signed-off-by: Laurent Vivier Reviewed-by: Dr. 
David Alan Gilbert Reviewed-by: Juan Quintela Reviewed-by: Amit Shah Reviewed-by: Stefan Hajnoczi Signed-off-by: Juan Quintela --- migration/migration.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/migration/migration.c b/migration/migration.c index a92d7f7ce2..31e8141c12 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -435,9 +435,6 @@ static void process_incoming_migration_co(void *opaque) qemu_thread_join(&mis->colo_incoming_thread); } - qemu_fclose(f); - free_xbzrle_decoded_buf(); - if (ret < 0) { migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED); @@ -446,6 +443,9 @@ static void process_incoming_migration_co(void *opaque) exit(EXIT_FAILURE); } + qemu_fclose(f); + free_xbzrle_decoded_buf(); + mis->bh = qemu_bh_new(process_incoming_migration_bh, mis); qemu_bh_schedule(mis->bh); } -- cgit v1.2.3 From a23a6d183986ef38b705e85cabdd2af6cdc95276 Mon Sep 17 00:00:00 2001 From: Laurent Vivier Date: Wed, 12 Apr 2017 15:53:12 +0200 Subject: virtio-rng: stop virtqueue while the CPU is stopped If we modify the virtio-rng virtqueue while the vmstate is already migrated we can have some inconsistencies between the virtqueue state and the memory content. To avoid this, stop the virtqueue while the CPU is stopped. Signed-off-by: Laurent Vivier Reviewed-by: Amit Shah Reviewed-by: Stefan Hajnoczi Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- hw/virtio/trace-events | 3 +++ hw/virtio/virtio-rng.c | 29 +++++++++++++++++++++++------ include/hw/virtio/virtio-rng.h | 2 ++ 3 files changed, 28 insertions(+), 6 deletions(-) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 6926eedd3f..1f7a7c1ae1 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -11,8 +11,11 @@ virtio_set_status(void *vdev, uint8_t val) "vdev %p val %u" # hw/virtio/virtio-rng.c virtio_rng_guest_not_ready(void *rng) "rng %p: guest not ready" +virtio_rng_cpu_is_stopped(void *rng, int size) "rng %p: cpu is stopped, dropping %d bytes" +virtio_rng_popped(void *rng) "rng %p: elem popped" virtio_rng_pushed(void *rng, size_t len) "rng %p: %zd bytes pushed" virtio_rng_request(void *rng, size_t size, unsigned quota) "rng %p: %zd bytes requested, %u bytes quota left" +virtio_rng_vm_state_change(void *rng, int running, int state) "rng %p: state change to running %d state %d" # hw/virtio/virtio-balloon.c # diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index 9639f4e89b..a6ee501051 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -53,6 +53,15 @@ static void chr_read(void *opaque, const void *buf, size_t size) return; } + /* we can't modify the virtqueue until + * our state is fully synced + */ + + if (!runstate_check(RUN_STATE_RUNNING)) { + trace_virtio_rng_cpu_is_stopped(vrng, size); + return; + } + vrng->quota_remaining -= size; offset = 0; @@ -61,6 +70,7 @@ static void chr_read(void *opaque, const void *buf, size_t size) if (!elem) { break; } + trace_virtio_rng_popped(vrng); len = iov_from_buf(elem->in_sg, elem->in_num, 0, buf + offset, size - offset); offset += len; @@ -120,17 +130,21 @@ static uint64_t get_features(VirtIODevice *vdev, uint64_t f, Error **errp) return f; } -static int virtio_rng_post_load(void *opaque, int version_id) +static void virtio_rng_vm_state_change(void *opaque, int running, + RunState state) { VirtIORNG *vrng = opaque; + trace_virtio_rng_vm_state_change(vrng, running, state); + /* We may have an element ready but couldn't process it due to a quota - * limit. 
From 343001f68dd44d3052dea7d4db325aa8a4109b24 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Sat, 1 Apr 2017 16:18:42 +0800
Subject: migration: set current_active_state once
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

We set it right above this one. No need to set it twice.

CC: Juan Quintela
CC: Dr. David Alan Gilbert
Reviewed-by: Dr. David Alan Gilbert
Signed-off-by: Peter Xu
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Juan Quintela
Signed-off-by: Juan Quintela
---
 migration/migration.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/migration/migration.c b/migration/migration.c
index 31e8141c12..dd7f72164c 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1941,7 +1941,6 @@ static void *migration_thread(void *opaque)
     qemu_savevm_state_begin(s->to_dst_file, &s->params);
 
     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
-    current_active_state = MIGRATION_STATUS_ACTIVE;
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                       MIGRATION_STATUS_ACTIVE);
 
--
cgit v1.2.3

David Alan Gilbert" Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- include/migration/vmstate.h | 3 ++- migration/migration.c | 21 +++++++++++++-------- migration/savevm.c | 4 ++-- 3 files changed, 17 insertions(+), 11 deletions(-) diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h index f2dbf8410a..dad3984c07 100644 --- a/include/migration/vmstate.h +++ b/include/migration/vmstate.h @@ -56,7 +56,8 @@ typedef struct SaveVMHandlers { /* This runs outside the iothread lock! */ int (*save_live_setup)(QEMUFile *f, void *opaque); - void (*save_live_pending)(QEMUFile *f, void *opaque, uint64_t max_size, + void (*save_live_pending)(QEMUFile *f, void *opaque, + uint64_t threshold_size, uint64_t *non_postcopiable_pending, uint64_t *postcopiable_pending); LoadStateHandler *load_state; diff --git a/migration/migration.c b/migration/migration.c index dd7f72164c..353f2728cf 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -1910,7 +1910,12 @@ static void *migration_thread(void *opaque) int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); int64_t initial_bytes = 0; - int64_t max_size = 0; + /* + * The final stage happens when the remaining data is smaller than + * this threshold; it's calculated from the requested downtime and + * measured bandwidth + */ + int64_t threshold_size = 0; int64_t start_time = initial_time; int64_t end_time; bool old_vm_running = false; @@ -1954,17 +1959,17 @@ static void *migration_thread(void *opaque) if (!qemu_file_rate_limit(s->to_dst_file)) { uint64_t pend_post, pend_nonpost; - qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost, - &pend_post); + qemu_savevm_state_pending(s->to_dst_file, threshold_size, + &pend_nonpost, &pend_post); pending_size = pend_nonpost + pend_post; - trace_migrate_pending(pending_size, max_size, + trace_migrate_pending(pending_size, threshold_size, pend_post, pend_nonpost); - if (pending_size && pending_size >= max_size) { + if (pending_size && pending_size >= threshold_size) { /* Still a significant amount to transfer */ if (migrate_postcopy_ram() && s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE && - pend_nonpost <= max_size && + pend_nonpost <= threshold_size && atomic_read(&s->start_postcopy)) { if (!postcopy_start(s, &old_vm_running)) { @@ -1996,13 +2001,13 @@ static void *migration_thread(void *opaque) initial_bytes; uint64_t time_spent = current_time - initial_time; double bandwidth = (double)transferred_bytes / time_spent; - max_size = bandwidth * s->parameters.downtime_limit; + threshold_size = bandwidth * s->parameters.downtime_limit; s->mbps = (((double) transferred_bytes * 8.0) / ((double) time_spent / 1000.0)) / 1000.0 / 1000.0; trace_migrate_transferred(transferred_bytes, time_spent, - bandwidth, max_size); + bandwidth, threshold_size); /* if we haven't sent anything, we don't want to recalculate 10000 is a small enough number for our purposes */ if (ram_dirty_pages_rate() && transferred_bytes > 10000) { diff --git a/migration/savevm.c b/migration/savevm.c index 7cf387fd04..03ae1bdeb4 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -1197,7 +1197,7 @@ void qemu_savevm_state_complete_precopy(QEMUFile *f, bool iterable_only) * the result is split into the amount for units that can and * for units that can't do postcopy. 
From d80a0169e3e7e8e6ae9c2bdf512d0842554c02d1 Mon Sep 17 00:00:00 2001
From: Peter Xu
Date: Sat, 1 Apr 2017 16:18:44 +0800
Subject: hmp: info migrate_capability format tunes
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Dumping the info in a single line is hard to read. Print one capability
per line instead. Also, the leading "capabilities:" prefix didn't help
much, so remove it.

CC: "Dr. David Alan Gilbert"
Signed-off-by: Peter Xu
Reviewed-by: "Dr. David Alan Gilbert"
Reviewed-by: Philippe Mathieu-Daudé
Reviewed-by: Juan Quintela
Signed-off-by: Juan Quintela
---
 hmp.c | 4 +---
 1 file changed, 1 insertion(+), 3 deletions(-)

diff --git a/hmp.c b/hmp.c
index be75e7118c..70db84f2b6 100644
--- a/hmp.c
+++ b/hmp.c
@@ -268,13 +268,11 @@ void hmp_info_migrate_capabilities(Monitor *mon, const QDict *qdict)
     caps = qmp_query_migrate_capabilities(NULL);
 
     if (caps) {
-        monitor_printf(mon, "capabilities: ");
         for (cap = caps; cap; cap = cap->next) {
-            monitor_printf(mon, "%s: %s ",
+            monitor_printf(mon, "%s: %s\n",
                            MigrationCapability_lookup[cap->value->capability],
                            cap->value->state ? "on" : "off");
         }
-        monitor_printf(mon, "\n");
     }
 
     qapi_free_MigrationCapabilityStatusList(caps);
--
cgit v1.2.3

David Alan Gilbert" Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Juan Quintela Signed-off-by: Juan Quintela --- hmp.c | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/hmp.c b/hmp.c index 70db84f2b6..ab407d6fd0 100644 --- a/hmp.c +++ b/hmp.c @@ -285,46 +285,44 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) params = qmp_query_migrate_parameters(NULL); if (params) { - monitor_printf(mon, "parameters:"); assert(params->has_compress_level); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_LEVEL], params->compress_level); assert(params->has_compress_threads); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_COMPRESS_THREADS], params->compress_threads); assert(params->has_decompress_threads); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_DECOMPRESS_THREADS], params->decompress_threads); assert(params->has_cpu_throttle_initial); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL], params->cpu_throttle_initial); assert(params->has_cpu_throttle_increment); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT], params->cpu_throttle_increment); - monitor_printf(mon, " %s: '%s'", + monitor_printf(mon, "%s: '%s'\n", MigrationParameter_lookup[MIGRATION_PARAMETER_TLS_CREDS], params->has_tls_creds ? params->tls_creds : ""); - monitor_printf(mon, " %s: '%s'", + monitor_printf(mon, "%s: '%s'\n", MigrationParameter_lookup[MIGRATION_PARAMETER_TLS_HOSTNAME], params->has_tls_hostname ? params->tls_hostname : ""); assert(params->has_max_bandwidth); - monitor_printf(mon, " %s: %" PRId64 " bytes/second", + monitor_printf(mon, "%s: %" PRId64 " bytes/second\n", MigrationParameter_lookup[MIGRATION_PARAMETER_MAX_BANDWIDTH], params->max_bandwidth); assert(params->has_downtime_limit); - monitor_printf(mon, " %s: %" PRId64 " milliseconds", + monitor_printf(mon, "%s: %" PRId64 " milliseconds\n", MigrationParameter_lookup[MIGRATION_PARAMETER_DOWNTIME_LIMIT], params->downtime_limit); assert(params->has_x_checkpoint_delay); - monitor_printf(mon, " %s: %" PRId64, + monitor_printf(mon, "%s: %" PRId64 "\n", MigrationParameter_lookup[MIGRATION_PARAMETER_X_CHECKPOINT_DELAY], params->x_checkpoint_delay); - monitor_printf(mon, "\n"); } qapi_free_MigrationParameters(params); -- cgit v1.2.3