| author | Juan Quintela <quintela@redhat.com> | 2023-10-19 13:07:16 +0200 |
|---|---|---|
| committer | Juan Quintela <quintela@redhat.com> | 2023-10-30 17:41:55 +0100 |
| commit | 4e400f9091d496c513fcd513753a2d681be57a3b (patch) | |
| tree | 8ca7c758330d73b0cbf1a070c62e9246b7fe4657 | |
| parent | 0e195629969125e3a168a7e06e3c49d939c36ead (diff) | |
migration: Remove save_page_use_compression()
After the previous patch, we have disabled the possibility of using compression
together with xbzrle, so we can call migrate_compress() directly.
Once there, we no longer need the rs parameter, so remove it.
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231019110724.15324-4-quintela@redhat.com>
-rw-r--r-- | migration/ram.c | 34 |
1 file changed, 7 insertions, 27 deletions
diff --git a/migration/ram.c b/migration/ram.c
index e0ad732ee8..8246663f64 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1291,8 +1291,6 @@ static int ram_save_multifd_page(QEMUFile *file, RAMBlock *block,
     return 1;
 }
 
-static bool save_page_use_compression(RAMState *rs);
-
 static int send_queued_data(CompressParam *param)
 {
     PageSearchStatus *pss = &ram_state->pss[RAM_CHANNEL_PRECOPY];
@@ -1329,9 +1327,9 @@ static int send_queued_data(CompressParam *param)
     return len;
 }
 
-static void ram_flush_compressed_data(RAMState *rs)
+static void ram_flush_compressed_data(void)
 {
-    if (!save_page_use_compression(rs)) {
+    if (!migrate_compress()) {
         return;
     }
 
@@ -1393,7 +1391,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
              * Also If xbzrle is on, stop using the data compression at this
              * point. In theory, xbzrle can do better than compression.
              */
-            ram_flush_compressed_data(rs);
+            ram_flush_compressed_data();
 
             /* Hit the end of the list */
             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
@@ -2042,24 +2040,6 @@ int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
     return 0;
 }
 
-static bool save_page_use_compression(RAMState *rs)
-{
-    if (!migrate_compress()) {
-        return false;
-    }
-
-    /*
-     * If xbzrle is enabled (e.g., after first round of migration), stop
-     * using the data compression. In theory, xbzrle can do better than
-     * compression.
-     */
-    if (rs->xbzrle_started) {
-        return false;
-    }
-
-    return true;
-}
-
 /*
  * try to compress the page before posting it out, return true if the page
  * has been properly handled by compression, otherwise needs other
@@ -2068,7 +2048,7 @@ static bool save_page_use_compression(RAMState *rs)
 static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
                                ram_addr_t offset)
 {
-    if (!save_page_use_compression(rs)) {
+    if (!migrate_compress()) {
         return false;
     }
 
@@ -2083,7 +2063,7 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
      * much CPU resource.
     */
     if (pss->block != pss->last_sent_block) {
-        ram_flush_compressed_data(rs);
+        ram_flush_compressed_data();
         return false;
     }
 
@@ -3135,7 +3115,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
                  * page is sent in one chunk.
                  */
                 if (migrate_postcopy_ram()) {
-                    ram_flush_compressed_data(rs);
+                    ram_flush_compressed_data();
                 }
 
                 /*
@@ -3236,7 +3216,7 @@ static int ram_save_complete(QEMUFile *f, void *opaque)
         }
         qemu_mutex_unlock(&rs->bitmap_mutex);
 
-        ram_flush_compressed_data(rs);
+        ram_flush_compressed_data();
 
         int ret = rdma_registration_stop(f, RAM_CONTROL_FINISH);
         if (ret < 0) {
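For readers following the change, here is a minimal, self-contained C sketch of the pattern this patch applies; it is not QEMU code, and all types, globals, and option helpers below are simplified stand-ins modeled on the names in the diff. The point it illustrates: once the options layer guarantees that compression and xbzrle can never be enabled together, the stateful predicate save_page_use_compression(rs) reduces to the plain capability check migrate_compress(), and the RAMState parameter can be dropped.

```c
#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the real QEMU option helpers and RAMState. */
typedef struct {
    bool xbzrle_started;
} RAMState;

static bool compress_enabled;

static bool migrate_compress(void)
{
    return compress_enabled;
}

/* Pre-patch shape: needs RAMState because it also had to check xbzrle. */
static bool save_page_use_compression(RAMState *rs)
{
    if (!migrate_compress()) {
        return false;
    }
    /* Dead once compression and xbzrle cannot be enabled together:
     * with compression on, xbzrle_started can never become true. */
    if (rs->xbzrle_started) {
        return false;
    }
    return true;
}

/* Post-patch shape: the capability check alone is enough, no state needed. */
static void ram_flush_compressed_data(void)
{
    if (!migrate_compress()) {
        return;
    }
    printf("flushing compressed data\n");
}

int main(void)
{
    RAMState rs = { .xbzrle_started = false };

    compress_enabled = true;

    /* With mutual exclusion enforced when options are set,
     * the old and new checks always agree. */
    printf("old check: %d, new check: %d\n",
           save_page_use_compression(&rs), migrate_compress());
    ram_flush_compressed_data();
    return 0;
}
```

The same reasoning is why ram_flush_compressed_data() loses its rs parameter in the diff above: once the xbzrle check is gone, nothing in that function needs per-migration state.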