author     Wei Yang <richardw.yang@linux.intel.com>   2019-11-07 20:39:07 +0800
committer  Juan Quintela <quintela@redhat.com>        2020-01-20 09:10:23 +0100
commit     644acf99b8cb8437f65600cf00c2e090bf3e3bc2 (patch)
tree       40089c7c38dccffe4ef236a3f8567e8797ca5ec7 /migration
parent     91ba442f5c26eaf45e2f78fc7e40fe6a7fdf7b9d (diff)
migration/postcopy: enable compress during postcopy
Postcopy requires placing a whole host page at once, while the migration
thread migrates memory in target-page-sized chunks. Postcopy therefore has
to collect all target pages of one host page before placing it via
userfaultfd.

To enable compression during postcopy, two problems need to be solved:

1. Target pages may arrive in random order.
2. The target pages of one host page must arrive without being interleaved
   with target pages from another host page.

The first problem is handled by the previous cleanup patch. This patch
handles the second one by:

1. Flushing the compress threads for each host page on the source.
2. Waiting for the decompress threads to finish before placing a host page
   on the destination.

Signed-off-by: Wei Yang <richardw.yang@linux.intel.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
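The source-side ordering rule can be pictured with a toy, single-threaded
sketch (not QEMU code; HOST_PAGE_SIZE, queue_target_page() and
flush_compressed() are made-up stand-ins for the real helpers): every
target page of a host page is queued for compression, and the compressed
stream is flushed before any page of the next host page is queued.

/* Toy sketch of the ordering the patch enforces; build with: gcc demo.c */
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096
#define HOST_PAGE_SIZE   (16 * TARGET_PAGE_SIZE)   /* e.g. 64K host pages */
#define PAGES_PER_HOST   (HOST_PAGE_SIZE / TARGET_PAGE_SIZE)

/* Stand-in for handing one target page to the compress threads. */
static void queue_target_page(int host_page, int target_page)
{
    printf("queue host page %d, target page %d\n", host_page, target_page);
}

/* Stand-in for flush_compressed_data(): nothing queued for the previous
 * host page may interleave with pages of the next one. */
static void flush_compressed(void)
{
    printf("  flush: host page boundary reached\n");
}

int main(void)
{
    for (int hp = 0; hp < 2; hp++) {
        for (int tp = 0; tp < PAGES_PER_HOST; tp++) {
            queue_target_page(hp, tp);
        }
        /* The patch adds this flush once a whole host page has been sent. */
        flush_compressed();
    }
    return 0;
}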
Diffstat (limited to 'migration')
-rw-r--r--  migration/migration.c  11
-rw-r--r--  migration/ram.c        28
2 files changed, 27 insertions(+), 12 deletions(-)
diff --git a/migration/migration.c b/migration/migration.c
index e55edee606..990bff00c0 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -1005,17 +1005,6 @@ static bool migrate_caps_check(bool *cap_list,
 #endif
 
     if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
-        if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
-            /* The decompression threads asynchronously write into RAM
-             * rather than use the atomic copies needed to avoid
-             * userfaulting. It should be possible to fix the decompression
-             * threads for compatibility in future.
-             */
-            error_setg(errp, "Postcopy is not currently compatible "
-                       "with compression");
-            return false;
-        }
-
         /* This check is reasonably expensive, so only when it's being
          * set the first time, also it's only the destination that needs
          * special support.
diff --git a/migration/ram.c b/migration/ram.c
index a7414170e5..5f20c3d15d 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3470,6 +3470,14 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             rs->target_page_count += pages;
 
             /*
+             * During postcopy, it is necessary to make sure one whole host
+             * page is sent in one chunk.
+             */
+            if (migrate_postcopy_ram()) {
+                flush_compressed_data(rs);
+            }
+
+            /*
              * we want to check in the 1st loop, just in case it was the 1st
              * time and we had to sync the dirty bitmap.
              * qemu_clock_get_ns() is a bit expensive, so we only check each
@@ -4061,6 +4069,7 @@ static int ram_load_postcopy(QEMUFile *f)
         void *place_source = NULL;
         RAMBlock *block = NULL;
         uint8_t ch;
+        int len;
 
         addr = qemu_get_be64(f);
@@ -4078,7 +4087,8 @@ static int ram_load_postcopy(QEMUFile *f)
         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
         place_needed = false;
-        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
+        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
+                     RAM_SAVE_FLAG_COMPRESS_PAGE)) {
             block = ram_block_from_stream(f, flags);
 
             host = host_from_ram_block_offset(block, addr);
@@ -4161,6 +4171,17 @@ static int ram_load_postcopy(QEMUFile *f)
                                          TARGET_PAGE_SIZE);
             }
             break;
+        case RAM_SAVE_FLAG_COMPRESS_PAGE:
+            all_zero = false;
+            len = qemu_get_be32(f);
+            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
+                error_report("Invalid compressed data length: %d", len);
+                ret = -EINVAL;
+                break;
+            }
+            decompress_data_with_multi_threads(f, page_buffer, len);
+            break;
+
         case RAM_SAVE_FLAG_EOS:
             /* normal exit */
             multifd_recv_sync_main();
@@ -4172,6 +4193,11 @@ static int ram_load_postcopy(QEMUFile *f)
             break;
         }
 
+        /* Got the whole host page, wait for decompress before placing. */
+        if (place_needed) {
+            ret |= wait_for_decompress_done();
+        }
+
         /* Detect for any possible file errors */
         if (!ret && qemu_file_get_error(f)) {
             ret = qemu_file_get_error(f);
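
On the receive side, the new RAM_SAVE_FLAG_COMPRESS_PAGE branch bounds the
incoming length with zlib's compressBound() before handing the data to the
decompress threads: a correctly compressed target page can never exceed
that bound, so a larger length on the wire is invalid. A minimal standalone
round-trip using the same zlib calls (again a sketch, not QEMU code;
TARGET_PAGE_SIZE is assumed to be 4096 here) looks like:

/* Build with: gcc demo.c -lz */
#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define TARGET_PAGE_SIZE 4096   /* assumed page size for the demo */

int main(void)
{
    uLong bound = compressBound(TARGET_PAGE_SIZE);
    unsigned char page[TARGET_PAGE_SIZE];
    unsigned char out[TARGET_PAGE_SIZE];
    unsigned char wire[bound];          /* worst-case compressed size */
    uLongf wire_len = bound;
    uLongf out_len = sizeof(out);

    memset(page, 0xab, sizeof(page));

    /* Sender side: compress one target page. */
    if (compress2(wire, &wire_len, page, sizeof(page), Z_BEST_SPEED) != Z_OK) {
        return 1;
    }

    /* Receiver side: sanity check mirroring the patch (wire_len is unsigned
     * here, so only the upper bound needs checking in this demo). */
    if (wire_len > compressBound(TARGET_PAGE_SIZE)) {
        fprintf(stderr, "Invalid compressed data length: %lu\n",
                (unsigned long)wire_len);
        return 1;
    }

    /* Decompress back into a page-sized buffer, as the decompress
     * threads would, and verify the size. */
    if (uncompress(out, &out_len, wire, wire_len) != Z_OK ||
        out_len != TARGET_PAGE_SIZE) {
        return 1;
    }

    printf("round-trip ok, compressed %lu -> %lu bytes\n",
           (unsigned long)sizeof(page), (unsigned long)wire_len);
    return 0;
}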