author     Peter Maydell <peter.maydell@linaro.org>   2024-03-12 11:35:41 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2024-03-12 11:35:41 +0000
commit     8f3f329f5e0117bd1a23a79ab751f8a7d3471e4b (patch)
tree       b683fa38f0994564215074e9a7f9cc2b422bb4fa /migration/ram.c
parent     1c268991b3fe699fee16b1cbb9c6025d334c5b25 (diff)
parent     1815338df00fd0a3fe25085564c6966f74c8f43d (diff)
Merge tag 'migration-20240311-pull-request' of https://gitlab.com/peterx/qemu into staging
Migration pull request
- Avihai's fix to keep vmstate iterators from starving VFIO
- Maksim's fix adding an additional check on precopy load errors
- Fabiano's fix for an fdatasync() hang in mapped-ram
- Jonathan's fix for vring cached access over MMIO regions
- Cedric's cleanup patches 1-4 out of his error report series
- Yu's fix for RDMA migration (which was broken even in 8.2)
- Anthony's small cleanup/fix of an error message
- Steve's patches privatizing migration.h
- Xiang's patchset enabling zero page detection in multifd threads
  (a usage sketch follows this list)
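
As a usage sketch for the zero page work: assuming the new option is exposed
as a migration parameter named zero-page-detection taking none, legacy, or
multifd (the parameter name comes from the commit subject below, and the
ZERO_PAGE_DETECTION_NONE / ZERO_PAGE_DETECTION_LEGACY enum constants appear
in the diff), selecting the mode from the HMP monitor might look like:

    (qemu) migrate_set_parameter zero-page-detection multifd

or, over QMP:

    { "execute": "migrate-set-parameters",
      "arguments": { "zero-page-detection": "multifd" } }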
# -----BEGIN PGP SIGNATURE-----
#
# iIgEABYKADAWIQS5GE3CDMRX2s990ak7X8zN86vXBgUCZe9+uBIccGV0ZXJ4QHJl
# ZGhhdC5jb20ACgkQO1/MzfOr1wamaQD/SvmpMEcuRndT9LPSxzXowAGDZTBpYUfv
# 5XAbx80dS9IBAO8PJJgQJIBHBeacyLBjHP9CsdVtgw5/VW+wCsbfV4AB
# =xavb
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 11 Mar 2024 21:59:20 GMT
# gpg: using EDDSA key B9184DC20CC457DACF7DD1A93B5FCCCDF3ABD706
# gpg: issuer "peterx@redhat.com"
# gpg: Good signature from "Peter Xu <xzpeter@gmail.com>" [marginal]
# gpg: aka "Peter Xu <peterx@redhat.com>" [marginal]
# gpg: WARNING: This key is not certified with sufficiently trusted signatures!
# gpg: It is not certain that the signature belongs to the owner.
# Primary key fingerprint: B918 4DC2 0CC4 57DA CF7D D1A9 3B5F CCCD F3AB D706
* tag 'migration-20240311-pull-request' of https://gitlab.com/peterx/qemu: (34 commits)
migration/multifd: Add new migration test cases for legacy zero page checking.
migration/multifd: Enable multifd zero page checking by default.
migration/multifd: Implement ram_save_target_page_multifd to handle multifd version of MigrationOps::ram_save_target_page.
migration/multifd: Implement zero page transmission on the multifd thread.
migration/multifd: Add new migration option zero-page-detection.
migration/multifd: Allow clearing of the file_bmap from multifd
migration/multifd: Allow zero pages in file migration
migration: purge MigrationState from public interface
migration: delete unused accessors
migration: privatize colo interfaces
migration: migration_file_set_error
migration: migration_is_device
migration: migration_thread_is_self
migration: export vcpu_dirty_limit_period
migration: export migration_is_running
migration: export migration_is_active
migration: export migration_is_setup_or_active
migration: remove migration.h references
migration: export fewer options
migration: Fix format in error message
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'migration/ram.c')
-rw-r--r--   migration/ram.c   62
1 file changed, 47 insertions, 15 deletions
diff --git a/migration/ram.c b/migration/ram.c
index 003c28e133..8deb84984f 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1140,6 +1140,10 @@ static int save_zero_page(RAMState *rs, PageSearchStatus *pss,
     QEMUFile *file = pss->pss_channel;
     int len = 0;
 
+    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_NONE) {
+        return 0;
+    }
+
     if (!buffer_is_zero(p, TARGET_PAGE_SIZE)) {
         return 0;
     }
@@ -1284,7 +1288,6 @@ static int ram_save_multifd_page(RAMBlock *block, ram_addr_t offset)
     if (!multifd_queue_page(block, offset)) {
         return -1;
     }
-    stat64_add(&mig_stats.normal_pages, 1);
 
     return 1;
 }
@@ -2076,7 +2079,6 @@ static bool save_compress_page(RAMState *rs, PageSearchStatus *pss,
  */
 static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
 {
-    RAMBlock *block = pss->block;
     ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
     int res;
 
@@ -2092,17 +2094,33 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
         return 1;
     }
 
+    return ram_save_page(rs, pss);
+}
+
+/**
+ * ram_save_target_page_multifd: send one target page to multifd workers
+ *
+ * Returns 1 if the page was queued, -1 otherwise.
+ *
+ * @rs: current RAM state
+ * @pss: data about the page we want to send
+ */
+static int ram_save_target_page_multifd(RAMState *rs, PageSearchStatus *pss)
+{
+    RAMBlock *block = pss->block;
+    ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
+
     /*
-     * Do not use multifd in postcopy as one whole host page should be
-     * placed. Meanwhile postcopy requires atomic update of pages, so even
-     * if host page size == guest page size the dest guest during run may
-     * still see partially copied pages which is data corruption.
+     * While using multifd live migration, we still need to handle zero
+     * page checking on the migration main thread.
      */
-    if (migrate_multifd() && !migration_in_postcopy()) {
-        return ram_save_multifd_page(block, offset);
+    if (migrate_zero_page_detection() == ZERO_PAGE_DETECTION_LEGACY) {
+        if (save_zero_page(rs, pss, offset)) {
+            return 1;
+        }
     }
 
-    return ram_save_page(rs, pss);
+    return ram_save_multifd_page(block, offset);
 }
 
 /* Should be called before sending a host page */
@@ -2909,10 +2927,9 @@ void qemu_guest_free_page_hint(void *addr, size_t len)
     RAMBlock *block;
     ram_addr_t offset;
     size_t used_len, start, npages;
-    MigrationState *s = migrate_get_current();
 
     /* This function is currently expected to be used during live migration */
-    if (!migration_is_setup_or_active(s->state)) {
+    if (!migration_is_setup_or_active()) {
         return;
     }
 
@@ -3110,7 +3127,12 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     migration_ops = g_malloc0(sizeof(MigrationOps));
-    migration_ops->ram_save_target_page = ram_save_target_page_legacy;
+
+    if (migrate_multifd()) {
+        migration_ops->ram_save_target_page = ram_save_target_page_multifd;
+    } else {
+        migration_ops->ram_save_target_page = ram_save_target_page_legacy;
+    }
 
     bql_unlock();
 
     ret = multifd_send_sync_main();
@@ -3150,9 +3172,13 @@ static void ram_save_file_bmap(QEMUFile *f)
     }
 }
 
-void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset)
+void ramblock_set_file_bmap_atomic(RAMBlock *block, ram_addr_t offset, bool set)
 {
-    set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
+    if (set) {
+        set_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
+    } else {
+        clear_bit_atomic(offset >> TARGET_PAGE_BITS, block->file_bmap);
+    }
 }
 
 /**
@@ -3263,7 +3289,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
 
 out:
     if (ret >= 0
-        && migration_is_setup_or_active(migrate_get_current()->state)) {
+        && migration_is_setup_or_active()) {
         if (migrate_multifd() && migrate_multifd_flush_after_each_section() &&
             !migrate_mapped_ram()) {
             ret = multifd_send_sync_main();
@@ -4214,6 +4240,12 @@ static int ram_load_precopy(QEMUFile *f)
         i++;
 
         addr = qemu_get_be64(f);
+        ret = qemu_file_get_error(f);
+        if (ret) {
+            error_report("Getting RAM address failed");
+            break;
+        }
+
         flags = addr & ~TARGET_PAGE_MASK;
         addr &= TARGET_PAGE_MASK;
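
Background for the detection modes in the diff above: a page whose bytes are
all zero need not be transmitted at all, so the sender tests each page before
queueing it; the series only changes which thread runs that test (the main
migration thread for legacy, the multifd workers for multifd, nobody for
none). Below is a minimal, self-contained C sketch of the test itself;
page_is_zero and PAGE_SIZE_EXAMPLE are made-up stand-ins for QEMU's
vectorized buffer_is_zero() and TARGET_PAGE_SIZE, not code from the series:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_EXAMPLE 4096   /* stand-in for TARGET_PAGE_SIZE */

/* Stand-in for QEMU's buffer_is_zero(); the real one uses SIMD, but a
 * plain byte scan is enough to show the semantics. */
static bool page_is_zero(const uint8_t *p, size_t len)
{
    for (size_t i = 0; i < len; i++) {
        if (p[i]) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    uint8_t page[PAGE_SIZE_EXAMPLE] = { 0 };

    printf("all zero: %d\n", page_is_zero(page, sizeof(page)));  /* 1: skip it */
    page[123] = 0xff;                                            /* dirty one byte */
    printf("all zero: %d\n", page_is_zero(page, sizeof(page)));  /* 0: send it */
    return 0;
}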