author     Richard Henderson <richard.henderson@linaro.org>  2023-05-05 19:17:44 +0100
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-05 19:17:44 +0100
commit     8ad8256ac4ea305fe95967d16d2aba80b7631259 (patch)
tree       a9c0a2d9c03e80e5446a863cfcd484cc7d49ec34
parent     a9fe9e191b4305b88c356a1ed9ac3baf89eb18aa (diff)
parent     fae4009fb51b12927165667a9c9d6af93d31b1df (diff)
Merge tag 'migration-20230505-pull-request' of https://gitlab.com/juan.quintela/qemu into staging
Migration Pull request (20230505 edition)

In this series:
- fix block_bitmap_mapping (juan)
- RDMA cleanup (juan)
- qemu file cleanup (juan)

Please apply.

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEGJn/jt6/WMzuA0uC9IfvGFhy1yMFAmRUUhUACgkQ9IfvGFhy
# 1yMxkRAAk1jfunLx/0lfN9R2W3IDwOOPrcOOOd6e7zKb7GzccObKPlqb/eQyvjCe
# FjOenJ8qgh90sW3m99ZBEla3BKekJlCB88olTjHX6FzWz7HUXMv0ip9Xx/Hg3KA/
# gW8AJhHDI+dKpevmKdhWc3sDG+TvMF6YE3hrSm5TtZ0XdHCH+McvwQm6w6O5/CN0
# HjSqV2veweNFctBRaDk9KNvQ5o9/1UYp98N3FjoRGAuxdTeilBZ2dcSmGXrRj789
# nKSCkNxEAjM0cawuo5EqZ5TCy/hFCqWA+W9hFcz63i9bZAwDu/KF7KyR62kKEh5q
# X1JNKqlyuVNutM3Pn8kbTausoWUxEUgbeGI7ID/iQYmP6V36AyyYlASFlLDwPyQs
# lgHdEAzPFmQlHMDior4TKE7+9ZtAE6/g5yYaIuGh04pwhlBzwJ/rgyi7Y5sP1Yqr
# Y5n+y6Ku7wj+gSswZK5iLQ3OFrJfwFQHIfHtW+22oR7oc9Vg0n+1Xsp1cHdJBrWu
# TzdWjX3KnSMTN9x40dJW/7wkt5+XCZcfPcTP/828dGmk0FN8TJMuCvfh79te8tUj
# TQ3NDjV4TO8jZBNB3p1ZZfMmwKHvDCBLRr0aj3MVZSvAcoHPlR6yjMENhsm4ERDA
# Xtsbqt3mIIq0fIvmQHgXDiUvy2FQw/w3Zhrdb9GbBFdYB/T+iFU=
# =79n1
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 05 May 2023 01:47:17 AM BST
# gpg:                using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [undefined]
# gpg:                 aka "Juan Quintela <quintela@trasno.org>" [undefined]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723

* tag 'migration-20230505-pull-request' of https://gitlab.com/juan.quintela/qemu:
  qemu-file: Make ram_control_save_page() use accessors for rate_limit
  qemu-file: Make total_transferred an uint64_t
  qemu-file: remove shutdown member
  qemu-file: No need to check for shutdown in qemu_file_rate_limit
  migration: qemu_file_total_transferred() function is monotonic
  migration: max_postcopy_bandwidth is a size parameter
  migration/rdma: Check for postcopy sooner
  migration/rdma: It makes no sense to recive that flag without RDMA
  migration/rdma: We can calculate the rioc from the QEMUFile
  migration/rdma: simplify ram_control_load_hook()
  migration: Make RAM_SAVE_FLAG_HOOK a normal case entry
  migration: Rename xbzrle_enabled xbzrle_started
  migration: Put zero_pages in alphabetical order
  migration: Document all migration_stats
  migration/rdma: Don't pass the QIOChannelRDMA as an opaque
  migration: Fix block_bitmap_mapping migration

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
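
The first qemu-file patch in the list replaces direct manipulation of the QEMUFile rate-limit counter with accessor calls (qemu_file_acct_rate_limit() in the qemu-file.c hunk below). As a rough sketch of that accessor pattern only, with simplified stand-in types and function bodies rather than QEMU's real QEMUFile API:

/*
 * Sketch only: the struct and function bodies below are simplified
 * stand-ins, not QEMU's QEMUFile implementation.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t rate_limit_max;   /* bytes allowed per time slice */
    uint64_t rate_limit_used;  /* bytes already accounted in this slice */
} File;

/* Callers account transferred bytes through an accessor... */
static void file_acct_rate_limit(File *f, uint64_t len)
{
    f->rate_limit_used += len;
}

/* ...and query the limit the same way, instead of poking struct fields. */
static int file_rate_limit_reached(const File *f)
{
    return f->rate_limit_used >= f->rate_limit_max;
}

int main(void)
{
    File f = { .rate_limit_max = 4096, .rate_limit_used = 0 };
    file_acct_rate_limit(&f, 4096);
    printf("rate limited: %d\n", file_rate_limit_reached(&f));
    return 0;
}

Keeping the counter behind accessors lets the accounting change later without touching every caller, which is what allows ram_control_save_page() to stop reaching into the struct in the hunk below.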
-rw-r--r--  migration/block-dirty-bitmap.c   14
-rw-r--r--  migration/block.c                13
-rw-r--r--  migration/migration-stats.h      45
-rw-r--r--  migration/migration.c             4
-rw-r--r--  migration/options.c               9
-rw-r--r--  migration/options.h               4
-rw-r--r--  migration/qemu-file.c            35
-rw-r--r--  migration/qemu-file.h             4
-rw-r--r--  migration/ram.c                  26
-rw-r--r--  migration/rdma.c                 36
-rw-r--r--  migration/savevm.c                6
-rw-r--r--  migration/vmstate.c               2
12 files changed, 109 insertions, 89 deletions
diff --git a/migration/block-dirty-bitmap.c b/migration/block-dirty-bitmap.c
index 6624f39bc6..20f36e6bd8 100644
--- a/migration/block-dirty-bitmap.c
+++ b/migration/block-dirty-bitmap.c
@@ -606,11 +606,9 @@ static int init_dirty_bitmap_migration(DBMSaveState *s)
GHashTable *handled_by_blk = g_hash_table_new(NULL, NULL);
BlockBackend *blk;
GHashTable *alias_map = NULL;
- const BitmapMigrationNodeAliasList *block_bitmap_mapping =
- migrate_block_bitmap_mapping();
- if (block_bitmap_mapping) {
- alias_map = construct_alias_map(block_bitmap_mapping, true,
+ if (migrate_has_block_bitmap_mapping()) {
+ alias_map = construct_alias_map(migrate_block_bitmap_mapping(), true,
&error_abort);
}
@@ -1159,8 +1157,6 @@ static int dirty_bitmap_load_header(QEMUFile *f, DBMLoadState *s,
static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
{
GHashTable *alias_map = NULL;
- const BitmapMigrationNodeAliasList *block_bitmap_mapping =
- migrate_block_bitmap_mapping();
DBMLoadState *s = &((DBMState *)opaque)->load;
int ret = 0;
@@ -1172,9 +1168,9 @@ static int dirty_bitmap_load(QEMUFile *f, void *opaque, int version_id)
return -EINVAL;
}
- if (block_bitmap_mapping) {
- alias_map = construct_alias_map(block_bitmap_mapping,
- false, &error_abort);
+ if (migrate_has_block_bitmap_mapping()) {
+ alias_map = construct_alias_map(migrate_block_bitmap_mapping(), false,
+ &error_abort);
}
do {
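
The block_bitmap_mapping fix hinges on telling "parameter never set" apart from "parameter set, possibly to an empty list", which the new migrate_has_block_bitmap_mapping() check above can do and a plain NULL test on the list cannot. A minimal standalone sketch of that has-flag pattern, using simplified stand-in types rather than QEMU's real ones:

/*
 * Sketch (not QEMU code): a NULL list can mean either "the user never
 * set block-bitmap-mapping" or "the user set it to an empty mapping";
 * only the has_ flag distinguishes the two.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

typedef struct Mapping Mapping;          /* stand-in for the alias list */

typedef struct {
    bool has_block_bitmap_mapping;       /* was the parameter set at all? */
    Mapping *block_bitmap_mapping;       /* may be NULL even when set */
} Params;

static void build_alias_map(const Params *p)
{
    if (p->has_block_bitmap_mapping) {   /* check the flag, not the pointer */
        printf("constructing alias map (possibly empty)\n");
    } else {
        printf("no mapping configured, using node names directly\n");
    }
}

int main(void)
{
    Params unset = { .has_block_bitmap_mapping = false };
    Params empty = { .has_block_bitmap_mapping = true,
                     .block_bitmap_mapping = NULL };
    build_alias_map(&unset);
    build_alias_map(&empty);
    return 0;
}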
diff --git a/migration/block.c b/migration/block.c
index 6d532ac7a2..a37678ce95 100644
--- a/migration/block.c
+++ b/migration/block.c
@@ -747,8 +747,7 @@ static int block_save_setup(QEMUFile *f, void *opaque)
static int block_save_iterate(QEMUFile *f, void *opaque)
{
int ret;
- int64_t last_bytes = qemu_file_total_transferred(f);
- int64_t delta_bytes;
+ uint64_t last_bytes = qemu_file_total_transferred(f);
trace_migration_block_save("iterate", block_mig_state.submitted,
block_mig_state.transferred);
@@ -800,14 +799,8 @@ static int block_save_iterate(QEMUFile *f, void *opaque)
}
qemu_put_be64(f, BLK_MIG_FLAG_EOS);
- delta_bytes = qemu_file_total_transferred(f) - last_bytes;
- if (delta_bytes > 0) {
- return 1;
- } else if (delta_bytes < 0) {
- return -1;
- } else {
- return 0;
- }
+ uint64_t delta_bytes = qemu_file_total_transferred(f) - last_bytes;
+ return (delta_bytes > 0);
}
/* Called with iothread lock taken. */
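
The simplification above relies on qemu_file_total_transferred() being monotonic: with an unsigned counter that only grows, the difference between two snapshots can never be negative, so the old three-way return collapses to a progress test. A small self-contained sketch of that reasoning (illustrative code, not QEMU's):

/*
 * Sketch: a monotonic, unsigned transfer counter makes the delta
 * between two snapshots non-negative by construction.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t total_transferred;   /* only ever grows */

static void send_bytes(uint64_t n)
{
    total_transferred += n;
}

static int save_iterate(uint64_t n)
{
    uint64_t last = total_transferred;
    send_bytes(n);
    uint64_t delta = total_transferred - last;   /* >= 0 by construction */
    return delta > 0;                            /* 1: made progress, 0: idle */
}

int main(void)
{
    printf("%d\n", save_iterate(4096));  /* 1 */
    printf("%d\n", save_iterate(0));     /* 0 */
    return 0;
}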
diff --git a/migration/migration-stats.h b/migration/migration-stats.h
index 149af932d7..cf8a4f0410 100644
--- a/migration/migration-stats.h
+++ b/migration/migration-stats.h
@@ -22,18 +22,61 @@
* one thread).
*/
typedef struct {
+ /*
+ * Number of bytes that were dirty last time that we synced with
+ * the guest memory. We use that to calculate the downtime. As
+ * the remaining dirty amounts to what we know that is still dirty
+ * since last iteration, not counting what the guest has dirtied
+ * since we synchronized bitmaps.
+ */
Stat64 dirty_bytes_last_sync;
+ /*
+ * Number of pages dirtied per second.
+ */
Stat64 dirty_pages_rate;
+ /*
+ * Number of times we have synchronized guest bitmaps.
+ */
Stat64 dirty_sync_count;
+ /*
+ * Number of times zero copy failed to send any page using zero
+ * copy.
+ */
Stat64 dirty_sync_missed_zero_copy;
+ /*
+ * Number of bytes sent at migration completion stage while the
+ * guest is stopped.
+ */
Stat64 downtime_bytes;
- Stat64 zero_pages;
+ /*
+ * Number of bytes sent through multifd channels.
+ */
Stat64 multifd_bytes;
+ /*
+ * Number of pages transferred that were not full of zeros.
+ */
Stat64 normal_pages;
+ /*
+ * Number of bytes sent during postcopy.
+ */
Stat64 postcopy_bytes;
+ /*
+ * Number of postcopy page faults that we have handled during
+ * postcopy stage.
+ */
Stat64 postcopy_requests;
+ /*
+ * Number of bytes sent during precopy stage.
+ */
Stat64 precopy_bytes;
+ /*
+ * Total number of bytes transferred.
+ */
Stat64 transferred;
+ /*
+ * Number of pages transferred that were full of zeros.
+ */
+ Stat64 zero_pages;
} MigrationAtomicStats;
extern MigrationAtomicStats mig_stats;
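
These counters are updated concurrently by the migration code, which is why they are Stat64 values rather than plain integers. The following is only a conceptual sketch using C11 atomics with made-up names; QEMU's Stat64 API is not reproduced here:

/*
 * Sketch: atomic counters can be bumped from several threads without
 * a lock. Illustration only, not QEMU's Stat64.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    _Atomic uint64_t transferred;
    _Atomic uint64_t zero_pages;
} Stats;

static Stats stats;

static void account_page(uint64_t bytes, int is_zero)
{
    if (is_zero) {
        atomic_fetch_add(&stats.zero_pages, 1);      /* page was all zeros */
    } else {
        atomic_fetch_add(&stats.transferred, bytes); /* real payload bytes */
    }
}

int main(void)
{
    account_page(4096, 0);
    account_page(4096, 1);
    printf("transferred=%llu zero_pages=%llu\n",
           (unsigned long long)atomic_load(&stats.transferred),
           (unsigned long long)atomic_load(&stats.zero_pages));
    return 0;
}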
diff --git a/migration/migration.c b/migration/migration.c
index feb5ab7493..232e387109 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -2056,7 +2056,7 @@ static int postcopy_start(MigrationState *ms)
QIOChannelBuffer *bioc;
QEMUFile *fb;
int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
- int64_t bandwidth = migrate_max_postcopy_bandwidth();
+ uint64_t bandwidth = migrate_max_postcopy_bandwidth();
bool restart_block = false;
int cur_state = MIGRATION_STATUS_ACTIVE;
@@ -3176,7 +3176,7 @@ fail:
void migrate_fd_connect(MigrationState *s, Error *error_in)
{
Error *local_err = NULL;
- int64_t rate_limit;
+ uint64_t rate_limit;
bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
/*
diff --git a/migration/options.c b/migration/options.c
index 53b7fc5d5d..2e759cc306 100644
--- a/migration/options.c
+++ b/migration/options.c
@@ -626,6 +626,13 @@ const BitmapMigrationNodeAliasList *migrate_block_bitmap_mapping(void)
return s->parameters.block_bitmap_mapping;
}
+bool migrate_has_block_bitmap_mapping(void)
+{
+ MigrationState *s = migrate_get_current();
+
+ return s->parameters.has_block_bitmap_mapping;
+}
+
bool migrate_block_incremental(void)
{
MigrationState *s = migrate_get_current();
@@ -710,7 +717,7 @@ uint64_t migrate_max_bandwidth(void)
return s->parameters.max_bandwidth;
}
-int64_t migrate_max_postcopy_bandwidth(void)
+uint64_t migrate_max_postcopy_bandwidth(void)
{
MigrationState *s = migrate_get_current();
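
The switch to uint64_t follows from max_postcopy_bandwidth being a size parameter, i.e. an unsigned 64-bit value, so carrying it in a signed 64-bit variable only narrows the usable range. A tiny illustrative sketch (hypothetical names, not QEMU code):

/*
 * Sketch: keep an unsigned 64-bit setting unsigned all the way down,
 * so even values that would not fit in int64_t survive unchanged.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t max_postcopy_bandwidth;   /* bytes per second, 0 = unlimited */

static void set_bandwidth(uint64_t value)
{
    max_postcopy_bandwidth = value;       /* no signed/unsigned conversion */
}

int main(void)
{
    set_bandwidth(UINT64_C(1) << 63);     /* would not fit in int64_t */
    printf("%" PRIu64 "\n", max_postcopy_bandwidth);
    return 0;
}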
diff --git a/migration/options.h b/migration/options.h
index 3c322867cd..5cca3326d6 100644
--- a/migration/options.h
+++ b/migration/options.h
@@ -71,6 +71,8 @@ bool migrate_cap_set(int cap, bool value, Error **errp);
/* parameters */
const BitmapMigrationNodeAliasList *migrate_block_bitmap_mapping(void);
+bool migrate_has_block_bitmap_mapping(void);
+
bool migrate_block_incremental(void);
uint32_t migrate_checkpoint_delay(void);
int migrate_compress_level(void);
@@ -83,7 +85,7 @@ int migrate_decompress_threads(void);
uint64_t migrate_downtime_limit(void);
uint8_t migrate_max_cpu_throttle(void);
uint64_t migrate_max_bandwidth(void);
-int64_t migrate_max_postcopy_bandwidth(void);
+uint64_t migrate_max_postcopy_bandwidth(void);
int migrate_multifd_channels(void);
MultiFDCompression migrate_multifd_compression(void);
int migrate_multifd_zlib_level(void);
diff --git a/migration/qemu-file.c b/migration/qemu-file.c
index ee04240a21..f4cfd05c67 100644
--- a/migration/qemu-file.c
+++ b/migration/qemu-file.c
@@ -51,7 +51,7 @@ struct QEMUFile {
int64_t rate_limit_used;
/* The sum of bytes transferred on the wire */
- int64_t total_transferred;
+ uint64_t total_transferred;
int buf_index;
int buf_size; /* 0 when writing */
@@ -63,8 +63,6 @@ struct QEMUFile {
int last_error;
Error *last_error_obj;
- /* has the file has been shutdown */
- bool shutdown;
};
/*
@@ -78,8 +76,6 @@ int qemu_file_shutdown(QEMUFile *f)
{
int ret = 0;
- f->shutdown = true;
-
/*
* We must set qemufile error before the real shutdown(), otherwise
* there can be a race window where we thought IO all went though
@@ -294,7 +290,7 @@ void qemu_fflush(QEMUFile *f)
return;
}
- if (f->shutdown) {
+ if (qemu_file_get_error(f)) {
return;
}
if (f->iovcnt > 0) {
@@ -340,21 +336,11 @@ void ram_control_after_iterate(QEMUFile *f, uint64_t flags)
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data)
{
- int ret = -EINVAL;
-
if (f->hooks && f->hooks->hook_ram_load) {
- ret = f->hooks->hook_ram_load(f, flags, data);
+ int ret = f->hooks->hook_ram_load(f, flags, data);
if (ret < 0) {
qemu_file_set_error(f, ret);
}
- } else {
- /*
- * Hook is a hook specifically requested by the source sending a flag
- * that expects there to be a hook on the destination.
- */
- if (flags == RAM_CONTROL_HOOK) {
- qemu_file_set_error(f, ret);
- }
}
}
@@ -366,7 +352,7 @@ size_t ram_control_save_page(QEMUFile *f, ram_addr_t block_offset,
int ret = f->hooks->save_page(f, block_offset,
offset, size, bytes_sent);
if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
- f->rate_limit_used += size;
+ qemu_file_acct_rate_limit(f, size);
}
if (ret != RAM_SAVE_CONTROL_DELAYED &&
@@ -407,7 +393,7 @@ static ssize_t coroutine_mixed_fn qemu_fill_buffer(QEMUFile *f)
f->buf_index = 0;
f->buf_size = pending;
- if (f->shutdown) {
+ if (qemu_file_get_error(f)) {
return 0;
}
@@ -496,7 +482,7 @@ static int add_to_iovec(QEMUFile *f, const uint8_t *buf, size_t size,
} else {
if (f->iovcnt >= MAX_IOV_SIZE) {
/* Should only happen if a previous fflush failed */
- assert(f->shutdown || !qemu_file_is_writable(f));
+ assert(qemu_file_get_error(f) || !qemu_file_is_writable(f));
return 1;
}
if (may_free) {
@@ -722,9 +708,9 @@ int coroutine_mixed_fn qemu_get_byte(QEMUFile *f)
return result;
}
-int64_t qemu_file_total_transferred_fast(QEMUFile *f)
+uint64_t qemu_file_total_transferred_fast(QEMUFile *f)
{
- int64_t ret = f->total_transferred;
+ uint64_t ret = f->total_transferred;
int i;
for (i = 0; i < f->iovcnt; i++) {
@@ -734,7 +720,7 @@ int64_t qemu_file_total_transferred_fast(QEMUFile *f)
return ret;
}
-int64_t qemu_file_total_transferred(QEMUFile *f)
+uint64_t qemu_file_total_transferred(QEMUFile *f)
{
qemu_fflush(f);
return f->total_transferred;
@@ -742,9 +728,6 @@ int64_t qemu_file_total_transferred(QEMUFile *f)
int qemu_file_rate_limit(QEMUFile *f)
{
- if (f->shutdown) {
- return 1;
- }
if (qemu_file_get_error(f)) {
return 1;
}
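
Dropping the shutdown member works because qemu_file_shutdown() records an error on the file before performing the real shutdown, so every former f->shutdown test can use qemu_file_get_error() instead. A simplified standalone sketch of that idea (stand-in types and error handling, not QEMU's implementation):

/*
 * Sketch: shutdown is folded into the error state; callers check the
 * error instead of a separate flag.
 */
#include <errno.h>
#include <stdio.h>

typedef struct {
    int last_error;          /* 0 = healthy, negative errno otherwise */
} File;

static int file_get_error(const File *f)
{
    return f->last_error;
}

static void file_set_error(File *f, int err)
{
    if (!f->last_error) {    /* keep the first error recorded */
        f->last_error = err;
    }
}

static void file_shutdown(File *f)
{
    /* Set the error before the real shutdown so readers never see a
     * "shut down but not failed" window. */
    file_set_error(f, -EIO);
    /* ... perform the actual channel shutdown here ... */
}

static void file_flush(File *f)
{
    if (file_get_error(f)) { /* replaces the old "if (f->shutdown)" test */
        return;
    }
    printf("flushing buffered data\n");
}

int main(void)
{
    File f = { 0 };
    file_flush(&f);          /* flushes */
    file_shutdown(&f);
    file_flush(&f);          /* silently skipped */
    return 0;
}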
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index d16cd50448..4f26bf6961 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -83,7 +83,7 @@ int qemu_fclose(QEMUFile *f);
*
* Returns: the total bytes transferred
*/
-int64_t qemu_file_total_transferred(QEMUFile *f);
+uint64_t qemu_file_total_transferred(QEMUFile *f);
/*
* qemu_file_total_transferred_fast:
@@ -95,7 +95,7 @@ int64_t qemu_file_total_transferred(QEMUFile *f);
*
* Returns: the total bytes transferred and queued
*/
-int64_t qemu_file_total_transferred_fast(QEMUFile *f);
+uint64_t qemu_file_total_transferred_fast(QEMUFile *f);
/*
* put_buffer without copying the buffer.
diff --git a/migration/ram.c b/migration/ram.c
index 7d81c4a39e..5e7bf20ca5 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -388,8 +388,8 @@ struct RAMState {
uint64_t xbzrle_pages_prev;
/* Amount of xbzrle encoded bytes since the beginning of the period */
uint64_t xbzrle_bytes_prev;
- /* Start using XBZRLE (e.g., after the first round). */
- bool xbzrle_enabled;
+ /* Are we really using XBZRLE (e.g., after the first round). */
+ bool xbzrle_started;
/* Are we on the last stage of migration */
bool last_stage;
/* compression statistics since the beginning of the period */
@@ -1420,7 +1420,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss)
trace_ram_save_page(block->idstr, (uint64_t)offset, p);
XBZRLE_cache_lock();
- if (rs->xbzrle_enabled && !migration_in_postcopy()) {
+ if (rs->xbzrle_started && !migration_in_postcopy()) {
pages = save_xbzrle_page(rs, pss, &p, current_addr,
block, offset);
if (!rs->last_stage) {
@@ -1636,7 +1636,7 @@ static int find_dirty_block(RAMState *rs, PageSearchStatus *pss)
pss->complete_round = true;
/* After the first round, enable XBZRLE. */
if (migrate_xbzrle()) {
- rs->xbzrle_enabled = true;
+ rs->xbzrle_started = true;
}
}
/* Didn't find anything this time, but try again on the new block */
@@ -2288,7 +2288,7 @@ static bool save_page_use_compression(RAMState *rs)
* using the data compression. In theory, xbzrle can do better than
* compression.
*/
- if (rs->xbzrle_enabled) {
+ if (rs->xbzrle_started) {
return false;
}
@@ -2357,7 +2357,7 @@ static int ram_save_target_page_legacy(RAMState *rs, PageSearchStatus *pss)
/* Must let xbzrle know, otherwise a previous (now 0'd) cached
* page would be stale
*/
- if (rs->xbzrle_enabled) {
+ if (rs->xbzrle_started) {
XBZRLE_cache_lock();
xbzrle_cache_zero_page(rs, block->offset + offset);
XBZRLE_cache_unlock();
@@ -2738,7 +2738,7 @@ static void ram_state_reset(RAMState *rs)
rs->last_seen_block = NULL;
rs->last_page = 0;
rs->last_version = ram_list.version;
- rs->xbzrle_enabled = false;
+ rs->xbzrle_started = false;
}
#define MAX_WAIT 50 /* ms, half buffered_file limit */
@@ -4445,14 +4445,12 @@ static int ram_load_precopy(QEMUFile *f)
multifd_recv_sync_main();
}
break;
+ case RAM_SAVE_FLAG_HOOK:
+ ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
+ break;
default:
- if (flags & RAM_SAVE_FLAG_HOOK) {
- ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
- } else {
- error_report("Unknown combination of migration flags: 0x%x",
- flags);
- ret = -EINVAL;
- }
+ error_report("Unknown combination of migration flags: 0x%x", flags);
+ ret = -EINVAL;
}
if (!ret) {
ret = qemu_file_get_error(f);
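
Making RAM_SAVE_FLAG_HOOK a normal case entry is possible because the hook flag arrives as a value of its own, so it no longer needs to be fished out of the default branch with a bitwise test. A small sketch of that switch shape with made-up flag values (not QEMU's real encoding):

/*
 * Sketch: a flag that only ever arrives alone gets its own case label;
 * the default branch is left for genuinely unknown combinations.
 */
#include <stdio.h>

enum {
    FLAG_PAGE = 0x01,
    FLAG_EOS  = 0x02,
    FLAG_HOOK = 0x04,
};

static int load_one(int flags)
{
    switch (flags) {
    case FLAG_PAGE:
        printf("load page\n");
        return 0;
    case FLAG_EOS:
        printf("end of section\n");
        return 0;
    case FLAG_HOOK:                      /* was hidden in the default branch */
        printf("run load hook\n");
        return 0;
    default:
        fprintf(stderr, "Unknown combination of migration flags: 0x%x\n", flags);
        return -1;                        /* -EINVAL in the real code */
    }
}

int main(void)
{
    load_one(FLAG_HOOK);
    load_one(0x40);
    return 0;
}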
diff --git a/migration/rdma.c b/migration/rdma.c
index 7e747b2595..2cd8f1cc66 100644
--- a/migration/rdma.c
+++ b/migration/rdma.c
@@ -3234,6 +3234,10 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
RDMAContext *rdma;
int ret;
+ if (migration_in_postcopy()) {
+ return RAM_SAVE_CONTROL_NOT_SUPP;
+ }
+
RCU_READ_LOCK_GUARD();
rdma = qatomic_rcu_read(&rioc->rdmaout);
@@ -3243,10 +3247,6 @@ static size_t qemu_rdma_save_page(QEMUFile *f,
CHECK_ERROR_STATE();
- if (migration_in_postcopy()) {
- return RAM_SAVE_CONTROL_NOT_SUPP;
- }
-
qemu_fflush(f);
/*
@@ -3527,7 +3527,7 @@ static int dest_ram_sort_func(const void *a, const void *b)
*
* Keep doing this until the source tells us to stop.
*/
-static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
+static int qemu_rdma_registration_handle(QEMUFile *f)
{
RDMAControlHeader reg_resp = { .len = sizeof(RDMARegisterResult),
.type = RDMA_CONTROL_REGISTER_RESULT,
@@ -3539,7 +3539,7 @@ static int qemu_rdma_registration_handle(QEMUFile *f, void *opaque)
};
RDMAControlHeader blocks = { .type = RDMA_CONTROL_RAM_BLOCKS_RESULT,
.repeat = 1 };
- QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(opaque);
+ QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
RDMAContext *rdma;
RDMALocalBlocks *local;
RDMAControlHeader head;
@@ -3811,9 +3811,10 @@ out:
* the source.
*/
static int
-rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name)
+rdma_block_notification_handle(QEMUFile *f, const char *name)
{
RDMAContext *rdma;
+ QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
int curr;
int found = -1;
@@ -3846,13 +3847,12 @@ rdma_block_notification_handle(QIOChannelRDMA *rioc, const char *name)
static int rdma_load_hook(QEMUFile *f, uint64_t flags, void *data)
{
- QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
switch (flags) {
case RAM_CONTROL_BLOCK_REG:
- return rdma_block_notification_handle(rioc, data);
+ return rdma_block_notification_handle(f, data);
case RAM_CONTROL_HOOK:
- return qemu_rdma_registration_handle(f, rioc);
+ return qemu_rdma_registration_handle(f);
default:
/* Shouldn't be called with any other values */
@@ -3866,6 +3866,10 @@ static int qemu_rdma_registration_start(QEMUFile *f,
QIOChannelRDMA *rioc = QIO_CHANNEL_RDMA(qemu_file_get_ioc(f));
RDMAContext *rdma;
+ if (migration_in_postcopy()) {
+ return 0;
+ }
+
RCU_READ_LOCK_GUARD();
rdma = qatomic_rcu_read(&rioc->rdmaout);
if (!rdma) {
@@ -3874,10 +3878,6 @@ static int qemu_rdma_registration_start(QEMUFile *f,
CHECK_ERROR_STATE();
- if (migration_in_postcopy()) {
- return 0;
- }
-
trace_qemu_rdma_registration_start(flags);
qemu_put_be64(f, RAM_SAVE_FLAG_HOOK);
qemu_fflush(f);
@@ -3897,6 +3897,10 @@ static int qemu_rdma_registration_stop(QEMUFile *f,
RDMAControlHeader head = { .len = 0, .repeat = 1 };
int ret = 0;
+ if (migration_in_postcopy()) {
+ return 0;
+ }
+
RCU_READ_LOCK_GUARD();
rdma = qatomic_rcu_read(&rioc->rdmaout);
if (!rdma) {
@@ -3905,10 +3909,6 @@ static int qemu_rdma_registration_stop(QEMUFile *f,
CHECK_ERROR_STATE();
- if (migration_in_postcopy()) {
- return 0;
- }
-
qemu_fflush(f);
ret = qemu_rdma_drain_cq(f, rdma);
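
Several of the RDMA cleanups above follow the same line: the handlers already receive the QEMUFile, and the QIOChannelRDMA can be recomputed from it with qemu_file_get_ioc(), so passing it again as an opaque parameter is redundant. A simplified standalone sketch of that shape (stand-in structs, not the real QEMUFile/QIOChannelRDMA types):

/*
 * Sketch: derive the channel from the file instead of threading it
 * through as an extra "opaque" argument.
 */
#include <stdio.h>

typedef struct {
    const char *name;        /* stand-in for the RDMA channel state */
} Channel;

typedef struct {
    Channel *ioc;            /* the file already knows its channel */
} File;

static Channel *file_get_ioc(File *f)
{
    return f->ioc;
}

/* Old shape: handler(File *f, void *opaque) with opaque == the channel.
 * New shape: the channel is recomputed from the file, so the extra
 * parameter (and the chance of passing a mismatched one) disappears. */
static int registration_handle(File *f)
{
    Channel *rioc = file_get_ioc(f);
    printf("handling registrations on %s\n", rioc->name);
    return 0;
}

int main(void)
{
    Channel c = { .name = "rdma0" };
    File f = { .ioc = &c };
    return registration_handle(&f);
}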
diff --git a/migration/savevm.c b/migration/savevm.c
index a9d0a88e62..032044b1d5 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -927,11 +927,9 @@ static int vmstate_load(QEMUFile *f, SaveStateEntry *se)
static void vmstate_save_old_style(QEMUFile *f, SaveStateEntry *se,
JSONWriter *vmdesc)
{
- int64_t old_offset, size;
-
- old_offset = qemu_file_total_transferred_fast(f);
+ uint64_t old_offset = qemu_file_total_transferred_fast(f);
se->ops->save_state(f, se->opaque);
- size = qemu_file_total_transferred_fast(f) - old_offset;
+ uint64_t size = qemu_file_total_transferred_fast(f) - old_offset;
if (vmdesc) {
json_writer_int64(vmdesc, "size", size);
diff --git a/migration/vmstate.c b/migration/vmstate.c
index 83ca4c7d3e..351f56104e 100644
--- a/migration/vmstate.c
+++ b/migration/vmstate.c
@@ -349,7 +349,7 @@ int vmstate_save_state_v(QEMUFile *f, const VMStateDescription *vmsd,
void *first_elem = opaque + field->offset;
int i, n_elems = vmstate_n_elems(opaque, field);
int size = vmstate_size(opaque, field);
- int64_t old_offset, written_bytes;
+ uint64_t old_offset, written_bytes;
JSONWriter *vmdesc_loop = vmdesc;
trace_vmstate_save_state_loop(vmsd->name, field->name, n_elems);