 migration/ram.c | 33 +++++++++++++++++++------------
 1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index e4bfd39f08..2f5ce4d60b 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -394,6 +394,14 @@ struct RAMState {
     /* Queue of outstanding page requests from the destination */
     QemuMutex src_page_req_mutex;
     QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
+
+    /*
+     * This is only used when postcopy is in recovery phase, to communicate
+     * between the migration thread and the return path thread on dirty
+     * bitmap synchronizations. This field is unused in other stages of
+     * RAM migration.
+     */
+    unsigned int postcopy_bmap_sync_requested;
 };
 typedef struct RAMState RAMState;
 
@@ -4119,21 +4127,21 @@ static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
 {
     RAMBlock *block;
     QEMUFile *file = s->to_dst_file;
-    int ramblock_count = 0;
 
     trace_ram_dirty_bitmap_sync_start();
 
+    qatomic_set(&rs->postcopy_bmap_sync_requested, 0);
     RAMBLOCK_FOREACH_NOT_IGNORED(block) {
         qemu_savevm_send_recv_bitmap(file, block->idstr);
         trace_ram_dirty_bitmap_request(block->idstr);
-        ramblock_count++;
+        qatomic_inc(&rs->postcopy_bmap_sync_requested);
     }
 
     trace_ram_dirty_bitmap_sync_wait();
 
     /* Wait until all the ramblocks' dirty bitmap synced */
-    while (ramblock_count--) {
-        qemu_sem_wait(&s->rp_state.rp_sem);
+    while (qatomic_read(&rs->postcopy_bmap_sync_requested)) {
+        migration_rp_wait(s);
     }
 
     trace_ram_dirty_bitmap_sync_complete();
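
For reference, the handshake introduced in the hunk above reduces to the standalone sketch below. This is not QEMU code: plain C11 atomics and a POSIX semaphore stand in for the qatomic_*() helpers and for migration_rp_wait()/migration_rp_kick(), and the RAMBlock walk is reduced to a fixed loop.

/*
 * Standalone sketch of the pattern: one thread publishes a count of
 * outstanding requests, another thread drains the count and kicks after
 * every completion.  Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

#define NUM_BLOCKS 4

static atomic_uint sync_requested;   /* models rs->postcopy_bmap_sync_requested */
static sem_t rp_sem;                 /* models s->rp_state.rp_sem */

/* Models the return path thread: handle one "bitmap reload" per request. */
static void *return_path(void *arg)
{
    for (int i = 0; i < NUM_BLOCKS; i++) {
        /* ... reload one dirty bitmap here ... */
        atomic_fetch_sub(&sync_requested, 1);
        sem_post(&rp_sem);           /* always kick; never guess whether we're last */
    }
    return NULL;
}

int main(void)
{
    pthread_t rp;

    sem_init(&rp_sem, 0, 0);
    atomic_store(&sync_requested, 0);

    /* Models ram_dirty_bitmap_sync_all(): one request per RAMBlock. */
    for (int i = 0; i < NUM_BLOCKS; i++) {
        /* ... send the recv-bitmap request for block i here ... */
        atomic_fetch_add(&sync_requested, 1);
    }

    pthread_create(&rp, NULL, return_path, NULL);

    /* Wait until every requested bitmap has been handled. */
    while (atomic_load(&sync_requested)) {
        sem_wait(&rp_sem);           /* one wakeup per completion; re-check the count */
    }

    pthread_join(rp, NULL);
    sem_destroy(&rp_sem);
    printf("all %d bitmaps reloaded\n", NUM_BLOCKS);
    return 0;
}

The invariant matches the patch: every completed reload posts exactly one kick, so the waiting thread only needs to re-check the counter after each wakeup rather than track which completion was the last.
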
@@ -4141,11 +4149,6 @@ static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
     return 0;
 }
 
-static void ram_dirty_bitmap_reload_notify(MigrationState *s)
-{
-    qemu_sem_post(&s->rp_state.rp_sem);
-}
-
 /*
  * Read the received bitmap, revert it as the initial dirty bitmap.
  * This is only used when the postcopy migration is paused but wants
@@ -4159,6 +4162,7 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
     unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
     uint64_t local_size = DIV_ROUND_UP(nbits, 8);
     uint64_t size, end_mark;
+    RAMState *rs = ram_state;
 
     trace_ram_dirty_bitmap_reload_begin(block->idstr);
 
@@ -4225,11 +4229,16 @@ int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
     /* We'll recalculate migration_dirty_pages in ram_state_resume_prepare(). */
     trace_ram_dirty_bitmap_reload_complete(block->idstr);
 
+    qatomic_dec(&rs->postcopy_bmap_sync_requested);
+
     /*
-     * We succeeded to sync bitmap for current ramblock. If this is
-     * the last one to sync, we need to notify the main send thread.
+     * We succeeded to sync bitmap for current ramblock. Always kick the
+     * migration thread to check whether all requested bitmaps are
+     * reloaded. NOTE: it's racy to only kick when requested==0, because
+     * we don't know whether the migration thread may still be increasing
+     * it.
      */
-    ram_dirty_bitmap_reload_notify(s);
+    migration_rp_kick(s);
 
     ret = 0;
 out:
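
For context on the two helpers these hunks call: the removed lines wait and post directly on s->rp_state.rp_sem, so migration_rp_wait() and migration_rp_kick() are presumably thin wrappers over that same return-path semaphore, roughly as sketched below (illustrative only; the real definitions live in migration.c and may carry extra bookkeeping).

/* Illustrative sketch only -- not the actual QEMU definitions. */
void migration_rp_wait(MigrationState *s)
{
    /* Block the caller (the migration thread) until the return path kicks it. */
    qemu_sem_wait(&s->rp_state.rp_sem);
}

void migration_rp_kick(MigrationState *s)
{
    /*
     * Wake the migration thread; an extra post is harmless because
     * ram_dirty_bitmap_sync_all() re-checks the request counter after
     * every wakeup.
     */
    qemu_sem_post(&s->rp_state.rp_sem);
}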