author | Cédric Le Goater <clg@redhat.com> | 2024-03-20 07:49:07 +0100 |
---|---|---|
committer | Peter Xu <peterx@redhat.com> | 2024-04-23 18:36:01 -0400 |
commit | 639ec3fbf96c15b1568f52a50b9fa727cde3144b (patch) | |
tree | 04469f5ba2821828172bd72561adb861d126b517 /migration | |
parent | 92c20b2fc5cd3b423973a65aac945a605f93142e (diff) |
memory: Add Error** argument to the global_dirty_log routines
Now that the log_global*() handlers take an Error** parameter and
return a bool, do the same for memory_global_dirty_log_start() and
memory_global_dirty_log_stop(). The error is reported in the callers
for now; it will be propagated up the call stack in subsequent
changes.
Note a functional change in ram_init_bitmaps(): if the dirty page
logger fails to start, there is no need to synchronize the dirty
page bitmaps. colo_incoming_start_dirty_log() could be modified in a
similar way.
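
For illustration, the caller-side pattern this patch introduces looks roughly like the sketch below. The wrapper function and its name are made up for the example; memory_global_dirty_log_start(), error_report_err() and bql_lock()/bql_unlock() are the same QEMU calls used in the diff further down.

```c
#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* bql_lock(), bql_unlock() */
#include "qapi/error.h"       /* Error, error_report_err() */
#include "exec/memory.h"      /* memory_global_dirty_log_start() */

/*
 * Illustrative wrapper (not part of the patch): start dirty page
 * logging for the given flags and, for now, report any failure
 * locally instead of propagating it.
 */
static void start_dirty_logging_sketch(unsigned int flags)
{
    Error *local_err = NULL;

    bql_lock();
    if (!memory_global_dirty_log_start(flags, &local_err)) {
        /* error_report_err() prints and frees the error. */
        error_report_err(local_err);
    }
    bql_unlock();
}
```

The start path of global_dirty_log_change() in migration/dirtyrate.c follows this shape.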
Cc: Stefano Stabellini <sstabellini@kernel.org>
Cc: Anthony Perard <anthony.perard@citrix.com>
Cc: Paul Durrant <paul@xen.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Hyman Huang <yong.huang@smartx.com>
Signed-off-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Acked-by: Peter Xu <peterx@redhat.com>
Link: https://lore.kernel.org/r/20240320064911.545001-12-clg@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Diffstat (limited to 'migration')
-rw-r--r-- | migration/dirtyrate.c | 13 |
-rw-r--r-- | migration/ram.c | 23 |
2 files changed, 32 insertions, 4 deletions
diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c
index 1d2e85746f..d02d70b7b4 100644
--- a/migration/dirtyrate.c
+++ b/migration/dirtyrate.c
@@ -90,9 +90,15 @@ static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages,
 
 void global_dirty_log_change(unsigned int flag, bool start)
 {
+    Error *local_err = NULL;
+    bool ret;
+
     bql_lock();
     if (start) {
-        memory_global_dirty_log_start(flag);
+        ret = memory_global_dirty_log_start(flag, &local_err);
+        if (!ret) {
+            error_report_err(local_err);
+        }
     } else {
         memory_global_dirty_log_stop(flag);
     }
@@ -608,9 +614,12 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config)
 {
     int64_t start_time;
     DirtyPageRecord dirty_pages;
+    Error *local_err = NULL;
 
     bql_lock();
-    memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE);
+    if (!memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE, &local_err)) {
+        error_report_err(local_err);
+    }
 
     /*
      * 1'round of log sync may return all 1 bits with
diff --git a/migration/ram.c b/migration/ram.c
index f0bd71438a..bade3e9281 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2862,18 +2862,32 @@ static void migration_bitmap_clear_discarded_pages(RAMState *rs)
 
 static void ram_init_bitmaps(RAMState *rs)
 {
+    Error *local_err = NULL;
+    bool ret = true;
+
     qemu_mutex_lock_ramlist();
 
     WITH_RCU_READ_LOCK_GUARD() {
         ram_list_init_bitmaps();
         /* We don't use dirty log with background snapshots */
         if (!migrate_background_snapshot()) {
-            memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
+            ret = memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION,
+                                                &local_err);
+            if (!ret) {
+                error_report_err(local_err);
+                goto out_unlock;
+            }
             migration_bitmap_sync_precopy(rs, false);
         }
     }
+out_unlock:
     qemu_mutex_unlock_ramlist();
 
+    if (!ret) {
+        ram_bitmaps_destroy();
+        return;
+    }
+
     /*
      * After an eventual first bitmap sync, fixup the initial bitmap
      * containing all 1s to exclude any discarded pages from migration.
@@ -3665,6 +3679,8 @@ int colo_init_ram_cache(void)
 void colo_incoming_start_dirty_log(void)
 {
     RAMBlock *block = NULL;
+    Error *local_err = NULL;
+
     /* For memory_global_dirty_log_start below. */
     bql_lock();
     qemu_mutex_lock_ramlist();
@@ -3676,7 +3692,10 @@ void colo_incoming_start_dirty_log(void)
             /* Discard this dirty bitmap record */
             bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
         }
-        memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION);
+        if (!memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION,
+                                           &local_err)) {
+            error_report_err(local_err);
+        }
     }
     ram_state->migration_dirty_pages = 0;
     qemu_mutex_unlock_ramlist();
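
The commit message announces that the error will later be propagated up the call stack instead of being reported on the spot. A minimal sketch of what such a caller could look like, following QEMU's usual bool-plus-Error** convention, is given below; the function name is hypothetical and not part of this patch or any specific follow-up.

```c
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"

/*
 * Hypothetical caller shape for the announced follow-up: instead of
 * reporting the failure locally, hand it to the caller's Error **
 * and let the upper layers decide what to do.
 */
static bool dirty_log_start_for_migration(Error **errp)
{
    /* On failure, *errp is filled in by memory_global_dirty_log_start(). */
    return memory_global_dirty_log_start(GLOBAL_DIRTY_MIGRATION, errp);
}
```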