author     Juan Quintela <quintela@redhat.com>    2023-11-03 08:42:45 +0100
committer  Juan Quintela <quintela@redhat.com>    2023-11-03 10:48:37 +0100
commit     0983125b405c479a6e7eb49b81cfdae969e28cee (patch)
tree       ce2c737ede80309780212e708ae4ca5c6d15dec7 /migration/ram.c
parent     ceddc48278a006e3f13b8a1881676cfb3a1ca99a (diff)
migration: Unlock mutex in error case
We were not unlocking the bitmap mutex in the error case. To fix this
for good, enclose the code in WITH_QEMU_LOCK_GUARD().
Coverity CID 1523750.
Fixes: a2326705e5 ("migration: Stop migration immediately in RDMA error paths")
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Juan Quintela <quintela@redhat.com>
Message-ID: <20231103074245.55166-1-quintela@redhat.com>
Diffstat (limited to 'migration/ram.c')
-rw-r--r--   migration/ram.c   104
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index a0f3b86663..8c7886ab79 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -3030,71 +3030,71 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
      * MAX_WAIT (if curious, further see commit 4508bd9ed8053ce) below, which
      * guarantees that we'll at least released it in a regular basis.
      */
-    qemu_mutex_lock(&rs->bitmap_mutex);
-    WITH_RCU_READ_LOCK_GUARD() {
-        if (ram_list.version != rs->last_version) {
-            ram_state_reset(rs);
-        }
+    WITH_QEMU_LOCK_GUARD(&rs->bitmap_mutex) {
+        WITH_RCU_READ_LOCK_GUARD() {
+            if (ram_list.version != rs->last_version) {
+                ram_state_reset(rs);
+            }
 
-        /* Read version before ram_list.blocks */
-        smp_rmb();
+            /* Read version before ram_list.blocks */
+            smp_rmb();
 
-        ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
-        if (ret < 0) {
-            qemu_file_set_error(f, ret);
-            goto out;
-        }
+            ret = rdma_registration_start(f, RAM_CONTROL_ROUND);
+            if (ret < 0) {
+                qemu_file_set_error(f, ret);
+                goto out;
+            }
 
-        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
-        i = 0;
-        while ((ret = migration_rate_exceeded(f)) == 0 ||
-               postcopy_has_request(rs)) {
-            int pages;
+            t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+            i = 0;
+            while ((ret = migration_rate_exceeded(f)) == 0 ||
+                   postcopy_has_request(rs)) {
+                int pages;
 
-            if (qemu_file_get_error(f)) {
-                break;
-            }
+                if (qemu_file_get_error(f)) {
+                    break;
+                }
 
-            pages = ram_find_and_save_block(rs);
-            /* no more pages to sent */
-            if (pages == 0) {
-                done = 1;
-                break;
-            }
+                pages = ram_find_and_save_block(rs);
+                /* no more pages to sent */
+                if (pages == 0) {
+                    done = 1;
+                    break;
+                }
 
-            if (pages < 0) {
-                qemu_file_set_error(f, pages);
-                break;
-            }
+                if (pages < 0) {
+                    qemu_file_set_error(f, pages);
+                    break;
+                }
 
-            rs->target_page_count += pages;
+                rs->target_page_count += pages;
 
-            /*
-             * During postcopy, it is necessary to make sure one whole host
-             * page is sent in one chunk.
-             */
-            if (migrate_postcopy_ram()) {
-                compress_flush_data();
-            }
+                /*
+                 * During postcopy, it is necessary to make sure one whole host
+                 * page is sent in one chunk.
+                 */
+                if (migrate_postcopy_ram()) {
+                    compress_flush_data();
+                }
 
-            /*
-             * we want to check in the 1st loop, just in case it was the 1st
-             * time and we had to sync the dirty bitmap.
-             * qemu_clock_get_ns() is a bit expensive, so we only check each
-             * some iterations
-             */
-            if ((i & 63) == 0) {
-                uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
-                    1000000;
-                if (t1 > MAX_WAIT) {
-                    trace_ram_save_iterate_big_wait(t1, i);
-                    break;
+                /*
+                 * we want to check in the 1st loop, just in case it was the 1st
+                 * time and we had to sync the dirty bitmap.
+                 * qemu_clock_get_ns() is a bit expensive, so we only check each
+                 * some iterations
+                 */
+                if ((i & 63) == 0) {
+                    uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
+                        1000000;
+                    if (t1 > MAX_WAIT) {
+                        trace_ram_save_iterate_big_wait(t1, i);
+                        break;
+                    }
                 }
+                i++;
             }
-            i++;
         }
     }
-    qemu_mutex_unlock(&rs->bitmap_mutex);
 
     /*
      * Must occur before EOS (or any QEMUFile operation)
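
Why the guard fixes the leak: the "goto out" taken when rdma_registration_start()
fails used to jump past qemu_mutex_unlock(), so the bitmap mutex stayed held.
WITH_QEMU_LOCK_GUARD() instead releases the mutex whenever control leaves the
guarded scope, early exits included. Below is a minimal standalone sketch of the
same scoped-guard idea built on pthreads and the GCC/Clang cleanup attribute; it
is not QEMU's actual include/qemu/lockable.h implementation, and the names
scoped_guard() and guard_cleanup() are purely illustrative.

    /*
     * Sketch only: assumes GNU C (__attribute__((cleanup))) and pthreads.
     * scoped_guard()/guard_cleanup() are hypothetical names, not QEMU API.
     */
    #include <pthread.h>
    #include <stdio.h>

    static void guard_cleanup(pthread_mutex_t **m)
    {
        if (*m) {                     /* non-NULL: the block was left early */
            pthread_mutex_unlock(*m);
        }
    }

    /* Run the following block with @m held; unlock on every way out of it. */
    #define scoped_guard(m)                                                    \
        for (__attribute__((cleanup(guard_cleanup))) pthread_mutex_t *guard_ = \
                 (pthread_mutex_lock(m), (m));                                 \
             guard_;                                                           \
             pthread_mutex_unlock(guard_), guard_ = NULL)

    static pthread_mutex_t bitmap_mutex = PTHREAD_MUTEX_INITIALIZER;

    static int save_pages(int budget)
    {
        scoped_guard(&bitmap_mutex) {
            if (budget < 0) {
                return -1;            /* early exit: cleanup still unlocks */
            }
            printf("saving %d pages with bitmap_mutex held\n", budget);
        }                             /* normal exit: loop step unlocks */
        return 0;
    }

    int main(void)
    {
        save_pages(8);
        save_pages(-1);
        return 0;
    }

QEMU's real macro in include/qemu/lockable.h follows the same pattern, wrapping
the lock in a QemuLockable whose automatic cleanup unlocks on any exit from the
block, which is why the goto inside the guarded region is now safe.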