author    Cédric Le Goater <clg@redhat.com>  2024-03-20 07:49:09 +0100
committer Peter Xu <peterx@redhat.com>       2024-04-23 18:36:01 -0400
commit    7bee8ba8bbcef27cc98bc85747258e942f8d9717 (patch)
tree      b5b8bc27f042c2de0f83e5eb26e94456f0ec0800
parent    16ecd25a4f324fd98cb974a0fd80390c7e136ea7 (diff)
migration: Add Error** argument to xbzrle_init()
Since the return value (-ENOMEM) is never used by the caller, follow the
recommendations of qapi/error.h and change the return type to bool.

Signed-off-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/20240320064911.545001-14-clg@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
-rw-r--r--  migration/ram.c | 22 ++++++++++------------
1 file changed, 10 insertions(+), 12 deletions(-)
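For context, the bool-plus-Error** style this patch converts xbzrle_init() to is the
convention recommended by qapi/error.h: a fallible function takes an Error **errp as
its last argument, fills it with error_setg() and returns false on failure, and the
caller decides whether to propagate or report the error. The sketch below is
illustrative only and not part of the patch; example_init(), example_caller() and
example_buf are made-up names, and it assumes the usual QEMU headers.

    /*
     * Minimal sketch of the qapi/error.h convention (hypothetical names,
     * not QEMU code).
     */
    #include "qemu/osdep.h"
    #include "qapi/error.h"

    static void *example_buf;

    static bool example_init(Error **errp)
    {
        example_buf = g_try_malloc0(4096);
        if (!example_buf) {
            /* Fill *errp instead of printing and returning -ENOMEM. */
            error_setg(errp, "%s: Error allocating buffer", __func__);
            return false;
        }
        return true;
    }

    static int example_caller(void)
    {
        Error *local_err = NULL;

        /* The caller chooses how to handle the failure, here by reporting it. */
        if (!example_init(&local_err)) {
            error_report_err(local_err);
            return -1;
        }
        return 0;
    }

This is exactly the shape the patch gives xbzrle_init() and its caller ram_init_all()
below.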
diff --git a/migration/ram.c b/migration/ram.c
index 26ce11a337..70797ef5d8 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -2727,44 +2727,41 @@ int ram_discard_range(const char *rbname, uint64_t start, size_t length)
* For every allocation, we will try not to crash the VM if the
* allocation failed.
*/
-static int xbzrle_init(void)
+static bool xbzrle_init(Error **errp)
{
- Error *local_err = NULL;
-
if (!migrate_xbzrle()) {
- return 0;
+ return true;
}
XBZRLE_cache_lock();
XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
if (!XBZRLE.zero_target_page) {
- error_report("%s: Error allocating zero page", __func__);
+ error_setg(errp, "%s: Error allocating zero page", __func__);
goto err_out;
}
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
- TARGET_PAGE_SIZE, &local_err);
+ TARGET_PAGE_SIZE, errp);
if (!XBZRLE.cache) {
- error_report_err(local_err);
goto free_zero_page;
}
XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
if (!XBZRLE.encoded_buf) {
- error_report("%s: Error allocating encoded_buf", __func__);
+ error_setg(errp, "%s: Error allocating encoded_buf", __func__);
goto free_cache;
}
XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
if (!XBZRLE.current_buf) {
- error_report("%s: Error allocating current_buf", __func__);
+ error_setg(errp, "%s: Error allocating current_buf", __func__);
goto free_encoded_buf;
}
/* We are all good */
XBZRLE_cache_unlock();
- return 0;
+ return true;
free_encoded_buf:
g_free(XBZRLE.encoded_buf);
@@ -2777,7 +2774,7 @@ free_zero_page:
XBZRLE.zero_target_page = NULL;
err_out:
XBZRLE_cache_unlock();
- return -ENOMEM;
+ return false;
}
static bool ram_state_init(RAMState **rsp, Error **errp)
@@ -2904,7 +2901,8 @@ static int ram_init_all(RAMState **rsp)
return -1;
}
- if (xbzrle_init()) {
+ if (!xbzrle_init(&local_err)) {
+ error_report_err(local_err);
ram_state_cleanup(rsp);
return -1;
}