Diffstat (limited to 'migration')
-rw-r--r--  migration/Makefile.objs   |   2
-rw-r--r--  migration/migration.c     |  34
-rw-r--r--  migration/page_cache.c    | 236
-rw-r--r--  migration/page_cache.h    |  86
-rw-r--r--  migration/postcopy-ram.c  |  18
-rw-r--r--  migration/postcopy-ram.h  |  26
-rw-r--r--  migration/ram.c           |  35
-rw-r--r--  migration/savevm.c        |  61
8 files changed, 424 insertions, 74 deletions
diff --git a/migration/Makefile.objs b/migration/Makefile.objs
index 480dd493a9..c1920b6fc0 100644
--- a/migration/Makefile.objs
+++ b/migration/Makefile.objs
@@ -1,7 +1,7 @@
common-obj-y += migration.o socket.o fd.o exec.o
common-obj-y += tls.o
common-obj-y += colo-comm.o colo.o colo-failover.o
-common-obj-y += vmstate.o
+common-obj-y += vmstate.o page_cache.o
common-obj-y += qemu-file.o
common-obj-y += qemu-file-channel.o
common-obj-y += xbzrle.o postcopy-ram.o
diff --git a/migration/migration.c b/migration/migration.c
index a5ade23e24..0304c013f3 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -17,6 +17,7 @@
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "migration/blocker.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
@@ -77,13 +78,6 @@ static NotifierList migration_state_notifiers =
static bool deferred_incoming;
-/*
- * Current state of incoming postcopy; note this is not part of
- * MigrationIncomingState since it's state is used during cleanup
- * at the end as MIS is being freed.
- */
-static PostcopyState incoming_postcopy_state;
-
/* When we add fault tolerance, we could have several
migrations at once. For now we don't need to add
dynamic creation of migration */
@@ -1149,21 +1143,6 @@ void migrate_del_blocker(Error *reason)
migration_blockers = g_slist_remove(migration_blockers, reason);
}
-int check_migratable(Object *obj, Error **err)
-{
- DeviceClass *dc = DEVICE_GET_CLASS(obj);
- if (only_migratable && dc->vmsd) {
- if (dc->vmsd->unmigratable) {
- error_setg(err, "Device %s is not migratable, but "
- "--only-migratable was specified",
- object_get_typename(obj));
- return -1;
- }
- }
-
- return 0;
-}
-
void qmp_migrate_incoming(const char *uri, Error **errp)
{
Error *local_err = NULL;
@@ -2097,14 +2076,3 @@ void migrate_fd_connect(MigrationState *s)
s->migration_thread_running = true;
}
-PostcopyState postcopy_state_get(void)
-{
- return atomic_mb_read(&incoming_postcopy_state);
-}
-
-/* Set the state and return the old state */
-PostcopyState postcopy_state_set(PostcopyState new_state)
-{
- return atomic_xchg(&incoming_postcopy_state, new_state);
-}
-
diff --git a/migration/page_cache.c b/migration/page_cache.c
new file mode 100644
index 0000000000..5f8578736e
--- /dev/null
+++ b/migration/page_cache.c
@@ -0,0 +1,236 @@
+/*
+ * Page cache for QEMU
+ * The cache is based on a hash of the page address
+ *
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Orit Wasserman <owasserm@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+
+#include "qemu-common.h"
+#include "qemu/host-utils.h"
+#include "migration/page_cache.h"
+
+#ifdef DEBUG_CACHE
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+/* a cached page will not be replaced until it is at least two cycles old */
+#define CACHED_PAGE_LIFETIME 2
+
+typedef struct CacheItem CacheItem;
+
+struct CacheItem {
+ uint64_t it_addr;
+ uint64_t it_age;
+ uint8_t *it_data;
+};
+
+struct PageCache {
+ CacheItem *page_cache;
+ unsigned int page_size;
+ int64_t max_num_items;
+ uint64_t max_item_age;
+ int64_t num_items;
+};
+
+PageCache *cache_init(int64_t num_pages, unsigned int page_size)
+{
+ int64_t i;
+
+ PageCache *cache;
+
+ if (num_pages <= 0) {
+ DPRINTF("invalid number of pages\n");
+ return NULL;
+ }
+
+ /* We prefer not to abort if there is no memory */
+ cache = g_try_malloc(sizeof(*cache));
+ if (!cache) {
+ DPRINTF("Failed to allocate cache\n");
+ return NULL;
+ }
+ /* round down to the nearest power of 2 */
+ if (!is_power_of_2(num_pages)) {
+ num_pages = pow2floor(num_pages);
+ DPRINTF("rounding down to %" PRId64 "\n", num_pages);
+ }
+ cache->page_size = page_size;
+ cache->num_items = 0;
+ cache->max_item_age = 0;
+ cache->max_num_items = num_pages;
+
+ DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);
+
+ /* We prefer not to abort if there is no memory */
+ cache->page_cache = g_try_malloc((cache->max_num_items) *
+ sizeof(*cache->page_cache));
+ if (!cache->page_cache) {
+ DPRINTF("Failed to allocate cache->page_cache\n");
+ g_free(cache);
+ return NULL;
+ }
+
+ for (i = 0; i < cache->max_num_items; i++) {
+ cache->page_cache[i].it_data = NULL;
+ cache->page_cache[i].it_age = 0;
+ cache->page_cache[i].it_addr = -1;
+ }
+
+ return cache;
+}
+
+void cache_fini(PageCache *cache)
+{
+ int64_t i;
+
+ g_assert(cache);
+ g_assert(cache->page_cache);
+
+ for (i = 0; i < cache->max_num_items; i++) {
+ g_free(cache->page_cache[i].it_data);
+ }
+
+ g_free(cache->page_cache);
+ cache->page_cache = NULL;
+ g_free(cache);
+}
+
+static size_t cache_get_cache_pos(const PageCache *cache,
+ uint64_t address)
+{
+ g_assert(cache->max_num_items);
+ return (address / cache->page_size) & (cache->max_num_items - 1);
+}
+
+static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
+{
+ size_t pos;
+
+ g_assert(cache);
+ g_assert(cache->page_cache);
+
+ pos = cache_get_cache_pos(cache, addr);
+
+ return &cache->page_cache[pos];
+}
+
+uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
+{
+ return cache_get_by_addr(cache, addr)->it_data;
+}
+
+bool cache_is_cached(const PageCache *cache, uint64_t addr,
+ uint64_t current_age)
+{
+ CacheItem *it;
+
+ it = cache_get_by_addr(cache, addr);
+
+ if (it->it_addr == addr) {
+        /* update it_age on a cache hit */
+ it->it_age = current_age;
+ return true;
+ }
+ return false;
+}
+
+int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata,
+ uint64_t current_age)
+{
+
+ CacheItem *it;
+
+ /* actual update of entry */
+ it = cache_get_by_addr(cache, addr);
+
+ if (it->it_data && it->it_addr != addr &&
+ it->it_age + CACHED_PAGE_LIFETIME > current_age) {
+ /* the cache page is fresh, don't replace it */
+ return -1;
+ }
+ /* allocate page */
+ if (!it->it_data) {
+ it->it_data = g_try_malloc(cache->page_size);
+ if (!it->it_data) {
+ DPRINTF("Error allocating page\n");
+ return -1;
+ }
+ cache->num_items++;
+ }
+
+ memcpy(it->it_data, pdata, cache->page_size);
+
+ it->it_age = current_age;
+ it->it_addr = addr;
+
+ return 0;
+}
+
+int64_t cache_resize(PageCache *cache, int64_t new_num_pages)
+{
+ PageCache *new_cache;
+ int64_t i;
+
+ CacheItem *old_it, *new_it;
+
+ g_assert(cache);
+
+    /* cache was not initialized */
+ if (cache->page_cache == NULL) {
+ return -1;
+ }
+
+ /* same size */
+ if (pow2floor(new_num_pages) == cache->max_num_items) {
+ return cache->max_num_items;
+ }
+
+ new_cache = cache_init(new_num_pages, cache->page_size);
+    if (!new_cache) {
+ DPRINTF("Error creating new cache\n");
+ return -1;
+ }
+
+ /* move all data from old cache */
+ for (i = 0; i < cache->max_num_items; i++) {
+ old_it = &cache->page_cache[i];
+ if (old_it->it_addr != -1) {
+            /* check for collision; if there is one, keep the MRU page */
+ new_it = cache_get_by_addr(new_cache, old_it->it_addr);
+ if (new_it->it_data && new_it->it_age >= old_it->it_age) {
+ /* keep the MRU page */
+ g_free(old_it->it_data);
+ } else {
+ if (!new_it->it_data) {
+ new_cache->num_items++;
+ }
+ g_free(new_it->it_data);
+ new_it->it_data = old_it->it_data;
+ new_it->it_age = old_it->it_age;
+ new_it->it_addr = old_it->it_addr;
+ }
+ }
+ }
+
+ g_free(cache->page_cache);
+ cache->page_cache = new_cache->page_cache;
+ cache->max_num_items = new_cache->max_num_items;
+ cache->num_items = new_cache->num_items;
+
+ g_free(new_cache);
+
+ return cache->max_num_items;
+}
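
The cache added above is direct-mapped: each address hashes to exactly one
bucket via (addr / page_size) & (max_num_items - 1), which is why cache_init()
rounds num_pages down to a power of two, and a colliding insert is refused
while the resident page is still fresh (younger than CACHED_PAGE_LIFETIME).
A minimal standalone sketch of that replacement rule, with illustrative names
and plain C arrays in place of the GLib allocations:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LIFETIME  2         /* mirrors CACHED_PAGE_LIFETIME */
    #define NUM_SLOTS 4         /* must be a power of two */
    #define PAGE_SIZE 4096

    /* one slot per hash bucket: direct-mapped, no chaining */
    struct slot { uint64_t addr, age; bool used; };
    static struct slot slots[NUM_SLOTS];

    /* same bucket computation as cache_get_cache_pos() */
    static size_t pos(uint64_t addr)
    {
        return (addr / PAGE_SIZE) & (NUM_SLOTS - 1);
    }

    /* returns true if the insert filled or replaced the slot,
     * false if a fresh colliding page kept it */
    static bool insert(uint64_t addr, uint64_t age)
    {
        struct slot *s = &slots[pos(addr)];

        if (s->used && s->addr != addr && s->age + LIFETIME > age) {
            return false;   /* colliding page is still fresh */
        }
        s->addr = addr;
        s->age = age;
        s->used = true;
        return true;
    }

    int main(void)
    {
        insert(0 * PAGE_SIZE, 1);                 /* fills bucket 0 */
        /* 4 * PAGE_SIZE also maps to bucket 0: rejected, resident is fresh */
        printf("%d\n", insert(4 * PAGE_SIZE, 2)); /* prints 0 */
        /* two cycles later the resident page is stale: replaced */
        printf("%d\n", insert(4 * PAGE_SIZE, 3)); /* prints 1 */
        return 0;
    }
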
diff --git a/migration/page_cache.h b/migration/page_cache.h
new file mode 100644
index 0000000000..10ed53274c
--- /dev/null
+++ b/migration/page_cache.h
@@ -0,0 +1,86 @@
+/*
+ * Page cache for QEMU
+ * The cache is based on a hash of the page address
+ *
+ * Copyright 2012 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ * Orit Wasserman <owasserm@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef PAGE_CACHE_H
+#define PAGE_CACHE_H
+
+/* Page cache for storing guest pages */
+typedef struct PageCache PageCache;
+
+/**
+ * cache_init: Initialize the page cache
+ *
+ * Returns a newly allocated cache, or NULL on error
+ *
+ * @num_pages: maximum number of pages to cache
+ * @page_size: cache page size
+ */
+PageCache *cache_init(int64_t num_pages, unsigned int page_size);
+
+/**
+ * cache_fini: free all cache resources
+ * @cache: pointer to the PageCache struct
+ */
+void cache_fini(PageCache *cache);
+
+/**
+ * cache_is_cached: Checks to see if the page is cached
+ *
+ * Returns %true if page is cached
+ *
+ * @cache: pointer to the PageCache struct
+ * @addr: page addr
+ * @current_age: current bitmap generation
+ */
+bool cache_is_cached(const PageCache *cache, uint64_t addr,
+ uint64_t current_age);
+
+/**
+ * get_cached_data: Get the data cached for an addr
+ *
+ * Returns pointer to the data cached or NULL if not cached
+ *
+ * @cache: pointer to the PageCache struct
+ * @addr: page addr
+ */
+uint8_t *get_cached_data(const PageCache *cache, uint64_t addr);
+
+/**
+ * cache_insert: insert the page into the cache. The cache copies
+ * the data on insert; any previous value is overwritten.
+ *
+ * Returns -1 if the page was not inserted into the cache, 0 otherwise
+ *
+ * @cache: pointer to the PageCache struct
+ * @addr: page address
+ * @pdata: pointer to the page
+ * @current_age: current bitmap generation
+ */
+int cache_insert(PageCache *cache, uint64_t addr, const uint8_t *pdata,
+ uint64_t current_age);
+
+/**
+ * cache_resize: resize the page cache. In case of size reduction the extra
+ * pages will be freed.
+ *
+ * Returns -1 on error, or the new cache size on success
+ *
+ * @cache: pointer to the PageCache struct
+ * @num_pages: new page cache size (in pages)
+ */
+int64_t cache_resize(PageCache *cache, int64_t num_pages);
+
+#endif
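
A hedged usage sketch of this API (illustrative only; the real caller is the
XBZRLE code in migration/ram.c, and error handling is abbreviated):

    #include "qemu/osdep.h"
    #include "migration/page_cache.h"

    static void page_cache_demo(void)
    {
        /* 1024 cached pages of 4 KiB each */
        PageCache *cache = cache_init(1024, 4096);
        uint64_t age = 0;              /* bumped once per dirty-bitmap sync */
        uint8_t page[4096] = { 0 };

        if (!cache) {
            return;
        }
        if (cache_insert(cache, 0x1000, page, age) == 0 &&
            cache_is_cached(cache, 0x1000, age)) {
            /* XBZRLE would delta-encode the new page against this copy */
            uint8_t *prev = get_cached_data(cache, 0x1000);
            (void)prev;
        }
        cache_fini(cache);
    }
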
diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c
index cdadaf6578..a0489f6542 100644
--- a/migration/postcopy-ram.c
+++ b/migration/postcopy-ram.c
@@ -784,3 +784,21 @@ void postcopy_discard_send_finish(MigrationState *ms, PostcopyDiscardState *pds)
g_free(pds);
}
+
+/*
+ * Current state of incoming postcopy; note this is not part of
+ * MigrationIncomingState since its state is used during cleanup
+ * at the end as MIS is being freed.
+ */
+static PostcopyState incoming_postcopy_state;
+
+PostcopyState postcopy_state_get(void)
+{
+ return atomic_mb_read(&incoming_postcopy_state);
+}
+
+/* Set the state and return the old state */
+PostcopyState postcopy_state_set(PostcopyState new_state)
+{
+ return atomic_xchg(&incoming_postcopy_state, new_state);
+}
diff --git a/migration/postcopy-ram.h b/migration/postcopy-ram.h
index 4c25f03be2..52d51e8007 100644
--- a/migration/postcopy-ram.h
+++ b/migration/postcopy-ram.h
@@ -81,6 +81,28 @@ int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from,
int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
size_t pagesize);
+/* The current postcopy state is read/set by postcopy_state_get/set,
+ * which update it atomically.
+ * The state is updated as postcopy messages are received, and
+ * in general only one thread should be writing to the state at any one
+ * time, initially the main thread and then the listen thread;
+ * corner cases arise when either thread finishes early or hits an error.
+ * The state is checked as messages are received to ensure that
+ * the source is sending us messages in the correct order.
+ * The state is also used by the RAM reception code to know if it
+ * has to place pages atomically, and by the cleanup code at the end of
+ * the main thread to know if it has to delay cleanup until the end
+ * of postcopy.
+ */
+typedef enum {
+ POSTCOPY_INCOMING_NONE = 0, /* Initial state - no postcopy */
+ POSTCOPY_INCOMING_ADVISE,
+ POSTCOPY_INCOMING_DISCARD,
+ POSTCOPY_INCOMING_LISTENING,
+ POSTCOPY_INCOMING_RUNNING,
+ POSTCOPY_INCOMING_END
+} PostcopyState;
+
/*
* Allocate a page of memory that can be mapped at a later point in time
* using postcopy_place_page
@@ -88,4 +110,8 @@ int postcopy_place_page_zero(MigrationIncomingState *mis, void *host,
*/
void *postcopy_get_tmp_page(MigrationIncomingState *mis);
+PostcopyState postcopy_state_get(void);
+/* Set the state and return the old state */
+PostcopyState postcopy_state_set(PostcopyState new_state);
+
#endif
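
Since postcopy_state_set() returns the previous state, a handler can advance
the state machine and validate message ordering in a single atomic step. A
hedged sketch of that pattern (the handler name is hypothetical; the real
checks live in the savevm.c message handlers):

    /* a DISCARD message is only legal after ADVISE (or another DISCARD) */
    static int handle_discard_message(void)
    {
        PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

        if (old != POSTCOPY_INCOMING_ADVISE &&
            old != POSTCOPY_INCOMING_DISCARD) {
            /* the source sent messages out of order */
            return -EINVAL;
        }
        /* ... process the discard ranges ... */
        return 0;
    }
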
diff --git a/migration/ram.c b/migration/ram.c
index 293d27ce83..76c118c271 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -48,8 +48,14 @@
/***********************************************************/
/* ram save/restore */
+/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
+ * worked for pages that were filled with the same char. We switched
+ * it to only search for the zero value, and renamed it to avoid
+ * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
+ */
+
#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
-#define RAM_SAVE_FLAG_COMPRESS 0x02
+#define RAM_SAVE_FLAG_ZERO 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE 0x08
#define RAM_SAVE_FLAG_EOS 0x10
@@ -436,20 +442,21 @@ void migrate_compress_threads_create(void)
* @offset: offset inside the block for the page
* in the lower bits, it contains flags
*/
-static size_t save_page_header(RAMState *rs, RAMBlock *block, ram_addr_t offset)
+static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
+ ram_addr_t offset)
{
size_t size, len;
if (block == rs->last_sent_block) {
offset |= RAM_SAVE_FLAG_CONTINUE;
}
- qemu_put_be64(rs->f, offset);
+ qemu_put_be64(f, offset);
size = 8;
if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
len = strlen(block->idstr);
- qemu_put_byte(rs->f, len);
- qemu_put_buffer(rs->f, (uint8_t *)block->idstr, len);
+ qemu_put_byte(f, len);
+ qemu_put_buffer(f, (uint8_t *)block->idstr, len);
size += 1 + len;
rs->last_sent_block = block;
}
@@ -571,7 +578,7 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
}
/* Send XBZRLE based compressed page */
- bytes_xbzrle = save_page_header(rs, block,
+ bytes_xbzrle = save_page_header(rs, rs->f, block,
offset | RAM_SAVE_FLAG_XBZRLE);
qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
qemu_put_be16(rs->f, encoded_len);
@@ -745,7 +752,7 @@ static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
if (is_zero_range(p, TARGET_PAGE_SIZE)) {
rs->zero_pages++;
rs->bytes_transferred +=
- save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS);
+ save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
qemu_put_byte(rs->f, 0);
rs->bytes_transferred += 1;
pages = 1;
@@ -834,7 +841,7 @@ static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
/* XBZRLE overflow or normal page */
if (pages == -1) {
- rs->bytes_transferred += save_page_header(rs, block,
+ rs->bytes_transferred += save_page_header(rs, rs->f, block,
offset | RAM_SAVE_FLAG_PAGE);
if (send_async) {
qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
@@ -860,7 +867,7 @@ static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
int bytes_sent, blen;
uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
- bytes_sent = save_page_header(rs, block, offset |
+ bytes_sent = save_page_header(rs, f, block, offset |
RAM_SAVE_FLAG_COMPRESS_PAGE);
blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
migrate_compress_level());
@@ -991,7 +998,7 @@ static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
pages = save_zero_page(rs, block, offset, p);
if (pages == -1) {
/* Make sure the first page is sent out before other pages */
- bytes_xmit = save_page_header(rs, block, offset |
+ bytes_xmit = save_page_header(rs, rs->f, block, offset |
RAM_SAVE_FLAG_COMPRESS_PAGE);
blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
migrate_compress_level());
@@ -2405,7 +2412,7 @@ static int ram_load_postcopy(QEMUFile *f)
trace_ram_load_postcopy_loop((uint64_t)addr, flags);
place_needed = false;
- if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
+ if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
block = ram_block_from_stream(f, flags);
host = host_from_ram_block_offset(block, addr);
@@ -2452,7 +2459,7 @@ static int ram_load_postcopy(QEMUFile *f)
last_host = host;
switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
- case RAM_SAVE_FLAG_COMPRESS:
+ case RAM_SAVE_FLAG_ZERO:
ch = qemu_get_byte(f);
memset(page_buffer, ch, TARGET_PAGE_SIZE);
if (ch) {
@@ -2541,7 +2548,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
flags = addr & ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
- if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
+ if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
RAMBlock *block = ram_block_from_stream(f, flags);
@@ -2603,7 +2610,7 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
break;
- case RAM_SAVE_FLAG_COMPRESS:
+ case RAM_SAVE_FLAG_ZERO:
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
break;
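
Passing the QEMUFile to save_page_header() explicitly lets callers such as
do_compress_ram_page() write the header to a file other than the shared
rs->f. The wire layout is unchanged: a big-endian 64-bit word holding the
page offset with the flags in its low bits, followed by the block idstr
(length byte, then bytes) only when RAM_SAVE_FLAG_CONTINUE is clear. A
standalone sketch of that encoding, with a hypothetical buffer writer
standing in for qemu_put_be64()/qemu_put_buffer():

    #include <stdint.h>
    #include <string.h>

    #define RAM_SAVE_FLAG_CONTINUE 0x20

    /* returns the header size, mirroring save_page_header()'s return value */
    static size_t put_page_header(uint8_t *buf, uint64_t offset_and_flags,
                                  const char *idstr)
    {
        size_t n = 0;

        for (int i = 7; i >= 0; i--) {          /* big-endian 64-bit word */
            buf[n++] = (uint8_t)(offset_and_flags >> (i * 8));
        }
        if (!(offset_and_flags & RAM_SAVE_FLAG_CONTINUE)) {
            size_t len = strlen(idstr);         /* idstr only on block change */
            buf[n++] = (uint8_t)len;
            memcpy(buf + n, idstr, len);
            n += len;
        }
        return n;                               /* 8, or 8 + 1 + len */
    }
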
diff --git a/migration/savevm.c b/migration/savevm.c
index 7f66d58a7e..f5e81948e6 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -2069,7 +2069,7 @@ int qemu_loadvm_state(QEMUFile *f)
return ret;
}
-int save_vmstate(const char *name)
+int save_vmstate(const char *name, Error **errp)
{
BlockDriverState *bs, *bs1;
QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
@@ -2079,29 +2079,27 @@ int save_vmstate(const char *name)
uint64_t vm_state_size;
qemu_timeval tv;
struct tm tm;
- Error *local_err = NULL;
AioContext *aio_context;
if (!bdrv_all_can_snapshot(&bs)) {
- error_report("Device '%s' is writable but does not support snapshots",
- bdrv_get_device_name(bs));
+ error_setg(errp, "Device '%s' is writable but does not support "
+ "snapshots", bdrv_get_device_name(bs));
return ret;
}
/* Delete old snapshots of the same name */
if (name) {
- ret = bdrv_all_delete_snapshot(name, &bs1, &local_err);
+ ret = bdrv_all_delete_snapshot(name, &bs1, errp);
if (ret < 0) {
- error_reportf_err(local_err,
- "Error while deleting snapshot on device '%s': ",
- bdrv_get_device_name(bs1));
+ error_prepend(errp, "Error while deleting snapshot on device "
+ "'%s': ", bdrv_get_device_name(bs1));
return ret;
}
}
bs = bdrv_all_find_vmstate_bs();
if (bs == NULL) {
- error_report("No block device can accept snapshots");
+ error_setg(errp, "No block device can accept snapshots");
return ret;
}
aio_context = bdrv_get_aio_context(bs);
@@ -2110,7 +2108,7 @@ int save_vmstate(const char *name)
ret = global_state_store();
if (ret) {
- error_report("Error saving global state");
+ error_setg(errp, "Error saving global state");
return ret;
}
vm_stop(RUN_STATE_SAVE_VM);
@@ -2142,21 +2140,20 @@ int save_vmstate(const char *name)
/* save the VM state */
f = qemu_fopen_bdrv(bs, 1);
if (!f) {
- error_report("Could not open VM state file");
+ error_setg(errp, "Could not open VM state file");
goto the_end;
}
- ret = qemu_savevm_state(f, &local_err);
+ ret = qemu_savevm_state(f, errp);
vm_state_size = qemu_ftell(f);
qemu_fclose(f);
if (ret < 0) {
- error_report_err(local_err);
goto the_end;
}
ret = bdrv_all_create_snapshot(sn, bs, vm_state_size, &bs);
if (ret < 0) {
- error_report("Error while creating snapshot on '%s'",
- bdrv_get_device_name(bs));
+ error_setg(errp, "Error while creating snapshot on '%s'",
+ bdrv_get_device_name(bs));
goto the_end;
}
@@ -2229,7 +2226,7 @@ void qmp_xen_load_devices_state(const char *filename, Error **errp)
migration_incoming_state_destroy();
}
-int load_vmstate(const char *name)
+int load_vmstate(const char *name, Error **errp)
{
BlockDriverState *bs, *bs_vm_state;
QEMUSnapshotInfo sn;
@@ -2239,20 +2236,22 @@ int load_vmstate(const char *name)
MigrationIncomingState *mis = migration_incoming_get_current();
if (!bdrv_all_can_snapshot(&bs)) {
- error_report("Device '%s' is writable but does not support snapshots",
- bdrv_get_device_name(bs));
+ error_setg(errp,
+ "Device '%s' is writable but does not support snapshots",
+ bdrv_get_device_name(bs));
return -ENOTSUP;
}
ret = bdrv_all_find_snapshot(name, &bs);
if (ret < 0) {
- error_report("Device '%s' does not have the requested snapshot '%s'",
- bdrv_get_device_name(bs), name);
+ error_setg(errp,
+ "Device '%s' does not have the requested snapshot '%s'",
+ bdrv_get_device_name(bs), name);
return ret;
}
bs_vm_state = bdrv_all_find_vmstate_bs();
if (!bs_vm_state) {
- error_report("No block device supports snapshots");
+ error_setg(errp, "No block device supports snapshots");
return -ENOTSUP;
}
aio_context = bdrv_get_aio_context(bs_vm_state);
@@ -2264,8 +2263,8 @@ int load_vmstate(const char *name)
if (ret < 0) {
return ret;
} else if (sn.vm_state_size == 0) {
- error_report("This is a disk-only snapshot. Revert to it offline "
- "using qemu-img.");
+        error_setg(errp, "This is a disk-only snapshot. Revert to it "
+                   "offline using qemu-img");
return -EINVAL;
}
@@ -2274,7 +2273,7 @@ int load_vmstate(const char *name)
ret = bdrv_all_goto_snapshot(name, &bs);
if (ret < 0) {
- error_report("Error %d while activating snapshot '%s' on '%s'",
+ error_setg(errp, "Error %d while activating snapshot '%s' on '%s'",
ret, name, bdrv_get_device_name(bs));
return ret;
}
@@ -2282,7 +2281,7 @@ int load_vmstate(const char *name)
/* restore the VM state */
f = qemu_fopen_bdrv(bs_vm_state, 0);
if (!f) {
- error_report("Could not open VM state file");
+ error_setg(errp, "Could not open VM state file");
return -EINVAL;
}
@@ -2296,7 +2295,7 @@ int load_vmstate(const char *name)
migration_incoming_state_destroy();
if (ret < 0) {
- error_report("Error %d while loading VM state", ret);
+ error_setg(errp, "Error %d while loading VM state", ret);
return ret;
}
@@ -2318,3 +2317,13 @@ void vmstate_register_ram_global(MemoryRegion *mr)
{
vmstate_register_ram(mr, NULL);
}
+
+bool vmstate_check_only_migratable(const VMStateDescription *vmsd)
+{
+    /* this check is only needed if --only-migratable is specified */
+ if (!only_migratable) {
+ return true;
+ }
+
+ return !(vmsd && vmsd->unmigratable);
+}
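
With the errp parameters added above, save_vmstate()/load_vmstate() leave
reporting policy to their callers. A hedged caller sketch (the wrapper
function is hypothetical; error_report_err() prints and frees the error):

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "sysemu/sysemu.h"

    /* hypothetical wrapper showing the propagate-then-report pattern */
    static void do_savevm(const char *name)
    {
        Error *err = NULL;

        if (save_vmstate(name, &err) < 0) {
            error_report_err(err);    /* report and free */
        }
    }
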