path: root/block-migration.c
author      Peter Lieven <pl@kamp.de>                 2013-07-18 09:48:50 +0200
committer   Stefan Hajnoczi <stefanha@redhat.com>     2013-07-19 12:29:21 +0800
commit      323004a39d4d8d33c744a5b108f80bfe6402fca3 (patch)
tree        e232d3657f529de5ac35cd785fa6fa387ab90116 /block-migration.c
parent      8bf9344ad6883e6d85b69bab36d9d76e4257e9ed (diff)
block-migration: efficiently encode zero blocks
This patch adds an efficient encoding for zero blocks by adding a new flag that indicates a block is completely zero. Additionally, bdrv_write_zeroes() is used at the destination to write these zeroes efficiently. Depending on the implementation, this avoids fully provisioning the destination target.

Signed-off-by: Peter Lieven <pl@kamp.de>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
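For readers unfamiliar with the block-migration stream format, the sketch below illustrates how flags like the new zero-block flag share the single 64-bit per-block header with the sector number: shifting the sector by BDRV_SECTOR_BITS leaves the low bits free for flag values. The constants mirror those in the patch; the helper names encode_blk_header()/decode_blk_header() are hypothetical and this is an illustrative sketch, not code from the patch.

    /* Illustrative sketch only -- not part of the patch. */
    #include <stdint.h>
    #include <stdbool.h>

    #define BDRV_SECTOR_BITS          9    /* 512-byte sectors */
    #define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
    #define BLK_MIG_FLAG_ZERO_BLOCK   0x08

    /* Source side: shift the sector number up and OR the flags into
     * the now-free low bits, so one 64-bit field carries both. */
    static uint64_t encode_blk_header(int64_t sector, bool is_zero)
    {
        uint64_t hdr = ((uint64_t)sector << BDRV_SECTOR_BITS)
                       | BLK_MIG_FLAG_DEVICE_BLOCK;
        if (is_zero) {
            hdr |= BLK_MIG_FLAG_ZERO_BLOCK;   /* block is all zeroes */
        }
        return hdr;
    }

    /* Destination side: mask the low bits back out as flags and recover
     * the sector number; a set ZERO_BLOCK flag means no BLOCK_SIZE
     * payload follows and the block can be written as zeroes. */
    static void decode_blk_header(uint64_t hdr, int64_t *sector,
                                  uint64_t *flags)
    {
        *flags  = hdr & ((1u << BDRV_SECTOR_BITS) - 1);
        *sector = hdr >> BDRV_SECTOR_BITS;
    }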
Diffstat (limited to 'block-migration.c')
-rw-r--r--   block-migration.c   32
1 file changed, 26 insertions(+), 6 deletions(-)
diff --git a/block-migration.c b/block-migration.c
index 2fd7699794..f803f2006f 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -29,6 +29,7 @@
#define BLK_MIG_FLAG_DEVICE_BLOCK 0x01
#define BLK_MIG_FLAG_EOS 0x02
#define BLK_MIG_FLAG_PROGRESS 0x04
+#define BLK_MIG_FLAG_ZERO_BLOCK 0x08
#define MAX_IS_ALLOCATED_SEARCH 65536
@@ -80,6 +81,7 @@ typedef struct BlkMigState {
int shared_base;
QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
int64_t total_sector_sum;
+ bool zero_blocks;
/* Protected by lock. */
QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
@@ -114,16 +116,30 @@ static void blk_mig_unlock(void)
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
int len;
+ uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;
+
+ if (block_mig_state.zero_blocks &&
+ buffer_is_zero(blk->buf, BLOCK_SIZE)) {
+ flags |= BLK_MIG_FLAG_ZERO_BLOCK;
+ }
/* sector number and flags */
qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
- | BLK_MIG_FLAG_DEVICE_BLOCK);
+ | flags);
/* device name */
len = strlen(blk->bmds->bs->device_name);
qemu_put_byte(f, len);
qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);
+ /* if a block is zero we need to flush here since the network
+ * bandwidth is now a lot higher than the storage device bandwidth.
+ * thus if we queue zero blocks we slow down the migration */
+ if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+ qemu_fflush(f);
+ return;
+ }
+
qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}
@@ -344,6 +360,7 @@ static void init_blk_migration(QEMUFile *f)
block_mig_state.total_sector_sum = 0;
block_mig_state.prev_progress = -1;
block_mig_state.bulk_completed = 0;
+ block_mig_state.zero_blocks = migrate_zero_blocks();
bdrv_iterate(init_blk_migration_it, NULL);
}
@@ -762,12 +779,15 @@ static int block_load(QEMUFile *f, void *opaque, int version_id)
nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
}
- buf = g_malloc(BLOCK_SIZE);
-
- qemu_get_buffer(f, buf, BLOCK_SIZE);
- ret = bdrv_write(bs, addr, buf, nr_sectors);
+ if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
+ ret = bdrv_write_zeroes(bs, addr, nr_sectors);
+ } else {
+ buf = g_malloc(BLOCK_SIZE);
+ qemu_get_buffer(f, buf, BLOCK_SIZE);
+ ret = bdrv_write(bs, addr, buf, nr_sectors);
+ g_free(buf);
+ }
- g_free(buf);
if (ret < 0) {
return ret;
}