author    Pavel Butsykin <pbutsykin@virtuozzo.com>  2016-07-22 11:17:52 +0300
committer Kevin Wolf <kwolf@redhat.com>             2016-09-05 19:06:48 +0200
commit    13b9414b5798539e2dbb87a570d96184fe21edf4 (patch)
tree      b78f6ea0b47ef741ec29787c15fe57edf2b98dcc /block
parent    dc7a4a9ed170c19b4036cef2720d3e8fb551cc0f (diff)
drive-backup: added support for data compression
The idea is simple: backup data is written once, block by block, and is typically large, so it is worth saving storage space by compressing it. This patch adds a flag to the QMP/HMP drive-backup command that enables block compression.

Compression must be implemented in the format driver to enable this feature, and format drivers impose some limitations on compressed writes: data can be written only once. For backup this is perfectly fine. These limitations are enforced by the driver, and an error is reported if we do something wrong.

Signed-off-by: Pavel Butsykin <pbutsykin@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Jeff Cody <jcody@redhat.com>
CC: Markus Armbruster <armbru@redhat.com>
CC: Eric Blake <eblake@redhat.com>
CC: John Snow <jsnow@redhat.com>
CC: Stefan Hajnoczi <stefanha@redhat.com>
CC: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
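For illustration only: the matching QAPI/blockdev patch in this series (not this block/ diff) exposes the flag as a "compress" argument to drive-backup, so a compressed full backup could be requested over QMP roughly as follows; the device name and target path are placeholders:

    { "execute": "drive-backup",
      "arguments": { "device": "drive0",
                     "sync": "full",
                     "target": "backup.qcow2",
                     "format": "qcow2",
                     "compress": true } }

If the chosen target format driver does not implement compressed writes, the new check in backup_start() below rejects the request up front.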
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c | 12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/block/backup.c b/block/backup.c
index 2c0532314f..bb3bb9a9eb 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -47,6 +47,7 @@ typedef struct BackupBlockJob {
     uint64_t sectors_read;
     unsigned long *done_bitmap;
     int64_t cluster_size;
+    bool compress;
     NotifierWithReturn before_write;
     QLIST_HEAD(, CowRequest) inflight_reqs;
 } BackupBlockJob;
@@ -154,7 +155,8 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                    bounce_qiov.size, BDRV_REQ_MAY_UNMAP);
     } else {
         ret = blk_co_pwritev(job->target, start * job->cluster_size,
-                             bounce_qiov.size, &bounce_qiov, 0);
+                             bounce_qiov.size, &bounce_qiov,
+                             job->compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
     }
     if (ret < 0) {
         trace_backup_do_cow_write_fail(job, start, ret);
@@ -477,6 +479,7 @@ static void coroutine_fn backup_run(void *opaque)
 void backup_start(const char *job_id, BlockDriverState *bs,
                   BlockDriverState *target, int64_t speed,
                   MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
+                  bool compress,
                   BlockdevOnError on_source_error,
                   BlockdevOnError on_target_error,
                   BlockCompletionFunc *cb, void *opaque,
@@ -507,6 +510,12 @@ void backup_start(const char *job_id, BlockDriverState *bs,
         return;
     }
 
+    if (compress && target->drv->bdrv_co_pwritev_compressed == NULL) {
+        error_setg(errp, "Compression is not supported for this drive %s",
+                   bdrv_get_device_name(target));
+        return;
+    }
+
     if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
         return;
     }
@@ -555,6 +564,7 @@ void backup_start(const char *job_id, BlockDriverState *bs,
     job->sync_mode = sync_mode;
     job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                        sync_bitmap : NULL;
+    job->compress = compress;
     /* If there is no backing file on the target, we cannot rely on COW if our
      * backup cluster size is smaller than the target cluster size. Even for