author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2019-04-29 12:08:41 +0300
committer  Max Reitz <mreitz@redhat.com>  2019-05-28 20:30:55 +0200
commit     c334e897d08eea1f5a3a95f6a2208afe6757c103 (patch)
tree       51bc3178e3cc1ad70c47a41a43d00b3730ab04ec /block/backup.c
parent     9eb5a248f3e50c1f034bc6ff4b2f25c8c56515a5 (diff)
block/backup: unify different modes code path
Do full, top and incremental mode copying all in one place. This
unifies the code path and helps further improvements.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20190429090842.57910-5-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
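In brief, the unified loop looks like this (a condensed sketch of the new
backup_loop(), drawn from the diff below; the per-cluster copy and its
error handling are elided):

    /* Condensed sketch of backup_loop(): one copy_bitmap-driven loop
     * serves FULL, TOP and INCREMENTAL modes. TOP merely skips (and
     * clears from the bitmap) clusters unallocated in the top layer. */
    static int coroutine_fn backup_loop(BackupBlockJob *job)
    {
        int64_t offset;
        HBitmapIter hbi;
        BlockDriverState *bs = blk_bs(job->common.blk);

        hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
        while ((offset = hbitmap_iter_next(&hbi)) != -1) {
            if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
                bdrv_is_unallocated_range(bs, offset, job->cluster_size))
            {
                hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
                continue;
            }
            /* ... copy this cluster, honoring the job's error action ... */
        }
        return 0;
    }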
Diffstat (limited to 'block/backup.c')
-rw-r--r--  block/backup.c  43
1 file changed, 10 insertions(+), 33 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 78f1b79354..5b3fc9d123 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -384,15 +384,23 @@ static bool bdrv_is_unallocated_range(BlockDriverState *bs,
     return offset >= end;
 }
 
-static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
+static int coroutine_fn backup_loop(BackupBlockJob *job)
 {
     int ret;
     bool error_is_read;
     int64_t offset;
     HBitmapIter hbi;
+    BlockDriverState *bs = blk_bs(job->common.blk);
 
     hbitmap_iter_init(&hbi, job->copy_bitmap, 0);
     while ((offset = hbitmap_iter_next(&hbi)) != -1) {
+        if (job->sync_mode == MIRROR_SYNC_MODE_TOP &&
+            bdrv_is_unallocated_range(bs, offset, job->cluster_size))
+        {
+            hbitmap_reset(job->copy_bitmap, offset, job->cluster_size);
+            continue;
+        }
+
         do {
             if (yield_and_check(job)) {
                 return 0;
@@ -437,7 +445,6 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
 {
     BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
     BlockDriverState *bs = blk_bs(s->common.blk);
-    int64_t offset;
     int ret = 0;
 
     QLIST_INIT(&s->inflight_reqs);
@@ -462,38 +469,8 @@ static int coroutine_fn backup_run(Job *job, Error **errp)
              * notify callback service CoW requests. */
             job_yield(job);
         }
-    } else if (s->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
-        ret = backup_run_incremental(s);
     } else {
-        /* Both FULL and TOP SYNC_MODE's require copying.. */
-        for (offset = 0; offset < s->len;
-             offset += s->cluster_size) {
-            bool error_is_read;
-
-            if (yield_and_check(s)) {
-                break;
-            }
-
-            if (s->sync_mode == MIRROR_SYNC_MODE_TOP &&
-                bdrv_is_unallocated_range(bs, offset, s->cluster_size))
-            {
-                continue;
-            }
-
-            ret = backup_do_cow(s, offset, s->cluster_size,
-                                &error_is_read, false);
-            if (ret < 0) {
-                /* Depending on error action, fail now or retry cluster */
-                BlockErrorAction action =
-                    backup_error_action(s, error_is_read, -ret);
-                if (action == BLOCK_ERROR_ACTION_REPORT) {
-                    break;
-                } else {
-                    offset -= s->cluster_size;
-                    continue;
-                }
-            }
-        }
+        ret = backup_loop(s);
     }
 
     notifier_with_return_remove(&s->before_write);
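Note on the retry semantics: the removed FULL/TOP loop retried a failed
cluster by stepping offset back by cluster_size and continuing; in
backup_loop() the same effect comes from the do/while around the copy,
which re-attempts the same cluster until it succeeds or the error action
says to report. The body of that do/while is outside the hunk's context
above; as a sketch, assuming it matches the pre-patch
backup_run_incremental() retry loop that this patch leaves untouched:

    /* Sketch of the inner retry loop (assumed unchanged by this patch):
     * re-attempt the same cluster until the copy succeeds, or bail out
     * when the configured error action is BLOCK_ERROR_ACTION_REPORT. */
    do {
        if (yield_and_check(job)) {
            return 0;
        }
        ret = backup_do_cow(job, offset, job->cluster_size,
                            &error_is_read, false);
        if (ret < 0 &&
            backup_error_action(job, error_is_read, -ret) ==
            BLOCK_ERROR_ACTION_REPORT)
        {
            return ret;
        }
    } while (ret < 0);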