author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>    2017-10-12 16:53:11 +0300
committer  Jeff Cody <jcody@redhat.com>                               2017-12-18 10:54:13 -0500
commit     8cc6dc6215fa2a278fd853a2caca172e43c6263e
tree       b68e70373e400b1b79b6e8d85811cfa5f3db0420 /block
parent     a193b0f0a8d7735f4eb2ff863fd0902a5fa5eec6
backup: init copy_bitmap from sync_bitmap for incremental
We should not copy non-dirty clusters in write notifiers, so initialize
copy_bitmap from sync_bitmap.
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171012135313.227864-4-vsementsov@virtuozzo.com
Signed-off-by: Jeff Cody <jcody@redhat.com>
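As a rough illustration of the idea (a minimal, self-contained sketch: the array-backed bitmaps, the helper name init_copy_from_sync and the 4 KiB/16 KiB sizes are made up for the example and are not QEMU's HBitmap/BdrvDirtyBitmap API), every extent marked dirty in the fine-grained sync bitmap is rounded out to whole clusters and set in the cluster-granularity copy bitmap, so only those clusters need to be copied later:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Toy model: sync_dirty[] has one flag per "granularity" bytes,
 * copy[] has one flag per "cluster_size" bytes.  Illustrative only. */
static void init_copy_from_sync(const bool *sync_dirty, int64_t sync_bits,
                                int64_t granularity, int64_t cluster_size,
                                bool *copy, int64_t nb_clusters)
{
    for (int64_t i = 0; i < sync_bits; i++) {
        if (!sync_dirty[i]) {
            continue;
        }
        /* Round the dirty extent [i*granularity, (i+1)*granularity) out
         * to whole clusters and mark them for copying. */
        int64_t first = (i * granularity) / cluster_size;
        int64_t last = DIV_ROUND_UP((i + 1) * granularity, cluster_size);
        if (last > nb_clusters) {
            last = nb_clusters;
        }
        for (int64_t c = first; c < last; c++) {
            copy[c] = true;
        }
    }
}

int main(void)
{
    /* 8 dirty-bitmap bits of 4 KiB each (32 KiB disk), 16 KiB clusters. */
    bool sync_dirty[8] = { false, true, false, false, false, false, true, false };
    bool copy[2] = { false, false };

    init_copy_from_sync(sync_dirty, 8, 4096, 16384, copy, 2);
    printf("copy clusters: %d %d\n", copy[0], copy[1]); /* -> 1 1 */
    return 0;
}

The patch itself reaches the same result without visiting every bit: it walks dirty extents with a BdrvDirtyBitmapIter and skips ahead cluster by cluster, as shown in the diff below.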
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c  44
1 file changed, 43 insertions(+), 1 deletion(-)
diff --git a/block/backup.c b/block/backup.c
index 5175808093..b8901ea662 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -422,6 +422,43 @@ out:
     return ret;
 }
 
+/* init copy_bitmap from sync_bitmap */
+static void backup_incremental_init_copy_bitmap(BackupBlockJob *job)
+{
+    BdrvDirtyBitmapIter *dbi;
+    int64_t offset;
+    int64_t end = DIV_ROUND_UP(bdrv_dirty_bitmap_size(job->sync_bitmap),
+                               job->cluster_size);
+
+    dbi = bdrv_dirty_iter_new(job->sync_bitmap);
+    while ((offset = bdrv_dirty_iter_next(dbi)) != -1) {
+        int64_t cluster = offset / job->cluster_size;
+        int64_t next_cluster;
+
+        offset += bdrv_dirty_bitmap_granularity(job->sync_bitmap);
+        if (offset >= bdrv_dirty_bitmap_size(job->sync_bitmap)) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
+        }
+
+        offset = bdrv_dirty_bitmap_next_zero(job->sync_bitmap, offset);
+        if (offset == -1) {
+            hbitmap_set(job->copy_bitmap, cluster, end - cluster);
+            break;
+        }
+
+        next_cluster = DIV_ROUND_UP(offset, job->cluster_size);
+        hbitmap_set(job->copy_bitmap, cluster, next_cluster - cluster);
+        if (next_cluster >= end) {
+            break;
+        }
+
+        bdrv_set_dirty_iter(dbi, next_cluster * job->cluster_size);
+    }
+
+    bdrv_dirty_iter_free(dbi);
+}
+
 static void coroutine_fn backup_run(void *opaque)
 {
     BackupBlockJob *job = opaque;
@@ -435,7 +472,12 @@ static void coroutine_fn backup_run(void *opaque)
 
     nb_clusters = DIV_ROUND_UP(job->common.len, job->cluster_size);
     job->copy_bitmap = hbitmap_alloc(nb_clusters, 0);
-    hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
+        backup_incremental_init_copy_bitmap(job);
+    } else {
+        hbitmap_set(job->copy_bitmap, 0, nb_clusters);
+    }
+
     job->before_write.notify = backup_before_write_notify;
     bdrv_add_before_write_notifier(bs, &job->before_write);
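Read roughly (this numeric walk-through is an editorial illustration, not part of the commit): each loop iteration takes the next dirty offset from sync_bitmap, computes the cluster containing it, then looks for the next clean offset; all clusters up to that point are marked in copy_bitmap with a single hbitmap_set() call, and the iterator is repositioned to the first byte after the last marked cluster, so dirty bits that fall inside an already-marked cluster are never revisited. For example, with a 64 KiB cluster_size and a 4 KiB sync-bitmap granularity, a dirty run starting at offset 0 whose next zero bit is at offset 98304 gives next_cluster = DIV_ROUND_UP(98304, 65536) = 2: clusters 0 and 1 are set at once and the iterator resumes at byte 131072. If the advance past the last dirty bit runs off the end of the bitmap, or no later zero bit exists, the two early-exit branches simply mark every remaining cluster up to end and stop.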