author     Changlong Xie <xiecl.fnst@cn.fujitsu.com>  2016-07-27 15:01:44 +0800
committer  Stefan Hajnoczi <stefanha@redhat.com>      2016-09-13 11:00:56 +0100
commit     a8bbee0edf2339e713205ddcb82b75907fbd1049
tree       41427b86ebadbc3cb7034c767da5acee54a785c6 /block
parent     49d3e828f8cf8500918d4579ab6cd9c0f9584d63
Backup: export interfaces for extra serialization
Normal backup (sync='none') workflow:

step 1. NBD performs an I/O write from client to server:
   qcow2_co_writev
    bdrv_co_writev
     ...
       bdrv_aligned_pwritev
        notifier_with_return_list_notify -> backup_do_cow
         bdrv_driver_pwritev // write new contents

step 2. drive-backup sync=none:
   backup_do_cow
   {
    wait_for_overlapping_requests
    cow_request_begin
    for (; start < end; start++) {
        bdrv_co_readv_no_serialising // read old contents from Secondary disk
        bdrv_co_writev               // write old contents to hidden-disk
    }
    cow_request_end
   }

step 3. Roll back to "step 1" to write the new contents to the Secondary disk.

For replication, we must make sure that we only read the old contents from
the Secondary disk in order to keep contents consistent.

1) Replication workflow of Secondary

                                                    virtio-blk
                                                         ^
 ------->  1 NBD                                         |
 |        server                                  3 replication
 |           ^                                           ^
 |           |        backing           backing          |
 |   Secondary disk 6 <-------- hidden-disk 5 <-------- active-disk 4
 |           |                       ^
 |           '-----------------------'
 |              drive-backup sync=none 2

Hence, we need these interfaces to implement coarse-grained serialization
between the COW of the Secondary disk and the read operation of replication.

Example code showing how to use them:

#include "block/block_backup.h"

static coroutine_fn int xxx_co_readv()
{
        CowRequest req;
        BlockJob *job = secondary_disk->bs->job;

        if (job) {
                backup_wait_for_overlapping_requests(job, start, end);
                backup_cow_request_begin(&req, job, start, end);
                ret = bdrv_co_readv();
                backup_cow_request_end(&req);
                goto out;
        }
        ret = bdrv_co_readv();
out:
        return ret;
}

Signed-off-by: Changlong Xie <xiecl.fnst@cn.fujitsu.com>
Signed-off-by: Wen Congyang <wency@cn.fujitsu.com>
Signed-off-by: Wang WeiWei <wangww.fnst@cn.fujitsu.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 1469602913-20979-4-git-send-email-xiecl.fnst@cn.fujitsu.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
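The three exported wrappers added by this patch delegate to static helpers
that already exist in block/backup.c; their calls are visible in the diff
below, but their bodies are not. As a rough sketch of the serialization
technique only (the inflight_reqs list head in BackupBlockJob is an
assumption about that file's internals, not shown in this patch):

/* Sketch, not part of this patch: approximate shape of the static
 * helpers in block/backup.c that the new wrappers call into. */

/* Block the calling coroutine until no in-flight COW request
 * overlaps the cluster range [start, end). */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                /* Sleep on the conflicting request's wait queue,
                 * then rescan the list once it completes. */
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Publish [start, end) as in flight so later overlapping
 * requests serialize against it. */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Retire the request and wake every coroutine queued on it. */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

The CowRequest fields used here match the struct this patch moves out of
backup.c into a public header.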
Diffstat (limited to 'block')
-rw-r--r--  block/backup.c | 41 ++++++++++++++++++++++++++++++++++-------
1 file changed, 34 insertions(+), 7 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index dbe54e2dce..582bd0f7ee 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -28,13 +28,6 @@
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */
-typedef struct CowRequest {
- int64_t start;
- int64_t end;
- QLIST_ENTRY(CowRequest) list;
- CoQueue wait_queue; /* coroutines blocked on this request */
-} CowRequest;
-
typedef struct BackupBlockJob {
BlockJob common;
BlockBackend *target;
@@ -273,6 +266,40 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
bitmap_zero(backup_job->done_bitmap, len);
}
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ wait_for_overlapping_requests(backup_job, start, end);
+}
+
+void backup_cow_request_begin(CowRequest *req, BlockJob *job,
+ int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ cow_request_begin(req, backup_job, start, end);
+}
+
+void backup_cow_request_end(CowRequest *req)
+{
+ cow_request_end(req);
+}
+
static const BlockJobDriver backup_job_driver = {
.instance_size = sizeof(BackupBlockJob),
.job_type = BLOCK_JOB_TYPE_BACKUP,
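To make the sector-to-cluster rounding in the new wrappers concrete: with
the default 64 KiB cluster (BACKUP_CLUSTER_SIZE_DEFAULT above) and QEMU's
512-byte sectors, sectors_per_cluster is 128. A standalone sketch of the
arithmetic (not QEMU code; DIV_ROUND_UP is redefined locally and the
request values are made up for illustration):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's DIV_ROUND_UP macro. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    /* 64 KiB cluster / 512-byte sectors, per the defaults in backup.c. */
    int64_t sectors_per_cluster = (1 << 16) / 512;    /* 128 */
    int64_t sector_num = 100, nb_sectors = 200;       /* sectors [100, 300) */

    int64_t start = sector_num / sectors_per_cluster; /* 100/128 -> 0 */
    int64_t end = DIV_ROUND_UP(sector_num + nb_sectors,
                               sectors_per_cluster);  /* 300/128 -> 3 */

    /* The request touches clusters [0, 3): any COW of those clusters
     * must drain before a replication read of the Secondary disk
     * proceeds. */
    printf("clusters [%" PRId64 ", %" PRId64 ")\n", start, end);
    return 0;
}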