author    Eric Blake <eblake@redhat.com>  2017-07-07 07:44:54 -0500
committer Kevin Wolf <kwolf@redhat.com>   2017-07-10 13:18:06 +0200
commit    f6ac207893a661a6c32819c43a2ab2789b40d12f
tree      208ba8b288c44ce199e6239b0829aa764dddde91
parent    cf79cdf662aad365b53fc955f45fd47d9883c8df
backup: Switch block_backup.h to byte-based
We are gradually converting to byte-based interfaces, as they are easier to
reason about than sector-based. Continue by converting the public interface
to backup jobs (no semantic change), including a change to CowRequest to
track by bytes instead of cluster indices.

Note that this does not change the difference between the public interface
(starting point, and size of the subsequent range) and the internal
interface (starting and end points).

Signed-off-by: Eric Blake <eblake@redhat.com>
Reviewed-by: John Snow <jsnow@redhat.com>
Reviewed-by: Xie Changlong <xiechanglong@cmss.chinamobile.com>
Reviewed-by: Jeff Cody <jcody@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
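For illustration only (not part of the patch): a minimal sketch of how the
byte-based entry points round an arbitrary caller range out to cluster
boundaries before tracking it as a CowRequest. The ALIGN_DOWN/ALIGN_UP
macros below are local stand-ins for QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP,
and the 64 KiB cluster size and offsets are hypothetical values.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-ins for QEMU's QEMU_ALIGN_DOWN/QEMU_ALIGN_UP macros. */
    #define ALIGN_DOWN(n, m) (((n) / (m)) * (m))
    #define ALIGN_UP(n, m)   ALIGN_DOWN((n) + (m) - 1, (m))

    int main(void)
    {
        int64_t cluster_size = 65536;   /* hypothetical 64 KiB cluster */
        int64_t offset = 70000;         /* byte offset from the caller */
        uint64_t bytes = 1000;          /* byte count from the caller */

        /* Same rounding the patch performs before waiting on or queuing a
         * CowRequest: the public range [offset, offset + bytes) is widened
         * to the clusters that cover it. */
        int64_t start = ALIGN_DOWN(offset, cluster_size);
        int64_t end = ALIGN_UP(offset + (int64_t)bytes, cluster_size);

        printf("bytes [%" PRId64 ", %" PRId64 ") -> clusters [%" PRId64 ", %" PRId64 ")\n",
               offset, offset + (int64_t)bytes, start, end);
        /* Prints: bytes [70000, 71000) -> clusters [65536, 131072) */
        return 0;
    }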
-rw-r--r--   block/backup.c                 31
-rw-r--r--   block/replication.c            12
-rw-r--r--   include/block/block_backup.h   11
3 files changed, 28 insertions(+), 26 deletions(-)
diff --git a/block/backup.c b/block/backup.c
index 4e64710b39..503fbec8c1 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -64,7 +64,7 @@ static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
do {
retry = false;
QLIST_FOREACH(req, &job->inflight_reqs, list) {
- if (end > req->start && start < req->end) {
+ if (end > req->start_byte && start < req->end_byte) {
qemu_co_queue_wait(&req->wait_queue, NULL);
retry = true;
break;
@@ -75,10 +75,10 @@ static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
- int64_t start, int64_t end)
+ int64_t start, int64_t end)
{
- req->start = start;
- req->end = end;
+ req->start_byte = start;
+ req->end_byte = end;
qemu_co_queue_init(&req->wait_queue);
QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}
@@ -114,8 +114,10 @@ static int coroutine_fn backup_do_cow(BackupBlockJob *job,
sector_num * BDRV_SECTOR_SIZE,
nb_sectors * BDRV_SECTOR_SIZE);
- wait_for_overlapping_requests(job, start, end);
- cow_request_begin(&cow_request, job, start, end);
+ wait_for_overlapping_requests(job, start * job->cluster_size,
+ end * job->cluster_size);
+ cow_request_begin(&cow_request, job, start * job->cluster_size,
+ end * job->cluster_size);
for (; start < end; start++) {
if (test_bit(start, job->done_bitmap)) {
@@ -277,32 +279,29 @@ void backup_do_checkpoint(BlockJob *job, Error **errp)
bitmap_zero(backup_job->done_bitmap, len);
}
-void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
- int nb_sectors)
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
+ uint64_t bytes)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
- int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
int64_t start, end;
assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
- start = sector_num / sectors_per_cluster;
- end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
+ end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
wait_for_overlapping_requests(backup_job, start, end);
}
void backup_cow_request_begin(CowRequest *req, BlockJob *job,
- int64_t sector_num,
- int nb_sectors)
+ int64_t offset, uint64_t bytes)
{
BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
- int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
int64_t start, end;
assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
- start = sector_num / sectors_per_cluster;
- end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ start = QEMU_ALIGN_DOWN(offset, backup_job->cluster_size);
+ end = QEMU_ALIGN_UP(offset + bytes, backup_job->cluster_size);
cow_request_begin(req, backup_job, start, end);
}
diff --git a/block/replication.c b/block/replication.c
index 3885f04c31..8f3aba7a20 100644
--- a/block/replication.c
+++ b/block/replication.c
@@ -234,10 +234,14 @@ static coroutine_fn int replication_co_readv(BlockDriverState *bs,
}
if (job) {
- backup_wait_for_overlapping_requests(child->bs->job, sector_num,
- remaining_sectors);
- backup_cow_request_begin(&req, child->bs->job, sector_num,
- remaining_sectors);
+ uint64_t remaining_bytes = remaining_sectors * BDRV_SECTOR_SIZE;
+
+ backup_wait_for_overlapping_requests(child->bs->job,
+ sector_num * BDRV_SECTOR_SIZE,
+ remaining_bytes);
+ backup_cow_request_begin(&req, child->bs->job,
+ sector_num * BDRV_SECTOR_SIZE,
+ remaining_bytes);
ret = bdrv_co_readv(bs->file, sector_num, remaining_sectors,
qiov);
backup_cow_request_end(&req);
diff --git a/include/block/block_backup.h b/include/block/block_backup.h
index 8a759477a3..994a3bd2ec 100644
--- a/include/block/block_backup.h
+++ b/include/block/block_backup.h
@@ -21,17 +21,16 @@
#include "block/block_int.h"
typedef struct CowRequest {
- int64_t start;
- int64_t end;
+ int64_t start_byte;
+ int64_t end_byte;
QLIST_ENTRY(CowRequest) list;
CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;
-void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
- int nb_sectors);
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t offset,
+ uint64_t bytes);
void backup_cow_request_begin(CowRequest *req, BlockJob *job,
- int64_t sector_num,
- int nb_sectors);
+ int64_t offset, uint64_t bytes);
void backup_cow_request_end(CowRequest *req);
void backup_do_checkpoint(BlockJob *job, Error **errp);
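For illustration only (not part of the patch): a minimal sketch of the
half-open overlap test that wait_for_overlapping_requests() applies to the
renamed start_byte/end_byte fields. The struct and the byte values here are
simplified stand-ins, not the real BackupBlockJob state.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for CowRequest: only the byte bounds matter here. */
    typedef struct {
        int64_t start_byte;   /* inclusive */
        int64_t end_byte;     /* exclusive */
    } FakeCowRequest;

    /* Same test as the patched code: [start, end) overlaps
     * [req->start_byte, req->end_byte). */
    static bool overlaps(const FakeCowRequest *req, int64_t start, int64_t end)
    {
        return end > req->start_byte && start < req->end_byte;
    }

    int main(void)
    {
        FakeCowRequest in_flight = { .start_byte = 65536, .end_byte = 131072 };

        printf("%d\n", overlaps(&in_flight, 0, 65536));       /* 0: adjacent, no overlap */
        printf("%d\n", overlaps(&in_flight, 70000, 71000));   /* 1: inside the request   */
        printf("%d\n", overlaps(&in_flight, 131072, 200000)); /* 0: starts at its end    */
        return 0;
    }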