path: root/block/block-copy.c
Diffstat (limited to 'block/block-copy.c')
-rw-r--r--  block/block-copy.c | 118
1 file changed, 51 insertions(+), 67 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index cd45b373a9..845b2423dc 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -126,79 +126,64 @@ void block_copy_set_callbacks(
}
/*
- * Copy range to target with a bounce buffer and return the bytes copied. If
- * error occurred, return a negative error number
+ * block_copy_do_copy
+ *
+ * Do copy of cluster-aligned chunk. @end is allowed to exceed s->len only to
+ * cover the last cluster when s->len is not aligned to clusters.
+ *
+ * No sync here: neither bitmap nor intersecting-request handling, only the copy.
+ *
+ * Returns 0 on success.
*/
-static int coroutine_fn block_copy_with_bounce_buffer(BlockCopyState *s,
- int64_t start,
- int64_t end,
- bool *error_is_read)
+static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
+ int64_t start, int64_t end,
+ bool *error_is_read)
{
int ret;
- int nbytes;
- void *bounce_buffer = qemu_blockalign(s->source->bs, s->cluster_size);
+ int nbytes = MIN(end, s->len) - start;
+ void *bounce_buffer = NULL;
assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
- nbytes = MIN(s->cluster_size, s->len - start);
+ assert(QEMU_IS_ALIGNED(end, s->cluster_size));
+ assert(end < s->len || end == QEMU_ALIGN_UP(s->len, s->cluster_size));
+
+ if (s->use_copy_range) {
+ ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
+ 0, s->write_flags);
+ if (ret < 0) {
+ trace_block_copy_copy_range_fail(s, start, ret);
+ s->use_copy_range = false;
+ /* Fallback to read+write with allocated buffer */
+ } else {
+ goto out;
+ }
+ }
+
+ bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
if (ret < 0) {
- trace_block_copy_with_bounce_buffer_read_fail(s, start, ret);
+ trace_block_copy_read_fail(s, start, ret);
if (error_is_read) {
*error_is_read = true;
}
- goto fail;
+ goto out;
}
ret = bdrv_co_pwrite(s->target, start, nbytes, bounce_buffer,
s->write_flags);
if (ret < 0) {
- trace_block_copy_with_bounce_buffer_write_fail(s, start, ret);
+ trace_block_copy_write_fail(s, start, ret);
if (error_is_read) {
*error_is_read = false;
}
- goto fail;
+ goto out;
}
+out:
qemu_vfree(bounce_buffer);
- return nbytes;
-fail:
- qemu_vfree(bounce_buffer);
- bdrv_set_dirty_bitmap(s->copy_bitmap, start, s->cluster_size);
return ret;
-
-}
-
-/*
- * Copy range to target and return the bytes copied. If error occurred, return a
- * negative error number.
- */
-static int coroutine_fn block_copy_with_offload(BlockCopyState *s,
- int64_t start,
- int64_t end)
-{
- int ret;
- int nr_clusters;
- int nbytes;
-
- assert(QEMU_IS_ALIGNED(s->copy_range_size, s->cluster_size));
- assert(QEMU_IS_ALIGNED(start, s->cluster_size));
- nbytes = MIN(s->copy_range_size, MIN(end, s->len) - start);
- nr_clusters = DIV_ROUND_UP(nbytes, s->cluster_size);
- bdrv_reset_dirty_bitmap(s->copy_bitmap, start,
- s->cluster_size * nr_clusters);
- ret = bdrv_co_copy_range(s->source, start, s->target, start, nbytes,
- 0, s->write_flags);
- if (ret < 0) {
- trace_block_copy_with_offload_fail(s, start, ret);
- bdrv_set_dirty_bitmap(s->copy_bitmap, start,
- s->cluster_size * nr_clusters);
- return ret;
- }
-
- return nbytes;
}
/*
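The new block_copy_do_copy() folds the two former helpers into one: it first tries the copy offload and, if that fails, permanently switches to the bounce-buffer read+write path, with a single out: label freeing the (possibly NULL) buffer. Below is a minimal standalone sketch of that same pattern, using plain POSIX I/O in place of the QEMU block layer; try_offload_copy() is a hypothetical stand-in, not a QEMU API.

#define _XOPEN_SOURCE 500
#include <errno.h>
#include <stdbool.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical offload primitive; here it always reports "not supported"
 * so the fallback path is exercised. */
static int try_offload_copy(int src, int dst, off_t start, size_t nbytes)
{
    (void)src; (void)dst; (void)start; (void)nbytes;
    return -ENOTSUP;
}

static int copy_chunk(int src, int dst, off_t start, size_t nbytes,
                      bool *use_offload, bool *error_is_read)
{
    void *bounce = NULL;
    ssize_t n;
    int ret;

    if (*use_offload) {
        ret = try_offload_copy(src, dst, start, nbytes);
        if (ret == 0) {
            goto out;              /* offload worked, nothing more to do */
        }
        *use_offload = false;      /* disable offload for all later chunks */
    }

    bounce = malloc(nbytes);
    if (!bounce) {
        ret = -ENOMEM;
        goto out;
    }

    n = pread(src, bounce, nbytes, start);
    if (n < 0 || (size_t)n != nbytes) {  /* short reads treated as errors */
        ret = n < 0 ? -errno : -EIO;
        *error_is_read = true;           /* caller can tell read from write */
        goto out;
    }

    n = pwrite(dst, bounce, nbytes, start);
    if (n < 0 || (size_t)n != nbytes) {
        ret = n < 0 ? -errno : -EIO;
        *error_is_read = false;
        goto out;
    }
    ret = 0;

out:
    free(bounce);                  /* free(NULL) is a no-op, like qemu_vfree */
    return ret;
}

The sketch compiles as-is; a caller would supply two seekable file descriptors and a cluster-aligned (start, nbytes) range, mirroring the alignment asserted by block_copy_do_copy().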
@@ -294,7 +279,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
block_copy_inflight_req_begin(s, &req, start, end);
while (start < end) {
- int64_t dirty_end;
+ int64_t next_zero, chunk_end;
if (!bdrv_dirty_bitmap_get(s->copy_bitmap, start)) {
trace_block_copy_skip(s, start);
@@ -302,10 +287,15 @@ int coroutine_fn block_copy(BlockCopyState *s,
continue; /* already copied */
}
- dirty_end = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
- (end - start));
- if (dirty_end < 0) {
- dirty_end = end;
+ chunk_end = MIN(end, start + (s->use_copy_range ?
+ s->copy_range_size : s->cluster_size));
+
+ next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
+ chunk_end - start);
+ if (next_zero >= 0) {
+ assert(next_zero > start); /* start is dirty */
+ assert(next_zero < chunk_end); /* no need to do MIN() */
+ chunk_end = next_zero;
}
if (s->skip_unallocated) {
@@ -316,27 +306,21 @@ int coroutine_fn block_copy(BlockCopyState *s,
continue;
}
/* Clamp to known allocated region */
- dirty_end = MIN(dirty_end, start + status_bytes);
+ chunk_end = MIN(chunk_end, start + status_bytes);
}
trace_block_copy_process(s, start);
- if (s->use_copy_range) {
- ret = block_copy_with_offload(s, start, dirty_end);
- if (ret < 0) {
- s->use_copy_range = false;
- }
- }
- if (!s->use_copy_range) {
- ret = block_copy_with_bounce_buffer(s, start, dirty_end,
- error_is_read);
- }
+ bdrv_reset_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
+
+ ret = block_copy_do_copy(s, start, chunk_end, error_is_read);
if (ret < 0) {
+ bdrv_set_dirty_bitmap(s->copy_bitmap, start, chunk_end - start);
break;
}
- start += ret;
- s->progress_bytes_callback(ret, s->progress_opaque);
+ s->progress_bytes_callback(chunk_end - start, s->progress_opaque);
+ start = chunk_end;
ret = 0;
}
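The rewritten loop now sizes each chunk up front instead of letting the copy helpers decide: it caps the chunk at copy_range_size (or cluster_size for the bounce-buffer path), trims it at the first already-copied cluster, clears the dirty bitmap before the copy, and re-dirties the range only if block_copy_do_copy() fails. A toy sketch of that chunk sizing, under the stated assumptions, with next_zero() standing in for bdrv_dirty_bitmap_next_zero():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER 4096

/* Toy dirty bitmap: clusters 0, 1 and 3 still need copying. */
static bool dirty[16] = { [0] = true, [1] = true, [3] = true };

/* Return offset of first clean cluster in [start, start + bytes), or -1. */
static int64_t next_zero(int64_t start, int64_t bytes)
{
    for (int64_t off = start; off < start + bytes; off += CLUSTER) {
        if (!dirty[off / CLUSTER]) {
            return off;
        }
    }
    return -1;
}

int main(void)
{
    int64_t start = 0, end = 16 * CLUSTER;
    int64_t max_chunk = 8 * CLUSTER;   /* copy_range_size or cluster_size */

    int64_t chunk_end = end < start + max_chunk ? end : start + max_chunk;
    int64_t nz = next_zero(start, chunk_end - start);
    if (nz >= 0) {
        chunk_end = nz;                /* stop before already-copied data */
    }
    printf("copy [%jd, %jd)\n", (intmax_t)start, (intmax_t)chunk_end);
    return 0;
}

With that bitmap the program prints "copy [0, 8192)": the chunk stops at the first clean cluster. Resetting the bitmap before the copy and setting it back only on failure keeps all bitmap handling in the caller, which is exactly what the new comment above block_copy_do_copy() promises.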