author    Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>    2019-10-22 14:18:05 +0300
committer Max Reitz <mreitz@redhat.com>    2019-10-28 11:22:31 +0100
commit    0e2402452f1f2042923a5206b9ff3e9d70c77811 (patch)
tree      f2e6e90b0d36f019fac0b9555c4c19b98d2ebfc0 /block
parent    7f739d0e5375a934f0eade3cd38e6d33673beec0 (diff)
block/block-copy: increase buffered copy request
No reason to limit buffered copy to one cluster. Let's allow up to 1 MiB.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-id: 20191022111805.3432-7-vsementsov@virtuozzo.com
Signed-off-by: Max Reitz <mreitz@redhat.com>
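As a rough standalone illustration of the effect (a minimal sketch, not QEMU
code: MiB, MAX and buffered_copy_size below are local stand-ins for the QEMU
macros and the patched logic):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MiB (1024 * 1024)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* After this patch a buffered (read+write) copy request covers
 * MAX(cluster_size, 1 MiB) instead of a single cluster. */
static uint32_t buffered_copy_size(uint32_t cluster_size)
{
    return MAX(cluster_size, BLOCK_COPY_MAX_BUFFER);
}

int main(void)
{
    /* Common 64 KiB qcow2 clusters: requests grow from 64 KiB to 1 MiB. */
    printf("%" PRIu32 "\n", buffered_copy_size(64 * 1024));
    /* Clusters already larger than 1 MiB keep their own size. */
    printf("%" PRIu32 "\n", buffered_copy_size(2 * MiB));
    return 0;
}

With the common 64 KiB qcow2 cluster size this turns sixteen small requests
into a single 1 MiB request per chunk.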
Diffstat (limited to 'block')
-rw-r--r--   block/block-copy.c | 48 ++++++++++++++++++++++++++++++++----------------
1 file changed, 32 insertions(+), 16 deletions(-)
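The hunks below lean on QEMU's MIN_NON_ZERO() helper, for which a zero operand
means "no limit". A minimal sketch of that behavior (assuming the semantics of
the macro in include/qemu/osdep.h; an illustration, not a verbatim copy):

#define MIN(a, b) ((a) < (b) ? (a) : (b))
/* Sketch: the smaller of two limits, except that 0 means "unlimited"
 * and never wins; MIN_NON_ZERO(0, 0) stays 0, i.e. unlimited. */
#define MIN_NON_ZERO(a, b) \
    ((a) == 0 ? (b) : ((b) == 0 ? (a) : MIN((a), (b))))

Under that reading, the MIN_NON_ZERO(INT_MAX, ...) in the second hunk leaves
max_transfer at INT_MAX when neither the source nor the target driver reports
a transfer limit.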
diff --git a/block/block-copy.c b/block/block-copy.c
index 7f0ebb58f8..c39cc9cffe 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -21,6 +21,7 @@
 #include "qemu/units.h"
 
 #define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
+#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
 #define BLOCK_COPY_MAX_MEM (128 * MiB)
 
 static void coroutine_fn block_copy_wait_inflight_reqs(BlockCopyState *s,
@@ -75,10 +76,8 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
 {
     BlockCopyState *s;
     BdrvDirtyBitmap *copy_bitmap;
-
-    /* Ignore BLOCK_COPY_MAX_COPY_RANGE if requested cluster_size is larger */
     uint32_t max_transfer =
-        MIN_NON_ZERO(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+        MIN_NON_ZERO(INT_MAX,
                      MIN_NON_ZERO(source->bs->bl.max_transfer,
                                   target->bs->bl.max_transfer));
 
@@ -100,17 +99,28 @@ BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
         .mem = shres_create(BLOCK_COPY_MAX_MEM),
     };
 
-    s->copy_range_size = QEMU_ALIGN_DOWN(max_transfer, cluster_size),
-    /*
-     * Set use_copy_range, consider the following:
-     * 1. Compression is not supported for copy_range.
-     * 2. copy_range does not respect max_transfer (it's a TODO), so we factor
-     *    that in here. If max_transfer is smaller than the job->cluster_size,
-     *    we do not use copy_range (in that case it's zero after aligning down
-     *    above).
-     */
-    s->use_copy_range =
-        !(write_flags & BDRV_REQ_WRITE_COMPRESSED) && s->copy_range_size > 0;
+    if (max_transfer < cluster_size) {
+        /*
+         * copy_range does not respect max_transfer. We don't want to bother
+         * with requests smaller than block-copy cluster size, so fallback to
+         * buffered copying (read and write respect max_transfer on their
+         * behalf).
+         */
+        s->use_copy_range = false;
+        s->copy_size = cluster_size;
+    } else if (write_flags & BDRV_REQ_WRITE_COMPRESSED) {
+        /* Compression is not supported for copy_range */
+        s->use_copy_range = false;
+        s->copy_size = MAX(cluster_size, BLOCK_COPY_MAX_BUFFER);
+    } else {
+        /*
+         * copy_range does not respect max_transfer (it's a TODO), so we factor
+         * that in here.
+         */
+        s->use_copy_range = true;
+        s->copy_size = MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
+                           QEMU_ALIGN_DOWN(max_transfer, cluster_size));
+    }
 
     QLIST_INIT(&s->inflight_reqs);
@@ -156,12 +166,19 @@ static int coroutine_fn block_copy_do_copy(BlockCopyState *s,
         if (ret < 0) {
             trace_block_copy_copy_range_fail(s, start, ret);
             s->use_copy_range = false;
+            s->copy_size = MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER);
             /* Fallback to read+write with allocated buffer */
         } else {
             goto out;
         }
     }
 
+    /*
+     * In case of failed copy_range request above, we may proceed with buffered
+     * request larger than BLOCK_COPY_MAX_BUFFER. Still, further requests will
+     * be properly limited, so don't care too much.
+     */
+
     bounce_buffer = qemu_blockalign(s->source->bs, nbytes);
 
     ret = bdrv_co_pread(s->source, start, nbytes, bounce_buffer, 0);
@@ -290,8 +307,7 @@ int coroutine_fn block_copy(BlockCopyState *s,
             continue; /* already copied */
         }
 
-        chunk_end = MIN(end, start + (s->use_copy_range ?
-                                      s->copy_range_size : s->cluster_size));
+        chunk_end = MIN(end, start + s->copy_size);
 
         next_zero = bdrv_dirty_bitmap_next_zero(s->copy_bitmap, start,
                                                 chunk_end - start);
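
Putting the pieces together, a hedged standalone sketch of the new sizing
policy and of how block_copy() then slices a dirty range into requests
(s->use_copy_range and s->copy_size mirror the patch; pick_copy_size and the
helper macros are local stand-ins, not QEMU API):

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MiB (1024 * 1024)
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m))

/* The three-way decision from block_copy_state_new() after this patch. */
static uint64_t pick_copy_size(uint64_t cluster_size, uint64_t max_transfer,
                               bool compressed, bool *use_copy_range)
{
    if (max_transfer < cluster_size) {
        /* copy_range ignores max_transfer: stick to buffered I/O. */
        *use_copy_range = false;
        return cluster_size;
    }
    if (compressed) {
        /* Compression is not supported for copy_range. */
        *use_copy_range = false;
        return MAX(cluster_size, BLOCK_COPY_MAX_BUFFER);
    }
    *use_copy_range = true;
    return MIN(MAX(cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
               QEMU_ALIGN_DOWN(max_transfer, cluster_size));
}

int main(void)
{
    bool use_copy_range;
    uint64_t copy_size = pick_copy_size(64 * 1024, 2 * MiB, false,
                                        &use_copy_range);

    /* Slice [start, end) the way block_copy() now does with the single
     * s->copy_size field (ignoring the dirty-bitmap refinement). */
    for (uint64_t start = 0, end = 5 * MiB; start < end; ) {
        uint64_t chunk_end = MIN(end, start + copy_size);
        printf("request: %" PRIu64 "..%" PRIu64 "\n", start, chunk_end);
        start = chunk_end;
    }
    return 0;
}

With 64 KiB clusters and a 2 MiB max_transfer this picks copy_range with 2 MiB
chunks; the loop then issues 2 MiB, 2 MiB and 1 MiB requests for a 5 MiB range.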