author     Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2019-07-25 13:05:49 +0300
committer  Eric Blake <eblake@redhat.com>  2019-08-15 13:22:13 -0500
commit     99136607b1edb0129d1ebb6ecac21738ad3d9c36 (patch)
tree       5a10d46634d13d7ff02853ac59193ecbd99b1bb2 /block
parent     3299e5ecf7e1c9443189e50c949d97e536022c59 (diff)
block/stream: use BDRV_REQ_PREFETCH
This helps to avoid extra io, allocations and memory copying.

Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Message-Id: <20190725100550.33801-3-vsementsov@virtuozzo.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
[eblake: fix comment grammar]
Signed-off-by: Eric Blake <eblake@redhat.com>
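For readers unfamiliar with the flag, the sketch below is a minimal, self-contained toy model of the pattern this patch adopts (plain C with hypothetical names, not the QEMU block API): a read flag that asks for copy-on-read to be performed without returning any data, so the caller can pass a NULL buffer instead of allocating and freeing a bounce buffer it never looks at.

/*
 * Toy model of the prefetch idea (hypothetical names, NOT the QEMU API).
 * read_range() normally copies data into the caller's buffer; with the
 * PREFETCH flag it only populates the top layer and skips the copy, so
 * the caller may pass NULL and never allocates a bounce buffer.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { REQ_COPY_ON_READ = 1, REQ_PREFETCH = 2 };

#define IMG_SIZE 16

static char backing[IMG_SIZE] = "backing-data-123";  /* backing layer */
static char top[IMG_SIZE];                           /* top layer */
static char allocated[IMG_SIZE];                     /* 1 = byte allocated in top */

static int read_range(int64_t off, size_t len, char *buf, int flags)
{
    for (size_t i = 0; i < len; i++) {
        if (!allocated[off + i] && (flags & REQ_COPY_ON_READ)) {
            /* copy-on-read: pull the byte from backing into top */
            top[off + i] = backing[off + i];
            allocated[off + i] = 1;
        }
        if (!(flags & REQ_PREFETCH)) {
            /* only copy data back when the caller actually wants it */
            buf[i] = allocated[off + i] ? top[off + i] : backing[off + i];
        }
    }
    return 0;
}

int main(void)
{
    /* Old pattern: bounce buffer exists only to trigger copy-on-read. */
    char *buf = malloc(8);
    read_range(0, 8, buf, REQ_COPY_ON_READ);
    free(buf);

    /* New pattern: no buffer, no allocation, no copy back to the caller. */
    read_range(8, 8, NULL, REQ_COPY_ON_READ | REQ_PREFETCH);

    printf("top layer now holds: %.16s\n", top);
    return 0;
}

In the actual patch the same effect comes from passing a NULL qiov to blk_co_preadv() together with BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH, as the stream_populate() hunk below shows.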
Diffstat (limited to 'block')
-rw-r--r--  block/stream.c  24
1 file changed, 9 insertions, 15 deletions
diff --git a/block/stream.c b/block/stream.c
index 6ac1e7bec4..0d3a6ac7c3 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -22,11 +22,11 @@
 
 enum {
     /*
-     * Size of data buffer for populating the image file. This should be large
-     * enough to process multiple clusters in a single call, so that populating
-     * contiguous regions of the image is efficient.
+     * Maximum chunk size to feed to copy-on-read. This should be
+     * large enough to process multiple clusters in a single call, so
+     * that populating contiguous regions of the image is efficient.
      */
-    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
+    STREAM_CHUNK = 512 * 1024, /* in bytes */
 };
 
 typedef struct StreamBlockJob {
@@ -39,13 +39,12 @@ typedef struct StreamBlockJob {
 } StreamBlockJob;
 
 static int coroutine_fn stream_populate(BlockBackend *blk,
-                                        int64_t offset, uint64_t bytes,
-                                        void *buf)
+                                        int64_t offset, uint64_t bytes)
 {
     assert(bytes < SIZE_MAX);
 
-    /* Copy-on-read the unallocated clusters */
-    return blk_co_pread(blk, offset, bytes, buf, BDRV_REQ_COPY_ON_READ);
+    return blk_co_preadv(blk, offset, bytes, NULL,
+                         BDRV_REQ_COPY_ON_READ | BDRV_REQ_PREFETCH);
 }
 
 static void stream_abort(Job *job)
@@ -117,7 +116,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
     int error = 0;
     int ret = 0;
     int64_t n = 0; /* bytes */
-    void *buf;
 
     if (bs == s->bottom) {
         /* Nothing to stream */
@@ -130,8 +128,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
     }
     job_progress_set_remaining(&s->common.job, len);
 
-    buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE);
-
     /* Turn on copy-on-read for the whole block device so that guest read
      * requests help us make progress. Only do this when copying the entire
      * backing chain since the copy-on-read operation does not take base into
@@ -154,7 +150,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
 
         copy = false;
 
-        ret = bdrv_is_allocated(bs, offset, STREAM_BUFFER_SIZE, &n);
+        ret = bdrv_is_allocated(bs, offset, STREAM_CHUNK, &n);
         if (ret == 1) {
             /* Allocated in the top, no need to copy. */
         } else if (ret >= 0) {
@@ -171,7 +167,7 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
         }
         trace_stream_one_iteration(s, offset, n, ret);
         if (copy) {
-            ret = stream_populate(blk, offset, n, buf);
+            ret = stream_populate(blk, offset, n);
         }
         if (ret < 0) {
             BlockErrorAction action =
@@ -202,8 +198,6 @@ static int coroutine_fn stream_run(Job *job, Error **errp)
         bdrv_disable_copy_on_read(bs);
     }
 
-    qemu_vfree(buf);
-
     /* Do not remove the backing file if an error was there but ignored. */
     return error;
 }