| field | value | date |
|---|---|---|
| author | Peter Maydell <peter.maydell@linaro.org> | 2018-06-04 18:34:04 +0100 |
| committer | Peter Maydell <peter.maydell@linaro.org> | 2018-06-04 18:34:04 +0100 |
| commit | 0d514fa23402ab7b4f1c965e0631d953bbe4d3b7 | |
| tree | 15694d41fba306b5b8e545d9a6e15bb199c64b25 /block/qcow2.c | |
| parent | 5d7ad3ce103af3ab7c860a4ca97653f8ffa6e29c | |
| parent | 21891a5a3011608845b5d7f1f9cce60cdc2bcc62 | |
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request
* Copy offloading for qemu-img convert (iSCSI, raw, and qcow2)
If the underlying storage supports copy offloading, qemu-img convert will
use it instead of performing reads and writes. This avoids data transfers
and thus frees up storage bandwidth for other purposes. SCSI EXTENDED COPY
and Linux copy_file_range(2) are used to implement this optimization (a brief
sketch of the copy_file_range(2) path is included after the commit list below).
* Drop spurious "WARNING: I/O thread spun for 1000 iterations" warning
# gpg: Signature made Mon 04 Jun 2018 12:20:08 BST
# gpg: using RSA key 9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8
* remotes/stefanha/tags/block-pull-request:
main-loop: drop spin_counter
qemu-img: Convert with copy offloading
block-backend: Add blk_co_copy_range
iscsi: Implement copy offloading
iscsi: Create and use iscsi_co_wait_for_task
iscsi: Query and save device designator when opening
file-posix: Implement bdrv_co_copy_range
qcow2: Implement copy offloading
raw: Implement copy offloading
raw: Check byte range uniformly
block: Introduce API for copy offloading
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
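
For readers unfamiliar with the Linux side of this optimization, here is a minimal, self-contained sketch of how copy_file_range(2) offloads a copy between two file descriptors, with a plain read()/write() fallback when offloading is unavailable. This is illustration only, not code from this pull request: the helper name `copy_fd` and the 64 KiB fallback buffer are arbitrary choices, and the syscall requires Linux >= 4.5 and glibc >= 2.27.

```c
/* Hypothetical stand-alone demo, not part of QEMU. */
#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

/* Copy 'len' bytes from in_fd to out_fd, preferring in-kernel offload. */
static int copy_fd(int in_fd, int out_fd, off_t len)
{
    while (len > 0) {
        /* Ask the kernel to move the bytes; no data crosses user space. */
        ssize_t n = copy_file_range(in_fd, NULL, out_fd, NULL, (size_t)len, 0);
        if (n < 0) {
            if (errno != ENOSYS && errno != EXDEV && errno != EOPNOTSUPP) {
                return -1;
            }
            /* Offload unavailable: do one ordinary read()/write() round. */
            char buf[65536];
            size_t chunk = (size_t)len < sizeof(buf) ? (size_t)len : sizeof(buf);
            ssize_t r = read(in_fd, buf, chunk);
            if (r <= 0 || write(out_fd, buf, r) != r) {
                return -1;
            }
            n = r;
        } else if (n == 0) {
            return -1; /* unexpected end of file */
        }
        len -= n;
    }
    return 0;
}

int main(int argc, char **argv)
{
    if (argc != 3) {
        fprintf(stderr, "usage: %s <src> <dst>\n", argv[0]);
        return 1;
    }
    int in_fd = open(argv[1], O_RDONLY);
    int out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
    struct stat st;
    if (in_fd < 0 || out_fd < 0 || fstat(in_fd, &st) < 0) {
        perror("open/fstat");
        return 1;
    }
    if (copy_fd(in_fd, out_fd, st.st_size) < 0) {
        perror("copy");
        return 1;
    }
    close(in_fd);
    close(out_fd);
    return 0;
}
```

In the series itself, qemu-img convert does not call copy_file_range() directly: as the commit list above indicates, it goes through the new bdrv_co_copy_range API, with the file-posix driver ultimately issuing copy_file_range(2) and the iSCSI driver mapping the same request to SCSI EXTENDED COPY.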
Diffstat (limited to 'block/qcow2.c')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | block/qcow2.c | 229 |

1 file changed, 199 insertions, 30 deletions
```diff
diff --git a/block/qcow2.c b/block/qcow2.c
index c87c593e83..549fee9b69 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1761,6 +1761,39 @@ static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs,
     return status;
 }
 
+static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
+                                            QCowL2Meta **pl2meta,
+                                            bool link_l2)
+{
+    int ret = 0;
+    QCowL2Meta *l2meta = *pl2meta;
+
+    while (l2meta != NULL) {
+        QCowL2Meta *next;
+
+        if (!ret && link_l2) {
+            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
+            if (ret) {
+                goto out;
+            }
+        }
+
+        /* Take the request off the list of running requests */
+        if (l2meta->nb_clusters != 0) {
+            QLIST_REMOVE(l2meta, next_in_flight);
+        }
+
+        qemu_co_queue_restart_all(&l2meta->dependent_requests);
+
+        next = l2meta->next;
+        g_free(l2meta);
+        l2meta = next;
+    }
+out:
+    *pl2meta = l2meta;
+    return ret;
+}
+
 static coroutine_fn int qcow2_co_preadv(BlockDriverState *bs, uint64_t offset,
                                         uint64_t bytes, QEMUIOVector *qiov,
                                         int flags)
@@ -2047,24 +2080,9 @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
             }
         }
 
-        while (l2meta != NULL) {
-            QCowL2Meta *next;
-
-            ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
-            if (ret < 0) {
-                goto fail;
-            }
-
-            /* Take the request off the list of running requests */
-            if (l2meta->nb_clusters != 0) {
-                QLIST_REMOVE(l2meta, next_in_flight);
-            }
-
-            qemu_co_queue_restart_all(&l2meta->dependent_requests);
-
-            next = l2meta->next;
-            g_free(l2meta);
-            l2meta = next;
+        ret = qcow2_handle_l2meta(bs, &l2meta, true);
+        if (ret) {
+            goto fail;
         }
 
         bytes -= cur_bytes;
@@ -2075,18 +2093,7 @@ static coroutine_fn int qcow2_co_pwritev(BlockDriverState *bs, uint64_t offset,
     ret = 0;
 
 fail:
-    while (l2meta != NULL) {
-        QCowL2Meta *next;
-
-        if (l2meta->nb_clusters != 0) {
-            QLIST_REMOVE(l2meta, next_in_flight);
-        }
-        qemu_co_queue_restart_all(&l2meta->dependent_requests);
-
-        next = l2meta->next;
-        g_free(l2meta);
-        l2meta = next;
-    }
+    qcow2_handle_l2meta(bs, &l2meta, false);
 
     qemu_co_mutex_unlock(&s->lock);
 
@@ -3273,6 +3280,166 @@ static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
     return ret;
 }
 
+static int coroutine_fn
+qcow2_co_copy_range_from(BlockDriverState *bs,
+                         BdrvChild *src, uint64_t src_offset,
+                         BdrvChild *dst, uint64_t dst_offset,
+                         uint64_t bytes, BdrvRequestFlags flags)
+{
+    BDRVQcow2State *s = bs->opaque;
+    int ret;
+    unsigned int cur_bytes; /* number of bytes in current iteration */
+    BdrvChild *child = NULL;
+    BdrvRequestFlags cur_flags;
+
+    assert(!bs->encrypted);
+    qemu_co_mutex_lock(&s->lock);
+
+    while (bytes != 0) {
+        uint64_t copy_offset = 0;
+        /* prepare next request */
+        cur_bytes = MIN(bytes, INT_MAX);
+        cur_flags = flags;
+
+        ret = qcow2_get_cluster_offset(bs, src_offset, &cur_bytes, &copy_offset);
+        if (ret < 0) {
+            goto out;
+        }
+
+        switch (ret) {
+        case QCOW2_CLUSTER_UNALLOCATED:
+            if (bs->backing && bs->backing->bs) {
+                int64_t backing_length = bdrv_getlength(bs->backing->bs);
+                if (src_offset >= backing_length) {
+                    cur_flags |= BDRV_REQ_ZERO_WRITE;
+                } else {
+                    child = bs->backing;
+                    cur_bytes = MIN(cur_bytes, backing_length - src_offset);
+                    copy_offset = src_offset;
+                }
+            } else {
+                cur_flags |= BDRV_REQ_ZERO_WRITE;
+            }
+            break;
+
+        case QCOW2_CLUSTER_ZERO_PLAIN:
+        case QCOW2_CLUSTER_ZERO_ALLOC:
+            cur_flags |= BDRV_REQ_ZERO_WRITE;
+            break;
+
+        case QCOW2_CLUSTER_COMPRESSED:
+            ret = -ENOTSUP;
+            goto out;
+            break;
+
+        case QCOW2_CLUSTER_NORMAL:
+            child = bs->file;
+            copy_offset += offset_into_cluster(s, src_offset);
+            if ((copy_offset & 511) != 0) {
+                ret = -EIO;
+                goto out;
+            }
+            break;
+
+        default:
+            abort();
+        }
+        qemu_co_mutex_unlock(&s->lock);
+        ret = bdrv_co_copy_range_from(child,
+                                      copy_offset,
+                                      dst, dst_offset,
+                                      cur_bytes, cur_flags);
+        qemu_co_mutex_lock(&s->lock);
+        if (ret < 0) {
+            goto out;
+        }
+
+        bytes -= cur_bytes;
+        src_offset += cur_bytes;
+        dst_offset += cur_bytes;
+    }
+    ret = 0;
+
+out:
+    qemu_co_mutex_unlock(&s->lock);
+    return ret;
+}
+
+static int coroutine_fn
+qcow2_co_copy_range_to(BlockDriverState *bs,
+                       BdrvChild *src, uint64_t src_offset,
+                       BdrvChild *dst, uint64_t dst_offset,
+                       uint64_t bytes, BdrvRequestFlags flags)
+{
+    BDRVQcow2State *s = bs->opaque;
+    int offset_in_cluster;
+    int ret;
+    unsigned int cur_bytes; /* number of sectors in current iteration */
+    uint64_t cluster_offset;
+    uint8_t *cluster_data = NULL;
+    QCowL2Meta *l2meta = NULL;
+
+    assert(!bs->encrypted);
+    s->cluster_cache_offset = -1; /* disable compressed cache */
+
+    qemu_co_mutex_lock(&s->lock);
+
+    while (bytes != 0) {
+
+        l2meta = NULL;
+
+        offset_in_cluster = offset_into_cluster(s, dst_offset);
+        cur_bytes = MIN(bytes, INT_MAX);
+
+        /* TODO:
+         * If src->bs == dst->bs, we could simply copy by incrementing
+         * the refcnt, without copying user data.
+         * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
+        ret = qcow2_alloc_cluster_offset(bs, dst_offset, &cur_bytes,
+                                         &cluster_offset, &l2meta);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        assert((cluster_offset & 511) == 0);
+
+        ret = qcow2_pre_write_overlap_check(bs, 0,
+                cluster_offset + offset_in_cluster, cur_bytes);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        qemu_co_mutex_unlock(&s->lock);
+        ret = bdrv_co_copy_range_to(src, src_offset,
+                                    bs->file,
+                                    cluster_offset + offset_in_cluster,
+                                    cur_bytes, flags);
+        qemu_co_mutex_lock(&s->lock);
+        if (ret < 0) {
+            goto fail;
+        }
+
+        ret = qcow2_handle_l2meta(bs, &l2meta, true);
+        if (ret) {
+            goto fail;
+        }
+
+        bytes -= cur_bytes;
+        dst_offset += cur_bytes;
+    }
+    ret = 0;
+
+fail:
+    qcow2_handle_l2meta(bs, &l2meta, false);
+
+    qemu_co_mutex_unlock(&s->lock);
+
+    qemu_vfree(cluster_data);
+    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);
+
+    return ret;
+}
+
 static int qcow2_truncate(BlockDriverState *bs, int64_t offset,
                           PreallocMode prealloc, Error **errp)
 {
@@ -4521,6 +4688,8 @@ BlockDriver bdrv_qcow2 = {
 
     .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
     .bdrv_co_pdiscard       = qcow2_co_pdiscard,
+    .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
+    .bdrv_co_copy_range_to  = qcow2_co_copy_range_to,
     .bdrv_truncate          = qcow2_truncate,
     .bdrv_co_pwritev_compressed = qcow2_co_pwritev_compressed,
     .bdrv_make_empty        = qcow2_make_empty,
```