author     Kevin Wolf <kwolf@redhat.com>            2013-03-27 11:43:49 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>    2013-03-28 11:52:44 +0100
commit     ecdd5333ab9ed3f2b848066aaaef02c027b25e36
tree       40f544da54cd4aaaaf2d5c86f3f15766221de83c
parent     2c3b32d25620c26e26fd590c198ec6d9cf91da57
qcow2: Gather clusters in a looping loop
Instead of just checking once, in exactly this order, whether there are dependencies, non-COW clusters and a new allocation, this starts looping around these steps. This way we can, for example, gather non-COW clusters after new allocations as long as the host cluster offsets stay contiguous.

Once handle_dependencies() is extended so that COW areas of in-flight allocations can be overwritten, this allows us to continue gathering other clusters (we wouldn't be able to do that without this change, because we would have missed a possible second dependency in one of the next clusters).

This means that in the typical sequential write case, we can combine the COW overwrite of one cluster with the allocation of the next cluster as soon as something like Delayed COW actually gets implemented. It is only by avoiding splitting requests this way that Delayed COW actually starts improving performance noticeably.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
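As a rough illustration of the control flow this patch introduces, the following minimal, self-contained C sketch mimics the gathering loop in qcow2_alloc_cluster_offset(). It is not QEMU code: the sketch_* handlers, the CHUNK size and the alternating "already allocated / needs allocation" pattern are hypothetical stand-ins for the real dependency check (handle_dependencies()), cluster reuse (handle_copied()) and new allocation (handle_alloc()) steps. The point it shows is the one common loop head that advances start and remaining by cur_bytes, with each handler simply continuing the loop after gathering a contiguous chunk.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CHUNK (64 * 1024)

/* Hypothetical stand-in for handle_dependencies(): no overlapping
 * in-flight requests in this sketch, so cur_bytes is left untouched. */
static int sketch_handle_dependencies(uint64_t start, uint64_t *cur_bytes)
{
    (void)start;
    (void)cur_bytes;
    return 0;
}

/* Hypothetical stand-in for handle_copied(): pretend every other chunk
 * is already allocated and needs no COW. */
static int sketch_handle_copied(uint64_t start, uint64_t *cur_bytes)
{
    if ((start / CHUNK) % 2 == 0) {
        if (*cur_bytes > CHUNK) {
            *cur_bytes = CHUNK;
        }
        return 1;   /* gathered existing, contiguous cluster(s) */
    }
    return 0;
}

/* Hypothetical stand-in for handle_alloc(): allocate up to one chunk. */
static int sketch_handle_alloc(uint64_t start, uint64_t *cur_bytes)
{
    (void)start;
    if (*cur_bytes > CHUNK) {
        *cur_bytes = CHUNK;
    }
    return 1;       /* gathered newly allocated cluster(s) */
}

int main(void)
{
    uint64_t start = 0;
    uint64_t remaining = 4 * CHUNK;   /* size of the guest write request */
    uint64_t cur_bytes = 0;
    int ret;

    while (1) {
        /* Common loop head: account for whatever the previous iteration
         * gathered, then try to gather more. */
        start += cur_bytes;
        remaining -= cur_bytes;
        if (remaining == 0) {
            break;
        }
        cur_bytes = remaining;

        /* 1. Stop (or shorten) at overlapping in-flight allocations. */
        ret = sketch_handle_dependencies(start, &cur_bytes);
        if (ret < 0) {
            return 1;
        } else if (cur_bytes == 0) {
            break;
        }

        /* 2. Reuse clusters that are already allocated. */
        if (sketch_handle_copied(start, &cur_bytes)) {
            printf("reused    %" PRIu64 " bytes at guest offset %" PRIu64 "\n",
                   cur_bytes, start);
            continue;
        }

        /* 3. Otherwise allocate new clusters. */
        if (sketch_handle_alloc(start, &cur_bytes)) {
            printf("allocated %" PRIu64 " bytes at guest offset %" PRIu64 "\n",
                   cur_bytes, start);
            continue;
        }

        break;   /* nothing left to gather */
    }

    return 0;
}

In the sequential write case described above, a request that crosses an allocated and an unallocated region is gathered in consecutive loop iterations rather than being cut off at the first transition, which is what later makes combining COW with the next allocation worthwhile.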
-rw-r--r--  block/qcow2-cluster.c       | 74
-rw-r--r--  tests/qemu-iotests/044.out  |  2
2 files changed, 44 insertions, 32 deletions
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 960d446417..c71470a3db 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -770,7 +770,7 @@ out:
* must start over anyway, so consider *cur_bytes undefined.
*/
static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
- uint64_t *cur_bytes)
+ uint64_t *cur_bytes, QCowL2Meta **m)
{
BDRVQcowState *s = bs->opaque;
QCowL2Meta *old_alloc;
@@ -793,6 +793,15 @@ static int handle_dependencies(BlockDriverState *bs, uint64_t guest_offset,
bytes = 0;
}
+ /* Stop if already an l2meta exists. After yielding, it wouldn't
+ * be valid any more, so we'd have to clean up the old L2Metas
+ * and deal with requests depending on them before starting to
+ * gather new ones. Not worth the trouble. */
+ if (bytes == 0 && *m) {
+ *cur_bytes = 0;
+ return 0;
+ }
+
if (bytes == 0) {
/* Wait for the dependency to complete. We need to recheck
* the free/allocated clusters when we continue. */
@@ -1023,16 +1032,16 @@ static int handle_alloc(BlockDriverState *bs, uint64_t guest_offset,
nb_clusters = count_cow_clusters(s, nb_clusters, l2_table, l2_index);
}
+ /* This function is only called when there were no non-COW clusters, so if
+ * we can't find any unallocated or COW clusters either, something is
+ * wrong with our code. */
+ assert(nb_clusters > 0);
+
ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
if (ret < 0) {
return ret;
}
- if (nb_clusters == 0) {
- *bytes = 0;
- return 0;
- }
-
/* Allocate, if necessary at a given offset in the image file */
alloc_cluster_offset = start_of_cluster(s, *host_offset);
ret = do_alloc_cluster_offset(bs, guest_offset, &alloc_cluster_offset,
@@ -1146,8 +1155,27 @@ again:
remaining = (n_end - n_start) << BDRV_SECTOR_BITS;
cluster_offset = 0;
*host_offset = 0;
+ cur_bytes = 0;
+ *m = NULL;
while (true) {
+
+ if (!*host_offset) {
+ *host_offset = start_of_cluster(s, cluster_offset);
+ }
+
+ assert(remaining >= cur_bytes);
+
+ start += cur_bytes;
+ remaining -= cur_bytes;
+ cluster_offset += cur_bytes;
+
+ if (remaining == 0) {
+ break;
+ }
+
+ cur_bytes = remaining;
+
/*
* Now start gathering as many contiguous clusters as possible:
*
@@ -1166,12 +1194,17 @@ again:
* the right synchronisation between the in-flight request and
* the new one.
*/
- cur_bytes = remaining;
- ret = handle_dependencies(bs, start, &cur_bytes);
+ ret = handle_dependencies(bs, start, &cur_bytes, m);
if (ret == -EAGAIN) {
+ /* Currently handle_dependencies() doesn't yield if we already had
+ * an allocation. If it did, we would have to clean up the L2Meta
+ * structs before starting over. */
+ assert(*m == NULL);
goto again;
} else if (ret < 0) {
return ret;
+ } else if (cur_bytes == 0) {
+ break;
} else {
/* handle_dependencies() may have decreased cur_bytes (shortened
* the allocations below) so that the next dependency is processed
@@ -1185,24 +1218,11 @@ again:
if (ret < 0) {
return ret;
} else if (ret) {
- if (!*host_offset) {
- *host_offset = start_of_cluster(s, cluster_offset);
- }
-
- start += cur_bytes;
- remaining -= cur_bytes;
- cluster_offset += cur_bytes;
-
- cur_bytes = remaining;
+ continue;
} else if (cur_bytes == 0) {
break;
}
- /* If there is something left to allocate, do that now */
- if (remaining == 0) {
- break;
- }
-
/*
* 3. If the request still hasn't completed, allocate new clusters,
* considering any cluster_offset of steps 1c or 2.
@@ -1211,15 +1231,7 @@ again:
if (ret < 0) {
return ret;
} else if (ret) {
- if (!*host_offset) {
- *host_offset = start_of_cluster(s, cluster_offset);
- }
-
- start += cur_bytes;
- remaining -= cur_bytes;
- cluster_offset += cur_bytes;
-
- break;
+ continue;
} else {
assert(cur_bytes == 0);
break;
diff --git a/tests/qemu-iotests/044.out b/tests/qemu-iotests/044.out
index 34c25c793e..5c5aa929fb 100644
--- a/tests/qemu-iotests/044.out
+++ b/tests/qemu-iotests/044.out
@@ -1,6 +1,6 @@
No errors were found on the image.
7292415/33554432 = 21.73% allocated, 0.00% fragmented, 0.00% compressed clusters
-Image end offset: 4296447488
+Image end offset: 4296448000
.
----------------------------------------------------------------------
Ran 1 tests