author    aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-07 17:22:19 +0000
committer aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>  2009-01-07 17:22:19 +0000
commit    ff4b91c2f7e51dab148aba4bf43c2f39f219e495 (patch)
tree      d3efd92ba3a27a85f4490ffcba7cbfba2fad4614
parent    ab5ccbd65de369d34f7bf2996530876790efef43 (diff)
qcow2: Fix cluster allocation (Kevin Wolf)
When allocating multiple clusters at once, the qcow2 implementation tries to
find as many physically contiguous clusters as possible to allow larger
writes. This search includes both clusters which are already allocated in the
right place and clusters which are still free. If the range to allocate spans
clusters in patterns like "10 allocated, then 10 free, then again 10
allocated", it is only checked that each chunk of allocated clusters is
contiguous in itself.

However, what is actually needed is to have _all_ allocated clusters
contiguous, starting at the first cluster of the allocation and spanning
multiple such chunks. This patch changes the check so that each offset is
compared not to the offset of the first cluster in its own chunk but to that
of the first cluster in the whole allocation.

I haven't seen it happen, but without this fix data corruption on qcow2
images is possible.

Signed-off-by: Kevin Wolf <kwolf@suse.de>
Acked-by: Gleb Natapov <gleb@redhat.com>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@6213 c046a42c-6fe2-441c-8c8c-71466251a162
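To make the bug concrete, here is a minimal, self-contained sketch of the
fixed check. This is not QEMU code: the be64_to_cpu() conversion is dropped
(the toy table holds host-endian values), the QCOW_OFLAG_COPIED handling is
omitted, and CLUSTER_SIZE, the table contents and all offsets are invented
example values. It builds the "allocated, then free, then again allocated"
pattern from the description above, with the second allocated chunk
contiguous in itself but not contiguous with the first.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define CLUSTER_SIZE 512   /* made-up cluster size for the example */

/* The fixed check: every entry is compared against the offset derived
 * from the first cluster of the WHOLE allocation (l2_table[0]), while
 * 'start' only selects where in the table the scan resumes. */
static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
        const uint64_t *l2_table, uint64_t start, uint64_t mask)
{
    uint64_t i;
    uint64_t offset = l2_table[0] & ~mask;

    if (!offset)
        return 0;

    for (i = start; i < start + nb_clusters; i++)
        if (offset + i * cluster_size != (l2_table[i] & ~mask))
            break;

    return i - start;
}

int main(void)
{
    /* Clusters 0-1 allocated at 0x10000, clusters 2-3 free, clusters
     * 4-5 allocated at an unrelated offset (contiguous only in
     * themselves). */
    uint64_t l2_table[6] = {
        0x10000, 0x10000 + CLUSTER_SIZE,   /* chunk 1 */
        0, 0,                              /* free    */
        0x90000, 0x90000 + CLUSTER_SIZE,   /* chunk 2 */
    };

    /* Mimic the loop in alloc_cluster_offset(): resume at index i but
     * keep passing the table base, so l2_table[0] stays the anchor. */
    uint64_t i = count_contiguous_clusters(6, CLUSTER_SIZE, l2_table, 0, 0);
    i += 2; /* the two free clusters, counted by the free-cluster scan */
    i += count_contiguous_clusters(6 - i, CLUSTER_SIZE, l2_table, i, 0);

    /* Prints 4: chunk 2 is rejected. The pre-patch version, called as
     * count_contiguous_clusters(6 - i, CLUSTER_SIZE, &l2_table[i], 0),
     * would anchor on l2_table[4] itself, accept chunk 2 and report
     * all 6 clusters as usable -- the corruption this commit prevents. */
    printf("usable clusters: %" PRIu64 "\n", i);
    return 0;
}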
-rw-r--r--  block-qcow2.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/block-qcow2.c b/block-qcow2.c
index d8ac647283..707109e449 100644
--- a/block-qcow2.c
+++ b/block-qcow2.c
@@ -615,7 +615,7 @@ static int size_to_clusters(BDRVQcowState *s, int64_t size)
 }
 
 static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
-        uint64_t *l2_table, uint64_t mask)
+        uint64_t *l2_table, uint64_t start, uint64_t mask)
 {
     int i;
     uint64_t offset = be64_to_cpu(l2_table[0]) & ~mask;
@@ -623,11 +623,11 @@ static int count_contiguous_clusters(uint64_t nb_clusters, int cluster_size,
     if (!offset)
         return 0;
 
-    for (i = 0; i < nb_clusters; i++)
+    for (i = start; i < start + nb_clusters; i++)
         if (offset + i * cluster_size != (be64_to_cpu(l2_table[i]) & ~mask))
             break;
 
-    return i;
+    return (i - start);
 }
 
 static int count_contiguous_free_clusters(uint64_t nb_clusters, uint64_t *l2_table)
@@ -714,7 +714,7 @@ static uint64_t get_cluster_offset(BlockDriverState *bs,
     } else {
         /* how many allocated clusters ? */
         c = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], QCOW_OFLAG_COPIED);
+                &l2_table[l2_index], 0, QCOW_OFLAG_COPIED);
     }
 
     nb_available = (c * s->cluster_sectors);
@@ -968,7 +968,7 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
     if (cluster_offset & QCOW_OFLAG_COPIED) {
         nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
-                &l2_table[l2_index], 0);
+                &l2_table[l2_index], 0, 0);
 
         cluster_offset &= ~QCOW_OFLAG_COPIED;
         m->nb_clusters = 0;
@@ -985,7 +985,7 @@ static uint64_t alloc_cluster_offset(BlockDriverState *bs,
     while (i < nb_clusters) {
         i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
-                &l2_table[l2_index + i], 0);
+                &l2_table[l2_index], i, 0);
 
         if(be64_to_cpu(l2_table[l2_index + i]))
             break;