author Alberto Garcia <berto@igalia.com> 2020-07-10 18:13:10 +0200
committer Max Reitz <mreitz@redhat.com> 2020-08-25 08:33:20 +0200
commit a6841a2de66fa44fe52ed996b70f9fb9f7bd6ca7 (patch)
tree e8491a57771b690e9613224f4c128a7f84605aaa /block/qcow2-cluster.c
parent bf4a66eed423694cfa0b3b5692211264022e2b98 (diff)
qcow2: Add subcluster support to qcow2_co_pwrite_zeroes()
This works now at the subcluster level and pwrite_zeroes_alignment is
updated accordingly.

qcow2_cluster_zeroize() is turned into qcow2_subcluster_zeroize() with
the following changes:

   - The request can now be subcluster-aligned.

   - The cluster-aligned body of the request is still zeroized using
     zero_in_l2_slice() as before.

   - The subcluster-aligned head and tail of the request are zeroized
     with the new zero_l2_subclusters() function.

There is just one thing to take into account for a possible future
improvement: compressed clusters cannot be partially zeroized so
zero_l2_subclusters() on the head or the tail can return -ENOTSUP.
This makes the caller repeat the *complete* request and write actual
zeroes to disk. This is sub-optimal because

   1) if the head area was compressed we would still be able to use
      the fast path for the body and possibly the tail.

   2) if the tail area was compressed we are writing zeroes to the
      head and the body areas, which are already zeroized.

Signed-off-by: Alberto Garcia <berto@igalia.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Message-Id: <17e05e2ee7e12f10dcf012da81e83ebe27eb3bef.1594396418.git.berto@igalia.com>
Signed-off-by: Max Reitz <mreitz@redhat.com>
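As an illustration of the head/body/tail split described above, here is a
minimal, self-contained sketch (not the patch itself): CLUSTER_SIZE and
SUBCLUSTER_SIZE are assumed example values (64 KiB clusters, 32 subclusters
each), the helper macros are simplified stand-ins for QEMU's MIN/MAX/ROUND_UP
and start_of_cluster(), and the image-end special case for the tail is left
out.

/*
 * Sketch of the request partitioning done by qcow2_subcluster_zeroize().
 * All sizes here are hypothetical example values; the real code derives
 * them from BDRVQcow2State and also forces tail = 0 when the request
 * reaches the end of the image (omitted here).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CLUSTER_SIZE    65536u                /* assumed 64 KiB clusters */
#define SUBCLUSTER_SIZE (CLUSTER_SIZE / 32u)  /* 32 subclusters per cluster */

#define ROUND_UP(x, a)       (((x) + (a) - 1) / (a) * (a))
#define START_OF_CLUSTER(x)  ((x) / CLUSTER_SIZE * CLUSTER_SIZE)
#define MIN(a, b)            ((a) < (b) ? (a) : (b))
#define MAX(a, b)            ((a) > (b) ? (a) : (b))

int main(void)
{
    /* A subcluster-aligned request starting 3 subclusters into one
     * cluster and ending 5 subclusters into another one. */
    uint64_t offset = 3 * SUBCLUSTER_SIZE;
    uint64_t end_offset = 3 * CLUSTER_SIZE + 5 * SUBCLUSTER_SIZE;

    /* Head: from offset up to the next cluster boundary (or up to
     * end_offset if the request fits inside a single cluster). */
    uint64_t head = MIN(end_offset, ROUND_UP(offset, CLUSTER_SIZE)) - offset;
    offset += head;

    /* Tail: from the last cluster boundary up to end_offset; zero when
     * end_offset is already cluster-aligned. */
    uint64_t tail = end_offset - MAX(offset, START_OF_CLUSTER(end_offset));
    end_offset -= tail;

    printf("head: %" PRIu64 " bytes -> zero_l2_subclusters()\n", head);
    printf("body: %" PRIu64 " bytes -> zero_in_l2_slice()\n",
           end_offset - offset);
    printf("tail: %" PRIu64 " bytes -> zero_l2_subclusters()\n", tail);
    return 0;
}

For this example request the head comes out as 29 subclusters, the body as
2 whole clusters and the tail as 5 subclusters, which is exactly the
partitioning that qcow2_subcluster_zeroize() hands to zero_l2_subclusters()
and zero_in_l2_slice() in the diff below.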
Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r-- block/qcow2-cluster.c | 81
1 file changed, 75 insertions(+), 6 deletions(-)
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 1e84bd8e2e..9d349d61c6 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -2016,12 +2016,59 @@ static int zero_in_l2_slice(BlockDriverState *bs, uint64_t offset,
return nb_clusters;
}
-int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
- uint64_t bytes, int flags)
+static int zero_l2_subclusters(BlockDriverState *bs, uint64_t offset,
+ unsigned nb_subclusters)
+{
+ BDRVQcow2State *s = bs->opaque;
+ uint64_t *l2_slice;
+ uint64_t old_l2_bitmap, l2_bitmap;
+ int l2_index, ret, sc = offset_to_sc_index(s, offset);
+
+ /* For full clusters use zero_in_l2_slice() instead */
+ assert(nb_subclusters > 0 && nb_subclusters < s->subclusters_per_cluster);
+ assert(sc + nb_subclusters <= s->subclusters_per_cluster);
+ assert(offset_into_subcluster(s, offset) == 0);
+
+ ret = get_cluster_table(bs, offset, &l2_slice, &l2_index);
+ if (ret < 0) {
+ return ret;
+ }
+
+ switch (qcow2_get_cluster_type(bs, get_l2_entry(s, l2_slice, l2_index))) {
+ case QCOW2_CLUSTER_COMPRESSED:
+ ret = -ENOTSUP; /* We cannot partially zeroize compressed clusters */
+ goto out;
+ case QCOW2_CLUSTER_NORMAL:
+ case QCOW2_CLUSTER_UNALLOCATED:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ old_l2_bitmap = l2_bitmap = get_l2_bitmap(s, l2_slice, l2_index);
+
+ l2_bitmap |= QCOW_OFLAG_SUB_ZERO_RANGE(sc, sc + nb_subclusters);
+ l2_bitmap &= ~QCOW_OFLAG_SUB_ALLOC_RANGE(sc, sc + nb_subclusters);
+
+ if (old_l2_bitmap != l2_bitmap) {
+ set_l2_bitmap(s, l2_slice, l2_index, l2_bitmap);
+ qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_slice);
+ }
+
+ ret = 0;
+out:
+ qcow2_cache_put(s->l2_table_cache, (void **) &l2_slice);
+
+ return ret;
+}
+
+int qcow2_subcluster_zeroize(BlockDriverState *bs, uint64_t offset,
+ uint64_t bytes, int flags)
{
BDRVQcow2State *s = bs->opaque;
uint64_t end_offset = offset + bytes;
uint64_t nb_clusters;
+ unsigned head, tail;
int64_t cleared;
int ret;
@@ -2036,8 +2083,8 @@ int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
}
/* Caller must pass aligned values, except at image end */
- assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
- assert(QEMU_IS_ALIGNED(end_offset, s->cluster_size) ||
+ assert(offset_into_subcluster(s, offset) == 0);
+ assert(offset_into_subcluster(s, end_offset) == 0 ||
end_offset >= bs->total_sectors << BDRV_SECTOR_BITS);
/*
@@ -2052,11 +2099,26 @@ int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
return -ENOTSUP;
}
- /* Each L2 slice is handled by its own loop iteration */
- nb_clusters = size_to_clusters(s, bytes);
+ head = MIN(end_offset, ROUND_UP(offset, s->cluster_size)) - offset;
+ offset += head;
+
+ tail = (end_offset >= bs->total_sectors << BDRV_SECTOR_BITS) ? 0 :
+ end_offset - MAX(offset, start_of_cluster(s, end_offset));
+ end_offset -= tail;
s->cache_discards = true;
+ if (head) {
+ ret = zero_l2_subclusters(bs, offset - head,
+ size_to_subclusters(s, head));
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ /* Each L2 slice is handled by its own loop iteration */
+ nb_clusters = size_to_clusters(s, end_offset - offset);
+
while (nb_clusters > 0) {
cleared = zero_in_l2_slice(bs, offset, nb_clusters, flags);
if (cleared < 0) {
@@ -2068,6 +2130,13 @@ int qcow2_cluster_zeroize(BlockDriverState *bs, uint64_t offset,
offset += (cleared * s->cluster_size);
}
+ if (tail) {
+ ret = zero_l2_subclusters(bs, end_offset, size_to_subclusters(s, tail));
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
ret = 0;
fail:
s->cache_discards = false;
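The bitmap update in zero_l2_subclusters() above (the
QCOW_OFLAG_SUB_ZERO_RANGE / QCOW_OFLAG_SUB_ALLOC_RANGE lines) can be
sketched stand-alone, assuming the extended L2 entry layout from the qcow2
specification: a 64-bit bitmap whose low 32 bits are per-subcluster
allocation bits and whose high 32 bits are per-subcluster zero bits. The
range helpers below are simplified stand-ins for QEMU's macros, not the
actual definitions.

/*
 * Sketch of the L2 bitmap manipulation in zero_l2_subclusters():
 * for subclusters [sc, sc + n), set the "zero" bits and clear the
 * "alloc" bits.  Assumes the extended L2 layout (alloc = bits 0-31,
 * zero = bits 32-63); the helpers stand in for QEMU's
 * QCOW_OFLAG_SUB_*_RANGE macros.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t sub_alloc_range(unsigned first, unsigned end)
{
    /* Allocation bits [first, end); valid for end <= 32. */
    return (1ull << end) - (1ull << first);
}

static uint64_t sub_zero_range(unsigned first, unsigned end)
{
    /* Same bit range, shifted into the zero half of the bitmap. */
    return sub_alloc_range(first, end) << 32;
}

int main(void)
{
    uint64_t bitmap = 0x00000000000000ffull; /* subclusters 0-7 allocated */
    unsigned sc = 2, n = 4;                  /* zeroize subclusters 2..5 */

    bitmap |=  sub_zero_range(sc, sc + n);   /* mark them as zero */
    bitmap &= ~sub_alloc_range(sc, sc + n);  /* drop their alloc bits */

    /* Prints 0x0000003c000000c3: zero bits for 2..5, alloc bits for
     * 0, 1, 6, 7 only. */
    printf("bitmap: 0x%016" PRIx64 "\n", bitmap);
    return 0;
}

As in the patch, the bitmap is only written back (and the L2 cache entry
marked dirty) when the new value actually differs from the old one.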