author     Max Reitz <mreitz@redhat.com>  2013-09-03 10:09:52 +0200
committer  Kevin Wolf <kwolf@redhat.com>  2013-09-12 10:12:46 +0200
commit     32b6444d23d0ff618d73e5b766600cd258066169 (patch)
tree       423a268eaa2281d71e0d7d1d75ade22e153510aa /block/qcow2-cluster.c
parent     e7108feaace8e02b3a4bf010448fc2744f753381 (diff)
qcow2-cluster: Expand zero clusters
Add functionality for expanding zero clusters. This is necessary for
downgrading the image version to one without zero cluster support.

For non-backed images, this function may also just discard zero clusters
instead of truly expanding them.

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r--  block/qcow2-cluster.c  | 233
1 file changed, 233 insertions(+), 0 deletions(-)
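
Both new functions track zero expansion through a shared byte-granularity
bitmap: one bit per host cluster, set once that cluster has been filled with
zeroes and linked from an L2 table. As a minimal sketch of the indexing
convention (the helper names below are hypothetical; the patch open-codes
these operations inline):

    #include <stdint.h>
    #include <stdbool.h>

    /* One bit per host cluster; bit n lives in byte n / 8, position n % 8. */
    static bool cluster_is_expanded(const uint8_t *bitmap, int64_t cluster_index)
    {
        return bitmap[cluster_index / 8] & (1 << (cluster_index % 8));
    }

    static void mark_cluster_expanded(uint8_t *bitmap, int64_t cluster_index)
    {
        bitmap[cluster_index / 8] |= 1 << (cluster_index % 8);
    }

qcow2_expand_zero_clusters() below sizes this bitmap as (nb_clusters + 7) / 8
bytes, i.e. the bit count rounded up to whole bytes.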
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index b0d688eb99..738ff73c1d 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -1497,3 +1497,236 @@ fail:
return ret;
}
+
+/*
+ * Expands all zero clusters in a specific L1 table (or deallocates them, for
+ * non-backed non-pre-allocated zero clusters).
+ *
+ * expanded_clusters is a bitmap where every bit corresponds to one cluster in
+ * the image file; a bit gets set if the corresponding cluster has been used for
+ * zero expansion (i.e., has been filled with zeroes and is referenced from an
+ * L2 table). nb_clusters contains the total cluster count of the image file,
+ * i.e., the number of bits in expanded_clusters.
+ */
+static int expand_zero_clusters_in_l1(BlockDriverState *bs, uint64_t *l1_table,
+ int l1_size, uint8_t *expanded_clusters,
+ uint64_t nb_clusters)
+{
+ BDRVQcowState *s = bs->opaque;
+ bool is_active_l1 = (l1_table == s->l1_table);
+ uint64_t *l2_table = NULL;
+ int ret;
+ int i, j;
+
+ if (!is_active_l1) {
+        /* inactive L2 tables are not kept in the L2 table cache, so they
+         * need a locally allocated buffer to be loaded into from disk */
+ l2_table = qemu_blockalign(bs, s->cluster_size);
+ }
+
+ for (i = 0; i < l1_size; i++) {
+ uint64_t l2_offset = l1_table[i] & L1E_OFFSET_MASK;
+ bool l2_dirty = false;
+
+ if (!l2_offset) {
+ /* unallocated */
+ continue;
+ }
+
+ if (is_active_l1) {
+ /* get active L2 tables from cache */
+ ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset,
+ (void **)&l2_table);
+ } else {
+ /* load inactive L2 tables from disk */
+ ret = bdrv_read(bs->file, l2_offset / BDRV_SECTOR_SIZE,
+ (void *)l2_table, s->cluster_sectors);
+ }
+ if (ret < 0) {
+ goto fail;
+ }
+
+ for (j = 0; j < s->l2_size; j++) {
+ uint64_t l2_entry = be64_to_cpu(l2_table[j]);
+ int64_t offset = l2_entry & L2E_OFFSET_MASK, cluster_index;
+ int cluster_type = qcow2_get_cluster_type(l2_entry);
+
+ if (cluster_type == QCOW2_CLUSTER_NORMAL) {
+ cluster_index = offset >> s->cluster_bits;
+ assert((cluster_index >= 0) && (cluster_index < nb_clusters));
+ if (expanded_clusters[cluster_index / 8] &
+ (1 << (cluster_index % 8))) {
+                    /* Probably a shared L2 table; this cluster was a zero
+                     * cluster which has been expanded, so its refcount most
+                     * likely requires an update. */
+ ret = qcow2_update_cluster_refcount(bs, cluster_index, 1,
+ QCOW2_DISCARD_NEVER);
+ if (ret < 0) {
+ goto fail;
+ }
+                    /* Since we just increased the refcount, the COPIED flag
+                     * may no longer be valid; clear it. */
+ l2_table[j] = cpu_to_be64(l2_entry & ~QCOW_OFLAG_COPIED);
+ l2_dirty = true;
+ }
+ continue;
+            } else if (cluster_type != QCOW2_CLUSTER_ZERO) {
+ continue;
+ }
+
+ if (!offset) {
+ /* not preallocated */
+ if (!bs->backing_hd) {
+ /* not backed; therefore we can simply deallocate the
+ * cluster */
+ l2_table[j] = 0;
+ l2_dirty = true;
+ continue;
+ }
+
+ offset = qcow2_alloc_clusters(bs, s->cluster_size);
+ if (offset < 0) {
+ ret = offset;
+ goto fail;
+ }
+ }
+
+ ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT,
+ offset, s->cluster_size);
+ if (ret < 0) {
+ qcow2_free_clusters(bs, offset, s->cluster_size,
+ QCOW2_DISCARD_ALWAYS);
+ goto fail;
+ }
+
+ ret = bdrv_write_zeroes(bs->file, offset / BDRV_SECTOR_SIZE,
+ s->cluster_sectors);
+ if (ret < 0) {
+ qcow2_free_clusters(bs, offset, s->cluster_size,
+ QCOW2_DISCARD_ALWAYS);
+ goto fail;
+ }
+
+ l2_table[j] = cpu_to_be64(offset | QCOW_OFLAG_COPIED);
+ l2_dirty = true;
+
+ cluster_index = offset >> s->cluster_bits;
+ assert((cluster_index >= 0) && (cluster_index < nb_clusters));
+ expanded_clusters[cluster_index / 8] |= 1 << (cluster_index % 8);
+ }
+
+ if (is_active_l1) {
+ if (l2_dirty) {
+ qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
+ qcow2_cache_depends_on_flush(s->l2_table_cache);
+ }
+ ret = qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
+ if (ret < 0) {
+ l2_table = NULL;
+ goto fail;
+ }
+ } else {
+ if (l2_dirty) {
+ ret = qcow2_pre_write_overlap_check(bs, QCOW2_OL_DEFAULT &
+ ~(QCOW2_OL_INACTIVE_L2 | QCOW2_OL_ACTIVE_L2), l2_offset,
+ s->cluster_size);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ ret = bdrv_write(bs->file, l2_offset / BDRV_SECTOR_SIZE,
+ (void *)l2_table, s->cluster_sectors);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+ }
+ }
+
+ ret = 0;
+
+fail:
+ if (l2_table) {
+ if (!is_active_l1) {
+ qemu_vfree(l2_table);
+ } else {
+ if (ret < 0) {
+ qcow2_cache_put(bs, s->l2_table_cache, (void **)&l2_table);
+ } else {
+ ret = qcow2_cache_put(bs, s->l2_table_cache,
+ (void **)&l2_table);
+ }
+ }
+ }
+ return ret;
+}
+
+/*
+ * For backed images, expands all zero clusters on the image. For non-backed
+ * images, deallocates all non-pre-allocated zero clusters (and claims the
+ * allocation for pre-allocated ones). This is important for downgrading to a
+ * qcow2 version which doesn't yet support metadata zero clusters.
+ */
+int qcow2_expand_zero_clusters(BlockDriverState *bs)
+{
+ BDRVQcowState *s = bs->opaque;
+ uint64_t *l1_table = NULL;
+ int cluster_to_sector_bits = s->cluster_bits - BDRV_SECTOR_BITS;
+ uint64_t nb_clusters;
+ uint8_t *expanded_clusters;
+ int ret;
+ int i, j;
+
+ nb_clusters = (bs->total_sectors + (1 << cluster_to_sector_bits) - 1)
+ >> cluster_to_sector_bits;
+ expanded_clusters = g_malloc0((nb_clusters + 7) / 8);
+
+ ret = expand_zero_clusters_in_l1(bs, s->l1_table, s->l1_size,
+ expanded_clusters, nb_clusters);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ /* Inactive L1 tables may point to active L2 tables - therefore it is
+ * necessary to flush the L2 table cache before trying to access the L2
+ * tables pointed to by inactive L1 entries (else we might try to expand
+ * zero clusters that have already been expanded); furthermore, it is also
+ * necessary to empty the L2 table cache, since it may contain tables which
+ * are now going to be modified directly on disk, bypassing the cache.
+ * qcow2_cache_empty() does both for us. */
+ ret = qcow2_cache_empty(bs, s->l2_table_cache);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ for (i = 0; i < s->nb_snapshots; i++) {
+ int l1_sectors = (s->snapshots[i].l1_size * sizeof(uint64_t) +
+ BDRV_SECTOR_SIZE - 1) / BDRV_SECTOR_SIZE;
+
+ l1_table = g_realloc(l1_table, l1_sectors * BDRV_SECTOR_SIZE);
+
+ ret = bdrv_read(bs->file, s->snapshots[i].l1_table_offset /
+ BDRV_SECTOR_SIZE, (void *)l1_table, l1_sectors);
+ if (ret < 0) {
+ goto fail;
+ }
+
+ for (j = 0; j < s->snapshots[i].l1_size; j++) {
+ be64_to_cpus(&l1_table[j]);
+ }
+
+ ret = expand_zero_clusters_in_l1(bs, l1_table, s->snapshots[i].l1_size,
+ expanded_clusters, nb_clusters);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
+ ret = 0;
+
+fail:
+ g_free(expanded_clusters);
+ g_free(l1_table);
+ return ret;
+}
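
For context, the doc comment above notes that zero cluster expansion matters
when downgrading to a qcow2 version without zero cluster support. A caller
would look roughly like this hypothetical sketch (the actual downgrade code is
not part of this patch, and a real downgrade has more to take care of, e.g.
feature bits; qcow2_update_header() is the existing header rewrite helper):

    static int downgrade_sketch(BlockDriverState *bs)
    {
        BDRVQcowState *s = bs->opaque;
        int ret;

        /* qcow2 v2 cannot represent zero clusters, so expand (or discard)
         * all of them before rewriting the header */
        ret = qcow2_expand_zero_clusters(bs);
        if (ret < 0) {
            return ret;
        }

        s->qcow_version = 2;
        return qcow2_update_header(bs);
    }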