author     Kevin Wolf <kwolf@redhat.com>           2013-06-19 13:44:18 +0200
committer  Stefan Hajnoczi <stefanha@redhat.com>   2013-06-24 10:25:17 +0200
commit     6cfcb9b8b91d303ab51b78623f2299b5288d2d51
tree       d546c5428126fffa019295e4c1fb05a56100d12a /block/qcow2-cluster.c
parent     492fdc6fbe17b2d45878e813e980f782ac260c30
qcow2: Add refcount update reason to all callers
This adds a refcount update reason to all callers of update_refcounts(), so
that a follow-up patch can use this information to decide whether clusters
that reach a refcount of 0 should be discarded in the image file.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
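The QCOW2_DISCARD_* values passed in the hunks below come from a discard-reason
enum declared in block/qcow2.h as part of this series. The stand-alone C sketch
that follows is not QEMU code; the names it uses (DiscardReason, Image,
free_cluster, discard_mask) are assumptions chosen only to illustrate the
pattern: callers report *why* a cluster is being freed, and a single policy
check can then decide, once the refcount reaches zero, whether the space should
also be discarded in the image file.

/* Minimal sketch of the "refcount update reason" pattern. The enum values
 * mirror the QCOW2_DISCARD_* constants seen in the diff; everything else is
 * hypothetical and exists only to show how the extra argument is used. */
#include <stdint.h>
#include <stdio.h>

typedef enum {
    DISCARD_NEVER = 0,   /* e.g. COW'd clusters the next write will reuse */
    DISCARD_ALWAYS,
    DISCARD_REQUEST,     /* freed because of a guest discard request */
    DISCARD_SNAPSHOT,
    DISCARD_OTHER,       /* internal metadata such as L1/L2 tables */
    DISCARD_MAX
} DiscardReason;

/* Hypothetical image state: one refcount per cluster plus a policy bitmask
 * saying which reasons are allowed to trigger an on-disk discard. */
typedef struct {
    uint16_t refcount[16];
    unsigned discard_mask;   /* one bit per DiscardReason */
} Image;

static void free_cluster(Image *img, int cluster, DiscardReason reason)
{
    if (img->refcount[cluster] == 0) {
        fprintf(stderr, "cluster %d already free\n", cluster);
        return;
    }
    img->refcount[cluster]--;

    /* The reason only matters once the refcount hits zero: it decides
     * whether the cluster is merely marked free or also discarded. */
    if (img->refcount[cluster] == 0) {
        if (img->discard_mask & (1u << reason)) {
            printf("cluster %d: refcount 0, discarding (reason %d)\n",
                   cluster, reason);
        } else {
            printf("cluster %d: refcount 0, kept allocated in the file\n",
                   cluster);
        }
    }
}

int main(void)
{
    Image img = { .refcount = { [3] = 1, [7] = 1 },
                  .discard_mask = 1u << DISCARD_REQUEST };

    free_cluster(&img, 3, DISCARD_REQUEST); /* policy allows the discard */
    free_cluster(&img, 7, DISCARD_NEVER);   /* freed but not discarded */
    return 0;
}

The design point is that callers state intent (guest discard request, snapshot
deletion, internal metadata churn) rather than policy; the policy decision can
then be made in one place, which is what the follow-up patch referenced in the
commit message sets out to do for qcow2.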
Diffstat (limited to 'block/qcow2-cluster.c')
-rw-r--r--  block/qcow2-cluster.c | 19
1 file changed, 13 insertions(+), 6 deletions(-)
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index 76f30e5c26..3191d6b3e6 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -98,14 +98,16 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
         goto fail;
     }
     g_free(s->l1_table);
-    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t));
+    qcow2_free_clusters(bs, s->l1_table_offset, s->l1_size * sizeof(uint64_t),
+                        QCOW2_DISCARD_OTHER);
     s->l1_table_offset = new_l1_table_offset;
     s->l1_table = new_l1_table;
     s->l1_size = new_l1_size;
     return 0;
  fail:
     g_free(new_l1_table);
-    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2);
+    qcow2_free_clusters(bs, new_l1_table_offset, new_l1_size2,
+                        QCOW2_DISCARD_OTHER);
     return ret;
 }
@@ -548,7 +550,8 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
         /* Then decrease the refcount of the old table */
         if (l2_offset) {
-            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
+            qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t),
+                                QCOW2_DISCARD_OTHER);
         }
     }
@@ -715,10 +718,14 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
     /*
      * If this was a COW, we need to decrease the refcount of the old cluster.
      * Also flush bs->file to get the right order for L2 and refcount update.
+     *
+     * Don't discard clusters that reach a refcount of 0 (e.g. compressed
+     * clusters), the next write will reuse them anyway.
      */
     if (j != 0) {
         for (i = 0; i < j; i++) {
-            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1);
+            qcow2_free_any_clusters(bs, be64_to_cpu(old_cluster[i]), 1,
+                                    QCOW2_DISCARD_NEVER);
         }
     }
@@ -1339,7 +1346,7 @@ static int discard_single_l2(BlockDriverState *bs, uint64_t offset,
         l2_table[l2_index + i] = cpu_to_be64(0);

         /* Then decrease the refcount */
-        qcow2_free_any_clusters(bs, old_offset, 1);
+        qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
     }

     ret = qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
@@ -1415,7 +1422,7 @@ static int zero_single_l2(BlockDriverState *bs, uint64_t offset,
         qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);
         if (old_offset & QCOW_OFLAG_COMPRESSED) {
             l2_table[l2_index + i] = cpu_to_be64(QCOW_OFLAG_ZERO);
-            qcow2_free_any_clusters(bs, old_offset, 1);
+            qcow2_free_any_clusters(bs, old_offset, 1, QCOW2_DISCARD_REQUEST);
         } else {
             l2_table[l2_index + i] |= cpu_to_be64(QCOW_OFLAG_ZERO);
         }