author     Stefan Hajnoczi <stefanha@redhat.com>    2013-02-07 17:15:02 +0100
committer  Kevin Wolf <kwolf@redhat.com>            2013-02-22 21:21:09 +0100
commit     fba31bae2d776fb4134186a830a252523df7933f (patch)
tree       1f86478145230a7e3fb53a9df13f9f24e5142056 /block/qcow2-refcount.c
parent     801f70445293ec8ed2d78fd92313c2f71fa48ac9 (diff)
qcow2: record fragmentation statistics during check
The qemu-img check command can display fragmentation statistics:

 * Total number of clusters in virtual disk
 * Number of allocated clusters
 * Number of fragmented clusters

This patch adds fragmentation statistics support to qcow2.  Compressed
and normal clusters count as allocated.  Zero clusters are not counted
as allocated unless their L2 entry has a non-zero offset
(e.g. preallocation).  Only the current L1 table counts towards the
statistics - snapshots are ignored.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
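The heart of the patch is a contiguity check: an allocated guest cluster is counted as fragmented when its host offset does not immediately follow the previously allocated cluster. The sketch below illustrates that idea outside of QEMU; the offsets array, cluster_size value, and frag_stats struct are hypothetical stand-ins for the L2 table walk and the BlockFragInfo fields, not the patch's actual code.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the BlockFragInfo counters the patch fills in. */
struct frag_stats {
    uint64_t allocated_clusters;
    uint64_t fragmented_clusters;
};

/* Count allocated and fragmented clusters from a list of host offsets.
 * An offset of 0 means "unallocated"; a cluster counts as fragmented when
 * its host offset does not follow the previously allocated cluster. */
static void count_fragmentation(const uint64_t *offsets, int n,
                                uint64_t cluster_size,
                                struct frag_stats *st)
{
    uint64_t next_contiguous_offset = 0;

    for (int i = 0; i < n; i++) {
        if (offsets[i] == 0) {
            continue; /* unallocated, not counted */
        }
        st->allocated_clusters++;
        if (next_contiguous_offset && offsets[i] != next_contiguous_offset) {
            st->fragmented_clusters++;
        }
        next_contiguous_offset = offsets[i] + cluster_size;
    }
}

int main(void)
{
    /* Four guest clusters; the third one breaks contiguity in the host file. */
    uint64_t offsets[] = { 0x10000, 0x20000, 0x50000, 0x60000 };
    struct frag_stats st = { 0 };

    count_fragmentation(offsets, 4, 0x10000, &st);
    printf("allocated=%llu fragmented=%llu\n",
           (unsigned long long)st.allocated_clusters,
           (unsigned long long)st.fragmented_clusters);
    return 0;
}
```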
Diffstat (limited to 'block/qcow2-refcount.c')
-rw-r--r--    block/qcow2-refcount.c    25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 4eec4b1228..771b7b2850 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -917,6 +917,7 @@ static void inc_refcounts(BlockDriverState *bs,
/* Flags for check_refcounts_l1() and check_refcounts_l2() */
enum {
CHECK_OFLAG_COPIED = 0x1, /* check QCOW_OFLAG_COPIED matches refcount */
+ CHECK_FRAG_INFO = 0x2, /* update BlockFragInfo counters */
};
/*
@@ -933,6 +934,7 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
{
BDRVQcowState *s = bs->opaque;
uint64_t *l2_table, l2_entry;
+ uint64_t next_contiguous_offset = 0;
int i, l2_size, nb_csectors, refcount;
/* Read L2 table from disk */
@@ -963,6 +965,17 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
l2_entry &= s->cluster_offset_mask;
inc_refcounts(bs, res, refcount_table, refcount_table_size,
l2_entry & ~511, nb_csectors * 512);
+
+ if (flags & CHECK_FRAG_INFO) {
+ res->bfi.allocated_clusters++;
+
+ /* Compressed clusters are fragmented by nature. Since they
+ * take up sub-sector space but we only have sector granularity
+ * I/O we need to re-read the same sectors even for adjacent
+ * compressed clusters.
+ */
+ res->bfi.fragmented_clusters++;
+ }
break;
case QCOW2_CLUSTER_ZERO:
@@ -990,6 +1003,15 @@ static int check_refcounts_l2(BlockDriverState *bs, BdrvCheckResult *res,
}
}
+ if (flags & CHECK_FRAG_INFO) {
+ res->bfi.allocated_clusters++;
+ if (next_contiguous_offset &&
+ offset != next_contiguous_offset) {
+ res->bfi.fragmented_clusters++;
+ }
+ next_contiguous_offset = offset + s->cluster_size;
+ }
+
/* Mark cluster as used */
inc_refcounts(bs, res, refcount_table,refcount_table_size,
offset, s->cluster_size);
@@ -1125,6 +1147,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
size = bdrv_getlength(bs->file);
nb_clusters = size_to_clusters(s, size);
+ res->bfi.total_clusters = nb_clusters;
refcount_table = g_malloc0(nb_clusters * sizeof(uint16_t));
/* header */
@@ -1134,7 +1157,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
/* current L1 table */
ret = check_refcounts_l1(bs, res, refcount_table, nb_clusters,
s->l1_table_offset, s->l1_size,
- CHECK_OFLAG_COPIED);
+ CHECK_OFLAG_COPIED | CHECK_FRAG_INFO);
if (ret < 0) {
goto fail;
}
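
The counters recorded here are later summarized by qemu-img check. As a hedged illustration only, the helper below shows one way a caller could turn the three BlockFragInfo fields the patch populates into percentages; it is not the actual qemu-img reporting code.

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: derive a summary from the counters the check fills in.
 * total, allocated and fragmented correspond to the res->bfi fields set in
 * qcow2_check_refcounts() and check_refcounts_l2(). */
static void print_frag_summary(uint64_t total, uint64_t allocated,
                               uint64_t fragmented)
{
    double alloc_pct = total ? 100.0 * allocated / total : 0.0;
    double frag_pct  = allocated ? 100.0 * fragmented / allocated : 0.0;

    printf("%llu/%llu = %.2f%% allocated, %.2f%% fragmented\n",
           (unsigned long long)allocated, (unsigned long long)total,
           alloc_pct, frag_pct);
}
```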