Diffstat (limited to 'block')
-rw-r--r--  block/qcow2-cache.c     |  25
-rw-r--r--  block/qcow2-cluster.c   |  16
-rw-r--r--  block/qcow2-refcount.c  |  57
-rw-r--r--  block/qcow2.c           |  29
-rw-r--r--  block/qcow2.h           |   8
-rw-r--r--  block/qed-check.c       |   2
-rw-r--r--  block/qed.c             |   5
-rw-r--r--  block/raw-posix.c       |  98
-rw-r--r--  block/raw.c             |   8
-rw-r--r--  block/rbd.c             |  19
-rw-r--r--  block/sheepdog.c        |   9
-rw-r--r--  block/stream.c          | 109
-rw-r--r--  block/vdi.c             |   7
13 files changed, 222 insertions(+), 170 deletions(-)
diff --git a/block/qcow2-cache.c b/block/qcow2-cache.c
index 710d4b1828..2d4322a8dd 100644
--- a/block/qcow2-cache.c
+++ b/block/qcow2-cache.c
@@ -40,11 +40,9 @@ struct Qcow2Cache {
struct Qcow2Cache* depends;
int size;
bool depends_on_flush;
- bool writethrough;
};
-Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
- bool writethrough)
+Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables)
{
BDRVQcowState *s = bs->opaque;
Qcow2Cache *c;
@@ -53,7 +51,6 @@ Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
c = g_malloc0(sizeof(*c));
c->size = num_tables;
c->entries = g_malloc0(sizeof(*c->entries) * num_tables);
- c->writethrough = writethrough;
for (i = 0; i < c->size; i++) {
c->entries[i].table = qemu_blockalign(bs, s->cluster_size);
@@ -307,12 +304,7 @@ found:
*table = NULL;
assert(c->entries[i].ref >= 0);
-
- if (c->writethrough) {
- return qcow2_cache_entry_flush(bs, c, i);
- } else {
- return 0;
- }
+ return 0;
}
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
@@ -329,16 +321,3 @@ void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table)
found:
c->entries[i].dirty = true;
}
-
-bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c,
- bool enable)
-{
- bool old = c->writethrough;
-
- if (!old && enable) {
- qcow2_cache_flush(bs, c);
- }
-
- c->writethrough = enable;
- return old;
-}
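
With writethrough mode removed, qcow2_cache_put() no longer flushes on its own; every user of the metadata caches works in writeback fashion and must call qcow2_cache_flush() explicitly wherever ordering against the image file matters. A minimal sketch of the expected caller pattern (error paths trimmed; the qcow2_cache_get() call and the table update are illustrative, the other calls appear in this series):

    uint64_t *l2_table;
    int ret;

    /* Load (or hit) the cached table for this offset */
    ret = qcow2_cache_get(bs, s->l2_table_cache, l2_offset, (void **) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Update the in-cache copy and mark the entry dirty */
    l2_table[l2_index] = cpu_to_be64(cluster_offset | QCOW_OFLAG_COPIED);
    qcow2_cache_entry_mark_dirty(s->l2_table_cache, l2_table);

    /* Release the reference; this no longer implies a flush */
    ret = qcow2_cache_put(bs, s->l2_table_cache, (void **) &l2_table);
    if (ret < 0) {
        return ret;
    }

    /* Write dirty entries back only where metadata ordering requires it */
    ret = qcow2_cache_flush(bs, s->l2_table_cache);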
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index c173fcd488..d7e0e19d9c 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -540,7 +540,6 @@ static int get_cluster_table(BlockDriverState *bs, uint64_t offset,
if (l2_offset) {
qcow2_free_clusters(bs, l2_offset, s->l2_size * sizeof(uint64_t));
}
- l2_offset = s->l1_table[l1_index] & L1E_OFFSET_MASK;
}
/* find the cluster offset for the given disk offset */
@@ -643,11 +642,10 @@ int qcow2_alloc_cluster_link_l2(BlockDriverState *bs, QCowL2Meta *m)
}
if (m->nb_available & (s->cluster_sectors - 1)) {
- uint64_t end = m->nb_available & ~(uint64_t)(s->cluster_sectors - 1);
cow = true;
qemu_co_mutex_unlock(&s->lock);
- ret = copy_sectors(bs, start_sect + end, cluster_offset + (end << 9),
- m->nb_available - end, s->cluster_sectors);
+ ret = copy_sectors(bs, start_sect, cluster_offset, m->nb_available,
+ align_offset(m->nb_available, s->cluster_sectors));
qemu_co_mutex_lock(&s->lock);
if (ret < 0)
goto err;
@@ -949,8 +947,16 @@ again:
/* save info needed for meta data update */
if (nb_clusters > 0) {
+ /*
+ * requested_sectors: Number of sectors from the start of the first
+ * newly allocated cluster to the end of the (possibly shortened
+ * before) write request.
+ *
+ * avail_sectors: Number of sectors from the start of the first
+ * newly allocated to the end of the last newly allocated cluster.
+ */
int requested_sectors = n_end - keep_clusters * s->cluster_sectors;
- int avail_sectors = (keep_clusters + nb_clusters)
+ int avail_sectors = nb_clusters
<< (s->cluster_bits - BDRV_SECTOR_BITS);
*m = (QCowL2Meta) {
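
A worked example of the new bookkeeping, with hypothetical numbers and 64k clusters (cluster_sectors = 128): assume keep_clusters = 1, nb_clusters = 2 and n_end = 200.

    int requested_sectors = 200 - 1 * 128;   /* = 72: sectors still to be written
                                                into the newly allocated area    */
    int avail_sectors     = 2 * 128;         /* = 256: room in the new clusters
                                                (the old formula gave 384)       */
    /* Assuming nb_available is the minimum of the two, as in the QCowL2Meta
     * initializer this hunk ends in, it is 72 here.  72 is not cluster
     * aligned, so the tail of the last new cluster, sectors [72, 128), is
     * copied from the old data -- exactly the copy_sectors() call rewritten
     * above. */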
diff --git a/block/qcow2-refcount.c b/block/qcow2-refcount.c
index 812c93c5c7..66f391597c 100644
--- a/block/qcow2-refcount.c
+++ b/block/qcow2-refcount.c
@@ -367,7 +367,7 @@ static int alloc_refcount_block(BlockDriverState *bs,
}
for(i = 0; i < table_size; i++) {
- cpu_to_be64s(&new_table[i]);
+ be64_to_cpus(&new_table[i]);
}
/* Hook up the new refcount table in the qcow2 header */
@@ -726,13 +726,6 @@ int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t old_offset, old_l2_offset;
int i, j, l1_modified = 0, nb_csectors, refcount;
int ret;
- bool old_l2_writethrough, old_refcount_writethrough;
-
- /* Switch caches to writeback mode during update */
- old_l2_writethrough =
- qcow2_cache_set_writethrough(bs, s->l2_table_cache, false);
- old_refcount_writethrough =
- qcow2_cache_set_writethrough(bs, s->refcount_block_cache, false);
l2_table = NULL;
l1_table = NULL;
@@ -856,11 +849,6 @@ fail:
qcow2_cache_put(bs, s->l2_table_cache, (void**) &l2_table);
}
- /* Enable writethrough cache mode again */
- qcow2_cache_set_writethrough(bs, s->l2_table_cache, old_l2_writethrough);
- qcow2_cache_set_writethrough(bs, s->refcount_block_cache,
- old_refcount_writethrough);
-
/* Update L1 only if it isn't deleted anyway (addend = -1) */
if (addend >= 0 && l1_modified) {
for(i = 0; i < l1_size; i++)
@@ -1122,11 +1110,12 @@ fail:
* Returns 0 if no errors are found, the number of errors in case the image is
* detected as corrupted, and -errno when an internal error occurred.
*/
-int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res)
+int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
+ BdrvCheckMode fix)
{
BDRVQcowState *s = bs->opaque;
- int64_t size;
- int nb_clusters, refcount1, refcount2, i;
+ int64_t size, i;
+ int nb_clusters, refcount1, refcount2;
QCowSnapshot *sn;
uint16_t *refcount_table;
int ret;
@@ -1170,14 +1159,15 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res)
/* Refcount blocks are cluster aligned */
if (offset & (s->cluster_size - 1)) {
- fprintf(stderr, "ERROR refcount block %d is not "
+ fprintf(stderr, "ERROR refcount block %" PRId64 " is not "
"cluster aligned; refcount table entry corrupted\n", i);
res->corruptions++;
continue;
}
if (cluster >= nb_clusters) {
- fprintf(stderr, "ERROR refcount block %d is outside image\n", i);
+ fprintf(stderr, "ERROR refcount block %" PRId64
+ " is outside image\n", i);
res->corruptions++;
continue;
}
@@ -1186,7 +1176,8 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res)
inc_refcounts(bs, res, refcount_table, nb_clusters,
offset, s->cluster_size);
if (refcount_table[cluster] != 1) {
- fprintf(stderr, "ERROR refcount block %d refcount=%d\n",
+ fprintf(stderr, "ERROR refcount block %" PRId64
+ " refcount=%d\n",
i, refcount_table[cluster]);
res->corruptions++;
}
@@ -1197,7 +1188,7 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res)
for(i = 0; i < nb_clusters; i++) {
refcount1 = get_refcount(bs, i);
if (refcount1 < 0) {
- fprintf(stderr, "Can't get refcount for cluster %d: %s\n",
+ fprintf(stderr, "Can't get refcount for cluster %" PRId64 ": %s\n",
i, strerror(-refcount1));
res->check_errors++;
continue;
@@ -1205,9 +1196,31 @@ int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res)
refcount2 = refcount_table[i];
if (refcount1 != refcount2) {
- fprintf(stderr, "%s cluster %d refcount=%d reference=%d\n",
- refcount1 < refcount2 ? "ERROR" : "Leaked",
+
+ /* Check if we're allowed to fix the mismatch */
+ int *num_fixed = NULL;
+ if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
+ num_fixed = &res->leaks_fixed;
+ } else if (refcount1 < refcount2 && (fix & BDRV_FIX_ERRORS)) {
+ num_fixed = &res->corruptions_fixed;
+ }
+
+ fprintf(stderr, "%s cluster %" PRId64 " refcount=%d reference=%d\n",
+ num_fixed != NULL ? "Repairing" :
+ refcount1 < refcount2 ? "ERROR" :
+ "Leaked",
i, refcount1, refcount2);
+
+ if (num_fixed) {
+ ret = update_refcount(bs, i << s->cluster_bits, 1,
+ refcount2 - refcount1);
+ if (ret >= 0) {
+ (*num_fixed)++;
+ continue;
+ }
+ }
+
+ /* And if we couldn't, print an error */
if (refcount1 < refcount2) {
res->corruptions++;
} else {
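
A worked example of the new repair path, with hypothetical values: suppose the image records refcount1 = 2 for cluster i, but walking all L1/L2 tables and snapshots only finds refcount2 = 1 reference. That is a leak, and with BDRV_FIX_LEAKS set it is repaired by adjusting the refcount by refcount2 - refcount1:

    /* Hypothetical mismatch for cluster i */
    int refcount1 = 2;   /* stored in the refcount table            */
    int refcount2 = 1;   /* references actually found by the check  */

    if (refcount1 > refcount2 && (fix & BDRV_FIX_LEAKS)) {
        /* addend = 1 - 2 = -1: drop the stray reference */
        ret = update_refcount(bs, i << s->cluster_bits, 1,
                              refcount2 - refcount1);
    }
    /* The opposite case (refcount1 < refcount2) is a corruption and is only
     * touched when BDRV_FIX_ERRORS is set; the same call then raises the
     * refcount instead. */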
diff --git a/block/qcow2.c b/block/qcow2.c
index c2e49cded3..2c1cd0a446 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -220,7 +220,6 @@ static int qcow2_open(BlockDriverState *bs, int flags)
int len, i, ret = 0;
QCowHeader header;
uint64_t ext_end;
- bool writethrough;
ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
if (ret < 0) {
@@ -298,14 +297,6 @@ static int qcow2_open(BlockDriverState *bs, int flags)
goto fail;
}
- if (!bs->read_only && s->autoclear_features != 0) {
- s->autoclear_features = 0;
- ret = qcow2_update_header(bs);
- if (ret < 0) {
- goto fail;
- }
- }
-
/* Check support for various header values */
if (header.refcount_order != 4) {
report_unsupported(bs, "%d bit reference counts",
@@ -367,10 +358,8 @@ static int qcow2_open(BlockDriverState *bs, int flags)
}
/* alloc L2 table/refcount block cache */
- writethrough = ((flags & BDRV_O_CACHE_WB) == 0);
- s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE, writethrough);
- s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE,
- writethrough);
+ s->l2_table_cache = qcow2_cache_create(bs, L2_CACHE_SIZE);
+ s->refcount_block_cache = qcow2_cache_create(bs, REFCOUNT_CACHE_SIZE);
s->cluster_cache = g_malloc(s->cluster_size);
/* one more sector for decompressed data alignment */
@@ -411,6 +400,15 @@ static int qcow2_open(BlockDriverState *bs, int flags)
goto fail;
}
+ /* Clear unknown autoclear feature bits */
+ if (!bs->read_only && s->autoclear_features != 0) {
+ s->autoclear_features = 0;
+ ret = qcow2_update_header(bs);
+ if (ret < 0) {
+ goto fail;
+ }
+ }
+
/* Initialise locks */
qemu_co_mutex_init(&s->lock);
@@ -1470,9 +1468,10 @@ static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
}
-static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result)
+static int qcow2_check(BlockDriverState *bs, BdrvCheckResult *result,
+ BdrvCheckMode fix)
{
- return qcow2_check_refcounts(bs, result);
+ return qcow2_check_refcounts(bs, result, fix);
}
#if 0
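
Seen from the caller, the fix argument is a bitmask, so a read-only check and a repair run differ only in the flags handed down through the wrapper above. A minimal sketch, assuming the generic bdrv_check() entry point takes the same BdrvCheckMode argument that this series adds to the drivers:

    BdrvCheckResult res = {0};
    int ret;

    /* Report problems only */
    ret = bdrv_check(bs, &res, 0);

    /* Repair what the driver supports */
    ret = bdrv_check(bs, &res, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
    if (ret == -ENOTSUP) {
        /* format driver can detect but not repair (see vdi below) */
    }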
diff --git a/block/qcow2.h b/block/qcow2.h
index 93567f6451..455b6d7cfe 100644
--- a/block/qcow2.h
+++ b/block/qcow2.h
@@ -261,7 +261,8 @@ void qcow2_free_any_clusters(BlockDriverState *bs,
int qcow2_update_snapshot_refcount(BlockDriverState *bs,
int64_t l1_table_offset, int l1_size, int addend);
-int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res);
+int qcow2_check_refcounts(BlockDriverState *bs, BdrvCheckResult *res,
+ BdrvCheckMode fix);
/* qcow2-cluster.c functions */
int qcow2_grow_l1_table(BlockDriverState *bs, int min_size, bool exact_size);
@@ -296,11 +297,8 @@ void qcow2_free_snapshots(BlockDriverState *bs);
int qcow2_read_snapshots(BlockDriverState *bs);
/* qcow2-cache.c functions */
-Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables,
- bool writethrough);
+Qcow2Cache *qcow2_cache_create(BlockDriverState *bs, int num_tables);
int qcow2_cache_destroy(BlockDriverState* bs, Qcow2Cache *c);
-bool qcow2_cache_set_writethrough(BlockDriverState *bs, Qcow2Cache *c,
- bool enable);
void qcow2_cache_entry_mark_dirty(Qcow2Cache *c, void *table);
int qcow2_cache_flush(BlockDriverState *bs, Qcow2Cache *c);
diff --git a/block/qed-check.c b/block/qed-check.c
index 94327ff5b3..5edf60775b 100644
--- a/block/qed-check.c
+++ b/block/qed-check.c
@@ -87,6 +87,7 @@ static unsigned int qed_check_l2_table(QEDCheck *check, QEDTable *table)
if (!qed_check_cluster_offset(s, offset)) {
if (check->fix) {
table->offsets[i] = 0;
+ check->result->corruptions_fixed++;
} else {
check->result->corruptions++;
}
@@ -127,6 +128,7 @@ static int qed_check_l1_table(QEDCheck *check, QEDTable *table)
/* Clear invalid offset */
if (check->fix) {
table->offsets[i] = 0;
+ check->result->corruptions_fixed++;
} else {
check->result->corruptions++;
}
diff --git a/block/qed.c b/block/qed.c
index 30a31f907f..ab5972466c 100644
--- a/block/qed.c
+++ b/block/qed.c
@@ -1517,11 +1517,12 @@ static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
bdrv_qed_open(bs, bs->open_flags);
}
-static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result)
+static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
+ BdrvCheckMode fix)
{
BDRVQEDState *s = bs->opaque;
- return qed_check(s, result, false);
+ return qed_check(s, result, !!fix);
}
static QEMUOptionParameter qed_create_options[] = {
diff --git a/block/raw-posix.c b/block/raw-posix.c
index 03fcfcc031..bf7700a238 100644
--- a/block/raw-posix.c
+++ b/block/raw-posix.c
@@ -52,6 +52,10 @@
#include <sys/param.h>
#include <linux/cdrom.h>
#include <linux/fd.h>
+#include <linux/fs.h>
+#endif
+#ifdef CONFIG_FIEMAP
+#include <linux/fiemap.h>
#endif
#if defined (__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/disk.h>
@@ -583,6 +587,99 @@ static int raw_create(const char *filename, QEMUOptionParameter *options)
return result;
}
+/*
+ * Returns true iff the specified sector is present in the disk image. Drivers
+ * not implementing the functionality are assumed to not support backing files,
+ * hence all their sectors are reported as allocated.
+ *
+ * If 'sector_num' is beyond the end of the disk image the return value is 0
+ * and 'pnum' is set to 0.
+ *
+ * 'pnum' is set to the number of sectors (including and immediately following
+ * the specified sector) that are known to be in the same
+ * allocated/unallocated state.
+ *
+ * 'nb_sectors' is the max value 'pnum' should be set to. If nb_sectors goes
+ * beyond the end of the disk image it will be clamped.
+ */
+static int coroutine_fn raw_co_is_allocated(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors, int *pnum)
+{
+ BDRVRawState *s = bs->opaque;
+ off_t start, data, hole;
+ int ret;
+
+ ret = fd_open(bs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ start = sector_num * BDRV_SECTOR_SIZE;
+#ifdef CONFIG_FIEMAP
+ struct {
+ struct fiemap fm;
+ struct fiemap_extent fe;
+ } f;
+ f.fm.fm_start = start;
+ f.fm.fm_length = (int64_t)nb_sectors * BDRV_SECTOR_SIZE;
+ f.fm.fm_flags = 0;
+ f.fm.fm_extent_count = 1;
+ f.fm.fm_reserved = 0;
+ if (ioctl(s->fd, FS_IOC_FIEMAP, &f) == -1) {
+ /* Assume everything is allocated. */
+ *pnum = nb_sectors;
+ return 1;
+ }
+
+ if (f.fm.fm_mapped_extents == 0) {
+ /* No extents found, data is beyond f.fm.fm_start + f.fm.fm_length.
+ * f.fm.fm_start + f.fm.fm_length must be clamped to the file size!
+ */
+ off_t length = lseek(s->fd, 0, SEEK_END);
+ hole = f.fm.fm_start;
+ data = MIN(f.fm.fm_start + f.fm.fm_length, length);
+ } else {
+ data = f.fe.fe_logical;
+ hole = f.fe.fe_logical + f.fe.fe_length;
+ }
+#elif defined SEEK_HOLE && defined SEEK_DATA
+ hole = lseek(s->fd, start, SEEK_HOLE);
+ if (hole == -1) {
+ /* -ENXIO indicates that sector_num was past the end of the file.
+ * There is a virtual hole there. */
+ assert(errno != -ENXIO);
+
+ /* Most likely EINVAL. Assume everything is allocated. */
+ *pnum = nb_sectors;
+ return 1;
+ }
+
+ if (hole > start) {
+ data = start;
+ } else {
+ /* On a hole. We need another syscall to find its end. */
+ data = lseek(s->fd, start, SEEK_DATA);
+ if (data == -1) {
+ data = lseek(s->fd, 0, SEEK_END);
+ }
+ }
+#else
+ *pnum = nb_sectors;
+ return 1;
+#endif
+
+ if (data <= start) {
+ /* On a data extent, compute sectors to the end of the extent. */
+ *pnum = MIN(nb_sectors, (hole - start) / BDRV_SECTOR_SIZE);
+ return 1;
+ } else {
+ /* On a hole, compute sectors to the beginning of the next extent. */
+ *pnum = MIN(nb_sectors, (data - start) / BDRV_SECTOR_SIZE);
+ return 0;
+ }
+}
+
#ifdef CONFIG_XFS
static int xfs_discard(BDRVRawState *s, int64_t sector_num, int nb_sectors)
{
@@ -634,6 +731,7 @@ static BlockDriver bdrv_file = {
.bdrv_close = raw_close,
.bdrv_create = raw_create,
.bdrv_co_discard = raw_co_discard,
+ .bdrv_co_is_allocated = raw_co_is_allocated,
.bdrv_aio_readv = raw_aio_readv,
.bdrv_aio_writev = raw_aio_writev,
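
Outside of QEMU the SEEK_HOLE/SEEK_DATA branch is easy to exercise on its own; the standalone sketch below (plain POSIX/Linux, hypothetical file name, no QEMU APIs) probes one byte offset in the same order as raw_co_is_allocated() above:

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "test.img";
    off_t start = argc > 2 ? (off_t)atoll(argv[2]) : 0;

    int fd = open(path, O_RDONLY);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    /* Nearest hole at or after 'start'; EOF always counts as a hole. */
    off_t hole = lseek(fd, start, SEEK_HOLE);
    if (hole == -1) {
        /* ENXIO: 'start' is past EOF; EINVAL: no SEEK_HOLE support here. */
        perror("lseek(SEEK_HOLE)");
        close(fd);
        return 1;
    }

    if (hole > start) {
        /* 'start' lies in a data extent that runs up to 'hole'. */
        printf("data at %lld, next hole at %lld\n",
               (long long)start, (long long)hole);
    } else {
        /* 'start' lies in a hole; find where data resumes (or EOF). */
        off_t data = lseek(fd, start, SEEK_DATA);
        if (data == -1) {
            printf("hole at %lld, no data before EOF\n", (long long)start);
        } else {
            printf("hole at %lld, next data at %lld\n",
                   (long long)start, (long long)data);
        }
    }
    close(fd);
    return 0;
}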
diff --git a/block/raw.c b/block/raw.c
index 7086e314a6..09d9b4878b 100644
--- a/block/raw.c
+++ b/block/raw.c
@@ -25,6 +25,13 @@ static void raw_close(BlockDriverState *bs)
{
}
+static int coroutine_fn raw_co_is_allocated(BlockDriverState *bs,
+ int64_t sector_num,
+ int nb_sectors, int *pnum)
+{
+ return bdrv_co_is_allocated(bs->file, sector_num, nb_sectors, pnum);
+}
+
static int64_t raw_getlength(BlockDriverState *bs)
{
return bdrv_getlength(bs->file);
@@ -108,6 +115,7 @@ static BlockDriver bdrv_raw = {
.bdrv_co_readv = raw_co_readv,
.bdrv_co_writev = raw_co_writev,
+ .bdrv_co_is_allocated = raw_co_is_allocated,
.bdrv_co_discard = raw_co_discard,
.bdrv_probe = raw_probe,
diff --git a/block/rbd.c b/block/rbd.c
index 1280d66d3c..eebc334462 100644
--- a/block/rbd.c
+++ b/block/rbd.c
@@ -476,6 +476,25 @@ static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
s->snap = g_strdup(snap_buf);
}
+ /*
+ * Fallback to more conservative semantics if setting cache
+ * options fails. Ignore errors from setting rbd_cache because the
+ * only possible error is that the option does not exist, and
+ * librbd defaults to no caching. If write through caching cannot
+ * be set up, fall back to no caching.
+ */
+ if (flags & BDRV_O_NOCACHE) {
+ rados_conf_set(s->cluster, "rbd_cache", "false");
+ } else {
+ rados_conf_set(s->cluster, "rbd_cache", "true");
+ if (!(flags & BDRV_O_CACHE_WB)) {
+ r = rados_conf_set(s->cluster, "rbd_cache_max_dirty", "0");
+ if (r < 0) {
+ rados_conf_set(s->cluster, "rbd_cache", "false");
+ }
+ }
+ }
+
if (strstr(conf, "conf=") == NULL) {
/* try default location, but ignore failure */
rados_conf_read_file(s->cluster, NULL);
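
The comment above boils down to a three-way mapping from QEMU cache flags to librbd settings. A condensed sketch of that decision (the helper name is illustrative; rados_conf_set() is the real librados call, and the BDRV_O_* flags come from QEMU's block layer):

#include <rados/librados.h>

/* Illustrative helper only: errors from rados_conf_set() are ignored
 * except where losing write-through semantics would be unsafe. */
static void qemu_rbd_apply_cache_flags(rados_t cluster, int flags)
{
    if (flags & BDRV_O_NOCACHE) {
        rados_conf_set(cluster, "rbd_cache", "false");       /* cache=none */
    } else if (flags & BDRV_O_CACHE_WB) {
        rados_conf_set(cluster, "rbd_cache", "true");        /* writeback  */
    } else {
        /* writethrough: cache enabled, but no dirty data allowed */
        rados_conf_set(cluster, "rbd_cache", "true");
        if (rados_conf_set(cluster, "rbd_cache_max_dirty", "0") < 0) {
            rados_conf_set(cluster, "rbd_cache", "false");   /* fall back  */
        }
    }
}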
diff --git a/block/sheepdog.c b/block/sheepdog.c
index f46ca8fb69..8877f4528d 100644
--- a/block/sheepdog.c
+++ b/block/sheepdog.c
@@ -522,8 +522,8 @@ static int send_req(int sockfd, SheepdogReq *hdr, void *data,
return ret;
}
-static int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
- unsigned int *wlen)
+static coroutine_fn int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
+ unsigned int *wlen)
{
int ret;
@@ -540,6 +540,7 @@ static int send_co_req(int sockfd, SheepdogReq *hdr, void *data,
return ret;
}
+
static int do_req(int sockfd, SheepdogReq *hdr, void *data,
unsigned int *wlen, unsigned int *rlen)
{
@@ -576,8 +577,8 @@ out:
return ret;
}
-static int do_co_req(int sockfd, SheepdogReq *hdr, void *data,
- unsigned int *wlen, unsigned int *rlen)
+static coroutine_fn int do_co_req(int sockfd, SheepdogReq *hdr, void *data,
+ unsigned int *wlen, unsigned int *rlen)
{
int ret;
diff --git a/block/stream.c b/block/stream.c
index 8e5832273b..37c46525d2 100644
--- a/block/stream.c
+++ b/block/stream.c
@@ -13,6 +13,7 @@
#include "trace.h"
#include "block_int.h"
+#include "qemu/ratelimit.h"
enum {
/*
@@ -25,34 +26,6 @@ enum {
#define SLICE_TIME 100000000ULL /* ns */
-typedef struct {
- int64_t next_slice_time;
- uint64_t slice_quota;
- uint64_t dispatched;
-} RateLimit;
-
-static int64_t ratelimit_calculate_delay(RateLimit *limit, uint64_t n)
-{
- int64_t now = qemu_get_clock_ns(rt_clock);
-
- if (limit->next_slice_time < now) {
- limit->next_slice_time = now + SLICE_TIME;
- limit->dispatched = 0;
- }
- if (limit->dispatched == 0 || limit->dispatched + n <= limit->slice_quota) {
- limit->dispatched += n;
- return 0;
- } else {
- limit->dispatched = n;
- return limit->next_slice_time - now;
- }
-}
-
-static void ratelimit_set_speed(RateLimit *limit, uint64_t speed)
-{
- limit->slice_quota = speed / (1000000000ULL / SLICE_TIME);
-}
-
typedef struct StreamBlockJob {
BlockJob common;
RateLimit limit;
@@ -98,67 +71,6 @@ static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
top->backing_hd = base;
}
-/*
- * Given an image chain: [BASE] -> [INTER1] -> [INTER2] -> [TOP]
- *
- * Return true if the given sector is allocated in top.
- * Return false if the given sector is allocated in intermediate images.
- * Return true otherwise.
- *
- * 'pnum' is set to the number of sectors (including and immediately following
- * the specified sector) that are known to be in the same
- * allocated/unallocated state.
- *
- */
-static int coroutine_fn is_allocated_base(BlockDriverState *top,
- BlockDriverState *base,
- int64_t sector_num,
- int nb_sectors, int *pnum)
-{
- BlockDriverState *intermediate;
- int ret, n;
-
- ret = bdrv_co_is_allocated(top, sector_num, nb_sectors, &n);
- if (ret) {
- *pnum = n;
- return ret;
- }
-
- /*
- * Is the unallocated chunk [sector_num, n] also
- * unallocated between base and top?
- */
- intermediate = top->backing_hd;
-
- while (intermediate != base) {
- int pnum_inter;
-
- ret = bdrv_co_is_allocated(intermediate, sector_num, nb_sectors,
- &pnum_inter);
- if (ret < 0) {
- return ret;
- } else if (ret) {
- *pnum = pnum_inter;
- return 0;
- }
-
- /*
- * [sector_num, nb_sectors] is unallocated on top but intermediate
- * might have
- *
- * [sector_num+x, nr_sectors] allocated.
- */
- if (n > pnum_inter) {
- n = pnum_inter;
- }
-
- intermediate = intermediate->backing_hd;
- }
-
- *pnum = n;
- return 1;
-}
-
static void coroutine_fn stream_run(void *opaque)
{
StreamBlockJob *s = opaque;
@@ -189,6 +101,7 @@ static void coroutine_fn stream_run(void *opaque)
for (sector_num = 0; sector_num < end; sector_num += n) {
uint64_t delay_ns = 0;
+ bool copy;
wait:
/* Note that even when no rate limit is applied we need to yield
@@ -199,10 +112,20 @@ wait:
break;
}
- ret = is_allocated_base(bs, base, sector_num,
- STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
+ ret = bdrv_co_is_allocated(bs, sector_num,
+ STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n);
+ if (ret == 1) {
+ /* Allocated in the top, no need to copy. */
+ copy = false;
+ } else {
+ /* Copy if allocated in the intermediate images. Limit to the
+ * known-unallocated area [sector_num, sector_num+n). */
+ ret = bdrv_co_is_allocated_above(bs->backing_hd, base,
+ sector_num, n, &n);
+ copy = (ret == 1);
+ }
trace_stream_one_iteration(s, sector_num, n, ret);
- if (ret == 0) {
+ if (ret >= 0 && copy) {
if (s->common.speed) {
delay_ns = ratelimit_calculate_delay(&s->limit, n);
if (delay_ns > 0) {
@@ -248,7 +171,7 @@ static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp)
error_set(errp, QERR_INVALID_PARAMETER, "speed");
return;
}
- ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE);
+ ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}
static BlockJobType stream_job_type = {
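
The throttling helper that used to live in this file is now the shared RateLimit from qemu/ratelimit.h; note that the speed is converted to sectors per second before it is handed over. A minimal usage sketch (loop body elided):

    RateLimit limit;
    uint64_t delay_ns;

    /* 'speed' is in bytes/s; the limiter here counts sectors */
    ratelimit_set_speed(&limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);

    for (sector_num = 0; sector_num < end; sector_num += n) {
        /* ... decide whether the next n sectors need copying ... */
        delay_ns = ratelimit_calculate_delay(&limit, n);
        if (delay_ns > 0) {
            /* over quota for this slice: sleep/yield for delay_ns, retry */
        }
        /* ... copy ... */
    }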
diff --git a/block/vdi.c b/block/vdi.c
index 119d3c74da..57325d65c4 100644
--- a/block/vdi.c
+++ b/block/vdi.c
@@ -277,7 +277,8 @@ static void vdi_header_print(VdiHeader *header)
}
#endif
-static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res)
+static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res,
+ BdrvCheckMode fix)
{
/* TODO: additional checks possible. */
BDRVVdiState *s = (BDRVVdiState *)bs->opaque;
@@ -286,6 +287,10 @@ static int vdi_check(BlockDriverState *bs, BdrvCheckResult *res)
uint32_t *bmap;
logout("\n");
+ if (fix) {
+ return -ENOTSUP;
+ }
+
bmap = g_malloc(s->header.blocks_in_image * sizeof(uint32_t));
memset(bmap, 0xff, s->header.blocks_in_image * sizeof(uint32_t));
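
For formats that can detect problems but not repair them, the vdi change above is the whole contract: refuse any repair request up front and otherwise just fill in the result counters. A bare-bones sketch of such a .bdrv_check handler (driver name and the found_* values are hypothetical; the result fields are the ones used throughout this diff):

static int blkfmt_check(BlockDriverState *bs, BdrvCheckResult *res,
                        BdrvCheckMode fix)
{
    /* Hypothetical outcome of a metadata scan */
    int found_corruptions = 0;
    int found_leaks = 0;

    if (fix) {
        /* qemu-img check -r: repairing is not implemented for this format */
        return -ENOTSUP;
    }

    res->corruptions += found_corruptions;
    res->leaks += found_leaks;
    return 0;
}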