Diffstat (limited to 'block/block-copy.c')
-rw-r--r--  block/block-copy.c  52
1 file changed, 51 insertions(+), 1 deletion(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index e83870ff81..5d0076ac93 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -21,12 +21,14 @@
#include "qemu/units.h"
#include "qemu/coroutine.h"
#include "block/aio_task.h"
+#include "qemu/error-report.h"
#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
+#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)
typedef enum {
COPY_READ_WRITE_CLUSTER,
@@ -342,14 +344,57 @@ void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
}
}
+static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
+ Error **errp)
+{
+ int ret;
+ BlockDriverInfo bdi;
+ bool target_does_cow = bdrv_backing_chain_next(target);
+
+ /*
+ * If there is no backing file on the target, we cannot rely on COW if our
+ * backup cluster size is smaller than the target cluster size. Even for
+ * targets with a backing file, try to avoid COW if possible.
+ */
+ ret = bdrv_get_info(target, &bdi);
+ if (ret == -ENOTSUP && !target_does_cow) {
+ /* Cluster size is not defined */
+ warn_report("The target block device doesn't provide "
+ "information about the block size and it doesn't have a "
+ "backing file. The default block size of %u bytes is "
+ "used. If the actual block size of the target exceeds "
+ "this default, the backup may be unusable",
+ BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
+ return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
+ } else if (ret < 0 && !target_does_cow) {
+ error_setg_errno(errp, -ret,
+ "Couldn't determine the cluster size of the target image, "
+ "which has no backing file");
+ error_append_hint(errp,
+ "Aborting, since this may create an unusable destination image\n");
+ return ret;
+ } else if (ret < 0 && target_does_cow) {
+ /* Not fatal; just trudge on ahead. */
+ return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
+ }
+
+ return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
+}
+
BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
- int64_t cluster_size, bool use_copy_range,
+ bool use_copy_range,
bool compress, Error **errp)
{
BlockCopyState *s;
+ int64_t cluster_size;
BdrvDirtyBitmap *copy_bitmap;
bool is_fleecing;
+ cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
+ if (cluster_size < 0) {
+ return NULL;
+ }
+
copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
errp);
if (!copy_bitmap) {
@@ -960,6 +1005,11 @@ BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
return s->copy_bitmap;
}
+int64_t block_copy_cluster_size(BlockCopyState *s)
+{
+ return s->cluster_size;
+}
+
void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
qatomic_set(&s->skip_unallocated, skip);
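
Usage note (reviewer sketch, not part of the patch): with this change a caller
no longer passes a cluster size into block_copy_state_new(); the size is
computed from the target by block_copy_calculate_cluster_size() and can be
queried back with block_copy_cluster_size(). A minimal caller-side sketch,
assuming the source/target BdrvChild pointers and errp come from the
surrounding job code; the helper name below is illustrative only:

    /* Hypothetical caller, e.g. in a backup job's setup path. */
    static BlockCopyState *example_setup_copy(BdrvChild *source, BdrvChild *target,
                                              Error **errp)
    {
        BlockCopyState *bcs;
        int64_t cluster_size;

        /* The cluster size is now derived from the target inside block-copy. */
        bcs = block_copy_state_new(source, target, false, false, errp);
        if (!bcs) {
            return NULL;    /* errp already set, e.g. if bdrv_get_info() failed */
        }

        /* Callers that still need the value query it back. */
        cluster_size = block_copy_cluster_size(bcs);
        (void)cluster_size;

        return bcs;
    }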