author    Peter Maydell <peter.maydell@linaro.org>  2016-09-13 14:31:18 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2016-09-13 14:31:18 +0100
commit    4dfbe3767af503a4cd137b15c8a9d8f30b20a6e9 (patch)
tree      3e58ae8614b0f372f8fe4f4bb27b85263c3e4934 /block
parent    fa9701240951093907076db0943f96972a396ef5 (diff)
parent    dce8921b2baaf95974af8176406881872067adfa (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request v2:
 * Fixed qcow2 sanitizer warnings [Peter]
 * Renamed get_error test cases to get_error_all to avoid tripping
   "error:" grep scripts [Peter]
 * Added Fam's iothread stop patch

# gpg: Signature made Tue 13 Sep 2016 11:02:30 BST
# gpg:                using RSA key 0x9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>"
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  iothread: Stop threads before main() quits
  tests: fix qvirtqueue_kick
  MAINTAINERS: add maintainer for replication
  support replication driver in blockdev-add
  tests: add unit test case for replication
  replication: Implement new driver for block replication
  replication: Introduce new APIs to do replication operation
  configure: support replication
  mirror: auto complete active commit
  docs: block replication's description
  block: Link backup into block core
  Backup: export interfaces for extra serialization
  Backup: clear all bitmap when doing block checkpoint
  block: unblock backup operations in backing file
  virtio-blk: rename virtio_device_info to virtio_blk_info
  linux-aio: process completions from ioq_submit()
  linux-aio: split processing events function
  linux-aio: consume events in userspace instead of calling io_getevents
  qcow2: avoid memcpy(dst, NULL, len)

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'block')
-rw-r--r--  block/Makefile.objs   |   3
-rw-r--r--  block/backup.c        |  59
-rw-r--r--  block/linux-aio.c     | 178
-rw-r--r--  block/mirror.c        |  13
-rw-r--r--  block/qcow2-cluster.c |   4
-rw-r--r--  block/qcow2.c         |   5
-rw-r--r--  block/replication.c   | 659
7 files changed, 870 insertions(+), 51 deletions(-)
diff --git a/block/Makefile.objs b/block/Makefile.objs
index 2593a2f8a6..55da6266fe 100644
--- a/block/Makefile.objs
+++ b/block/Makefile.objs
@@ -22,11 +22,12 @@ block-obj-$(CONFIG_ARCHIPELAGO) += archipelago.o
block-obj-$(CONFIG_LIBSSH2) += ssh.o
block-obj-y += accounting.o dirty-bitmap.o
block-obj-y += write-threshold.o
+block-obj-y += backup.o
+block-obj-$(CONFIG_REPLICATION) += replication.o
block-obj-y += crypto.o
common-obj-y += stream.o
-common-obj-y += backup.o
iscsi.o-cflags := $(LIBISCSI_CFLAGS)
iscsi.o-libs := $(LIBISCSI_LIBS)
diff --git a/block/backup.c b/block/backup.c
index bb3bb9a9eb..582bd0f7ee 100644
--- a/block/backup.c
+++ b/block/backup.c
@@ -17,6 +17,7 @@
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
+#include "block/block_backup.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
@@ -27,13 +28,6 @@
#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */
-typedef struct CowRequest {
- int64_t start;
- int64_t end;
- QLIST_ENTRY(CowRequest) list;
- CoQueue wait_queue; /* coroutines blocked on this request */
-} CowRequest;
-
typedef struct BackupBlockJob {
BlockJob common;
BlockBackend *target;
@@ -255,6 +249,57 @@ static void backup_attached_aio_context(BlockJob *job, AioContext *aio_context)
blk_set_aio_context(s->target, aio_context);
}
+void backup_do_checkpoint(BlockJob *job, Error **errp)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t len;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
+ error_setg(errp, "The backup job only supports block checkpoint in"
+ " sync=none mode");
+ return;
+ }
+
+ len = DIV_ROUND_UP(backup_job->common.len, backup_job->cluster_size);
+ bitmap_zero(backup_job->done_bitmap, len);
+}
+
+void backup_wait_for_overlapping_requests(BlockJob *job, int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ wait_for_overlapping_requests(backup_job, start, end);
+}
+
+void backup_cow_request_begin(CowRequest *req, BlockJob *job,
+ int64_t sector_num,
+ int nb_sectors)
+{
+ BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);
+ int64_t sectors_per_cluster = cluster_size_sectors(backup_job);
+ int64_t start, end;
+
+ assert(job->driver->job_type == BLOCK_JOB_TYPE_BACKUP);
+
+ start = sector_num / sectors_per_cluster;
+ end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);
+ cow_request_begin(req, backup_job, start, end);
+}
+
+void backup_cow_request_end(CowRequest *req)
+{
+ cow_request_end(req);
+}
+
static const BlockJobDriver backup_job_driver = {
.instance_size = sizeof(BackupBlockJob),
.job_type = BLOCK_JOB_TYPE_BACKUP,
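
The three hooks exported above let a driver outside block/backup.c serialize
its own I/O against the backup job's in-flight copy-on-write requests. A
minimal sketch of the intended calling pattern, mirroring what
replication_co_readv() does later in this diff (the wrapper name here is
hypothetical):

static coroutine_fn int read_serialized_against_backup(BlockDriverState *bs,
                                                       BlockDriverState *backed_bs,
                                                       int64_t sector_num,
                                                       int nb_sectors,
                                                       QEMUIOVector *qiov)
{
    CowRequest req;
    int ret;

    /* Block until no in-flight backup COW request overlaps our range */
    backup_wait_for_overlapping_requests(backed_bs->job, sector_num,
                                         nb_sectors);
    /* Publish our own request so the backup job waits for us in turn */
    backup_cow_request_begin(&req, backed_bs->job, sector_num, nb_sectors);
    ret = bdrv_co_readv(bs->file, sector_num, nb_sectors, qiov);
    backup_cow_request_end(&req);
    return ret;
}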
diff --git a/block/linux-aio.c b/block/linux-aio.c
index e906abebb3..d4e19d444c 100644
--- a/block/linux-aio.c
+++ b/block/linux-aio.c
@@ -59,7 +59,6 @@ struct LinuxAioState {
/* I/O completion processing */
QEMUBH *completion_bh;
- struct io_event events[MAX_EVENTS];
int event_idx;
int event_max;
};
@@ -95,64 +94,153 @@ static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
laiocb->ret = ret;
if (laiocb->co) {
- qemu_coroutine_enter(laiocb->co);
+ /* Jump and continue completion for foreign requests, don't do
+ * anything for the current request; it will be completed shortly. */
+ if (laiocb->co != qemu_coroutine_self()) {
+ qemu_coroutine_enter(laiocb->co);
+ }
} else {
laiocb->common.cb(laiocb->common.opaque, ret);
qemu_aio_unref(laiocb);
}
}
-/* The completion BH fetches completed I/O requests and invokes their
- * callbacks.
+/**
+ * aio_ring buffer which is shared between userspace and kernel.
*
- * The function is somewhat tricky because it supports nested event loops, for
- * example when a request callback invokes aio_poll(). In order to do this,
- * the completion events array and index are kept in LinuxAioState. The BH
- * reschedules itself as long as there are completions pending so it will
- * either be called again in a nested event loop or will be called after all
- * events have been completed. When there are no events left to complete, the
- * BH returns without rescheduling.
+ * This is copied from linux/fs/aio.c; a common header does not exist,
+ * but AIO has existed for ages so we assume the ABI is stable.
*/
-static void qemu_laio_completion_bh(void *opaque)
+struct aio_ring {
+ unsigned id; /* kernel internal index number */
+ unsigned nr; /* number of io_events */
+ unsigned head; /* Written to by userland or by kernel. */
+ unsigned tail;
+
+ unsigned magic;
+ unsigned compat_features;
+ unsigned incompat_features;
+ unsigned header_length; /* size of aio_ring */
+
+ struct io_event io_events[0];
+};
+
+/**
+ * io_getevents_peek:
+ * @ctx: AIO context
+ * @events: pointer to the events array (output value)
+ *
+ * Returns the number of completed events and sets a pointer
+ * to the events array. This function does not update the internal
+ * ring buffer; it only reads head and tail. When @events has been
+ * processed, io_getevents_commit() must be called.
+ */
+static inline unsigned int io_getevents_peek(io_context_t ctx,
+ struct io_event **events)
{
- LinuxAioState *s = opaque;
+ struct aio_ring *ring = (struct aio_ring *)ctx;
+ unsigned int head = ring->head, tail = ring->tail;
+ unsigned int nr;
- /* Fetch more completion events when empty */
- if (s->event_idx == s->event_max) {
- do {
- struct timespec ts = { 0 };
- s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
- s->events, &ts);
- } while (s->event_max == -EINTR);
-
- s->event_idx = 0;
- if (s->event_max <= 0) {
- s->event_max = 0;
- return; /* no more events */
- }
- s->io_q.in_flight -= s->event_max;
+ nr = tail >= head ? tail - head : ring->nr - head;
+ *events = ring->io_events + head;
+ /* Avoid speculative loads of ring->io_events[i] before observing tail.
+ Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
+ smp_rmb();
+
+ return nr;
+}
+
+/**
+ * io_getevents_commit:
+ * @ctx: AIO context
+ * @nr: the number of events by which to advance the head
+ *
+ * Advances the head of the ring buffer.
+ */
+static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
+{
+ struct aio_ring *ring = (struct aio_ring *)ctx;
+
+ if (nr) {
+ ring->head = (ring->head + nr) % ring->nr;
}
+}
+
+/**
+ * io_getevents_advance_and_peek:
+ * @ctx: AIO context
+ * @events: pointer to the events array (output value)
+ * @nr: the number of events by which to advance the head
+ *
+ * Advances the head of the ring buffer and returns the number of
+ * elements left.
+ */
+static inline unsigned int
+io_getevents_advance_and_peek(io_context_t ctx,
+ struct io_event **events,
+ unsigned int nr)
+{
+ io_getevents_commit(ctx, nr);
+ return io_getevents_peek(ctx, events);
+}
+
+/**
+ * qemu_laio_process_completions:
+ * @s: AIO state
+ *
+ * Fetches completed I/O requests and invokes their callbacks.
+ *
+ * The function is somewhat tricky because it supports nested event loops, for
+ * example when a request callback invokes aio_poll(). In order to do this,
+ * indices are kept in LinuxAioState. The function schedules the completion
+ * BH so it can be called again in a nested event loop. When there are no
+ * events left to complete, the BH is canceled.
+ */
+static void qemu_laio_process_completions(LinuxAioState *s)
+{
+ struct io_event *events;
/* Reschedule so nested event loops see currently pending completions */
qemu_bh_schedule(s->completion_bh);
- /* Process completion events */
- while (s->event_idx < s->event_max) {
- struct iocb *iocb = s->events[s->event_idx].obj;
- struct qemu_laiocb *laiocb =
+ while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
+ s->event_idx))) {
+ for (s->event_idx = 0; s->event_idx < s->event_max; ) {
+ struct iocb *iocb = events[s->event_idx].obj;
+ struct qemu_laiocb *laiocb =
container_of(iocb, struct qemu_laiocb, iocb);
- laiocb->ret = io_event_ret(&s->events[s->event_idx]);
- s->event_idx++;
+ laiocb->ret = io_event_ret(&events[s->event_idx]);
- qemu_laio_process_completion(laiocb);
+ /* Change counters one-by-one because we can be nested. */
+ s->io_q.in_flight--;
+ s->event_idx++;
+ qemu_laio_process_completion(laiocb);
+ }
}
+ qemu_bh_cancel(s->completion_bh);
+
+ /* If we are nested we have to notify the level above that we are done
+ * by setting event_max to zero; the upper level will then jump out of
+ * its own `for` loop. If we are the last level, all counters have
+ * dropped to zero. */
+ s->event_max = 0;
+ s->event_idx = 0;
+}
+
+static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
+{
+ qemu_laio_process_completions(s);
if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
ioq_submit(s);
}
+}
- qemu_bh_cancel(s->completion_bh);
+static void qemu_laio_completion_bh(void *opaque)
+{
+ LinuxAioState *s = opaque;
+
+ qemu_laio_process_completions_and_submit(s);
}
static void qemu_laio_completion_cb(EventNotifier *e)
@@ -160,7 +248,7 @@ static void qemu_laio_completion_cb(EventNotifier *e)
LinuxAioState *s = container_of(e, LinuxAioState, e);
if (event_notifier_test_and_clear(&s->e)) {
- qemu_laio_completion_bh(s);
+ qemu_laio_process_completions_and_submit(s);
}
}
@@ -236,6 +324,19 @@ static void ioq_submit(LinuxAioState *s)
QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
} while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
s->io_q.blocked = (s->io_q.in_queue > 0);
+
+ if (s->io_q.in_flight) {
+ /* Try to complete events right away if there are
+ * still requests in flight. */
+ qemu_laio_process_completions(s);
+ /*
+ * Even if we have completed everything (in_flight == 0), the queue can
+ * still have pending requests (in_queue > 0). We do not attempt to
+ * repeat submission to avoid an I/O hang. The reason is simple: s->e is
+ * still set and the completion callback will be invoked shortly, and
+ * all pending requests will be submitted from there.
+ */
+ }
}
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
@@ -293,6 +394,7 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
.co = qemu_coroutine_self(),
.nbytes = qiov->size,
.ctx = s,
+ .ret = -EINPROGRESS,
.is_read = (type == QEMU_AIO_READ),
.qiov = qiov,
};
@@ -302,7 +404,9 @@ int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
return ret;
}
- qemu_coroutine_yield();
+ if (laiocb.ret == -EINPROGRESS) {
+ qemu_coroutine_yield();
+ }
return laiocb.ret;
}
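
The io_getevents_peek()/io_getevents_commit() pair above implements a classic
single-consumer ring-buffer protocol: userspace advances only head, the kernel
advances only tail, and a read barrier keeps the event-slot loads from being
speculated past the load of tail. A standalone toy version of the same
protocol (not QEMU code; C11 acquire/release atomics stand in for
smp_rmb()/smp_wmb(), and the ring holds plain ints instead of io_events):

/* Compile with: cc -std=c11 -O2 ring.c */
#include <stdatomic.h>
#include <stdio.h>

#define RING_NR 8

struct ring {
    _Atomic unsigned head;          /* advanced only by the consumer */
    _Atomic unsigned tail;          /* advanced only by the producer */
    int slots[RING_NR];
};

/* Like io_getevents_peek(): return the contiguous batch between head and
 * tail without updating the ring; a wrapped batch needs a second peek. */
static unsigned ring_peek(struct ring *r, int **batch)
{
    unsigned head = atomic_load_explicit(&r->head, memory_order_relaxed);
    /* acquire pairs with the producer's release store of tail, just as
     * smp_rmb() pairs with smp_wmb() in the kernel's aio_complete() */
    unsigned tail = atomic_load_explicit(&r->tail, memory_order_acquire);

    *batch = &r->slots[head];
    return tail >= head ? tail - head : RING_NR - head;
}

/* Like io_getevents_commit(): publish that @nr slots were consumed. */
static void ring_commit(struct ring *r, unsigned nr)
{
    if (nr) {
        unsigned head = atomic_load_explicit(&r->head, memory_order_relaxed);
        atomic_store_explicit(&r->head, (head + nr) % RING_NR,
                              memory_order_release);
    }
}

int main(void)
{
    struct ring r = { .tail = 3, .slots = { 10, 11, 12 } };
    int *batch;
    unsigned nr = ring_peek(&r, &batch);

    for (unsigned i = 0; i < nr; i++) {
        printf("event %d\n", batch[i]);
    }
    ring_commit(&r, nr);
    return 0;
}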
diff --git a/block/mirror.c b/block/mirror.c
index e0b3f4180f..f9d1fecaa0 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -916,7 +916,8 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
BlockCompletionFunc *cb,
void *opaque, Error **errp,
const BlockJobDriver *driver,
- bool is_none_mode, BlockDriverState *base)
+ bool is_none_mode, BlockDriverState *base,
+ bool auto_complete)
{
MirrorBlockJob *s;
@@ -952,6 +953,9 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
s->granularity = granularity;
s->buf_size = ROUND_UP(buf_size, granularity);
s->unmap = unmap;
+ if (auto_complete) {
+ s->should_complete = true;
+ }
s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
if (!s->dirty_bitmap) {
@@ -990,14 +994,15 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
mirror_start_job(job_id, bs, target, replaces,
speed, granularity, buf_size, backing_mode,
on_source_error, on_target_error, unmap, cb, opaque, errp,
- &mirror_job_driver, is_none_mode, base);
+ &mirror_job_driver, is_none_mode, base, false);
}
void commit_active_start(const char *job_id, BlockDriverState *bs,
BlockDriverState *base, int64_t speed,
BlockdevOnError on_error,
BlockCompletionFunc *cb,
- void *opaque, Error **errp)
+ void *opaque, Error **errp,
+ bool auto_complete)
{
int64_t length, base_length;
int orig_base_flags;
@@ -1038,7 +1043,7 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
mirror_start_job(job_id, bs, base, NULL, speed, 0, 0,
MIRROR_LEAVE_BACKING_CHAIN,
on_error, on_error, false, cb, opaque, &local_err,
- &commit_active_job_driver, false, base);
+ &commit_active_job_driver, false, base, auto_complete);
if (local_err) {
error_propagate(errp, local_err);
goto error_restore_flags;
diff --git a/block/qcow2-cluster.c b/block/qcow2-cluster.c
index f94183529c..9ab445dd17 100644
--- a/block/qcow2-cluster.c
+++ b/block/qcow2-cluster.c
@@ -83,7 +83,9 @@ int qcow2_grow_l1_table(BlockDriverState *bs, uint64_t min_size,
}
memset(new_l1_table, 0, align_offset(new_l1_size2, 512));
- memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+ if (s->l1_size) {
+ memcpy(new_l1_table, s->l1_table, s->l1_size * sizeof(uint64_t));
+ }
/* write new table (align to cluster) */
BLKDBG_EVENT(bs->file, BLKDBG_L1_GROW_ALLOC_TABLE);
diff --git a/block/qcow2.c b/block/qcow2.c
index c079aa83b6..0e53a4d666 100644
--- a/block/qcow2.c
+++ b/block/qcow2.c
@@ -1804,7 +1804,10 @@ static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
.magic = cpu_to_be32(magic),
.len = cpu_to_be32(len),
};
- memcpy(buf + sizeof(QCowExtension), s, len);
+
+ if (len) {
+ memcpy(buf + sizeof(QCowExtension), s, len);
+ }
return ext_len;
}
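
Both memcpy() guards above (here and in qcow2-cluster.c) fix the qcow2
sanitizer warnings mentioned in the pull request: C11 7.24.1p2 requires the
pointer arguments of memcpy() to be valid even when the size is zero, so
passing a NULL source is undefined behavior regardless of length. An
illustrative (non-QEMU) version of the idiom:

#include <stdint.h>
#include <string.h>

/* Copy an n-entry table; src may legitimately be NULL when n == 0
 * (e.g. an empty L1 table), so skip the call entirely in that case. */
static void copy_table(uint64_t *dst, const uint64_t *src, size_t n)
{
    if (n) {
        memcpy(dst, src, n * sizeof(uint64_t));
    }
}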
diff --git a/block/replication.c b/block/replication.c
new file mode 100644
index 0000000000..3bd1cf1809
--- /dev/null
+++ b/block/replication.c
@@ -0,0 +1,659 @@
+/*
+ * Replication Block filter
+ *
+ * Copyright (c) 2016 HUAWEI TECHNOLOGIES CO., LTD.
+ * Copyright (c) 2016 Intel Corporation
+ * Copyright (c) 2016 FUJITSU LIMITED
+ *
+ * Author:
+ * Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu-common.h"
+#include "block/nbd.h"
+#include "block/blockjob.h"
+#include "block/block_int.h"
+#include "block/block_backup.h"
+#include "sysemu/block-backend.h"
+#include "qapi/error.h"
+#include "replication.h"
+
+typedef struct BDRVReplicationState {
+ ReplicationMode mode;
+ int replication_state;
+ BdrvChild *active_disk;
+ BdrvChild *hidden_disk;
+ BdrvChild *secondary_disk;
+ char *top_id;
+ ReplicationState *rs;
+ Error *blocker;
+ int orig_hidden_flags;
+ int orig_secondary_flags;
+ int error;
+} BDRVReplicationState;
+
+enum {
+ BLOCK_REPLICATION_NONE, /* block replication is not started */
+ BLOCK_REPLICATION_RUNNING, /* block replication is running */
+ BLOCK_REPLICATION_FAILOVER, /* failover is running in background */
+ BLOCK_REPLICATION_FAILOVER_FAILED, /* failover failed */
+ BLOCK_REPLICATION_DONE, /* block replication is done */
+};
+
+static void replication_start(ReplicationState *rs, ReplicationMode mode,
+ Error **errp);
+static void replication_do_checkpoint(ReplicationState *rs, Error **errp);
+static void replication_get_error(ReplicationState *rs, Error **errp);
+static void replication_stop(ReplicationState *rs, bool failover,
+ Error **errp);
+
+#define REPLICATION_MODE "mode"
+#define REPLICATION_TOP_ID "top-id"
+static QemuOptsList replication_runtime_opts = {
+ .name = "replication",
+ .head = QTAILQ_HEAD_INITIALIZER(replication_runtime_opts.head),
+ .desc = {
+ {
+ .name = REPLICATION_MODE,
+ .type = QEMU_OPT_STRING,
+ },
+ {
+ .name = REPLICATION_TOP_ID,
+ .type = QEMU_OPT_STRING,
+ },
+ { /* end of list */ }
+ },
+};
+
+static ReplicationOps replication_ops = {
+ .start = replication_start,
+ .checkpoint = replication_do_checkpoint,
+ .get_error = replication_get_error,
+ .stop = replication_stop,
+};
+
+static int replication_open(BlockDriverState *bs, QDict *options,
+ int flags, Error **errp)
+{
+ int ret;
+ BDRVReplicationState *s = bs->opaque;
+ Error *local_err = NULL;
+ QemuOpts *opts = NULL;
+ const char *mode;
+ const char *top_id;
+
+ ret = -EINVAL;
+ opts = qemu_opts_create(&replication_runtime_opts, NULL, 0, &error_abort);
+ qemu_opts_absorb_qdict(opts, options, &local_err);
+ if (local_err) {
+ goto fail;
+ }
+
+ mode = qemu_opt_get(opts, REPLICATION_MODE);
+ if (!mode) {
+ error_setg(&local_err, "Missing the option mode");
+ goto fail;
+ }
+
+ if (!strcmp(mode, "primary")) {
+ s->mode = REPLICATION_MODE_PRIMARY;
+ } else if (!strcmp(mode, "secondary")) {
+ s->mode = REPLICATION_MODE_SECONDARY;
+ top_id = qemu_opt_get(opts, REPLICATION_TOP_ID);
+ s->top_id = g_strdup(top_id);
+ if (!s->top_id) {
+ error_setg(&local_err, "Missing the option top-id");
+ goto fail;
+ }
+ } else {
+ error_setg(&local_err,
+ "The option mode's value should be primary or secondary");
+ goto fail;
+ }
+
+ s->rs = replication_new(bs, &replication_ops);
+
+ ret = 0;
+
+fail:
+ qemu_opts_del(opts);
+ error_propagate(errp, local_err);
+
+ return ret;
+}
+
+static void replication_close(BlockDriverState *bs)
+{
+ BDRVReplicationState *s = bs->opaque;
+
+ if (s->replication_state == BLOCK_REPLICATION_RUNNING) {
+ replication_stop(s->rs, false, NULL);
+ }
+
+ if (s->mode == REPLICATION_MODE_SECONDARY) {
+ g_free(s->top_id);
+ }
+
+ replication_remove(s->rs);
+}
+
+static int64_t replication_getlength(BlockDriverState *bs)
+{
+ return bdrv_getlength(bs->file->bs);
+}
+
+static int replication_get_io_status(BDRVReplicationState *s)
+{
+ switch (s->replication_state) {
+ case BLOCK_REPLICATION_NONE:
+ return -EIO;
+ case BLOCK_REPLICATION_RUNNING:
+ return 0;
+ case BLOCK_REPLICATION_FAILOVER:
+ return s->mode == REPLICATION_MODE_PRIMARY ? -EIO : 0;
+ case BLOCK_REPLICATION_FAILOVER_FAILED:
+ return s->mode == REPLICATION_MODE_PRIMARY ? -EIO : 1;
+ case BLOCK_REPLICATION_DONE:
+ /*
+ * the active commit job has completed, and the active disk and
+ * secondary disk have been swapped, so we can operate on bs->file directly
+ */
+ return s->mode == REPLICATION_MODE_PRIMARY ? -EIO : 0;
+ default:
+ abort();
+ }
+}
+
+static int replication_return_value(BDRVReplicationState *s, int ret)
+{
+ if (s->mode == REPLICATION_MODE_SECONDARY) {
+ return ret;
+ }
+
+ if (ret < 0) {
+ s->error = ret;
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static coroutine_fn int replication_co_readv(BlockDriverState *bs,
+ int64_t sector_num,
+ int remaining_sectors,
+ QEMUIOVector *qiov)
+{
+ BDRVReplicationState *s = bs->opaque;
+ BdrvChild *child = s->secondary_disk;
+ BlockJob *job = NULL;
+ CowRequest req;
+ int ret;
+
+ if (s->mode == REPLICATION_MODE_PRIMARY) {
+ /* We only use it to forward primary write requests */
+ return -EIO;
+ }
+
+ ret = replication_get_io_status(s);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (child && child->bs) {
+ job = child->bs->job;
+ }
+
+ if (job) {
+ backup_wait_for_overlapping_requests(child->bs->job, sector_num,
+ remaining_sectors);
+ backup_cow_request_begin(&req, child->bs->job, sector_num,
+ remaining_sectors);
+ ret = bdrv_co_readv(bs->file, sector_num, remaining_sectors,
+ qiov);
+ backup_cow_request_end(&req);
+ goto out;
+ }
+
+ ret = bdrv_co_readv(bs->file, sector_num, remaining_sectors, qiov);
+out:
+ return replication_return_value(s, ret);
+}
+
+static coroutine_fn int replication_co_writev(BlockDriverState *bs,
+ int64_t sector_num,
+ int remaining_sectors,
+ QEMUIOVector *qiov)
+{
+ BDRVReplicationState *s = bs->opaque;
+ QEMUIOVector hd_qiov;
+ uint64_t bytes_done = 0;
+ BdrvChild *top = bs->file;
+ BdrvChild *base = s->secondary_disk;
+ BdrvChild *target;
+ int ret, n;
+
+ ret = replication_get_io_status(s);
+ if (ret < 0) {
+ goto out;
+ }
+
+ if (ret == 0) {
+ ret = bdrv_co_writev(top, sector_num,
+ remaining_sectors, qiov);
+ return replication_return_value(s, ret);
+ }
+
+ /*
+ * Failover failed; only write to the active disk if the sectors
+ * have already been allocated in the active disk/hidden disk.
+ */
+ qemu_iovec_init(&hd_qiov, qiov->niov);
+ while (remaining_sectors > 0) {
+ ret = bdrv_is_allocated_above(top->bs, base->bs, sector_num,
+ remaining_sectors, &n);
+ if (ret < 0) {
+ goto out1;
+ }
+
+ qemu_iovec_reset(&hd_qiov);
+ qemu_iovec_concat(&hd_qiov, qiov, bytes_done, n * BDRV_SECTOR_SIZE);
+
+ target = ret ? top : base;
+ ret = bdrv_co_writev(target, sector_num, n, &hd_qiov);
+ if (ret < 0) {
+ goto out1;
+ }
+
+ remaining_sectors -= n;
+ sector_num += n;
+ bytes_done += n * BDRV_SECTOR_SIZE;
+ }
+
+out1:
+ qemu_iovec_destroy(&hd_qiov);
+out:
+ return ret;
+}
+
+static bool replication_recurse_is_first_non_filter(BlockDriverState *bs,
+ BlockDriverState *candidate)
+{
+ return bdrv_recurse_is_first_non_filter(bs->file->bs, candidate);
+}
+
+static void secondary_do_checkpoint(BDRVReplicationState *s, Error **errp)
+{
+ Error *local_err = NULL;
+ int ret;
+
+ if (!s->secondary_disk->bs->job) {
+ error_setg(errp, "Backup job was cancelled unexpectedly");
+ return;
+ }
+
+ backup_do_checkpoint(s->secondary_disk->bs->job, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ ret = s->active_disk->bs->drv->bdrv_make_empty(s->active_disk->bs);
+ if (ret < 0) {
+ error_setg(errp, "Cannot make active disk empty");
+ return;
+ }
+
+ ret = s->hidden_disk->bs->drv->bdrv_make_empty(s->hidden_disk->bs);
+ if (ret < 0) {
+ error_setg(errp, "Cannot make hidden disk empty");
+ return;
+ }
+}
+
+static void reopen_backing_file(BDRVReplicationState *s, bool writable,
+ Error **errp)
+{
+ BlockReopenQueue *reopen_queue = NULL;
+ int orig_hidden_flags, orig_secondary_flags;
+ int new_hidden_flags, new_secondary_flags;
+ Error *local_err = NULL;
+
+ if (writable) {
+ orig_hidden_flags = s->orig_hidden_flags =
+ bdrv_get_flags(s->hidden_disk->bs);
+ new_hidden_flags = (orig_hidden_flags | BDRV_O_RDWR) &
+ ~BDRV_O_INACTIVE;
+ orig_secondary_flags = s->orig_secondary_flags =
+ bdrv_get_flags(s->secondary_disk->bs);
+ new_secondary_flags = (orig_secondary_flags | BDRV_O_RDWR) &
+ ~BDRV_O_INACTIVE;
+ } else {
+ orig_hidden_flags = (s->orig_hidden_flags | BDRV_O_RDWR) &
+ ~BDRV_O_INACTIVE;
+ new_hidden_flags = s->orig_hidden_flags;
+ orig_secondary_flags = (s->orig_secondary_flags | BDRV_O_RDWR) &
+ ~BDRV_O_INACTIVE;
+ new_secondary_flags = s->orig_secondary_flags;
+ }
+
+ if (orig_hidden_flags != new_hidden_flags) {
+ reopen_queue = bdrv_reopen_queue(reopen_queue, s->hidden_disk->bs, NULL,
+ new_hidden_flags);
+ }
+
+ if (!(orig_secondary_flags & BDRV_O_RDWR)) {
+ reopen_queue = bdrv_reopen_queue(reopen_queue, s->secondary_disk->bs,
+ NULL, new_secondary_flags);
+ }
+
+ if (reopen_queue) {
+ bdrv_reopen_multiple(reopen_queue, &local_err);
+ error_propagate(errp, local_err);
+ }
+}
+
+static void backup_job_cleanup(BDRVReplicationState *s)
+{
+ BlockDriverState *top_bs;
+
+ top_bs = bdrv_lookup_bs(s->top_id, s->top_id, NULL);
+ if (!top_bs) {
+ return;
+ }
+ bdrv_op_unblock_all(top_bs, s->blocker);
+ error_free(s->blocker);
+ reopen_backing_file(s, false, NULL);
+}
+
+static void backup_job_completed(void *opaque, int ret)
+{
+ BDRVReplicationState *s = opaque;
+
+ if (s->replication_state != BLOCK_REPLICATION_FAILOVER) {
+ /* The backup job was cancelled unexpectedly */
+ s->error = -EIO;
+ }
+
+ backup_job_cleanup(s);
+}
+
+static bool check_top_bs(BlockDriverState *top_bs, BlockDriverState *bs)
+{
+ BdrvChild *child;
+
+ /* The bs itself is the top_bs */
+ if (top_bs == bs) {
+ return true;
+ }
+
+ /* Iterate over top_bs's children */
+ QLIST_FOREACH(child, &top_bs->children, next) {
+ if (child->bs == bs || check_top_bs(child->bs, bs)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static void replication_start(ReplicationState *rs, ReplicationMode mode,
+ Error **errp)
+{
+ BlockDriverState *bs = rs->opaque;
+ BDRVReplicationState *s;
+ BlockDriverState *top_bs;
+ int64_t active_length, hidden_length, disk_length;
+ AioContext *aio_context;
+ Error *local_err = NULL;
+
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+ s = bs->opaque;
+
+ if (s->replication_state != BLOCK_REPLICATION_NONE) {
+ error_setg(errp, "Block replication is running or done");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ if (s->mode != mode) {
+ error_setg(errp, "The parameter mode's value is invalid, needs %d,"
+ " but got %d", s->mode, mode);
+ aio_context_release(aio_context);
+ return;
+ }
+
+ switch (s->mode) {
+ case REPLICATION_MODE_PRIMARY:
+ break;
+ case REPLICATION_MODE_SECONDARY:
+ s->active_disk = bs->file;
+ if (!s->active_disk || !s->active_disk->bs ||
+ !s->active_disk->bs->backing) {
+ error_setg(errp, "Active disk doesn't have backing file");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ s->hidden_disk = s->active_disk->bs->backing;
+ if (!s->hidden_disk->bs || !s->hidden_disk->bs->backing) {
+ error_setg(errp, "Hidden disk doesn't have backing file");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ s->secondary_disk = s->hidden_disk->bs->backing;
+ if (!s->secondary_disk->bs || !bdrv_has_blk(s->secondary_disk->bs)) {
+ error_setg(errp, "The secondary disk doesn't have block backend");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ /* verify the length */
+ active_length = bdrv_getlength(s->active_disk->bs);
+ hidden_length = bdrv_getlength(s->hidden_disk->bs);
+ disk_length = bdrv_getlength(s->secondary_disk->bs);
+ if (active_length < 0 || hidden_length < 0 || disk_length < 0 ||
+ active_length != hidden_length || hidden_length != disk_length) {
+ error_setg(errp, "Active disk, hidden disk, secondary disk's length"
+ " are not the same");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ if (!s->active_disk->bs->drv->bdrv_make_empty ||
+ !s->hidden_disk->bs->drv->bdrv_make_empty) {
+ error_setg(errp,
+ "Active disk or hidden disk doesn't support make_empty");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ /* reopen the backing file in r/w mode */
+ reopen_backing_file(s, true, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ aio_context_release(aio_context);
+ return;
+ }
+
+ /* start backup job now */
+ error_setg(&s->blocker,
+ "Block device is in use by internal backup job");
+
+ top_bs = bdrv_lookup_bs(s->top_id, s->top_id, NULL);
+ if (!top_bs || !bdrv_is_root_node(top_bs) ||
+ !check_top_bs(top_bs, bs)) {
+ error_setg(errp, "No top_bs or it is invalid");
+ reopen_backing_file(s, false, NULL);
+ aio_context_release(aio_context);
+ return;
+ }
+ bdrv_op_block_all(top_bs, s->blocker);
+ bdrv_op_unblock(top_bs, BLOCK_OP_TYPE_DATAPLANE, s->blocker);
+
+ backup_start("replication-backup", s->secondary_disk->bs,
+ s->hidden_disk->bs, 0, MIRROR_SYNC_MODE_NONE, NULL, false,
+ BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
+ backup_job_completed, s, NULL, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ backup_job_cleanup(s);
+ aio_context_release(aio_context);
+ return;
+ }
+ break;
+ default:
+ aio_context_release(aio_context);
+ abort();
+ }
+
+ s->replication_state = BLOCK_REPLICATION_RUNNING;
+
+ if (s->mode == REPLICATION_MODE_SECONDARY) {
+ secondary_do_checkpoint(s, errp);
+ }
+
+ s->error = 0;
+ aio_context_release(aio_context);
+}
+
+static void replication_do_checkpoint(ReplicationState *rs, Error **errp)
+{
+ BlockDriverState *bs = rs->opaque;
+ BDRVReplicationState *s;
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+ s = bs->opaque;
+
+ if (s->mode == REPLICATION_MODE_SECONDARY) {
+ secondary_do_checkpoint(s, errp);
+ }
+ aio_context_release(aio_context);
+}
+
+static void replication_get_error(ReplicationState *rs, Error **errp)
+{
+ BlockDriverState *bs = rs->opaque;
+ BDRVReplicationState *s;
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+ s = bs->opaque;
+
+ if (s->replication_state != BLOCK_REPLICATION_RUNNING) {
+ error_setg(errp, "Block replication is not running");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ if (s->error) {
+ error_setg(errp, "I/O error occurred");
+ aio_context_release(aio_context);
+ return;
+ }
+ aio_context_release(aio_context);
+}
+
+static void replication_done(void *opaque, int ret)
+{
+ BlockDriverState *bs = opaque;
+ BDRVReplicationState *s = bs->opaque;
+
+ if (ret == 0) {
+ s->replication_state = BLOCK_REPLICATION_DONE;
+
+ /* refresh top bs's filename */
+ bdrv_refresh_filename(bs);
+ s->active_disk = NULL;
+ s->secondary_disk = NULL;
+ s->hidden_disk = NULL;
+ s->error = 0;
+ } else {
+ s->replication_state = BLOCK_REPLICATION_FAILOVER_FAILED;
+ s->error = -EIO;
+ }
+}
+
+static void replication_stop(ReplicationState *rs, bool failover, Error **errp)
+{
+ BlockDriverState *bs = rs->opaque;
+ BDRVReplicationState *s;
+ AioContext *aio_context;
+
+ aio_context = bdrv_get_aio_context(bs);
+ aio_context_acquire(aio_context);
+ s = bs->opaque;
+
+ if (s->replication_state != BLOCK_REPLICATION_RUNNING) {
+ error_setg(errp, "Block replication is not running");
+ aio_context_release(aio_context);
+ return;
+ }
+
+ switch (s->mode) {
+ case REPLICATION_MODE_PRIMARY:
+ s->replication_state = BLOCK_REPLICATION_DONE;
+ s->error = 0;
+ break;
+ case REPLICATION_MODE_SECONDARY:
+ /*
+ * This BDS will be closed, and the job must be completed before
+ * then, because backup_job_completed() accesses the hidden disk
+ * and the secondary disk.
+ */
+ if (s->secondary_disk->bs->job) {
+ block_job_cancel_sync(s->secondary_disk->bs->job);
+ }
+
+ if (!failover) {
+ secondary_do_checkpoint(s, errp);
+ s->replication_state = BLOCK_REPLICATION_DONE;
+ aio_context_release(aio_context);
+ return;
+ }
+
+ s->replication_state = BLOCK_REPLICATION_FAILOVER;
+ commit_active_start("replication-commit", s->active_disk->bs,
+ s->secondary_disk->bs, 0, BLOCKDEV_ON_ERROR_REPORT,
+ replication_done,
+ bs, errp, true);
+ break;
+ default:
+ aio_context_release(aio_context);
+ abort();
+ }
+ aio_context_release(aio_context);
+}
+
+BlockDriver bdrv_replication = {
+ .format_name = "replication",
+ .protocol_name = "replication",
+ .instance_size = sizeof(BDRVReplicationState),
+
+ .bdrv_open = replication_open,
+ .bdrv_close = replication_close,
+
+ .bdrv_getlength = replication_getlength,
+ .bdrv_co_readv = replication_co_readv,
+ .bdrv_co_writev = replication_co_writev,
+
+ .is_filter = true,
+ .bdrv_recurse_is_first_non_filter = replication_recurse_is_first_non_filter,
+
+ .has_variable_length = true,
+};
+
+static void bdrv_replication_init(void)
+{
+ bdrv_register(&bdrv_replication);
+}
+
+block_init(bdrv_replication_init);
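
On the secondary side, replication_start() above expects the chain
"replication filter -> active disk (bs->file) -> hidden disk (its backing)
-> secondary disk (the hidden disk's backing, attached to a BlockBackend)",
and after a failed failover replication_co_writev() routes each run of
sectors to the active/hidden top if it is already allocated there, otherwise
straight to the secondary base. A standalone toy model of that routing loop
(not QEMU code; bdrv_is_allocated_above() is mocked with a bitmap):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DISK_SECTORS 16

/* Sectors already allocated in the active/hidden "top" layers */
static bool top_allocated[DISK_SECTORS] = {
    [2] = true, [3] = true, [4] = true, [9] = true,
};

/* Mock of bdrv_is_allocated_above(): report whether the run starting at
 * @sector is allocated in top, and store the run length in *n. */
static bool is_allocated_above(int64_t sector, int64_t remaining, int *n)
{
    bool first = top_allocated[sector];
    int i = 1;

    while (i < remaining && top_allocated[sector + i] == first) {
        i++;
    }
    *n = i;
    return first;
}

int main(void)
{
    int64_t sector = 0, remaining = DISK_SECTORS;

    while (remaining > 0) {
        int n;
        bool to_top = is_allocated_above(sector, remaining, &n);

        printf("write sectors [%lld, %lld) -> %s\n",
               (long long)sector, (long long)(sector + n),
               to_top ? "top (active/hidden)" : "base (secondary)");
        sector += n;
        remaining -= n;
    }
    return 0;
}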