| author | Peter Maydell <peter.maydell@linaro.org> | 2017-08-23 17:38:01 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2017-08-23 17:38:01 +0100 |
| commit | 1eed33994e28d4a0437ba6e944bbc3ec5e4a29a0 (patch) | |
| tree | cacbb56b00763fc9e0956df21ca11d742a77aa14 | |
| parent | 3da2bd8c4a80178fb115c70b858cecbfb1eb1067 (diff) | |
| parent | 40f4a21895b5a7eae4011593837069f63460d983 (diff) | |
Merge remote-tracking branch 'remotes/ericb/tags/pull-nbd-2017-08-23' into staging
nbd patches for 2017-08-23
- Fam Zheng: 0/4 block: Fix non-shared storage migration
- Stefan Hajnoczi: qemu-iotests: add 194 non-shared storage migration test
- Stefan Hajnoczi: nbd-client: avoid spurious qio_channel_yield() re-entry
# gpg: Signature made Wed 23 Aug 2017 17:22:53 BST
# gpg: using RSA key 0xA7A16B4A2527436A
# gpg: Good signature from "Eric Blake <eblake@redhat.com>"
# gpg: aka "Eric Blake (Free Software Programmer) <ebb9@byu.net>"
# gpg: aka "[jpeg image of size 6874]"
# Primary key fingerprint: 71C2 CC22 B1C4 6029 27D2 F3AA A7A1 6B4A 2527 436A
* remotes/ericb/tags/pull-nbd-2017-08-23:
nbd-client: avoid spurious qio_channel_yield() re-entry
qemu-iotests: add 194 non-shared storage migration test
block: Update open_flags after ->inactivate() callback
mirror: Mark target BB as "force allow inactivate"
block-backend: Allow more "can inactivate" cases
block-backend: Refactor inactivate check
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
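The context for the series: libvirt's non-shared storage migration has the destination QEMU export its empty disk over NBD, the source drive-mirror into that export, and only once the mirror converges does the memory migration start. As the code comments in the diff below explain, completing migration inactivates the source's images, and before these patches the mirror target BlockBackend refused inactivation, so the final step could not complete. The new iotest 194 drives this flow through the iotests harness; purely as an illustration, the same QMP conversation over raw sockets might look like the following sketch. Everything here (socket paths, helper functions) is invented for the example and is not part of the patch.

```python
#!/usr/bin/env python
# Hypothetical sketch, NOT part of this series: the QMP sequence that
# iotest 194 (below) drives via the iotests harness, spelled out against
# bare QMP sockets. Assumes a source VM and a destination VM are already
# running, the destination started with -incoming unix:/tmp/migration.sock,
# and both exposing QMP monitors on the unix socket paths used here.
import json
import socket

def qmp_connect(path):
    """Connect to a QMP monitor socket and negotiate capabilities."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.connect(path)
    chan = sock.makefile('rw')
    json.loads(chan.readline())          # server greeting banner
    chan.write(json.dumps({'execute': 'qmp_capabilities'}) + '\n')
    chan.flush()
    json.loads(chan.readline())          # {"return": {}}
    return chan

def qmp(chan, cmd, **args):
    """Issue one command, skipping any async events that arrive first."""
    chan.write(json.dumps({'execute': cmd, 'arguments': args}) + '\n')
    chan.flush()
    while True:
        msg = json.loads(chan.readline())
        if 'event' not in msg:
            return msg

def wait_event(chan, name):
    """Block until the named async event arrives."""
    while True:
        msg = json.loads(chan.readline())
        if msg.get('event') == name:
            return msg

src = qmp_connect('/tmp/src-qmp.sock')
dst = qmp_connect('/tmp/dst-qmp.sock')

# 1. The destination exports its blank disk over NBD, writable.
qmp(dst, 'nbd-server-start',
    addr={'type': 'unix', 'data': {'path': '/tmp/nbd.sock'}})
qmp(dst, 'nbd-server-add', device='drive0', writable=True)

# 2. The source mirrors its disk into that export. The mirror target BB
#    is the one the mirror.c hunk below marks "force allow inactivate".
qmp(src, 'drive-mirror', device='drive0',
    target='nbd+unix:///drive0?socket=/tmp/nbd.sock',
    sync='full', format='raw', mode='existing')

# 3. Once the mirror catches up, start the migration stream proper;
#    completing it inactivates the source images, including the mirror
#    target, which is exactly the case this series makes legal.
wait_event(src, 'BLOCK_JOB_READY')
qmp(src, 'migrate', uri='unix:/tmp/migration.sock')
```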
-rw-r--r-- | block.c | 7
-rw-r--r-- | block/block-backend.c | 31
-rw-r--r-- | block/mirror.c | 14
-rw-r--r-- | block/nbd-client.c | 35
-rw-r--r-- | block/nbd-client.h | 7
-rw-r--r-- | include/sysemu/block-backend.h | 1
-rwxr-xr-x | tests/qemu-iotests/194 | 73
-rw-r--r-- | tests/qemu-iotests/194.out | 13
-rw-r--r-- | tests/qemu-iotests/group | 1
-rw-r--r-- | tests/qemu-iotests/iotests.py | 13
10 files changed, 170 insertions, 25 deletions
diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -4085,21 +4085,20 @@ static int bdrv_inactivate_recurse(BlockDriverState *bs,
         }
     }

-    if (setting_flag) {
+    if (setting_flag && !(bs->open_flags & BDRV_O_INACTIVE)) {
         uint64_t perm, shared_perm;

-        bs->open_flags |= BDRV_O_INACTIVE;
-
         QLIST_FOREACH(parent, &bs->parents, next_parent) {
             if (parent->role->inactivate) {
                 ret = parent->role->inactivate(parent);
                 if (ret < 0) {
-                    bs->open_flags &= ~BDRV_O_INACTIVE;
                     return ret;
                 }
             }
         }

+        bs->open_flags |= BDRV_O_INACTIVE;
+
         /* Update permissions, they may differ for inactive nodes */
         bdrv_get_cumulative_perm(bs, &perm, &shared_perm);
         bdrv_check_perm(bs, perm, shared_perm, NULL, &error_abort);
diff --git a/block/block-backend.c b/block/block-backend.c
index e9798e897d..1031742401 100644
--- a/block/block-backend.c
+++ b/block/block-backend.c
@@ -70,6 +70,7 @@ struct BlockBackend {

     int quiesce_counter;
     VMChangeStateEntry *vmsh;
+    bool force_allow_inactivate;
 };

 typedef struct BlockBackendAIOCB {
@@ -192,6 +193,30 @@ static void blk_root_activate(BdrvChild *child, Error **errp)
     }
 }

+void blk_set_force_allow_inactivate(BlockBackend *blk)
+{
+    blk->force_allow_inactivate = true;
+}
+
+static bool blk_can_inactivate(BlockBackend *blk)
+{
+    /* If it is a guest device, inactivate is ok. */
+    if (blk->dev || blk_name(blk)[0]) {
+        return true;
+    }
+
+    /* Inactivating means no more writes to the image can be done,
+     * even if those writes would be changes invisible to the
+     * guest.  For block job BBs that satisfy this, we can just allow
+     * it.  This is the case for mirror job source, which is required
+     * by libvirt non-shared block migration. */
+    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
+        return true;
+    }
+
+    return blk->force_allow_inactivate;
+}
+
 static int blk_root_inactivate(BdrvChild *child)
 {
     BlockBackend *blk = child->opaque;
@@ -200,11 +225,7 @@ static int blk_root_inactivate(BdrvChild *child)
         return 0;
     }

-    /* Only inactivate BlockBackends for guest devices (which are inactive at
-     * this point because the VM is stopped) and unattached monitor-owned
-     * BlockBackends. If there is still any other user like a block job, then
-     * we simply can't inactivate the image. */
-    if (!blk->dev && !blk_name(blk)[0]) {
+    if (!blk_can_inactivate(blk)) {
         return -EPERM;
     }

diff --git a/block/mirror.c b/block/mirror.c
index c9a6a3ca86..429751b9fe 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -1134,6 +1134,7 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
                              const BlockJobDriver *driver,
                              bool is_none_mode, BlockDriverState *base,
                              bool auto_complete, const char *filter_node_name,
+                             bool is_mirror,
                              Error **errp)
 {
     MirrorBlockJob *s;
@@ -1222,6 +1223,15 @@ static void mirror_start_job(const char *job_id, BlockDriverState *bs,
     if (ret < 0) {
         goto fail;
     }
+    if (is_mirror) {
+        /* XXX: Mirror target could be a NBD server of target QEMU in the case
+         * of non-shared block migration. To allow migration completion, we
+         * have to allow "inactivate" of the target BB. When that happens, we
+         * know the job is drained, and the vcpus are stopped, so no write
+         * operation will be performed. Block layer already has assertions to
+         * ensure that. */
+        blk_set_force_allow_inactivate(s->target);
+    }

     s->replaces = g_strdup(replaces);
     s->on_source_error = on_source_error;
@@ -1306,7 +1316,7 @@ void mirror_start(const char *job_id, BlockDriverState *bs,
                      speed, granularity, buf_size, backing_mode,
                      on_source_error, on_target_error, unmap, NULL, NULL,
                      &mirror_job_driver, is_none_mode, base, false,
-                     filter_node_name, errp);
+                     filter_node_name, true, errp);
 }

 void commit_active_start(const char *job_id, BlockDriverState *bs,
@@ -1329,7 +1339,7 @@ void commit_active_start(const char *job_id, BlockDriverState *bs,
                          MIRROR_LEAVE_BACKING_CHAIN,
                          on_error, on_error, true, cb, opaque,
                          &commit_active_job_driver, false, base, auto_complete,
-                         filter_node_name, &local_err);
+                         filter_node_name, false, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         goto error_restore_flags;
diff --git a/block/nbd-client.c b/block/nbd-client.c
index 02c8e207ef..25bcaa2346 100644
--- a/block/nbd-client.c
+++ b/block/nbd-client.c
@@ -39,8 +39,10 @@ static void nbd_recv_coroutines_enter_all(NBDClientSession *s)
     int i;

     for (i = 0; i < MAX_NBD_REQUESTS; i++) {
-        if (s->recv_coroutine[i]) {
-            aio_co_wake(s->recv_coroutine[i]);
+        NBDClientRequest *req = &s->requests[i];
+
+        if (req->coroutine && req->receiving) {
+            aio_co_wake(req->coroutine);
         }
     }
 }
@@ -88,28 +90,28 @@ static coroutine_fn void nbd_read_reply_entry(void *opaque)
          * one coroutine is called until the reply finishes.
          */
         i = HANDLE_TO_INDEX(s, s->reply.handle);
-        if (i >= MAX_NBD_REQUESTS || !s->recv_coroutine[i]) {
+        if (i >= MAX_NBD_REQUESTS ||
+            !s->requests[i].coroutine ||
+            !s->requests[i].receiving) {
             break;
         }

-        /* We're woken up by the recv_coroutine itself.  Note that there
+        /* We're woken up again by the request itself.  Note that there
          * is no race between yielding and reentering read_reply_co.  This
          * is because:
          *
-         * - if recv_coroutine[i] runs on the same AioContext, it is only
+         * - if the request runs on the same AioContext, it is only
          *   entered after we yield
          *
-         * - if recv_coroutine[i] runs on a different AioContext, reentering
+         * - if the request runs on a different AioContext, reentering
          *   read_reply_co happens through a bottom half, which can only
          *   run after we yield.
          */
-        aio_co_wake(s->recv_coroutine[i]);
+        aio_co_wake(s->requests[i].coroutine);
         qemu_coroutine_yield();
     }

-    if (ret < 0) {
-        s->quit = true;
-    }
+    s->quit = true;
     nbd_recv_coroutines_enter_all(s);
     s->read_reply_co = NULL;
 }
@@ -128,14 +130,17 @@ static int nbd_co_send_request(BlockDriverState *bs,
     s->in_flight++;

     for (i = 0; i < MAX_NBD_REQUESTS; i++) {
-        if (s->recv_coroutine[i] == NULL) {
-            s->recv_coroutine[i] = qemu_coroutine_self();
+        if (s->requests[i].coroutine == NULL) {
             break;
         }
     }

     g_assert(qemu_in_coroutine());
     assert(i < MAX_NBD_REQUESTS);
+
+    s->requests[i].coroutine = qemu_coroutine_self();
+    s->requests[i].receiving = false;
+
     request->handle = INDEX_TO_HANDLE(s, i);

     if (s->quit) {
@@ -173,10 +178,13 @@ static void nbd_co_receive_reply(NBDClientSession *s,
                                  NBDReply *reply,
                                  QEMUIOVector *qiov)
 {
+    int i = HANDLE_TO_INDEX(s, request->handle);
     int ret;

     /* Wait until we're woken up by nbd_read_reply_entry.  */
+    s->requests[i].receiving = true;
     qemu_coroutine_yield();
+    s->requests[i].receiving = false;
     *reply = s->reply;
     if (reply->handle != request->handle ||
         !s->ioc || s->quit) {
         reply->error = EIO;
@@ -186,6 +194,7 @@ static void nbd_co_receive_reply(NBDClientSession *s,
                                           NULL);
         if (ret != request->len) {
             reply->error = EIO;
+            s->quit = true;
         }
     }

@@ -200,7 +209,7 @@ static void nbd_coroutine_end(BlockDriverState *bs,
     NBDClientSession *s = nbd_get_client_session(bs);
     int i = HANDLE_TO_INDEX(s, request->handle);

-    s->recv_coroutine[i] = NULL;
+    s->requests[i].coroutine = NULL;

     /* Kick the read_reply_co to get the next reply.  */
     if (s->read_reply_co) {
diff --git a/block/nbd-client.h b/block/nbd-client.h
index 1935ffbcaa..b435754b82 100644
--- a/block/nbd-client.h
+++ b/block/nbd-client.h
@@ -17,6 +17,11 @@

 #define MAX_NBD_REQUESTS    16

+typedef struct {
+    Coroutine *coroutine;
+    bool receiving;         /* waiting for read_reply_co? */
+} NBDClientRequest;
+
 typedef struct NBDClientSession {
     QIOChannelSocket *sioc; /* The master data channel */
     QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */
@@ -27,7 +32,7 @@ typedef struct NBDClientSession {
     Coroutine *read_reply_co;
     int in_flight;

-    Coroutine *recv_coroutine[MAX_NBD_REQUESTS];
+    NBDClientRequest requests[MAX_NBD_REQUESTS];
     NBDReply reply;
     bool quit;
 } NBDClientSession;
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index 4a3730596b..aadc733daf 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -241,5 +241,6 @@ void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg);
 void blk_io_limits_disable(BlockBackend *blk);
 void blk_io_limits_enable(BlockBackend *blk, const char *group);
 void blk_io_limits_update_group(BlockBackend *blk, const char *group);
+void blk_set_force_allow_inactivate(BlockBackend *blk);

 #endif
diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194
new file mode 100755
index 0000000000..8028111e21
--- /dev/null
+++ b/tests/qemu-iotests/194
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+#
+# Copyright (C) 2017 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+# Creator/Owner: Stefan Hajnoczi <stefanha@redhat.com>
+#
+# Non-shared storage migration test using NBD server and drive-mirror
+
+import os
+import atexit
+import iotests
+
+iotests.verify_platform(['linux'])
+
+img_size = '1G'
+source_img_path = os.path.join(iotests.test_dir, 'source.img')
+dest_img_path = os.path.join(iotests.test_dir, 'dest.img')
+iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, source_img_path, img_size)
+iotests.qemu_img_pipe('create', '-f', iotests.imgfmt, dest_img_path, img_size)
+
+iotests.log('Launching VMs...')
+migration_sock_path = os.path.join(iotests.test_dir, 'migration.sock')
+nbd_sock_path = os.path.join(iotests.test_dir, 'nbd.sock')
+source_vm = iotests.VM('source').add_drive(source_img_path)
+dest_vm = (iotests.VM('dest').add_drive(dest_img_path)
+                             .add_incoming('unix:{0}'.format(migration_sock_path)))
+source_vm.launch()
+atexit.register(source_vm.shutdown)
+dest_vm.launch()
+atexit.register(dest_vm.shutdown)
+
+iotests.log('Launching NBD server on destination...')
+iotests.log(dest_vm.qmp('nbd-server-start', addr={'type': 'unix', 'data': {'path': nbd_sock_path}}))
+iotests.log(dest_vm.qmp('nbd-server-add', device='drive0', writable=True))
+
+iotests.log('Starting drive-mirror on source...')
+iotests.log(source_vm.qmp(
+              'drive-mirror',
+              device='drive0',
+              target='nbd+unix:///drive0?socket={0}'.format(nbd_sock_path),
+              sync='full',
+              format='raw', # always raw, the server handles the format
+              mode='existing'))
+
+iotests.log('Waiting for drive-mirror to complete...')
+iotests.log(source_vm.event_wait('BLOCK_JOB_READY'),
+            filters=[iotests.filter_qmp_event])
+
+iotests.log('Starting migration...')
+source_vm.qmp('migrate-set-capabilities',
+              capabilities=[{'capability': 'events', 'state': True}])
+dest_vm.qmp('migrate-set-capabilities',
+            capabilities=[{'capability': 'events', 'state': True}])
+iotests.log(source_vm.qmp('migrate', uri='unix:{0}'.format(migration_sock_path)))
+
+while True:
+    event = source_vm.event_wait('MIGRATION')
+    iotests.log(event, filters=[iotests.filter_qmp_event])
+    if event['data']['status'] in ('completed', 'failed'):
+        break
diff --git a/tests/qemu-iotests/194.out b/tests/qemu-iotests/194.out
new file mode 100644
index 0000000000..ae501fecac
--- /dev/null
+++ b/tests/qemu-iotests/194.out
@@ -0,0 +1,13 @@
+Launching VMs...
+Launching NBD server on destination...
+{u'return': {}}
+{u'return': {}}
+Starting drive-mirror on source...
+{u'return': {}}
+Waiting for drive-mirror to complete...
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'device': u'drive0', u'type': u'mirror', u'speed': 0, u'len': 1073741824, u'offset': 1073741824}, u'event': u'BLOCK_JOB_READY'}
+Starting migration...
+{u'return': {}}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'setup'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'active'}, u'event': u'MIGRATION'}
+{u'timestamp': {u'seconds': 'SECS', u'microseconds': 'USECS'}, u'data': {u'status': u'completed'}, u'event': u'MIGRATION'}
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index afbdc427ea..4bd5017008 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -187,3 +187,4 @@
 189 rw auto
 190 rw auto quick
 192 rw auto quick
+194 rw auto migration quick
diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py
index 22439c43d3..7233983f3c 100644
--- a/tests/qemu-iotests/iotests.py
+++ b/tests/qemu-iotests/iotests.py
@@ -133,6 +133,14 @@ chown_re = re.compile(r"chown [0-9]+:[0-9]+")
 def filter_chown(msg):
     return chown_re.sub("chown UID:GID", msg)

+def filter_qmp_event(event):
+    '''Filter a QMP event dict'''
+    event = dict(event)
+    if 'timestamp' in event:
+        event['timestamp']['seconds'] = 'SECS'
+        event['timestamp']['microseconds'] = 'USECS'
+    return event
+
 def log(msg, filters=[]):
     for flt in filters:
         msg = flt(msg)
@@ -200,6 +208,11 @@ class VM(qtest.QEMUQtestMachine):
         self._args.append(','.join(opts))
         return self

+    def add_incoming(self, addr):
+        self._args.append('-incoming')
+        self._args.append(addr)
+        return self
+
     def pause_drive(self, drive, event=None):
         '''Pause drive r/w operations'''
         if not event:
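One detail worth calling out in the iotests.py hunk: 194.out can only be compared byte-for-byte because the volatile event timestamps are scrubbed, which is what the new filter_qmp_event() helper does. A standalone sketch of the idea follows; the function body mirrors the hunk above, while the sample event is invented for illustration:

```python
def filter_qmp_event(event):
    '''Filter a QMP event dict'''
    event = dict(event)
    if 'timestamp' in event:
        event['timestamp']['seconds'] = 'SECS'
        event['timestamp']['microseconds'] = 'USECS'
    return event

# Invented event, shaped like the MIGRATION events QEMU emits:
event = {'timestamp': {'seconds': 1503505373, 'microseconds': 123456},
         'event': 'MIGRATION', 'data': {'status': 'completed'}}
print(filter_qmp_event(event))
# {'timestamp': {'seconds': 'SECS', 'microseconds': 'USECS'},
#  'event': 'MIGRATION', 'data': {'status': 'completed'}}
```

Note that dict(event) only makes a shallow copy, so the nested timestamp dict is scrubbed in place in the caller's event as well; that is harmless for throwaway test events, but worth knowing if the helper is reused.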