author     Peter Maydell <peter.maydell@linaro.org>    2020-01-27 18:07:18 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2020-01-27 18:07:18 +0000
commit     750fe5989f9efffce86368c6feac013f8b7b433c (patch)
tree       a59a377c3ba9f1e319e2b4417d09adac850f6397
parent     105b07f1ba462ec48b27e5cb74ddf81c6a79364c (diff)
parent     5fbf1d56c24018772e900a40a0955175ff82f35c (diff)
Merge remote-tracking branch 'remotes/kevin/tags/for-upstream' into staging
Block layer patches:
- iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711)
- AioContext fixes in QMP commands for backup and bitmaps
- iotests fixes
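
Editor's note: the CVE-2020-1711 item above caps the byte count that block/iscsi.c derives from the target's GET LBA STATUS descriptor, so a malicious or broken iSCSI target can no longer report a status range that extends past the end of the LUN. The following is a standalone C sketch of the capping idea only; the types and names are hypothetical, not the QEMU code (the real change is the block/iscsi.c hunk further down).

/*
 * Illustrative sketch: clamp a byte count reported by an iSCSI
 * GET LBA STATUS descriptor to the bytes actually left in the LUN.
 */
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical stand-in for the LUN fields the real code consults. */
typedef struct Lun {
    uint64_t num_blocks;   /* total blocks in the LUN */
    uint32_t block_size;   /* bytes per block */
} Lun;

/* Descriptor returned by the target; num_blocks is untrusted input. */
typedef struct LbaStatusDescriptor {
    uint32_t num_blocks;
} LbaStatusDescriptor;

static int64_t capped_status_bytes(const Lun *lun, uint64_t lba,
                                   const LbaStatusDescriptor *desc)
{
    /* Never trust the target: cap at what remains between lba and EOF. */
    uint64_t max_bytes = (lun->num_blocks - lba) * lun->block_size;

    return MIN((int64_t)desc->num_blocks * lun->block_size,
               (int64_t)max_bytes);
}

int main(void)
{
    Lun lun = { .num_blocks = 1024, .block_size = 512 };
    LbaStatusDescriptor evil = { .num_blocks = 1u << 20 }; /* absurdly large */

    /* Querying at LBA 1000 leaves only 24 blocks, so the result is capped. */
    printf("%lld\n", (long long)capped_status_bytes(&lun, 1000, &evil));
    return 0;
}

Compiled and run, this prints 12288: the 24 blocks actually left in the LUN, not the bogus count the descriptor claims.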
# gpg: Signature made Mon 27 Jan 2020 17:49:58 GMT
# gpg: using RSA key 7F09B272C88F2FD6
# gpg: Good signature from "Kevin Wolf <kwolf@redhat.com>" [full]
# Primary key fingerprint: DC3D EB15 9A9A F95D 3D74 56FE 7F09 B272 C88F 2FD6
* remotes/kevin/tags/for-upstream:
iscsi: Don't access non-existent scsi_lba_status_descriptor
iscsi: Cap block count from GET LBA STATUS (CVE-2020-1711)
block/backup: fix memory leak in bdrv_backup_top_append()
iotests: Test handling of AioContexts with some blockdev actions
blockdev: Return bs to the proper context on snapshot abort
blockdev: Acquire AioContext on dirty bitmap functions
block/backup-top: Don't acquire context while dropping top
blockdev: honor bdrv_try_set_aio_context() context requirements
blockdev: unify qmp_blockdev_backup and blockdev-backup transaction paths
blockdev: unify qmp_drive_backup and drive-backup transaction paths
blockdev: fix coding style issues in drive_backup_prepare
iotests: Add more "skip_if_unsupported" statements to the python tests
iotests.py: Let wait_migration wait even more
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
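
Editor's note: several patches in this series ("blockdev: honor bdrv_try_set_aio_context() context requirements" and the mirror/backup hunks) make callers respect the locking contract of bdrv_try_set_aio_context(): it must be entered with the node's current AioContext held and the destination context not held. The sketch below is not QEMU code; AioContext is stubbed as a plain pthread mutex and try_set_aio_context() only swaps a pointer, but it shows the release/acquire shuffle the blockdev.c hunks add around the call.

#include <pthread.h>
#include <stdio.h>

/* Hypothetical stand-ins: each AioContext is just a mutex here. */
typedef pthread_mutex_t AioContext;

static void aio_context_acquire(AioContext *ctx) { pthread_mutex_lock(ctx); }
static void aio_context_release(AioContext *ctx) { pthread_mutex_unlock(ctx); }

typedef struct BDS {
    AioContext *ctx;       /* context the node currently lives in */
} BDS;

/* Must be entered with bs->ctx held and new_ctx NOT held. */
static int try_set_aio_context(BDS *bs, AioContext *new_ctx)
{
    bs->ctx = new_ctx;     /* real code re-parents the node, drains, etc. */
    return 0;
}

/* Caller already holds aio_context (the destination context). */
static int move_target(BDS *target, AioContext *aio_context)
{
    AioContext *old_context = target->ctx;
    int ret;

    /* Swap locks so the callee sees its documented locking state. */
    aio_context_release(aio_context);
    aio_context_acquire(old_context);

    ret = try_set_aio_context(target, aio_context);

    /* Swap back so we leave holding what we held on entry. */
    aio_context_release(old_context);
    aio_context_acquire(aio_context);
    return ret;
}

int main(void)
{
    AioContext main_ctx = PTHREAD_MUTEX_INITIALIZER;
    AioContext iothread_ctx = PTHREAD_MUTEX_INITIALIZER;
    BDS target = { .ctx = &main_ctx };

    aio_context_acquire(&iothread_ctx);          /* as a QMP handler would */
    printf("ret=%d\n", move_target(&target, &iothread_ctx));
    aio_context_release(&iothread_ctx);
    return 0;
}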
-rw-r--r--  block/backup-top.c            |   7
-rw-r--r--  block/backup.c                |   3
-rw-r--r--  block/iscsi.c                 |   7
-rw-r--r--  blockdev.c                    | 393
-rwxr-xr-x  tests/qemu-iotests/030        |   4
-rwxr-xr-x  tests/qemu-iotests/040        |   2
-rwxr-xr-x  tests/qemu-iotests/041        |  39
-rw-r--r--  tests/qemu-iotests/141.out    |   2
-rw-r--r--  tests/qemu-iotests/185.out    |   2
-rwxr-xr-x  tests/qemu-iotests/219        |   7
-rw-r--r--  tests/qemu-iotests/219.out    |   8
-rwxr-xr-x  tests/qemu-iotests/234        |   8
-rw-r--r--  tests/qemu-iotests/245        |   2
-rwxr-xr-x  tests/qemu-iotests/262        |   4
-rwxr-xr-x  tests/qemu-iotests/280        |   2
-rwxr-xr-x  tests/qemu-iotests/281        | 247
-rw-r--r--  tests/qemu-iotests/281.out    |   5
-rw-r--r--  tests/qemu-iotests/group      |   1
-rw-r--r--  tests/qemu-iotests/iotests.py |   6
19 files changed, 510 insertions, 239 deletions
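
Editor's note: of the files above, block/backup-top.c carries the fix titled "block/backup: fix memory leak in bdrv_backup_top_append()". bdrv_new_open_driver() already allocates the driver's per-instance state in bs->opaque, so assigning a freshly allocated BDRVBackupTopState over that pointer leaked the original allocation. Below is a minimal plain-C sketch of the same bug and fix, with made-up types rather than the QEMU ones.

#include <stdlib.h>

typedef struct State { void *target; } State;
typedef struct Node  { void *opaque; } Node;

/* Stand-in for bdrv_new_open_driver(): allocates opaque up front. */
static Node *new_node(size_t instance_size)
{
    Node *n = calloc(1, sizeof(*n));
    n->opaque = calloc(1, instance_size);
    return n;
}

static Node *append_top(void)
{
    Node *top = new_node(sizeof(State));

    /* Buggy version (leaks the allocation made by new_node):
     *     State *state = calloc(1, sizeof(*state));
     *     top->opaque = state;
     * Fixed version: reuse the state that is already there. */
    State *state = top->opaque;
    state->target = NULL;
    return top;
}

int main(void)
{
    Node *top = append_top();
    free(top->opaque);
    free(top);
    return 0;
}

The hunk in the diff below makes exactly this change: "top->opaque = state = g_new0(...)" becomes "state = top->opaque".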
diff --git a/block/backup-top.c b/block/backup-top.c index 818d3f26b4..9aed2eb4c0 100644 --- a/block/backup-top.c +++ b/block/backup-top.c @@ -196,7 +196,7 @@ BlockDriverState *bdrv_backup_top_append(BlockDriverState *source, } top->total_sectors = source->total_sectors; - top->opaque = state = g_new0(BDRVBackupTopState, 1); + state = top->opaque; bdrv_ref(target); state->target = bdrv_attach_child(top, target, "target", &child_file, errp); @@ -255,9 +255,6 @@ append_failed: void bdrv_backup_top_drop(BlockDriverState *bs) { BDRVBackupTopState *s = bs->opaque; - AioContext *aio_context = bdrv_get_aio_context(bs); - - aio_context_acquire(aio_context); bdrv_drained_begin(bs); @@ -271,6 +268,4 @@ void bdrv_backup_top_drop(BlockDriverState *bs) bdrv_drained_end(bs); bdrv_unref(bs); - - aio_context_release(aio_context); } diff --git a/block/backup.c b/block/backup.c index cf62b1a38c..1383e219f5 100644 --- a/block/backup.c +++ b/block/backup.c @@ -135,8 +135,11 @@ static void backup_abort(Job *job) static void backup_clean(Job *job) { BackupBlockJob *s = container_of(job, BackupBlockJob, common.job); + AioContext *aio_context = bdrv_get_aio_context(s->backup_top); + aio_context_acquire(aio_context); bdrv_backup_top_drop(s->backup_top); + aio_context_release(aio_context); } void backup_do_checkpoint(BlockJob *job, Error **errp) diff --git a/block/iscsi.c b/block/iscsi.c index 2aea7e3f13..c8feaa2f0e 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -701,7 +701,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs, struct scsi_get_lba_status *lbas = NULL; struct scsi_lba_status_descriptor *lbasd = NULL; struct IscsiTask iTask; - uint64_t lba; + uint64_t lba, max_bytes; int ret; iscsi_co_init_iscsitask(iscsilun, &iTask); @@ -721,6 +721,7 @@ static int coroutine_fn iscsi_co_block_status(BlockDriverState *bs, } lba = offset / iscsilun->block_size; + max_bytes = (iscsilun->num_blocks - lba) * iscsilun->block_size; qemu_mutex_lock(&iscsilun->mutex); retry: @@ -752,7 +753,7 @@ retry: } lbas = scsi_datain_unmarshall(iTask.task); - if (lbas == NULL) { + if (lbas == NULL || lbas->num_descriptors == 0) { ret = -EIO; goto out_unlock; } @@ -764,7 +765,7 @@ retry: goto out_unlock; } - *pnum = (int64_t) lbasd->num_blocks * iscsilun->block_size; + *pnum = MIN((int64_t) lbasd->num_blocks * iscsilun->block_size, max_bytes); if (lbasd->provisioning == SCSI_PROVISIONING_TYPE_DEALLOCATED || lbasd->provisioning == SCSI_PROVISIONING_TYPE_ANCHORED) { diff --git a/blockdev.c b/blockdev.c index 8e029e9c01..4cd9a58d36 100644 --- a/blockdev.c +++ b/blockdev.c @@ -1535,6 +1535,7 @@ static void external_snapshot_prepare(BlkActionState *common, DO_UPCAST(ExternalSnapshotState, common, common); TransactionAction *action = common->action; AioContext *aio_context; + AioContext *old_context; int ret; /* 'blockdev-snapshot' and 'blockdev-snapshot-sync' have similar @@ -1675,7 +1676,16 @@ static void external_snapshot_prepare(BlkActionState *common, goto out; } + /* Honor bdrv_try_set_aio_context() context acquisition requirements. 
*/ + old_context = bdrv_get_aio_context(state->new_bs); + aio_context_release(aio_context); + aio_context_acquire(old_context); + ret = bdrv_try_set_aio_context(state->new_bs, aio_context, errp); + + aio_context_release(old_context); + aio_context_acquire(aio_context); + if (ret < 0) { goto out; } @@ -1721,6 +1731,8 @@ static void external_snapshot_abort(BlkActionState *common) if (state->new_bs) { if (state->overlay_appended) { AioContext *aio_context; + AioContext *tmp_context; + int ret; aio_context = bdrv_get_aio_context(state->old_bs); aio_context_acquire(aio_context); @@ -1728,6 +1740,25 @@ static void external_snapshot_abort(BlkActionState *common) bdrv_ref(state->old_bs); /* we can't let bdrv_set_backind_hd() close state->old_bs; we need it */ bdrv_set_backing_hd(state->new_bs, NULL, &error_abort); + + /* + * The call to bdrv_set_backing_hd() above returns state->old_bs to + * the main AioContext. As we're still going to be using it, return + * it to the AioContext it was before. + */ + tmp_context = bdrv_get_aio_context(state->old_bs); + if (aio_context != tmp_context) { + aio_context_release(aio_context); + aio_context_acquire(tmp_context); + + ret = bdrv_try_set_aio_context(state->old_bs, + aio_context, NULL); + assert(ret == 0); + + aio_context_release(tmp_context); + aio_context_acquire(aio_context); + } + bdrv_replace_node(state->new_bs, state->old_bs, &error_abort); bdrv_unref(state->old_bs); /* bdrv_replace_node() ref'ed old_bs */ @@ -1761,39 +1792,145 @@ typedef struct DriveBackupState { BlockJob *job; } DriveBackupState; -static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn, - Error **errp); +static BlockJob *do_backup_common(BackupCommon *backup, + BlockDriverState *bs, + BlockDriverState *target_bs, + AioContext *aio_context, + JobTxn *txn, Error **errp); static void drive_backup_prepare(BlkActionState *common, Error **errp) { DriveBackupState *state = DO_UPCAST(DriveBackupState, common, common); - BlockDriverState *bs; DriveBackup *backup; + BlockDriverState *bs; + BlockDriverState *target_bs; + BlockDriverState *source = NULL; AioContext *aio_context; + AioContext *old_context; + QDict *options; Error *local_err = NULL; + int flags; + int64_t size; + bool set_backing_hd = false; + int ret; assert(common->action->type == TRANSACTION_ACTION_KIND_DRIVE_BACKUP); backup = common->action->u.drive_backup.data; + if (!backup->has_mode) { + backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; + } + bs = bdrv_lookup_bs(backup->device, backup->device, errp); if (!bs) { return; } + if (!bs->drv) { + error_setg(errp, "Device has no medium"); + return; + } + aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); /* Paired with .clean() */ bdrv_drained_begin(bs); - state->bs = bs; + if (!backup->has_format) { + backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ? + NULL : (char *) bs->drv->format_name; + } + + /* Early check to avoid creating target */ + if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { + goto out; + } + + flags = bs->open_flags | BDRV_O_RDWR; + + /* + * See if we have a backing HD we can use to create our new image + * on top of. 
+ */ + if (backup->sync == MIRROR_SYNC_MODE_TOP) { + source = backing_bs(bs); + if (!source) { + backup->sync = MIRROR_SYNC_MODE_FULL; + } + } + if (backup->sync == MIRROR_SYNC_MODE_NONE) { + source = bs; + flags |= BDRV_O_NO_BACKING; + set_backing_hd = true; + } + + size = bdrv_getlength(bs); + if (size < 0) { + error_setg_errno(errp, -size, "bdrv_getlength failed"); + goto out; + } + + if (backup->mode != NEW_IMAGE_MODE_EXISTING) { + assert(backup->format); + if (source) { + bdrv_refresh_filename(source); + bdrv_img_create(backup->target, backup->format, source->filename, + source->drv->format_name, NULL, + size, flags, false, &local_err); + } else { + bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL, + size, flags, false, &local_err); + } + } - state->job = do_drive_backup(backup, common->block_job_txn, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } + options = qdict_new(); + qdict_put_str(options, "discard", "unmap"); + qdict_put_str(options, "detect-zeroes", "unmap"); + if (backup->format) { + qdict_put_str(options, "driver", backup->format); + } + + target_bs = bdrv_open(backup->target, NULL, options, flags, errp); + if (!target_bs) { + goto out; + } + + /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + old_context = bdrv_get_aio_context(target_bs); + aio_context_release(aio_context); + aio_context_acquire(old_context); + + ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + if (ret < 0) { + bdrv_unref(target_bs); + aio_context_release(old_context); + return; + } + + aio_context_release(old_context); + aio_context_acquire(aio_context); + + if (set_backing_hd) { + bdrv_set_backing_hd(target_bs, source, &local_err); + if (local_err) { + goto unref; + } + } + + state->bs = bs; + + state->job = do_backup_common(qapi_DriveBackup_base(backup), + bs, target_bs, aio_context, + common->block_job_txn, errp); + +unref: + bdrv_unref(target_bs); out: aio_context_release(aio_context); } @@ -1851,16 +1988,15 @@ typedef struct BlockdevBackupState { BlockJob *job; } BlockdevBackupState; -static BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn, - Error **errp); - static void blockdev_backup_prepare(BlkActionState *common, Error **errp) { BlockdevBackupState *state = DO_UPCAST(BlockdevBackupState, common, common); BlockdevBackup *backup; - BlockDriverState *bs, *target; + BlockDriverState *bs; + BlockDriverState *target_bs; AioContext *aio_context; - Error *local_err = NULL; + AioContext *old_context; + int ret; assert(common->action->type == TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP); backup = common->action->u.blockdev_backup.data; @@ -1870,25 +2006,33 @@ static void blockdev_backup_prepare(BlkActionState *common, Error **errp) return; } - target = bdrv_lookup_bs(backup->target, backup->target, errp); - if (!target) { + target_bs = bdrv_lookup_bs(backup->target, backup->target, errp); + if (!target_bs) { return; } + /* Honor bdrv_try_set_aio_context() context acquisition requirements. 
*/ aio_context = bdrv_get_aio_context(bs); + old_context = bdrv_get_aio_context(target_bs); + aio_context_acquire(old_context); + + ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + if (ret < 0) { + aio_context_release(old_context); + return; + } + + aio_context_release(old_context); aio_context_acquire(aio_context); state->bs = bs; /* Paired with .clean() */ bdrv_drained_begin(state->bs); - state->job = do_blockdev_backup(backup, common->block_job_txn, &local_err); - if (local_err) { - error_propagate(errp, local_err); - goto out; - } + state->job = do_backup_common(qapi_BlockdevBackup_base(backup), + bs, target_bs, aio_context, + common->block_job_txn, errp); -out: aio_context_release(aio_context); } @@ -2861,6 +3005,7 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; + AioContext *aio_context; if (!name || name[0] == '\0') { error_setg(errp, "Bitmap name cannot be empty"); @@ -2872,11 +3017,14 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, return; } + aio_context = bdrv_get_aio_context(bs); + aio_context_acquire(aio_context); + if (has_granularity) { if (granularity < 512 || !is_power_of_2(granularity)) { error_setg(errp, "Granularity must be power of 2 " "and at least 512"); - return; + goto out; } } else { /* Default to cluster size, if available: */ @@ -2894,12 +3042,12 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, if (persistent && !bdrv_can_store_new_dirty_bitmap(bs, name, granularity, errp)) { - return; + goto out; } bitmap = bdrv_create_dirty_bitmap(bs, granularity, name, errp); if (bitmap == NULL) { - return; + goto out; } if (disabled) { @@ -2907,6 +3055,9 @@ void qmp_block_dirty_bitmap_add(const char *node, const char *name, } bdrv_dirty_bitmap_set_persistence(bitmap, persistent); + +out: + aio_context_release(aio_context); } static BdrvDirtyBitmap *do_block_dirty_bitmap_remove( @@ -2915,21 +3066,27 @@ static BdrvDirtyBitmap *do_block_dirty_bitmap_remove( { BlockDriverState *bs; BdrvDirtyBitmap *bitmap; + AioContext *aio_context; bitmap = block_dirty_bitmap_lookup(node, name, &bs, errp); if (!bitmap || !bs) { return NULL; } + aio_context = bdrv_get_aio_context(bs); + aio_context_acquire(aio_context); + if (bdrv_dirty_bitmap_check(bitmap, BDRV_BITMAP_BUSY | BDRV_BITMAP_RO, errp)) { + aio_context_release(aio_context); return NULL; } if (bdrv_dirty_bitmap_get_persistence(bitmap) && bdrv_remove_persistent_dirty_bitmap(bs, name, errp) < 0) { - return NULL; + aio_context_release(aio_context); + return NULL; } if (release) { @@ -2940,6 +3097,7 @@ static BdrvDirtyBitmap *do_block_dirty_bitmap_remove( *bitmap_bs = bs; } + aio_context_release(aio_context); return release ? 
NULL : bitmap; } @@ -3479,7 +3637,6 @@ static BlockJob *do_backup_common(BackupCommon *backup, BlockJob *job = NULL; BdrvDirtyBitmap *bmap = NULL; int job_flags = JOB_DEFAULT; - int ret; if (!backup->has_speed) { backup->speed = 0; @@ -3503,11 +3660,6 @@ static BlockJob *do_backup_common(BackupCommon *backup, backup->compress = false; } - ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); - if (ret < 0) { - return NULL; - } - if ((backup->sync == MIRROR_SYNC_MODE_BITMAP) || (backup->sync == MIRROR_SYNC_MODE_INCREMENTAL)) { /* done before desugaring 'incremental' to print the right message */ @@ -3587,124 +3739,13 @@ static BlockJob *do_backup_common(BackupCommon *backup, return job; } -static BlockJob *do_drive_backup(DriveBackup *backup, JobTxn *txn, - Error **errp) -{ - BlockDriverState *bs; - BlockDriverState *target_bs; - BlockDriverState *source = NULL; - BlockJob *job = NULL; - AioContext *aio_context; - QDict *options; - Error *local_err = NULL; - int flags; - int64_t size; - bool set_backing_hd = false; - - if (!backup->has_mode) { - backup->mode = NEW_IMAGE_MODE_ABSOLUTE_PATHS; - } - - bs = bdrv_lookup_bs(backup->device, backup->device, errp); - if (!bs) { - return NULL; - } - - if (!bs->drv) { - error_setg(errp, "Device has no medium"); - return NULL; - } - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - - if (!backup->has_format) { - backup->format = backup->mode == NEW_IMAGE_MODE_EXISTING ? - NULL : (char*) bs->drv->format_name; - } - - /* Early check to avoid creating target */ - if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) { - goto out; - } - - flags = bs->open_flags | BDRV_O_RDWR; - - /* See if we have a backing HD we can use to create our new image - * on top of. */ - if (backup->sync == MIRROR_SYNC_MODE_TOP) { - source = backing_bs(bs); - if (!source) { - backup->sync = MIRROR_SYNC_MODE_FULL; - } - } - if (backup->sync == MIRROR_SYNC_MODE_NONE) { - source = bs; - flags |= BDRV_O_NO_BACKING; - set_backing_hd = true; - } - - size = bdrv_getlength(bs); - if (size < 0) { - error_setg_errno(errp, -size, "bdrv_getlength failed"); - goto out; - } - - if (backup->mode != NEW_IMAGE_MODE_EXISTING) { - assert(backup->format); - if (source) { - bdrv_refresh_filename(source); - bdrv_img_create(backup->target, backup->format, source->filename, - source->drv->format_name, NULL, - size, flags, false, &local_err); - } else { - bdrv_img_create(backup->target, backup->format, NULL, NULL, NULL, - size, flags, false, &local_err); - } - } - - if (local_err) { - error_propagate(errp, local_err); - goto out; - } - - options = qdict_new(); - qdict_put_str(options, "discard", "unmap"); - qdict_put_str(options, "detect-zeroes", "unmap"); - if (backup->format) { - qdict_put_str(options, "driver", backup->format); - } - - target_bs = bdrv_open(backup->target, NULL, options, flags, errp); - if (!target_bs) { - goto out; - } - - if (set_backing_hd) { - bdrv_set_backing_hd(target_bs, source, &local_err); - if (local_err) { - goto unref; - } - } - - job = do_backup_common(qapi_DriveBackup_base(backup), - bs, target_bs, aio_context, txn, errp); - -unref: - bdrv_unref(target_bs); -out: - aio_context_release(aio_context); - return job; -} - -void qmp_drive_backup(DriveBackup *arg, Error **errp) +void qmp_drive_backup(DriveBackup *backup, Error **errp) { - - BlockJob *job; - job = do_drive_backup(arg, NULL, errp); - if (job) { - job_start(&job->job); - } + TransactionAction action = { + .type = TRANSACTION_ACTION_KIND_DRIVE_BACKUP, + 
.u.drive_backup.data = backup, + }; + blockdev_do_action(&action, errp); } BlockDeviceInfoList *qmp_query_named_block_nodes(Error **errp) @@ -3717,41 +3758,13 @@ XDbgBlockGraph *qmp_x_debug_query_block_graph(Error **errp) return bdrv_get_xdbg_block_graph(errp); } -BlockJob *do_blockdev_backup(BlockdevBackup *backup, JobTxn *txn, - Error **errp) -{ - BlockDriverState *bs; - BlockDriverState *target_bs; - AioContext *aio_context; - BlockJob *job; - - bs = bdrv_lookup_bs(backup->device, backup->device, errp); - if (!bs) { - return NULL; - } - - target_bs = bdrv_lookup_bs(backup->target, backup->target, errp); - if (!target_bs) { - return NULL; - } - - aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); - - job = do_backup_common(qapi_BlockdevBackup_base(backup), - bs, target_bs, aio_context, txn, errp); - - aio_context_release(aio_context); - return job; -} - -void qmp_blockdev_backup(BlockdevBackup *arg, Error **errp) +void qmp_blockdev_backup(BlockdevBackup *backup, Error **errp) { - BlockJob *job; - job = do_blockdev_backup(arg, NULL, errp); - if (job) { - job_start(&job->job); - } + TransactionAction action = { + .type = TRANSACTION_ACTION_KIND_BLOCKDEV_BACKUP, + .u.blockdev_backup.data = backup, + }; + blockdev_do_action(&action, errp); } /* Parameter check and block job starting for drive mirroring. @@ -3881,6 +3894,7 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) BlockDriverState *bs; BlockDriverState *source, *target_bs; AioContext *aio_context; + AioContext *old_context; BlockMirrorBackingMode backing_mode; Error *local_err = NULL; QDict *options = NULL; @@ -3993,12 +4007,22 @@ void qmp_drive_mirror(DriveMirror *arg, Error **errp) (arg->mode == NEW_IMAGE_MODE_EXISTING || !bdrv_has_zero_init(target_bs))); + + /* Honor bdrv_try_set_aio_context() context acquisition requirements. */ + old_context = bdrv_get_aio_context(target_bs); + aio_context_release(aio_context); + aio_context_acquire(old_context); + ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); if (ret < 0) { bdrv_unref(target_bs); - goto out; + aio_context_release(old_context); + return; } + aio_context_release(old_context); + aio_context_acquire(aio_context); + blockdev_mirror_common(arg->has_job_id ? arg->job_id : NULL, bs, target_bs, arg->has_replaces, arg->replaces, arg->sync, backing_mode, zero_target, @@ -4040,6 +4064,7 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, BlockDriverState *bs; BlockDriverState *target_bs; AioContext *aio_context; + AioContext *old_context; BlockMirrorBackingMode backing_mode = MIRROR_LEAVE_BACKING_CHAIN; Error *local_err = NULL; bool zero_target; @@ -4057,10 +4082,16 @@ void qmp_blockdev_mirror(bool has_job_id, const char *job_id, zero_target = (sync == MIRROR_SYNC_MODE_FULL); + /* Honor bdrv_try_set_aio_context() context acquisition requirements. 
*/ + old_context = bdrv_get_aio_context(target_bs); aio_context = bdrv_get_aio_context(bs); - aio_context_acquire(aio_context); + aio_context_acquire(old_context); ret = bdrv_try_set_aio_context(target_bs, aio_context, errp); + + aio_context_release(old_context); + aio_context_acquire(aio_context); + if (ret < 0) { goto out; } diff --git a/tests/qemu-iotests/030 b/tests/qemu-iotests/030 index be35bde06f..0990681c1e 100755 --- a/tests/qemu-iotests/030 +++ b/tests/qemu-iotests/030 @@ -530,6 +530,7 @@ class TestQuorum(iotests.QMPTestCase): children = [] backing = [] + @iotests.skip_if_unsupported(['quorum']) def setUp(self): opts = ['driver=quorum', 'vote-threshold=2'] @@ -560,9 +561,6 @@ class TestQuorum(iotests.QMPTestCase): os.remove(img) def test_stream_quorum(self): - if not iotests.supports_quorum(): - return - self.assertNotEqual(qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.children[0]), qemu_io('-f', iotests.imgfmt, '-rU', '-c', 'map', self.backing[0]), 'image file map matches backing file before streaming') diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040 index 762ad1ebcb..74f62c3c4a 100755 --- a/tests/qemu-iotests/040 +++ b/tests/qemu-iotests/040 @@ -106,6 +106,7 @@ class TestSingleDrive(ImageCommitTestCase): self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xab 0 524288', backing_img).find("verification failed")) self.assertEqual(-1, qemu_io('-f', 'raw', '-c', 'read -P 0xef 524288 524288', backing_img).find("verification failed")) + @iotests.skip_if_unsupported(['throttle']) def test_commit_with_filter_and_quit(self): result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg') self.assert_qmp(result, 'return', {}) @@ -125,6 +126,7 @@ class TestSingleDrive(ImageCommitTestCase): self.has_quit = True # Same as above, but this time we add the filter after starting the job + @iotests.skip_if_unsupported(['throttle']) def test_commit_plus_filter_and_quit(self): result = self.vm.qmp('object-add', qom_type='throttle-group', id='tg') self.assert_qmp(result, 'return', {}) diff --git a/tests/qemu-iotests/041 b/tests/qemu-iotests/041 index d7be30b62b..c07437fda1 100755 --- a/tests/qemu-iotests/041 +++ b/tests/qemu-iotests/041 @@ -871,6 +871,7 @@ class TestRepairQuorum(iotests.QMPTestCase): image_len = 1 * 1024 * 1024 # MB IMAGES = [ quorum_img1, quorum_img2, quorum_img3 ] + @iotests.skip_if_unsupported(['quorum']) def setUp(self): self.vm = iotests.VM() @@ -891,9 +892,8 @@ class TestRepairQuorum(iotests.QMPTestCase): #assemble the quorum block device from the individual files args = { "driver": "quorum", "node-name": "quorum0", "vote-threshold": 2, "children": [ "img0", "img1", "img2" ] } - if iotests.supports_quorum(): - result = self.vm.qmp("blockdev-add", **args) - self.assert_qmp(result, 'return', {}) + result = self.vm.qmp("blockdev-add", **args) + self.assert_qmp(result, 'return', {}) def tearDown(self): @@ -906,9 +906,6 @@ class TestRepairQuorum(iotests.QMPTestCase): pass def test_complete(self): - if not iotests.supports_quorum(): - return - self.assert_no_active_block_jobs() result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', @@ -925,9 +922,6 @@ class TestRepairQuorum(iotests.QMPTestCase): 'target image does not match source after mirroring') def test_cancel(self): - if not iotests.supports_quorum(): - return - self.assert_no_active_block_jobs() result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', @@ -942,9 +936,6 @@ class TestRepairQuorum(iotests.QMPTestCase): self.vm.shutdown() def 
test_cancel_after_ready(self): - if not iotests.supports_quorum(): - return - self.assert_no_active_block_jobs() result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', @@ -961,9 +952,6 @@ class TestRepairQuorum(iotests.QMPTestCase): 'target image does not match source after mirroring') def test_pause(self): - if not iotests.supports_quorum(): - return - self.assert_no_active_block_jobs() result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', @@ -989,9 +977,6 @@ class TestRepairQuorum(iotests.QMPTestCase): 'target image does not match source after mirroring') def test_medium_not_found(self): - if not iotests.supports_quorum(): - return - if iotests.qemu_default_machine != 'pc': return @@ -1003,9 +988,6 @@ class TestRepairQuorum(iotests.QMPTestCase): self.assert_qmp(result, 'error/class', 'GenericError') def test_image_not_found(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', sync='full', node_name='repair0', replaces='img1', mode='existing', target=quorum_repair_img, @@ -1013,9 +995,6 @@ class TestRepairQuorum(iotests.QMPTestCase): self.assert_qmp(result, 'error/class', 'GenericError') def test_device_not_found(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('drive-mirror', job_id='job0', device='nonexistent', sync='full', node_name='repair0', @@ -1024,9 +1003,6 @@ class TestRepairQuorum(iotests.QMPTestCase): self.assert_qmp(result, 'error/class', 'GenericError') def test_wrong_sync_mode(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('drive-mirror', device='quorum0', job_id='job0', node_name='repair0', replaces='img1', @@ -1034,27 +1010,18 @@ class TestRepairQuorum(iotests.QMPTestCase): self.assert_qmp(result, 'error/class', 'GenericError') def test_no_node_name(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', sync='full', replaces='img1', target=quorum_repair_img, format=iotests.imgfmt) self.assert_qmp(result, 'error/class', 'GenericError') def test_nonexistent_replaces(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('drive-mirror', job_id='job0', device='quorum0', sync='full', node_name='repair0', replaces='img77', target=quorum_repair_img, format=iotests.imgfmt) self.assert_qmp(result, 'error/class', 'GenericError') def test_after_a_quorum_snapshot(self): - if not iotests.supports_quorum(): - return - result = self.vm.qmp('blockdev-snapshot-sync', node_name='img1', snapshot_file=quorum_snapshot_file, snapshot_node_name="snap1"); diff --git a/tests/qemu-iotests/141.out b/tests/qemu-iotests/141.out index 3645675ce8..263b680bdf 100644 --- a/tests/qemu-iotests/141.out +++ b/tests/qemu-iotests/141.out @@ -13,6 +13,8 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/m. 
Formatting 'TEST_DIR/o.IMGFMT', fmt=IMGFMT size=1048576 backing_file=TEST_DIR/t.IMGFMT backing_fmt=IMGFMT {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "job0"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "job0"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "job0"}} {'execute': 'blockdev-del', 'arguments': {'node-name': 'drv0'}} {"error": {"class": "GenericError", "desc": "Node 'drv0' is busy: node is used as backing hd of 'NODE_NAME'"}} {'execute': 'block-job-cancel', 'arguments': {'device': 'job0'}} diff --git a/tests/qemu-iotests/185.out b/tests/qemu-iotests/185.out index 8379ac5854..9a3b65782b 100644 --- a/tests/qemu-iotests/185.out +++ b/tests/qemu-iotests/185.out @@ -65,6 +65,8 @@ Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 l Formatting 'TEST_DIR/t.qcow2.copy', fmt=qcow2 size=67108864 cluster_size=65536 lazy_refcounts=off refcount_bits=16 {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "created", "id": "disk"}} {"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "paused", "id": "disk"}} +{"timestamp": {"seconds": TIMESTAMP, "microseconds": TIMESTAMP}, "event": "JOB_STATUS_CHANGE", "data": {"status": "running", "id": "disk"}} {"return": {}} { 'execute': 'quit' } {"return": {}} diff --git a/tests/qemu-iotests/219 b/tests/qemu-iotests/219 index e0c51662c0..655f54d881 100755 --- a/tests/qemu-iotests/219 +++ b/tests/qemu-iotests/219 @@ -63,7 +63,7 @@ def test_pause_resume(vm): # logged immediately iotests.log(vm.qmp('query-jobs')) -def test_job_lifecycle(vm, job, job_args, has_ready=False): +def test_job_lifecycle(vm, job, job_args, has_ready=False, is_mirror=False): global img_size iotests.log('') @@ -135,6 +135,9 @@ def test_job_lifecycle(vm, job, job_args, has_ready=False): iotests.log('Waiting for PENDING state...') iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE'))) iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE'))) + if is_mirror: + iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE'))) + iotests.log(iotests.filter_qmp_event(vm.event_wait('JOB_STATUS_CHANGE'))) if not job_args.get('auto-finalize', True): # PENDING state: @@ -218,7 +221,7 @@ with iotests.FilePath('disk.img') as disk_path, \ for auto_finalize in [True, False]: for auto_dismiss in [True, False]: - test_job_lifecycle(vm, 'drive-backup', job_args={ + test_job_lifecycle(vm, 'drive-backup', is_mirror=True, job_args={ 'device': 'drive0-node', 'target': copy_path, 'sync': 'full', diff --git a/tests/qemu-iotests/219.out b/tests/qemu-iotests/219.out index 8ebd3fee60..0ea5d0b9d5 100644 --- a/tests/qemu-iotests/219.out +++ b/tests/qemu-iotests/219.out @@ -135,6 +135,8 @@ Pause/resume in RUNNING {"return": {}} Waiting for PENDING state... 
+{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} @@ -186,6 +188,8 @@ Pause/resume in RUNNING {"return": {}} Waiting for PENDING state... +{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "concluded"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} @@ -245,6 +249,8 @@ Pause/resume in RUNNING {"return": {}} Waiting for PENDING state... +{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]} @@ -304,6 +310,8 @@ Pause/resume in RUNNING {"return": {}} Waiting for PENDING state... 
+{"data": {"id": "job0", "status": "paused"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} +{"data": {"id": "job0", "status": "running"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "waiting"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"id": "job0", "status": "pending"}, "event": "JOB_STATUS_CHANGE", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"return": [{"current-progress": 4194304, "id": "job0", "status": "pending", "total-progress": 4194304, "type": "backup"}]} diff --git a/tests/qemu-iotests/234 b/tests/qemu-iotests/234 index 34c818c485..59a7f949ec 100755 --- a/tests/qemu-iotests/234 +++ b/tests/qemu-iotests/234 @@ -69,9 +69,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo_a))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - vm_a.wait_migration() + vm_a.wait_migration('postmigrate') # Wait for the destination second (which does not) - vm_b.wait_migration() + vm_b.wait_migration('running') iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) @@ -98,9 +98,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_b.qmp('migrate', uri='exec:cat >%s' % (fifo_b))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - vm_b.wait_migration() + vm_b.wait_migration('postmigrate') # Wait for the destination second (which does not) - vm_a.wait_migration() + vm_a.wait_migration('running') iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) diff --git a/tests/qemu-iotests/245 b/tests/qemu-iotests/245 index e66a23c5f0..d12b253065 100644 --- a/tests/qemu-iotests/245 +++ b/tests/qemu-iotests/245 @@ -478,6 +478,7 @@ class TestBlockdevReopen(iotests.QMPTestCase): # This test verifies that we can't change the children of a block # device during a reopen operation in a way that would create # cycles in the node graph + @iotests.skip_if_unsupported(['blkverify']) def test_graph_cycles(self): opts = [] @@ -534,6 +535,7 @@ class TestBlockdevReopen(iotests.QMPTestCase): self.assert_qmp(result, 'return', {}) # Misc reopen tests with different block drivers + @iotests.skip_if_unsupported(['quorum', 'throttle']) def test_misc_drivers(self): #################### ###### quorum ###### diff --git a/tests/qemu-iotests/262 b/tests/qemu-iotests/262 index 0963daa806..bbcb5260a6 100755 --- a/tests/qemu-iotests/262 +++ b/tests/qemu-iotests/262 @@ -71,9 +71,9 @@ with iotests.FilePath('img') as img_path, \ iotests.log(vm_a.qmp('migrate', uri='exec:cat >%s' % (fifo))) with iotests.Timeout(3, 'Migration does not complete'): # Wait for the source first (which includes setup=setup) - vm_a.wait_migration() + vm_a.wait_migration('postmigrate') # Wait for the destination second (which does not) - vm_b.wait_migration() + vm_b.wait_migration('running') iotests.log(vm_a.qmp('query-migrate')['return']['status']) iotests.log(vm_b.qmp('query-migrate')['return']['status']) diff --git a/tests/qemu-iotests/280 b/tests/qemu-iotests/280 index 0b1fa8e1d8..85e9114c5e 100755 --- a/tests/qemu-iotests/280 +++ b/tests/qemu-iotests/280 @@ -45,7 +45,7 @@ with iotests.FilePath('base') as base_path , \ vm.qmp_log('migrate', uri='exec:cat > 
/dev/null') with iotests.Timeout(3, 'Migration does not complete'): - vm.wait_migration() + vm.wait_migration('postmigrate') iotests.log('\nVM is now stopped:') iotests.log(vm.qmp('query-migrate')['return']['status']) diff --git a/tests/qemu-iotests/281 b/tests/qemu-iotests/281 new file mode 100755 index 0000000000..269d583b2c --- /dev/null +++ b/tests/qemu-iotests/281 @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# +# Test cases for blockdev + IOThread interactions +# +# Copyright (C) 2019 Red Hat, Inc. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. +# + +import os +import iotests +from iotests import qemu_img + +image_len = 64 * 1024 * 1024 + +# Test for RHBZ#1782175 +class TestDirtyBitmapIOThread(iotests.QMPTestCase): + drive0_img = os.path.join(iotests.test_dir, 'drive0.img') + images = { 'drive0': drive0_img } + + def setUp(self): + for name in self.images: + qemu_img('create', '-f', iotests.imgfmt, + self.images[name], str(image_len)) + + self.vm = iotests.VM() + self.vm.add_object('iothread,id=iothread0') + + for name in self.images: + self.vm.add_blockdev('driver=file,filename=%s,node-name=file_%s' + % (self.images[name], name)) + self.vm.add_blockdev('driver=qcow2,file=file_%s,node-name=%s' + % (name, name)) + + self.vm.launch() + self.vm.qmp('x-blockdev-set-iothread', + node_name='drive0', iothread='iothread0', + force=True) + + def tearDown(self): + self.vm.shutdown() + for name in self.images: + os.remove(self.images[name]) + + def test_add_dirty_bitmap(self): + result = self.vm.qmp( + 'block-dirty-bitmap-add', + node='drive0', + name='bitmap1', + persistent=True, + ) + + self.assert_qmp(result, 'return', {}) + + +# Test for RHBZ#1746217 & RHBZ#1773517 +class TestNBDMirrorIOThread(iotests.QMPTestCase): + nbd_sock = os.path.join(iotests.sock_dir, 'nbd.sock') + drive0_img = os.path.join(iotests.test_dir, 'drive0.img') + mirror_img = os.path.join(iotests.test_dir, 'mirror.img') + images = { 'drive0': drive0_img, 'mirror': mirror_img } + + def setUp(self): + for name in self.images: + qemu_img('create', '-f', iotests.imgfmt, + self.images[name], str(image_len)) + + self.vm_src = iotests.VM(path_suffix='src') + self.vm_src.add_object('iothread,id=iothread0') + self.vm_src.add_blockdev('driver=file,filename=%s,node-name=file0' + % (self.drive0_img)) + self.vm_src.add_blockdev('driver=qcow2,file=file0,node-name=drive0') + self.vm_src.launch() + self.vm_src.qmp('x-blockdev-set-iothread', + node_name='drive0', iothread='iothread0', + force=True) + + self.vm_tgt = iotests.VM(path_suffix='tgt') + self.vm_tgt.add_object('iothread,id=iothread0') + self.vm_tgt.add_blockdev('driver=file,filename=%s,node-name=file0' + % (self.mirror_img)) + self.vm_tgt.add_blockdev('driver=qcow2,file=file0,node-name=drive0') + self.vm_tgt.launch() + self.vm_tgt.qmp('x-blockdev-set-iothread', + node_name='drive0', iothread='iothread0', + force=True) + + def tearDown(self): + self.vm_src.shutdown() + self.vm_tgt.shutdown() + 
for name in self.images: + os.remove(self.images[name]) + + def test_nbd_mirror(self): + result = self.vm_tgt.qmp( + 'nbd-server-start', + addr={ + 'type': 'unix', + 'data': { 'path': self.nbd_sock } + } + ) + self.assert_qmp(result, 'return', {}) + + result = self.vm_tgt.qmp( + 'nbd-server-add', + device='drive0', + writable=True + ) + self.assert_qmp(result, 'return', {}) + + result = self.vm_src.qmp( + 'drive-mirror', + device='drive0', + target='nbd+unix:///drive0?socket=' + self.nbd_sock, + sync='full', + mode='existing', + speed=64*1024*1024, + job_id='j1' + ) + self.assert_qmp(result, 'return', {}) + + self.vm_src.event_wait(name="BLOCK_JOB_READY") + + +# Test for RHBZ#1779036 +class TestExternalSnapshotAbort(iotests.QMPTestCase): + drive0_img = os.path.join(iotests.test_dir, 'drive0.img') + snapshot_img = os.path.join(iotests.test_dir, 'snapshot.img') + images = { 'drive0': drive0_img, 'snapshot': snapshot_img } + + def setUp(self): + for name in self.images: + qemu_img('create', '-f', iotests.imgfmt, + self.images[name], str(image_len)) + + self.vm = iotests.VM() + self.vm.add_object('iothread,id=iothread0') + self.vm.add_blockdev('driver=file,filename=%s,node-name=file0' + % (self.drive0_img)) + self.vm.add_blockdev('driver=qcow2,file=file0,node-name=drive0') + self.vm.launch() + self.vm.qmp('x-blockdev-set-iothread', + node_name='drive0', iothread='iothread0', + force=True) + + def tearDown(self): + self.vm.shutdown() + for name in self.images: + os.remove(self.images[name]) + + def test_external_snapshot_abort(self): + # Use a two actions transaction with a bogus values on the second + # one to trigger an abort of the transaction. + result = self.vm.qmp('transaction', actions=[ + { + 'type': 'blockdev-snapshot-sync', + 'data': { 'node-name': 'drive0', + 'snapshot-file': self.snapshot_img, + 'snapshot-node-name': 'snap1', + 'mode': 'absolute-paths', + 'format': 'qcow2' } + }, + { + 'type': 'blockdev-snapshot-sync', + 'data': { 'node-name': 'drive0', + 'snapshot-file': '/fakesnapshot', + 'snapshot-node-name': 'snap2', + 'mode': 'absolute-paths', + 'format': 'qcow2' } + }, + ]) + + # Crashes on failure, we expect this error. + self.assert_qmp(result, 'error/class', 'GenericError') + + +# Test for RHBZ#1782111 +class TestBlockdevBackupAbort(iotests.QMPTestCase): + drive0_img = os.path.join(iotests.test_dir, 'drive0.img') + drive1_img = os.path.join(iotests.test_dir, 'drive1.img') + snap0_img = os.path.join(iotests.test_dir, 'snap0.img') + snap1_img = os.path.join(iotests.test_dir, 'snap1.img') + images = { 'drive0': drive0_img, + 'drive1': drive1_img, + 'snap0': snap0_img, + 'snap1': snap1_img } + + def setUp(self): + for name in self.images: + qemu_img('create', '-f', iotests.imgfmt, + self.images[name], str(image_len)) + + self.vm = iotests.VM() + self.vm.add_object('iothread,id=iothread0') + self.vm.add_device('virtio-scsi,iothread=iothread0') + + for name in self.images: + self.vm.add_blockdev('driver=file,filename=%s,node-name=file_%s' + % (self.images[name], name)) + self.vm.add_blockdev('driver=qcow2,file=file_%s,node-name=%s' + % (name, name)) + + self.vm.add_device('scsi-hd,drive=drive0') + self.vm.add_device('scsi-hd,drive=drive1') + self.vm.launch() + + def tearDown(self): + self.vm.shutdown() + for name in self.images: + os.remove(self.images[name]) + + def test_blockdev_backup_abort(self): + # Use a two actions transaction with a bogus values on the second + # one to trigger an abort of the transaction. 
+ result = self.vm.qmp('transaction', actions=[ + { + 'type': 'blockdev-backup', + 'data': { 'device': 'drive0', + 'target': 'snap0', + 'sync': 'full', + 'job-id': 'j1' } + }, + { + 'type': 'blockdev-backup', + 'data': { 'device': 'drive1', + 'target': 'snap1', + 'sync': 'full' } + }, + ]) + + # Hangs on failure, we expect this error. + self.assert_qmp(result, 'error/class', 'GenericError') + +if __name__ == '__main__': + iotests.main(supported_fmts=['qcow2'], + supported_protocols=['file']) diff --git a/tests/qemu-iotests/281.out b/tests/qemu-iotests/281.out new file mode 100644 index 0000000000..89968f35d7 --- /dev/null +++ b/tests/qemu-iotests/281.out @@ -0,0 +1,5 @@ +.... +---------------------------------------------------------------------- +Ran 4 tests + +OK diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group index cb2b789e44..e041cc1ee3 100644 --- a/tests/qemu-iotests/group +++ b/tests/qemu-iotests/group @@ -288,3 +288,4 @@ 277 rw quick 279 rw backing quick 280 rw migration quick +281 rw quick diff --git a/tests/qemu-iotests/iotests.py b/tests/qemu-iotests/iotests.py index 13fd8b5cd2..0b62c42851 100644 --- a/tests/qemu-iotests/iotests.py +++ b/tests/qemu-iotests/iotests.py @@ -668,12 +668,16 @@ class VM(qtest.QEMUQtestMachine): } ])) - def wait_migration(self): + def wait_migration(self, expect_runstate): while True: event = self.event_wait('MIGRATION') log(event, filters=[filter_qmp_event]) if event['data']['status'] == 'completed': break + # The event may occur in finish-migrate, so wait for the expected + # post-migration runstate + while self.qmp('query-status')['return']['status'] != expect_runstate: + pass def node_info(self, node_name): nodes = self.qmp('query-named-block-nodes') |
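
Editor's note on the two "unify ... transaction paths" patches in this pull: qmp_drive_backup() and qmp_blockdev_backup() no longer carry their own copies of the setup logic; each now wraps its argument in a single-action transaction and hands it to blockdev_do_action(), so the standalone commands and the 'transaction' command share one prepare/commit/abort path and therefore the same AioContext handling. A rough sketch of that shape, with hypothetical types rather than the real QAPI ones:

#include <stdio.h>

typedef struct BackupArgs {
    const char *device;
    const char *target;
} BackupArgs;

typedef struct Action {
    enum { ACTION_DRIVE_BACKUP } type;
    BackupArgs *backup;
} Action;

/* Shared implementation used by 'transaction' for any number of actions. */
static void do_actions(Action *actions, int n)
{
    for (int i = 0; i < n; i++) {
        printf("prepare %s -> %s\n", actions[i].backup->device,
               actions[i].backup->target);
    }
    /* real code would commit all actions, or abort them all on failure */
}

/* Standalone command: just a thin wrapper over the transaction path. */
static void qmp_drive_backup_like(BackupArgs *backup)
{
    Action action = { .type = ACTION_DRIVE_BACKUP, .backup = backup };
    do_actions(&action, 1);
}

int main(void)
{
    BackupArgs args = { .device = "drive0", .target = "backup.qcow2" };
    qmp_drive_backup_like(&args);
    return 0;
}

The payoff is that fixes such as the context-acquisition changes above only need to be made in one place.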