From bb6756895459f181e2f25e877d3d7a10c297b5c8 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Sun, 10 Dec 2017 00:11:13 +0100
Subject: test-bdrv-drain: bdrv_drain() works with cross-AioContext events

As long as nobody keeps the other I/O thread from working, there is no
reason why bdrv_drain() wouldn't work with cross-AioContext events. The
key is that the root request we're waiting for is in the AioContext
we're polling (which it always is for bdrv_drain()) so that aio_poll()
is woken up in the end.

Add a test case that shows that it works. Remove the comment in
bdrv_drain() that claims otherwise.

Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 187 +++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 186 insertions(+), 1 deletion(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index a11c4cfbf2..fb68539d17 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -27,9 +27,13 @@
 #include "block/blockjob_int.h"
 #include "sysemu/block-backend.h"
 #include "qapi/error.h"
+#include "iothread.h"
+
+static QemuEvent done_event;
 
 typedef struct BDRVTestState {
     int drain_count;
+    AioContext *bh_indirection_ctx;
 } BDRVTestState;
 
 static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
@@ -50,16 +54,29 @@ static void bdrv_test_close(BlockDriverState *bs)
     g_assert_cmpint(s->drain_count, >, 0);
 }
 
+static void co_reenter_bh(void *opaque)
+{
+    aio_co_wake(opaque);
+}
+
 static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
                                             uint64_t offset, uint64_t bytes,
                                             QEMUIOVector *qiov, int flags)
 {
+    BDRVTestState *s = bs->opaque;
+
     /* We want this request to stay until the polling loop in drain waits for
      * it to complete. We need to sleep a while as bdrv_drain_invoke() comes
      * first and polls its result, too, but it shouldn't accidentally complete
      * this request yet. */
     qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
 
+    if (s->bh_indirection_ctx) {
+        aio_bh_schedule_oneshot(s->bh_indirection_ctx, co_reenter_bh,
+                                qemu_coroutine_self());
+        qemu_coroutine_yield();
+    }
+
     return 0;
 }
 
@@ -490,6 +507,164 @@ static void test_graph_change(void)
     blk_unref(blk_b);
 }
 
+struct test_iothread_data {
+    BlockDriverState *bs;
+    enum drain_type drain_type;
+    int *aio_ret;
+};
+
+static void test_iothread_drain_entry(void *opaque)
+{
+    struct test_iothread_data *data = opaque;
+
+    aio_context_acquire(bdrv_get_aio_context(data->bs));
+    do_drain_begin(data->drain_type, data->bs);
+    g_assert_cmpint(*data->aio_ret, ==, 0);
+    do_drain_end(data->drain_type, data->bs);
+    aio_context_release(bdrv_get_aio_context(data->bs));
+
+    qemu_event_set(&done_event);
+}
+
+static void test_iothread_aio_cb(void *opaque, int ret)
+{
+    int *aio_ret = opaque;
+    *aio_ret = ret;
+    qemu_event_set(&done_event);
+}
+
+/*
+ * Starts an AIO request on a BDS that runs in the AioContext of iothread 1.
+ * The request involves a BH on iothread 2 before it can complete.
+ *
+ * @drain_thread = 0 means that do_drain_begin/end are called from the main
+ * thread, @drain_thread = 1 means that they are called from iothread 1. Drain
+ * for this BDS cannot be called from iothread 2 because only the main thread
+ * may do cross-AioContext polling.
+ */
+static void test_iothread_common(enum drain_type drain_type, int drain_thread)
+{
+    BlockBackend *blk;
+    BlockDriverState *bs;
+    BDRVTestState *s;
+    BlockAIOCB *acb;
+    int aio_ret;
+    struct test_iothread_data data;
+
+    IOThread *a = iothread_new();
+    IOThread *b = iothread_new();
+    AioContext *ctx_a = iothread_get_aio_context(a);
+    AioContext *ctx_b = iothread_get_aio_context(b);
+
+    QEMUIOVector qiov;
+    struct iovec iov = {
+        .iov_base = NULL,
+        .iov_len = 0,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
+
+    /* bdrv_drain_all() may only be called from the main loop thread */
+    if (drain_type == BDRV_DRAIN_ALL && drain_thread != 0) {
+        goto out;
+    }
+
+    blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    bs = bdrv_new_open_driver(&bdrv_test, "test-node", BDRV_O_RDWR,
+                              &error_abort);
+    s = bs->opaque;
+    blk_insert_bs(blk, bs, &error_abort);
+
+    blk_set_aio_context(blk, ctx_a);
+    aio_context_acquire(ctx_a);
+
+    s->bh_indirection_ctx = ctx_b;
+
+    aio_ret = -EINPROGRESS;
+    if (drain_thread == 0) {
+        acb = blk_aio_preadv(blk, 0, &qiov, 0, test_iothread_aio_cb, &aio_ret);
+    } else {
+        acb = blk_aio_preadv(blk, 0, &qiov, 0, aio_ret_cb, &aio_ret);
+    }
+    g_assert(acb != NULL);
+    g_assert_cmpint(aio_ret, ==, -EINPROGRESS);
+
+    aio_context_release(ctx_a);
+
+    data = (struct test_iothread_data) {
+        .bs         = bs,
+        .drain_type = drain_type,
+        .aio_ret    = &aio_ret,
+    };
+
+    switch (drain_thread) {
+    case 0:
+        if (drain_type != BDRV_DRAIN_ALL) {
+            aio_context_acquire(ctx_a);
+        }
+
+        /* The request is running in IOThread a. Draining its block device
+         * will make sure that it has completed as far as the BDS is concerned,
+         * but the drain in this thread can continue immediately after
+         * bdrv_dec_in_flight() and aio_ret might be assigned only slightly
+         * later. */
+        qemu_event_reset(&done_event);
+        do_drain_begin(drain_type, bs);
+        g_assert_cmpint(bs->in_flight, ==, 0);
+
+        if (drain_type != BDRV_DRAIN_ALL) {
+            aio_context_release(ctx_a);
+        }
+        qemu_event_wait(&done_event);
+        if (drain_type != BDRV_DRAIN_ALL) {
+            aio_context_acquire(ctx_a);
+        }
+
+        g_assert_cmpint(aio_ret, ==, 0);
+        do_drain_end(drain_type, bs);
+
+        if (drain_type != BDRV_DRAIN_ALL) {
+            aio_context_release(ctx_a);
+        }
+        break;
+    case 1:
+        qemu_event_reset(&done_event);
+        aio_bh_schedule_oneshot(ctx_a, test_iothread_drain_entry, &data);
+        qemu_event_wait(&done_event);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    aio_context_acquire(ctx_a);
+    blk_set_aio_context(blk, qemu_get_aio_context());
+    aio_context_release(ctx_a);
+
+    bdrv_unref(bs);
+    blk_unref(blk);
+
+out:
+    iothread_join(a);
+    iothread_join(b);
+}
+
+static void test_iothread_drain_all(void)
+{
+    test_iothread_common(BDRV_DRAIN_ALL, 0);
+    test_iothread_common(BDRV_DRAIN_ALL, 1);
+}
+
+static void test_iothread_drain(void)
+{
+    test_iothread_common(BDRV_DRAIN, 0);
+    test_iothread_common(BDRV_DRAIN, 1);
+}
+
+static void test_iothread_drain_subtree(void)
+{
+    test_iothread_common(BDRV_SUBTREE_DRAIN, 0);
+    test_iothread_common(BDRV_SUBTREE_DRAIN, 1);
+}
+
 
 typedef struct TestBlockJob {
     BlockJob common;
@@ -618,10 +793,13 @@ static void test_blockjob_drain_subtree(void)
 
 int main(int argc, char **argv)
 {
+    int ret;
+
     bdrv_init();
     qemu_init_main_loop(&error_abort);
     g_test_init(&argc, &argv, NULL);
+    qemu_event_init(&done_event, false);
 
     g_test_add_func("/bdrv-drain/driver-cb/drain_all", test_drv_cb_drain_all);
     g_test_add_func("/bdrv-drain/driver-cb/drain", test_drv_cb_drain);
@@ -648,10 +826,17 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
     g_test_add_func("/bdrv-drain/graph-change", test_graph_change);
 
+    g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
+    g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
+    g_test_add_func("/bdrv-drain/iothread/drain_subtree",
+                    test_iothread_drain_subtree);
+
     g_test_add_func("/bdrv-drain/blockjob/drain_all", test_blockjob_drain_all);
     g_test_add_func("/bdrv-drain/blockjob/drain", test_blockjob_drain);
     g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
                     test_blockjob_drain_subtree);
 
-    return g_test_run();
+    ret = g_test_run();
+    qemu_event_destroy(&done_event);
+    return ret;
 }
--
cgit v1.2.3


From 79ab8b21dc19c08adc407504e456ff64b9dacb66 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Thu, 14 Dec 2017 10:27:23 +0100
Subject: block: Use bdrv_do_drain_begin/end in bdrv_drain_all()

bdrv_do_drain_begin/end() already implement everything that
bdrv_drain_all_begin/end() need and currently still do manually:
Disable external events, call parent drain callbacks, call block driver
callbacks.

It also does two more things:

The first is incrementing bs->quiesce_counter. bdrv_drain_all() already
stood out in the test case by behaving differently from the other drain
variants. Adding this is not only safe, but in fact a bug fix.

The second is calling bdrv_drain_recurse(). We already do that later in
the same function in a loop, so basically doing an early first iteration
doesn't hurt.
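
For illustration, the begin side then reduces to roughly the following
loop. This is a simplified sketch, not the literal block.c code: locking
details, error handling and the final wait loop are elided, and the real
function signatures differ slightly:

    void bdrv_drain_all_begin(void)
    {
        BlockDriverState *bs;
        BdrvNextIterator it;

        for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
            AioContext *aio_context = bdrv_get_aio_context(bs);

            aio_context_acquire(aio_context);
            /* quiesce_counter++, parent callbacks, driver callbacks,
             * recursion -- all handled in one place now */
            bdrv_do_drained_begin(bs);
            aio_context_release(aio_context);
        }

        /* ... then wait until all pending requests have completed ... */
    }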
Signed-off-by: Kevin Wolf
Reviewed-by: Stefan Hajnoczi
---
 tests/test-bdrv-drain.c | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index fb68539d17..c7f107142c 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -276,8 +276,7 @@ static void test_quiesce_common(enum drain_type drain_type, bool recursive)
 
 static void test_quiesce_drain_all(void)
 {
-    // XXX drain_all doesn't quiesce
-    //test_quiesce_common(BDRV_DRAIN_ALL, true);
+    test_quiesce_common(BDRV_DRAIN_ALL, true);
 }
 
 static void test_quiesce_drain(void)
@@ -319,12 +318,7 @@ static void test_nested(void)
 
     for (outer = 0; outer < DRAIN_TYPE_MAX; outer++) {
         for (inner = 0; inner < DRAIN_TYPE_MAX; inner++) {
-            /* XXX bdrv_drain_all() doesn't increase the quiesce_counter */
-            int bs_quiesce      = (outer != BDRV_DRAIN_ALL) +
-                                  (inner != BDRV_DRAIN_ALL);
-            int backing_quiesce = (outer == BDRV_SUBTREE_DRAIN) +
-                                  (inner == BDRV_SUBTREE_DRAIN);
-            int backing_cb_cnt  = (outer != BDRV_DRAIN) +
+            int backing_quiesce = (outer != BDRV_DRAIN) +
                                   (inner != BDRV_DRAIN);
 
             g_assert_cmpint(bs->quiesce_counter, ==, 0);
@@ -335,10 +329,10 @@ static void test_nested(void)
             do_drain_begin(outer, bs);
             do_drain_begin(inner, bs);
 
-            g_assert_cmpint(bs->quiesce_counter, ==, bs_quiesce);
+            g_assert_cmpint(bs->quiesce_counter, ==, 2);
             g_assert_cmpint(backing->quiesce_counter, ==, backing_quiesce);
             g_assert_cmpint(s->drain_count, ==, 2);
-            g_assert_cmpint(backing_s->drain_count, ==, backing_cb_cnt);
+            g_assert_cmpint(backing_s->drain_count, ==, backing_quiesce);
 
             do_drain_end(inner, bs);
             do_drain_end(outer, bs);
--
cgit v1.2.3


From 6d0252f2f9cb49925deb1c41101462c9481dfc90 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Wed, 4 Apr 2018 13:26:16 +0200
Subject: tests/test-bdrv-drain: bdrv_drain_all() works in coroutines now

Since we use bdrv_do_drained_begin/end() for bdrv_drain_all_begin/end(),
coroutine context is automatically left with a BH, preventing the
deadlocks that made bdrv_drain_all*() unsafe in coroutine context. Now
that we even removed the old polling code as dead code, it's obvious
that it's compatible now.

Enable the coroutine test cases for bdrv_drain_all().
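
The mechanism can be sketched like this (simplified; the real
bdrv_do_drained_begin() takes more parameters, and the yield helper is
an internal detail whose exact signature differs):

    /* Sketch of the entry check in bdrv_do_drained_begin() */
    if (qemu_in_coroutine()) {
        /* Schedule a BH in the main loop, yield, and let the BH do the
         * actual polling drain; the coroutine is reentered once the
         * drained section has been entered. */
        bdrv_co_yield_to_drain(bs);
        return;
    }
    /* Outside of coroutine context: poll with BDRV_POLL_WHILE() */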
Signed-off-by: Kevin Wolf
Reviewed-by: Stefan Hajnoczi
---
 tests/test-bdrv-drain.c | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index c7f107142c..cc03bc171b 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -233,6 +233,11 @@ static void test_drv_cb_drain_subtree(void)
     test_drv_cb_common(BDRV_SUBTREE_DRAIN, true);
 }
 
+static void test_drv_cb_co_drain_all(void)
+{
+    call_in_coroutine(test_drv_cb_drain_all);
+}
+
 static void test_drv_cb_co_drain(void)
 {
     call_in_coroutine(test_drv_cb_drain);
@@ -289,6 +294,11 @@ static void test_quiesce_drain_subtree(void)
     test_quiesce_common(BDRV_SUBTREE_DRAIN, true);
 }
 
+static void test_quiesce_co_drain_all(void)
+{
+    call_in_coroutine(test_quiesce_drain_all);
+}
+
 static void test_quiesce_co_drain(void)
 {
     call_in_coroutine(test_quiesce_drain);
@@ -800,7 +810,8 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/driver-cb/drain_subtree",
                     test_drv_cb_drain_subtree);
 
-    // XXX bdrv_drain_all() doesn't work in coroutine context
+    g_test_add_func("/bdrv-drain/driver-cb/co/drain_all",
+                    test_drv_cb_co_drain_all);
     g_test_add_func("/bdrv-drain/driver-cb/co/drain", test_drv_cb_co_drain);
     g_test_add_func("/bdrv-drain/driver-cb/co/drain_subtree",
                     test_drv_cb_co_drain_subtree);
@@ -811,7 +822,8 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/quiesce/drain_subtree",
                     test_quiesce_drain_subtree);
 
-    // XXX bdrv_drain_all() doesn't work in coroutine context
+    g_test_add_func("/bdrv-drain/quiesce/co/drain_all",
+                    test_quiesce_co_drain_all);
     g_test_add_func("/bdrv-drain/quiesce/co/drain", test_quiesce_co_drain);
     g_test_add_func("/bdrv-drain/quiesce/co/drain_subtree",
                     test_quiesce_co_drain_subtree);
--
cgit v1.2.3


From 89bd030533e3592ca0a995450dcfc5d53e459e20 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Thu, 22 Mar 2018 14:11:20 +0100
Subject: block: Really pause block jobs on drain

We already requested that block jobs be paused in .bdrv_drained_begin,
but no guarantee was made that the job was actually inactive at the
point where bdrv_drained_begin() returned.

This introduces a new callback BdrvChildRole.bdrv_drained_poll() and
uses it to make bdrv_drain_poll() consider block jobs using the node to
be drained.

For the test case to work as expected, we have to switch from
block_job_sleep_ns() to qemu_co_sleep_ns() so that the test job is even
considered active and must be waited for when draining the node.

Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index cc03bc171b..22d31c953e 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -686,7 +686,11 @@ static void coroutine_fn test_job_start(void *opaque)
 
     job_transition_to_ready(&s->common.job);
     while (!s->should_complete) {
-        job_sleep_ns(&s->common.job, 100000);
+        /* Avoid block_job_sleep_ns() because it marks the job as !busy. We
+         * want to emulate some actual activity (probably some I/O) here so
+         * that drain has to wait for this activity to stop. */
+        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
+        job_pause_point(&s->common.job);
     }
 
     job_defer_to_main_loop(&s->common.job, test_job_completed, NULL);
@@ -733,7 +737,7 @@ static void test_blockjob_common(enum drain_type drain_type)
 
     g_assert_cmpint(job->job.pause_count, ==, 0);
     g_assert_false(job->job.paused);
-    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
+    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
 
     do_drain_begin(drain_type, src);
 
@@ -743,15 +747,14 @@ static void test_blockjob_common(enum drain_type drain_type)
     } else {
         g_assert_cmpint(job->job.pause_count, ==, 1);
     }
-    /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->job.paused); */
+    g_assert_true(job->job.paused);
     g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, src);
 
     g_assert_cmpint(job->job.pause_count, ==, 0);
     g_assert_false(job->job.paused);
-    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
+    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
 
     do_drain_begin(drain_type, target);
 
@@ -761,15 +764,14 @@ static void test_blockjob_common(enum drain_type drain_type)
     } else {
         g_assert_cmpint(job->job.pause_count, ==, 1);
     }
-    /* XXX We don't wait until the job is actually paused. Is this okay? */
-    /* g_assert_true(job->job.paused); */
+    g_assert_true(job->job.paused);
    g_assert_false(job->job.busy); /* The job is paused */
 
     do_drain_end(drain_type, target);
 
     g_assert_cmpint(job->job.pause_count, ==, 0);
     g_assert_false(job->job.paused);
-    g_assert_false(job->job.busy); /* We're in job_sleep_ns() */
+    g_assert_true(job->job.busy); /* We're in qemu_co_sleep_ns() */
 
     ret = job_complete_sync(&job->job, &error_abort);
     g_assert_cmpint(ret, ==, 0);
--
cgit v1.2.3


From 4c8158e359d194394c64acd21caf5e3f3f3141c2 Mon Sep 17 00:00:00 2001
From: Max Reitz
Date: Wed, 28 Feb 2018 19:04:54 +0100
Subject: test-bdrv-drain: Add test for node deletion

This patch adds two bdrv-drain tests for what happens if some BDS goes
away during the drainage.

The basic idea is that you have a parent BDS with some child nodes.
Then, you drain one of the children. Because of that, the party who
actually owns the parent decides to (A) delete it, or (B) detach all
its children from it -- both while the child is still being drained.

A real-world case where this can happen is the mirror block job, which
may exit if you drain one of its children.
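
The graph used by the test below looks like this; the BlockBackend holds
the only long-term reference to the top node, and all reads are
forwarded to the stalling "wait" child:

    BlockBackend
         |
        top (bdrv_test_top_driver)
       /  |  \
   null   |   null
          |
     wait-child (bdrv_test, stalls requests until a drain occurs)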
Signed-off-by: Max Reitz
Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 169 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 169 insertions(+)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 22d31c953e..e8a5515b08 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -797,6 +797,172 @@ static void test_blockjob_drain_subtree(void)
     test_blockjob_common(BDRV_SUBTREE_DRAIN);
 }
 
+
+typedef struct BDRVTestTopState {
+    BdrvChild *wait_child;
+} BDRVTestTopState;
+
+static void bdrv_test_top_close(BlockDriverState *bs)
+{
+    BdrvChild *c, *next_c;
+    QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
+        bdrv_unref_child(bs, c);
+    }
+}
+
+static int coroutine_fn bdrv_test_top_co_preadv(BlockDriverState *bs,
+                                                uint64_t offset, uint64_t bytes,
+                                                QEMUIOVector *qiov, int flags)
+{
+    BDRVTestTopState *tts = bs->opaque;
+    return bdrv_co_preadv(tts->wait_child, offset, bytes, qiov, flags);
+}
+
+static BlockDriver bdrv_test_top_driver = {
+    .format_name            = "test_top_driver",
+    .instance_size          = sizeof(BDRVTestTopState),
+
+    .bdrv_close             = bdrv_test_top_close,
+    .bdrv_co_preadv         = bdrv_test_top_co_preadv,
+
+    .bdrv_child_perm        = bdrv_format_default_perms,
+};
+
+typedef struct TestCoDeleteByDrainData {
+    BlockBackend *blk;
+    bool detach_instead_of_delete;
+    bool done;
+} TestCoDeleteByDrainData;
+
+static void coroutine_fn test_co_delete_by_drain(void *opaque)
+{
+    TestCoDeleteByDrainData *dbdd = opaque;
+    BlockBackend *blk = dbdd->blk;
+    BlockDriverState *bs = blk_bs(blk);
+    BDRVTestTopState *tts = bs->opaque;
+    void *buffer = g_malloc(65536);
+    QEMUIOVector qiov;
+    struct iovec iov = {
+        .iov_base = buffer,
+        .iov_len  = 65536,
+    };
+
+    qemu_iovec_init_external(&qiov, &iov, 1);
+
+    /* Pretend some internal write operation from parent to child.
+     * Important: We have to read from the child, not from the parent!
+     * Draining works by first propagating it all up the tree to the
+     * root and then waiting for drainage from root to the leaves
+     * (protocol nodes). If we have a request waiting on the root,
+     * everything will be drained before we go back down the tree, but
+     * we do not want that. We want to be in the middle of draining
+     * when the following request returns. */
+    bdrv_co_preadv(tts->wait_child, 0, 65536, &qiov, 0);
+
+    g_assert_cmpint(bs->refcnt, ==, 1);
+
+    if (!dbdd->detach_instead_of_delete) {
+        blk_unref(blk);
+    } else {
+        BdrvChild *c, *next_c;
+        QLIST_FOREACH_SAFE(c, &bs->children, next, next_c) {
+            bdrv_unref_child(bs, c);
+        }
+    }
+
+    dbdd->done = true;
+}
+
+/**
+ * Test what happens when some BDS has some children, you drain one of
+ * them and this results in the BDS being deleted.
+ *
+ * If @detach_instead_of_delete is set, the BDS is not going to be
+ * deleted but will only detach all of its children.
+ */
+static void do_test_delete_by_drain(bool detach_instead_of_delete)
+{
+    BlockBackend *blk;
+    BlockDriverState *bs, *child_bs, *null_bs;
+    BDRVTestTopState *tts;
+    TestCoDeleteByDrainData dbdd;
+    Coroutine *co;
+
+    bs = bdrv_new_open_driver(&bdrv_test_top_driver, "top", BDRV_O_RDWR,
+                              &error_abort);
+    bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
+    tts = bs->opaque;
+
+    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
+                        &error_abort);
+    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);
+
+    /* This child will be the one to pass requests through to, and
+     * it will stall until a drain occurs */
+    child_bs = bdrv_new_open_driver(&bdrv_test, "child", BDRV_O_RDWR,
+                                    &error_abort);
+    child_bs->total_sectors = 65536 >> BDRV_SECTOR_BITS;
+    /* Takes our reference to child_bs */
+    tts->wait_child = bdrv_attach_child(bs, child_bs, "wait-child", &child_file,
+                                        &error_abort);
+
+    /* This child is just there to be deleted
+     * (for detach_instead_of_delete == true) */
+    null_bs = bdrv_open("null-co://", NULL, NULL, BDRV_O_RDWR | BDRV_O_PROTOCOL,
+                        &error_abort);
+    bdrv_attach_child(bs, null_bs, "null-child", &child_file, &error_abort);
+
+    blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    blk_insert_bs(blk, bs, &error_abort);
+
+    /* Referenced by blk now */
+    bdrv_unref(bs);
+
+    g_assert_cmpint(bs->refcnt, ==, 1);
+    g_assert_cmpint(child_bs->refcnt, ==, 1);
+    g_assert_cmpint(null_bs->refcnt, ==, 1);
+
+
+    dbdd = (TestCoDeleteByDrainData){
+        .blk = blk,
+        .detach_instead_of_delete = detach_instead_of_delete,
+        .done = false,
+    };
+    co = qemu_coroutine_create(test_co_delete_by_drain, &dbdd);
+    qemu_coroutine_enter(co);
+
+    /* Drain the child while the read operation is still pending.
+     * This should result in the operation finishing and
+     * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
+     * and the coroutine will exit while this drain operation is still
+     * in progress. */
+    bdrv_ref(child_bs);
+    bdrv_drain(child_bs);
+    bdrv_unref(child_bs);
+
+    while (!dbdd.done) {
+        aio_poll(qemu_get_aio_context(), true);
+    }
+
+    if (detach_instead_of_delete) {
+        /* Here, the reference has not passed over to the coroutine,
+         * so we have to delete the BB ourselves */
+        blk_unref(blk);
+    }
+}
+
+
+static void test_delete_by_drain(void)
+{
+    do_test_delete_by_drain(false);
+}
+
+static void test_detach_by_drain(void)
+{
+    do_test_delete_by_drain(true);
+}
+
+
 int main(int argc, char **argv)
 {
     int ret;
@@ -844,6 +1010,9 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
                     test_blockjob_drain_subtree);
 
+    g_test_add_func("/bdrv-drain/deletion", test_delete_by_drain);
+    g_test_add_func("/bdrv-drain/detach", test_detach_by_drain);
+
     ret = g_test_run();
     qemu_event_destroy(&done_event);
     return ret;
--
cgit v1.2.3


From ebd31837618cdc7bda83090773dcdd87475d55b7 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Fri, 23 Mar 2018 12:40:21 +0100
Subject: test-bdrv-drain: Test node deletion in subtree recursion

If bdrv_do_drained_begin() polls during its subtree recursion, the
graph can change and mess up the bs->children iteration. Test that this
doesn't happen.
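
The hazard is an iteration of this form inside the recursive drain
(a sketch only, not the literal block.c loop):

    BdrvChild *child, *tmp;

    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
        /* If this call polls, a BH may detach or delete children and
         * invalidate both 'child' and 'tmp' under our feet. */
        bdrv_do_drained_begin(child->bs);
    }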
Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 38 +++++++++++++++++++++++++++++---------
 1 file changed, 29 insertions(+), 9 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index e8a5515b08..0c8f632f2d 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -880,7 +880,8 @@ static void coroutine_fn test_co_delete_by_drain(void *opaque)
  * If @detach_instead_of_delete is set, the BDS is not going to be
  * deleted but will only detach all of its children.
  */
-static void do_test_delete_by_drain(bool detach_instead_of_delete)
+static void do_test_delete_by_drain(bool detach_instead_of_delete,
+                                    enum drain_type drain_type)
 {
     BlockBackend *blk;
     BlockDriverState *bs, *child_bs, *null_bs;
@@ -936,9 +937,23 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete)
      * test_co_delete_by_drain() resuming. Thus, @bs will be deleted
      * and the coroutine will exit while this drain operation is still
      * in progress. */
-    bdrv_ref(child_bs);
-    bdrv_drain(child_bs);
-    bdrv_unref(child_bs);
+    switch (drain_type) {
+    case BDRV_DRAIN:
+        bdrv_ref(child_bs);
+        bdrv_drain(child_bs);
+        bdrv_unref(child_bs);
+        break;
+    case BDRV_SUBTREE_DRAIN:
+        /* Would have to ref/unref bs here for !detach_instead_of_delete, but
+         * then the whole test becomes pointless because the graph changes
+         * don't occur during the drain any more. */
+        assert(detach_instead_of_delete);
+        bdrv_subtree_drained_begin(bs);
+        bdrv_subtree_drained_end(bs);
+        break;
+    default:
+        g_assert_not_reached();
+    }
 
     while (!dbdd.done) {
         aio_poll(qemu_get_aio_context(), true);
@@ -951,15 +966,19 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete)
     }
 }
 
-
 static void test_delete_by_drain(void)
 {
-    do_test_delete_by_drain(false);
+    do_test_delete_by_drain(false, BDRV_DRAIN);
 }
 
 static void test_detach_by_drain(void)
 {
-    do_test_delete_by_drain(true);
+    do_test_delete_by_drain(true, BDRV_DRAIN);
+}
+
+static void test_detach_by_drain_subtree(void)
+{
+    do_test_delete_by_drain(true, BDRV_SUBTREE_DRAIN);
 }
 
 
@@ -1010,8 +1029,9 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/blockjob/drain_subtree",
                     test_blockjob_drain_subtree);
 
-    g_test_add_func("/bdrv-drain/deletion", test_delete_by_drain);
-    g_test_add_func("/bdrv-drain/detach", test_detach_by_drain);
+    g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
+    g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
+    g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
 
     ret = g_test_run();
     qemu_event_destroy(&done_event);
--
cgit v1.2.3


From 231281ab42dad2b407b941e36ad11cbc6586e937 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Fri, 23 Mar 2018 18:48:39 +0100
Subject: test-bdrv-drain: Graph change through parent callback

Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 130 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 130 insertions(+)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 0c8f632f2d..abb287e597 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -982,6 +982,135 @@ static void test_detach_by_drain_subtree(void)
 }
 
 
+struct detach_by_parent_data {
+    BlockDriverState *parent_b;
+    BdrvChild *child_b;
+    BlockDriverState *c;
+    BdrvChild *child_c;
+};
+
+static void detach_by_parent_aio_cb(void *opaque, int ret)
+{
+    struct detach_by_parent_data *data = opaque;
+
+    g_assert_cmpint(ret, ==, 0);
+    bdrv_unref_child(data->parent_b, data->child_b);
+
+    bdrv_ref(data->c);
+    data->child_c = bdrv_attach_child(data->parent_b, data->c, "PB-C",
+                                      &child_file, &error_abort);
+}
+
+/*
+ * Initial graph:
+ *
+ * PA     PB
+ *    \ /   \
+ *     A     B     C
+ *
+ * PA has a pending write request whose callback changes the child nodes of PB:
+ * It removes B and adds C instead. The subtree of PB is drained, which will
+ * indirectly drain the write request, too.
+ */
+static void test_detach_by_parent_cb(void)
+{
+    BlockBackend *blk;
+    BlockDriverState *parent_a, *parent_b, *a, *b, *c;
+    BdrvChild *child_a, *child_b;
+    BlockAIOCB *acb;
+    struct detach_by_parent_data data;
+
+    QEMUIOVector qiov;
+    struct iovec iov = {
+        .iov_base = NULL,
+        .iov_len = 0,
+    };
+    qemu_iovec_init_external(&qiov, &iov, 1);
+
+    /* Create all involved nodes */
+    parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
+                                    &error_abort);
+    parent_b = bdrv_new_open_driver(&bdrv_test, "parent-b", 0,
+                                    &error_abort);
+
+    a = bdrv_new_open_driver(&bdrv_test, "a", BDRV_O_RDWR, &error_abort);
+    b = bdrv_new_open_driver(&bdrv_test, "b", BDRV_O_RDWR, &error_abort);
+    c = bdrv_new_open_driver(&bdrv_test, "c", BDRV_O_RDWR, &error_abort);
+
+    /* blk is a BB for parent-a */
+    blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    blk_insert_bs(blk, parent_a, &error_abort);
+    bdrv_unref(parent_a);
+
+    /* Set child relationships */
+    bdrv_ref(b);
+    bdrv_ref(a);
+    child_b = bdrv_attach_child(parent_b, b, "PB-B", &child_file, &error_abort);
+    child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort);
+
+    bdrv_ref(a);
+    bdrv_attach_child(parent_a, a, "PA-A", &child_file, &error_abort);
+
+    g_assert_cmpint(parent_a->refcnt, ==, 1);
+    g_assert_cmpint(parent_b->refcnt, ==, 1);
+    g_assert_cmpint(a->refcnt, ==, 3);
+    g_assert_cmpint(b->refcnt, ==, 2);
+    g_assert_cmpint(c->refcnt, ==, 1);
+
+    g_assert(QLIST_FIRST(&parent_b->children) == child_a);
+    g_assert(QLIST_NEXT(child_a, next) == child_b);
+    g_assert(QLIST_NEXT(child_b, next) == NULL);
+
+    /* Start the evil write request */
+    data = (struct detach_by_parent_data) {
+        .parent_b = parent_b,
+        .child_b = child_b,
+        .c = c,
+    };
+    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, &data);
+    g_assert(acb != NULL);
+
+    /* Drain and check the expected result */
+    bdrv_subtree_drained_begin(parent_b);
+
+    g_assert(data.child_c != NULL);
+
+    g_assert_cmpint(parent_a->refcnt, ==, 1);
+    g_assert_cmpint(parent_b->refcnt, ==, 1);
+    g_assert_cmpint(a->refcnt, ==, 3);
+    g_assert_cmpint(b->refcnt, ==, 1);
+    g_assert_cmpint(c->refcnt, ==, 2);
+
+    g_assert(QLIST_FIRST(&parent_b->children) == data.child_c);
+    g_assert(QLIST_NEXT(data.child_c, next) == child_a);
+    g_assert(QLIST_NEXT(child_a, next) == NULL);
+
+    g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
+    g_assert_cmpint(parent_b->quiesce_counter, ==, 1);
+    g_assert_cmpint(a->quiesce_counter, ==, 1);
+    g_assert_cmpint(b->quiesce_counter, ==, 0);
+    g_assert_cmpint(c->quiesce_counter, ==, 1);
+
+    bdrv_subtree_drained_end(parent_b);
+
+    bdrv_unref(parent_b);
+    blk_unref(blk);
+
+    /* XXX Once bdrv_close() unref's children instead of just detaching them,
+     * this won't be necessary any more. */
+    bdrv_unref(a);
+    bdrv_unref(a);
+    bdrv_unref(c);
+
+    g_assert_cmpint(a->refcnt, ==, 1);
+    g_assert_cmpint(b->refcnt, ==, 1);
+    g_assert_cmpint(c->refcnt, ==, 1);
+    bdrv_unref(a);
+    bdrv_unref(b);
+    bdrv_unref(c);
+}
+
+
 int main(int argc, char **argv)
 {
     int ret;
@@ -1032,6 +1161,7 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
     g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
     g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
+    g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
 
     ret = g_test_run();
     qemu_event_destroy(&done_event);
--
cgit v1.2.3


From 57320ca961c2e8488e1884b4ebbcb929b6901dc6 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Fri, 23 Mar 2018 20:10:52 +0100
Subject: test-bdrv-drain: Test that bdrv_drain_invoke() doesn't poll

This adds a test case that goes wrong if bdrv_drain_invoke() calls
aio_poll().

Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 102 +++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 88 insertions(+), 14 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index abb287e597..5cc179a778 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -34,12 +34,16 @@ static QemuEvent done_event;
 typedef struct BDRVTestState {
     int drain_count;
     AioContext *bh_indirection_ctx;
+    bool sleep_in_drain_begin;
 } BDRVTestState;
 
 static void coroutine_fn bdrv_test_co_drain_begin(BlockDriverState *bs)
 {
     BDRVTestState *s = bs->opaque;
     s->drain_count++;
+    if (s->sleep_in_drain_begin) {
+        qemu_co_sleep_ns(QEMU_CLOCK_REALTIME, 100000);
+    }
 }
 
 static void coroutine_fn bdrv_test_co_drain_end(BlockDriverState *bs)
@@ -80,6 +84,22 @@ static int coroutine_fn bdrv_test_co_preadv(BlockDriverState *bs,
     return 0;
 }
 
+static void bdrv_test_child_perm(BlockDriverState *bs, BdrvChild *c,
+                                 const BdrvChildRole *role,
+                                 BlockReopenQueue *reopen_queue,
+                                 uint64_t perm, uint64_t shared,
+                                 uint64_t *nperm, uint64_t *nshared)
+{
+    /* bdrv_format_default_perms() accepts only these two, so disguise
+     * detach_by_driver_cb_role as one of them. */
+    if (role != &child_file && role != &child_backing) {
+        role = &child_file;
+    }
+
+    bdrv_format_default_perms(bs, c, role, reopen_queue, perm, shared,
+                              nperm, nshared);
+}
+
 static BlockDriver bdrv_test = {
     .format_name            = "test",
     .instance_size          = sizeof(BDRVTestState),
@@ -90,7 +110,7 @@ static BlockDriver bdrv_test = {
     .bdrv_co_drain_begin    = bdrv_test_co_drain_begin,
     .bdrv_co_drain_end      = bdrv_test_co_drain_end,
 
-    .bdrv_child_perm        = bdrv_format_default_perms,
+    .bdrv_child_perm        = bdrv_test_child_perm,
 };
 
 static void aio_ret_cb(void *opaque, int ret)
@@ -987,13 +1007,14 @@ struct detach_by_parent_data {
     BdrvChild *child_b;
     BlockDriverState *c;
     BdrvChild *child_c;
+    bool by_parent_cb;
 };
+static struct detach_by_parent_data detach_by_parent_data;
 
-static void detach_by_parent_aio_cb(void *opaque, int ret)
+static void detach_indirect_bh(void *opaque)
 {
     struct detach_by_parent_data *data = opaque;
 
-    g_assert_cmpint(ret, ==, 0);
     bdrv_unref_child(data->parent_b, data->child_b);
 
     bdrv_ref(data->c);
@@ -1001,6 +1022,25 @@ static void detach_by_parent_aio_cb(void *opaque, int ret)
                                       &child_file, &error_abort);
 }
 
+static void detach_by_parent_aio_cb(void *opaque, int ret)
+{
+    struct detach_by_parent_data *data = &detach_by_parent_data;
+
+    g_assert_cmpint(ret, ==, 0);
+    if (data->by_parent_cb) {
+        detach_indirect_bh(data);
+    }
+}
+
+static void detach_by_driver_cb_drained_begin(BdrvChild *child)
+{
+    aio_bh_schedule_oneshot(qemu_get_current_aio_context(),
+                            detach_indirect_bh, &detach_by_parent_data);
+    child_file.drained_begin(child);
+}
+
+static BdrvChildRole detach_by_driver_cb_role;
+
 /*
  * Initial graph:
  *
 * PA     PB
 *    \ /   \
 *     A     B     C
 *
- * PA has a pending write request whose callback changes the child nodes of PB:
- * It removes B and adds C instead. The subtree of PB is drained, which will
- * indirectly drain the write request, too.
+ * by_parent_cb == true:  Test that parent callbacks don't poll
+ *
+ * PA has a pending write request whose callback changes the child nodes of
+ * PB: It removes B and adds C instead. The subtree of PB is drained, which
+ * will indirectly drain the write request, too.
+ *
+ * by_parent_cb == false: Test that bdrv_drain_invoke() doesn't poll
+ *
+ * PA's BdrvChildRole has a .drained_begin callback that schedules a BH
+ * that does the same graph change. If bdrv_drain_invoke() calls it, the
+ * state is messed up, but if it is only polled in the single
+ * BDRV_POLL_WHILE() at the end of the drain, this should work fine.
  */
-static void test_detach_by_parent_cb(void)
+static void test_detach_indirect(bool by_parent_cb)
 {
     BlockBackend *blk;
     BlockDriverState *parent_a, *parent_b, *a, *b, *c;
     BdrvChild *child_a, *child_b;
     BlockAIOCB *acb;
-    struct detach_by_parent_data data;
 
     QEMUIOVector qiov;
     struct iovec iov = {
@@ -1027,6 +1075,12 @@ static void test_detach_by_parent_cb(void)
     };
     qemu_iovec_init_external(&qiov, &iov, 1);
 
+    if (!by_parent_cb) {
+        detach_by_driver_cb_role = child_file;
+        detach_by_driver_cb_role.drained_begin =
+            detach_by_driver_cb_drained_begin;
+    }
+
     /* Create all involved nodes */
     parent_a = bdrv_new_open_driver(&bdrv_test, "parent-a", BDRV_O_RDWR,
                                     &error_abort);
@@ -1042,6 +1096,13 @@ static void test_detach_by_parent_cb(void)
     blk_insert_bs(blk, parent_a, &error_abort);
     bdrv_unref(parent_a);
 
+    /* If we want to get bdrv_drain_invoke() to call aio_poll(), the driver
+     * callback must not return immediately. */
+    if (!by_parent_cb) {
+        BDRVTestState *s = parent_a->opaque;
+        s->sleep_in_drain_begin = true;
+    }
+
     /* Set child relationships */
     bdrv_ref(b);
     bdrv_ref(a);
@@ -1049,7 +1110,9 @@ static void test_detach_by_parent_cb(void)
     child_a = bdrv_attach_child(parent_b, a, "PB-A", &child_backing, &error_abort);
 
     bdrv_ref(a);
-    bdrv_attach_child(parent_a, a, "PA-A", &child_file, &error_abort);
+    bdrv_attach_child(parent_a, a, "PA-A",
+                      by_parent_cb ? &child_file : &detach_by_driver_cb_role,
+                      &error_abort);
 
     g_assert_cmpint(parent_a->refcnt, ==, 1);
     g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1062,18 +1125,19 @@ static void test_detach_by_parent_cb(void)
     g_assert(QLIST_NEXT(child_b, next) == NULL);
 
     /* Start the evil write request */
-    data = (struct detach_by_parent_data) {
+    detach_by_parent_data = (struct detach_by_parent_data) {
         .parent_b = parent_b,
         .child_b = child_b,
         .c = c,
+        .by_parent_cb = by_parent_cb,
     };
-    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, &data);
+    acb = blk_aio_preadv(blk, 0, &qiov, 0, detach_by_parent_aio_cb, NULL);
     g_assert(acb != NULL);
 
     /* Drain and check the expected result */
     bdrv_subtree_drained_begin(parent_b);
 
-    g_assert(data.child_c != NULL);
+    g_assert(detach_by_parent_data.child_c != NULL);
 
     g_assert_cmpint(parent_a->refcnt, ==, 1);
     g_assert_cmpint(parent_b->refcnt, ==, 1);
@@ -1081,8 +1145,8 @@ static void test_detach_by_parent_cb(void)
     g_assert_cmpint(b->refcnt, ==, 1);
     g_assert_cmpint(c->refcnt, ==, 2);
 
-    g_assert(QLIST_FIRST(&parent_b->children) == data.child_c);
-    g_assert(QLIST_NEXT(data.child_c, next) == child_a);
+    g_assert(QLIST_FIRST(&parent_b->children) == detach_by_parent_data.child_c);
+    g_assert(QLIST_NEXT(detach_by_parent_data.child_c, next) == child_a);
     g_assert(QLIST_NEXT(child_a, next) == NULL);
 
     g_assert_cmpint(parent_a->quiesce_counter, ==, 1);
@@ -1110,6 +1174,15 @@ static void test_detach_by_parent_cb(void)
     bdrv_unref(c);
 }
 
+static void test_detach_by_parent_cb(void)
+{
+    test_detach_indirect(true);
+}
+
+static void test_detach_by_driver_cb(void)
+{
+    test_detach_indirect(false);
+}
 
 int main(int argc, char **argv)
 {
@@ -1162,6 +1235,7 @@ int main(int argc, char **argv)
     g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
     g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
     g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
+    g_test_add_func("/bdrv-drain/detach/driver_cb", test_detach_by_driver_cb);
 
     ret = g_test_run();
     qemu_event_destroy(&done_event);
--
cgit v1.2.3


From 19f7a7e574a099dca13120441fbe723cea9c1dc2 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Wed, 28 Mar 2018 18:29:06 +0200
Subject: test-bdrv-drain: Test graph changes in drain_all section

This tests both adding and removing a node between bdrv_drain_all_begin()
and bdrv_drain_all_end(), and enables the existing detach test for
drain_all.
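
In outline, the sequence that has to work is the following; see
test_graph_change_drain_all() in the diff below for the full version:

    bdrv_drain_all_begin();

    /* Graph changes while everything is drained: */
    blk_unref(blk_a);                          /* node A goes away */
    blk_insert_bs(blk_b, bs_b, &error_abort);  /* node B appears   */

    bdrv_drain_all_end();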
Signed-off-by: Kevin Wolf
---
 tests/test-bdrv-drain.c | 75 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 73 insertions(+), 2 deletions(-)

(limited to 'tests')

diff --git a/tests/test-bdrv-drain.c b/tests/test-bdrv-drain.c
index 5cc179a778..291a050f86 100644
--- a/tests/test-bdrv-drain.c
+++ b/tests/test-bdrv-drain.c
@@ -452,7 +452,7 @@ static void test_multiparent(void)
     blk_unref(blk_b);
 }
 
-static void test_graph_change(void)
+static void test_graph_change_drain_subtree(void)
 {
     BlockBackend *blk_a, *blk_b;
     BlockDriverState *bs_a, *bs_b, *backing;
@@ -531,6 +531,63 @@ static void test_graph_change(void)
     blk_unref(blk_b);
 }
 
+static void test_graph_change_drain_all(void)
+{
+    BlockBackend *blk_a, *blk_b;
+    BlockDriverState *bs_a, *bs_b;
+    BDRVTestState *a_s, *b_s;
+
+    /* Create node A with a BlockBackend */
+    blk_a = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    bs_a = bdrv_new_open_driver(&bdrv_test, "test-node-a", BDRV_O_RDWR,
+                                &error_abort);
+    a_s = bs_a->opaque;
+    blk_insert_bs(blk_a, bs_a, &error_abort);
+
+    g_assert_cmpint(bs_a->quiesce_counter, ==, 0);
+    g_assert_cmpint(a_s->drain_count, ==, 0);
+
+    /* Call bdrv_drain_all_begin() */
+    bdrv_drain_all_begin();
+
+    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
+    g_assert_cmpint(a_s->drain_count, ==, 1);
+
+    /* Create node B with a BlockBackend */
+    blk_b = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
+    bs_b = bdrv_new_open_driver(&bdrv_test, "test-node-b", BDRV_O_RDWR,
+                                &error_abort);
+    b_s = bs_b->opaque;
+    blk_insert_bs(blk_b, bs_b, &error_abort);
+
+    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
+    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
+    g_assert_cmpint(a_s->drain_count, ==, 1);
+    g_assert_cmpint(b_s->drain_count, ==, 1);
+
+    /* Unref and finally delete node A */
+    blk_unref(blk_a);
+
+    g_assert_cmpint(bs_a->quiesce_counter, ==, 1);
+    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
+    g_assert_cmpint(a_s->drain_count, ==, 1);
+    g_assert_cmpint(b_s->drain_count, ==, 1);
+
+    bdrv_unref(bs_a);
+
+    g_assert_cmpint(bs_b->quiesce_counter, ==, 1);
+    g_assert_cmpint(b_s->drain_count, ==, 1);
+
+    /* End the drained section */
+    bdrv_drain_all_end();
+
+    g_assert_cmpint(bs_b->quiesce_counter, ==, 0);
+    g_assert_cmpint(b_s->drain_count, ==, 0);
+
+    bdrv_unref(bs_b);
+    blk_unref(blk_b);
+}
+
 struct test_iothread_data {
     BlockDriverState *bs;
     enum drain_type drain_type;
@@ -971,6 +1028,10 @@ static void do_test_delete_by_drain(bool detach_instead_of_delete,
         bdrv_subtree_drained_begin(bs);
         bdrv_subtree_drained_end(bs);
         break;
+    case BDRV_DRAIN_ALL:
+        bdrv_drain_all_begin();
+        bdrv_drain_all_end();
+        break;
     default:
         g_assert_not_reached();
     }
@@ -991,6 +1052,11 @@ static void test_delete_by_drain(void)
     do_test_delete_by_drain(false, BDRV_DRAIN);
 }
 
+static void test_detach_by_drain_all(void)
+{
+    do_test_delete_by_drain(true, BDRV_DRAIN_ALL);
+}
+
 static void test_detach_by_drain(void)
 {
     do_test_delete_by_drain(true, BDRV_DRAIN);
@@ -1219,7 +1285,11 @@ int main(int argc, char **argv)
 
     g_test_add_func("/bdrv-drain/nested", test_nested);
     g_test_add_func("/bdrv-drain/multiparent", test_multiparent);
-    g_test_add_func("/bdrv-drain/graph-change", test_graph_change);
+
+    g_test_add_func("/bdrv-drain/graph-change/drain_subtree",
+                    test_graph_change_drain_subtree);
+    g_test_add_func("/bdrv-drain/graph-change/drain_all",
+                    test_graph_change_drain_all);
 
     g_test_add_func("/bdrv-drain/iothread/drain_all", test_iothread_drain_all);
     g_test_add_func("/bdrv-drain/iothread/drain", test_iothread_drain);
@@ -1232,6 +1302,7 @@ int main(int argc, char **argv)
                     test_blockjob_drain_subtree);
 
     g_test_add_func("/bdrv-drain/deletion/drain", test_delete_by_drain);
+    g_test_add_func("/bdrv-drain/detach/drain_all", test_detach_by_drain_all);
     g_test_add_func("/bdrv-drain/detach/drain", test_detach_by_drain);
     g_test_add_func("/bdrv-drain/detach/drain_subtree", test_detach_by_drain_subtree);
     g_test_add_func("/bdrv-drain/detach/parent_cb", test_detach_by_parent_cb);
--
cgit v1.2.3


From a33fbb4f8b64226becf502a123733776ce319b24 Mon Sep 17 00:00:00 2001
From: Max Reitz
Date: Wed, 13 Jun 2018 20:18:16 +0200
Subject: hbitmap: Add @advance param to hbitmap_iter_next()

This new parameter allows the caller to just query the next dirty
position without moving the iterator.

Signed-off-by: Max Reitz
Reviewed-by: Fam Zheng
Reviewed-by: John Snow
Message-id: 20180613181823.13618-8-mreitz@redhat.com
Signed-off-by: Max Reitz
---
 tests/test-hbitmap.c | 26 +++++++++++++-------------
 1 file changed, 13 insertions(+), 13 deletions(-)

(limited to 'tests')

diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index f29631f939..f2158f767d 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -46,7 +46,7 @@ static void hbitmap_test_check(TestHBitmapData *data,
 
     i = first;
     for (;;) {
-        next = hbitmap_iter_next(&hbi);
+        next = hbitmap_iter_next(&hbi, true);
         if (next < 0) {
             next = data->size;
         }
@@ -435,25 +435,25 @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data,
     /* Note that hbitmap_test_check has to be invoked manually in this test. */
     hbitmap_test_init(data, 131072 << 7, 7);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 
     hbitmap_test_set(data, ((L2 + L1 + 1) << 7) + 8, 8);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 
     hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 
     hbitmap_test_set(data, (131072 << 7) - 8, 8);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), ==, 131071 << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 
     hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), ==, 131071 << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi), <, 0);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7);
+    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 }
 
 static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff)
@@ -893,7 +893,7 @@ static void test_hbitmap_serialize_zeroes(TestHBitmapData *data,
     for (i = 0; i < num_positions; i++) {
         hbitmap_deserialize_zeroes(data->hb, positions[i], min_l1, true);
         hbitmap_iter_init(&iter, data->hb, 0);
-        next = hbitmap_iter_next(&iter);
+        next = hbitmap_iter_next(&iter, true);
         if (i == num_positions - 1) {
             g_assert_cmpint(next, ==, -1);
         } else {
@@ -919,10 +919,10 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
 
     hbitmap_iter_init(&hbi, data->hb, BITS_PER_LONG - 1);
 
-    hbitmap_iter_next(&hbi);
+    hbitmap_iter_next(&hbi, true);
 
     hbitmap_reset_all(data->hb);
-    hbitmap_iter_next(&hbi);
+    hbitmap_iter_next(&hbi, true);
 }
 
 static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
--
cgit v1.2.3


From 269576848ec3d57d2d958cf5ac69b08c44adf816 Mon Sep 17 00:00:00 2001
From: Max Reitz
Date: Wed, 13 Jun 2018 20:18:17 +0200
Subject: test-hbitmap: Add non-advancing iter_next tests

Add a function that wraps hbitmap_iter_next() and always calls it in
non-advancing mode first, and in advancing mode next. The result should
always be the same.

By using this function everywhere we called hbitmap_iter_next() before,
we should get good test coverage for non-advancing hbitmap_iter_next().

Signed-off-by: Max Reitz
Reviewed-by: Fam Zheng
Reviewed-by: John Snow
Message-id: 20180613181823.13618-9-mreitz@redhat.com
Signed-off-by: Max Reitz
---
 tests/test-hbitmap.c | 36 ++++++++++++++++++++++++------------
 1 file changed, 24 insertions(+), 12 deletions(-)

(limited to 'tests')

diff --git a/tests/test-hbitmap.c b/tests/test-hbitmap.c
index f2158f767d..5e67ac1d3a 100644
--- a/tests/test-hbitmap.c
+++ b/tests/test-hbitmap.c
@@ -30,6 +30,18 @@ typedef struct TestHBitmapData {
 } TestHBitmapData;
 
 
+static int64_t check_hbitmap_iter_next(HBitmapIter *hbi)
+{
+    int next0, next1;
+
+    next0 = hbitmap_iter_next(hbi, false);
+    next1 = hbitmap_iter_next(hbi, true);
+
+    g_assert_cmpint(next0, ==, next1);
+
+    return next0;
+}
+
 /* Check that the HBitmap and the shadow bitmap contain the same data,
  * ignoring the same "first" bits.
  */
@@ -46,7 +58,7 @@ static void hbitmap_test_check(TestHBitmapData *data,
 
     i = first;
     for (;;) {
-        next = hbitmap_iter_next(&hbi, true);
+        next = check_hbitmap_iter_next(&hbi);
         if (next < 0) {
             next = data->size;
         }
@@ -435,25 +447,25 @@ static void test_hbitmap_iter_granularity(TestHBitmapData *data,
     /* Note that hbitmap_test_check has to be invoked manually in this test. */
     hbitmap_test_init(data, 131072 << 7, 7);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0);
 
     hbitmap_test_set(data, ((L2 + L1 + 1) << 7) + 8, 8);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0);
 
     hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7);
     g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
 
     hbitmap_test_set(data, (131072 << 7) - 8, 8);
     hbitmap_iter_init(&hbi, data->hb, 0);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, (L2 + L1 + 1) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, (L2 + L1 + 1) << 7);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, 131071 << 7);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0);
 
     hbitmap_iter_init(&hbi, data->hb, (L2 + L1 + 2) << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), ==, 131071 << 7);
-    g_assert_cmpint(hbitmap_iter_next(&hbi, true), <, 0);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), ==, 131071 << 7);
+    g_assert_cmpint(check_hbitmap_iter_next(&hbi), <, 0);
 }
 
 static void hbitmap_test_set_boundary_bits(TestHBitmapData *data, ssize_t diff)
@@ -893,7 +905,7 @@ static void test_hbitmap_serialize_zeroes(TestHBitmapData *data,
     for (i = 0; i < num_positions; i++) {
         hbitmap_deserialize_zeroes(data->hb, positions[i], min_l1, true);
         hbitmap_iter_init(&iter, data->hb, 0);
-        next = hbitmap_iter_next(&iter, true);
+        next = check_hbitmap_iter_next(&iter);
         if (i == num_positions - 1) {
             g_assert_cmpint(next, ==, -1);
         } else {
@@ -919,10 +931,10 @@ static void test_hbitmap_iter_and_reset(TestHBitmapData *data,
 
     hbitmap_iter_init(&hbi, data->hb, BITS_PER_LONG - 1);
 
-    hbitmap_iter_next(&hbi, true);
+    check_hbitmap_iter_next(&hbi);
 
     hbitmap_reset_all(data->hb);
-    hbitmap_iter_next(&hbi, true);
+    check_hbitmap_iter_next(&hbi);
 }
 
 static void test_hbitmap_next_zero_check(TestHBitmapData *data, int64_t start)
--
cgit v1.2.3


From e38da02091eeed56bb370ec9d72c4367d4e9ada3 Mon Sep 17 00:00:00 2001
From: Max Reitz
Date: Wed, 13 Jun 2018 20:18:23 +0200
Subject: iotests: Add test for active mirroring

Signed-off-by: Max Reitz
Reviewed-by: Fam Zheng
Reviewed-by: Alberto Garcia
Message-id: 20180613181823.13618-15-mreitz@redhat.com
Signed-off-by: Max Reitz
---
 tests/qemu-iotests/151     | 120 +++++++++++++++++++++++++++++++++++++++++++++
 tests/qemu-iotests/151.out |   5 ++
 tests/qemu-iotests/group   |   1 +
 3 files changed, 126 insertions(+)
 create mode 100755 tests/qemu-iotests/151
 create mode 100644 tests/qemu-iotests/151.out

(limited to 'tests')

diff --git a/tests/qemu-iotests/151 b/tests/qemu-iotests/151
new file mode 100755
index 0000000000..fe53b9f446
--- /dev/null
+++ b/tests/qemu-iotests/151
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+#
+# Tests for active mirroring
+#
+# Copyright (C) 2018 Red Hat, Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+#
+
+import os
+import iotests
+from iotests import qemu_img
+
+source_img = os.path.join(iotests.test_dir, 'source.' + iotests.imgfmt)
+target_img = os.path.join(iotests.test_dir, 'target.' + iotests.imgfmt)
+
+class TestActiveMirror(iotests.QMPTestCase):
+    image_len = 128 * 1024 * 1024  # 128 MiB
+    potential_writes_in_flight = True
+
+    def setUp(self):
+        qemu_img('create', '-f', iotests.imgfmt, source_img, '128M')
+        qemu_img('create', '-f', iotests.imgfmt, target_img, '128M')
+
+        blk_source = {'id': 'source',
+                      'if': 'none',
+                      'node-name': 'source-node',
+                      'driver': iotests.imgfmt,
+                      'file': {'driver': 'file',
+                               'filename': source_img}}
+
+        blk_target = {'node-name': 'target-node',
+                      'driver': iotests.imgfmt,
+                      'file': {'driver': 'file',
+                               'filename': target_img}}
+
+        self.vm = iotests.VM()
+        self.vm.add_drive_raw(self.vm.qmp_to_opts(blk_source))
+        self.vm.add_blockdev(self.vm.qmp_to_opts(blk_target))
+        self.vm.add_device('virtio-blk,drive=source')
+        self.vm.launch()
+
+    def tearDown(self):
+        self.vm.shutdown()
+
+        if not self.potential_writes_in_flight:
+            self.assertTrue(iotests.compare_images(source_img, target_img),
+                            'mirror target does not match source')
+
+        os.remove(source_img)
+        os.remove(target_img)
+
+    def doActiveIO(self, sync_source_and_target):
+        # Fill the source image
+        self.vm.hmp_qemu_io('source',
+                            'write -P 1 0 %i' % self.image_len);
+
+        # Start some background requests
+        for offset in range(1 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -P 2 %i 1M' % offset)
+        for offset in range(2 * self.image_len / 8, 3 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
+
+        # Start the block job
+        result = self.vm.qmp('blockdev-mirror',
+                             job_id='mirror',
+                             filter_node_name='mirror-node',
+                             device='source-node',
+                             target='target-node',
+                             sync='full',
+                             copy_mode='write-blocking')
+        self.assert_qmp(result, 'return', {})
+
+        # Start some more requests
+        for offset in range(3 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
+        for offset in range(4 * self.image_len / 8, 5 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
+
+        # Wait for the READY event
+        self.wait_ready(drive='mirror')
+
+        # Now start some final requests; all of these (which land on
+        # the source) should be settled using the active mechanism.
+        # The mirror code itself asserts that the source BDS's dirty
+        # bitmap will stay clean between READY and COMPLETED.
+        for offset in range(5 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -P 3 %i 1M' % offset)
+        for offset in range(6 * self.image_len / 8, 7 * self.image_len / 8, 1024 * 1024):
+            self.vm.hmp_qemu_io('source', 'aio_write -z %i 1M' % offset)
+
+        if sync_source_and_target:
+            # If source and target should be in sync after the mirror,
+            # we have to flush before completion
+            self.vm.hmp_qemu_io('source', 'aio_flush')
+            self.potential_writes_in_flight = False
+
+        self.complete_and_wait(drive='mirror', wait_ready=False)
+
+    def testActiveIO(self):
+        self.doActiveIO(False)
+
+    def testActiveIOFlushed(self):
+        self.doActiveIO(True)
+
+
+
+if __name__ == '__main__':
+    iotests.main(supported_fmts=['qcow2', 'raw'])
diff --git a/tests/qemu-iotests/151.out b/tests/qemu-iotests/151.out
new file mode 100644
index 0000000000..fbc63e62f8
--- /dev/null
+++ b/tests/qemu-iotests/151.out
@@ -0,0 +1,5 @@
+..
+----------------------------------------------------------------------
+Ran 2 tests
+
+OK
diff --git a/tests/qemu-iotests/group b/tests/qemu-iotests/group
index 937a3d0a4d..eea75819d2 100644
--- a/tests/qemu-iotests/group
+++ b/tests/qemu-iotests/group
@@ -157,6 +157,7 @@
 148 rw auto quick
 149 rw auto sudo
 150 rw auto quick
+151 rw auto
 152 rw auto quick
 153 rw auto quick
 154 rw auto backing quick
--
cgit v1.2.3