author     Max Reitz <mreitz@redhat.com>    2019-07-19 11:26:14 +0200
committer  Kevin Wolf <kwolf@redhat.com>    2019-07-19 13:19:16 +0200
commit     e037c09c78520cbdb6da7cfc6ad0256d5870b814 (patch)
tree       ee4b9ffaf87c06445fe091a6dbeed72246005d16 /block.c
parent     1b285657687c7f08761759092fa05fa33578fc00 (diff)
block: Do not poll in bdrv_do_drained_end()
We should never poll anywhere in bdrv_do_drained_end() (including its recursive callees like bdrv_drain_invoke()), because it does not cope well with graph changes. In fact, it has been written based on the postulation that no graph changes will happen in it.

Instead, the callers that want to poll must poll, i.e. all currently globally available wrappers: bdrv_drained_end(), bdrv_subtree_drained_end(), bdrv_unapply_subtree_drain(), and bdrv_drain_all_end(). Graph changes there do not matter.

They can poll simply by passing a pointer to a drained_end_counter and wait until it reaches 0.

This patch also adds a non-polling global wrapper for bdrv_do_drained_end() that takes a drained_end_counter pointer. We need such a variant because now no function called anywhere from bdrv_do_drained_end() must poll. This includes BdrvChildRole.drained_end(), which already must not poll according to its interface documentation, but bdrv_child_cb_drained_end() just violates that by invoking bdrv_drained_end() (which does poll). Therefore, BdrvChildRole.drained_end() must take a *drained_end_counter parameter, which bdrv_child_cb_drained_end() can pass on to the new bdrv_drained_end_no_poll() function.

Note that we now have a pattern of all drained_end-related functions either polling or receiving a *drained_end_counter to let the caller poll based on that.

A problem with a single poll loop is that when the drained section in bdrv_set_aio_context_ignore() ends, some nodes in the subgraph may be in the old contexts, while others are in the new context already. To let the collective poll in bdrv_drained_end() work correctly, we must not hold a lock to the old context, so that the old context can make progress in case it is different from the current context. (In the process, remove the comment saying that the current context is always the old context, because it is wrong.)

In all other places, all nodes in a subtree must be in the same context, so we can just poll that. The exception of course is bdrv_drain_all_end(), but that always runs in the main context, so we can just poll NULL (like bdrv_drain_all_begin() does).

Signed-off-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
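For reference, the counter-based polling described above reduces to the following pattern. This is only a sketch of the polling wrapper in block/io.c (which is outside the block.c diffstat below); the exact argument list of the internal bdrv_do_drained_end() is an assumption here, the point being that only the outermost caller polls until drained_end_counter drops back to 0.

    void bdrv_drained_end(BlockDriverState *bs)
    {
        int drained_end_counter = 0;

        /* Recurse without polling; callees that still have work pending
         * increment the counter and decrement it once they are done.
         * (Internal argument list assumed for illustration.) */
        bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter);

        /* A single poll loop at the top level, where graph changes caused
         * by the drained_end callbacks cannot break the recursion above. */
        BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0);
    }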
Diffstat (limited to 'block.c')
-rw-r--r--  block.c  37
1 file changed, 29 insertions, 8 deletions
diff --git a/block.c b/block.c
index 8440712ca0..9c94f7f28a 100644
--- a/block.c
+++ b/block.c
@@ -911,10 +911,11 @@ static bool bdrv_child_cb_drained_poll(BdrvChild *child)
     return bdrv_drain_poll(bs, false, NULL, false);
 }
 
-static void bdrv_child_cb_drained_end(BdrvChild *child)
+static void bdrv_child_cb_drained_end(BdrvChild *child,
+                                      int *drained_end_counter)
 {
     BlockDriverState *bs = child->opaque;
-    bdrv_drained_end(bs);
+    bdrv_drained_end_no_poll(bs, drained_end_counter);
 }
 
 static void bdrv_child_cb_attach(BdrvChild *child)
@@ -5923,9 +5924,11 @@ static void bdrv_attach_aio_context(BlockDriverState *bs,
 void bdrv_set_aio_context_ignore(BlockDriverState *bs,
                                  AioContext *new_context, GSList **ignore)
 {
+    AioContext *old_context = bdrv_get_aio_context(bs);
+    AioContext *current_context = qemu_get_current_aio_context();
     BdrvChild *child;
 
-    if (bdrv_get_aio_context(bs) == new_context) {
+    if (old_context == new_context) {
         return;
     }
 
@@ -5949,13 +5952,31 @@ void bdrv_set_aio_context_ignore(BlockDriverState *bs,
 
     bdrv_detach_aio_context(bs);
 
-    /* This function executes in the old AioContext so acquire the new one in
-     * case it runs in a different thread.
-     */
-    aio_context_acquire(new_context);
+    /* Acquire the new context, if necessary */
+    if (current_context != new_context) {
+        aio_context_acquire(new_context);
+    }
+
     bdrv_attach_aio_context(bs, new_context);
+
+    /*
+     * If this function was recursively called from
+     * bdrv_set_aio_context_ignore(), there may be nodes in the
+     * subtree that have not yet been moved to the new AioContext.
+     * Release the old one so bdrv_drained_end() can poll them.
+     */
+    if (current_context != old_context) {
+        aio_context_release(old_context);
+    }
+
     bdrv_drained_end(bs);
-    aio_context_release(new_context);
+
+    if (current_context != old_context) {
+        aio_context_acquire(old_context);
+    }
+    if (current_context != new_context) {
+        aio_context_release(new_context);
+    }
 }
 
 static bool bdrv_parent_can_set_aio_context(BdrvChild *c, AioContext *ctx,
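The counterparts outside block.c do not appear in the diff above; based on the commit message they look roughly as follows. The BdrvChildRole.drained_end() callback gains the *drained_end_counter parameter that bdrv_child_cb_drained_end() passes on, and the new bdrv_drained_end_no_poll() wrapper merely forwards the counter instead of polling. The internal bdrv_do_drained_end() arguments shown are an assumption.

    /* Sketch of the adjusted BdrvChildRole callback: it must not poll,
     * it may only account pending work in *drained_end_counter. */
    void (*drained_end)(BdrvChild *child, int *drained_end_counter);

    /* Sketch of the non-polling wrapper used by bdrv_child_cb_drained_end();
     * the caller polls on *drained_end_counter instead. */
    void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter)
    {
        bdrv_do_drained_end(bs, false, NULL, false, drained_end_counter);
    }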