author     Kevin Wolf <kwolf@redhat.com>  2023-11-15 18:20:10 +0100
committer  Kevin Wolf <kwolf@redhat.com>  2023-11-21 12:45:21 +0100
commit     6bc0bcc89f847839cf3d459a55290dda8801d9d3
tree       8e37ecf19e54f557f80666bb69d64f8df6a0803d  /block/mirror.c
parent     bb092d6d8f53b9a5ce76e3f5a5a66e6a0475295f
block: Fix deadlocks in bdrv_graph_wrunlock()
bdrv_graph_wrunlock() calls aio_poll(), which may run callbacks that have a
nested event loop. Nested event loops can depend on other iothreads making
progress, so in order to allow them to make progress, bdrv_graph_wrunlock()
must not hold the AioContext lock of another thread while calling aio_poll().

This introduces a @bs parameter to bdrv_graph_wrunlock() whose AioContext is
temporarily dropped (which matches bdrv_graph_wrlock()), and a
bdrv_graph_wrunlock_ctx() that can be used if the BlockDriverState doesn't
necessarily exist any more when unlocking.

This also requires a change to bdrv_schedule_unref(), which was relying on
the incorrectly taken lock. It needs to take the lock itself now. While this
is a separate bug, it can't be fixed in a separate patch because otherwise
the intermediate state would either deadlock or try to release a lock that
we don't even hold.

Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231115172012.112727-3-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
[kwolf: Fixed up bdrv_schedule_unref()]
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
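For readers following the change from the diff below alone, here is a minimal
sketch of the idea behind the new signature: the unlock path must not hold the
caller's AioContext lock while it polls, so it releases that lock around the
poll and re-acquires it before returning. The AioContext/BlockDriverState
definitions and the fake_aio_poll()/sketch_graph_wrunlock() helpers are
simplified stand-ins for illustration only, not QEMU's actual implementation.

    /*
     * Illustrative sketch only: models why bdrv_graph_wrunlock() now needs to
     * know which BlockDriverState (and hence which AioContext) the caller
     * holds.  The types here are stand-ins, not QEMU's real definitions.
     */
    #include <pthread.h>
    #include <stdio.h>

    typedef struct AioContext {
        pthread_mutex_t lock;   /* stand-in for the per-iothread AioContext lock */
    } AioContext;

    typedef struct BlockDriverState {
        AioContext *ctx;        /* the context this node currently belongs to */
    } BlockDriverState;

    /* Stand-in for aio_poll(): may run nested event loops that need other
     * iothreads to make progress, so no foreign AioContext lock may be held. */
    static void fake_aio_poll(void)
    {
        printf("polling until pending graph updates have completed\n");
    }

    /* Sketch of the new bdrv_graph_wrunlock(bs) pattern: drop bs's AioContext
     * lock around the poll, then take it back before returning to the caller. */
    static void sketch_graph_wrunlock(BlockDriverState *bs)
    {
        AioContext *ctx = bs ? bs->ctx : NULL;

        if (ctx) {
            pthread_mutex_unlock(&ctx->lock);   /* release before polling */
        }
        fake_aio_poll();                        /* safe: no foreign lock held */
        if (ctx) {
            pthread_mutex_lock(&ctx->lock);     /* restore caller's locking state */
        }
    }

    int main(void)
    {
        AioContext ctx = { .lock = PTHREAD_MUTEX_INITIALIZER };
        BlockDriverState bs = { .ctx = &ctx };

        pthread_mutex_lock(&ctx.lock);          /* caller holds the context lock */
        sketch_graph_wrunlock(&bs);             /* unlock drops/retakes it safely */
        pthread_mutex_unlock(&ctx.lock);
        return 0;
    }

This is why each bdrv_graph_wrunlock() call in the hunks below now passes the
node whose AioContext the caller holds (target_bs, mirror_top_bs, or bs).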
Diffstat (limited to 'block/mirror.c')
-rw-r--r--  block/mirror.c | 14
1 file changed, 7 insertions, 7 deletions
diff --git a/block/mirror.c b/block/mirror.c
index 2096fade90..cd9d3ad4a8 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -773,7 +773,7 @@ static int mirror_exit_common(Job *job)
"would not lead to an abrupt change of visible data",
to_replace->node_name, target_bs->node_name);
}
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(target_bs);
bdrv_drained_end(to_replace);
if (local_err) {
error_report_err(local_err);
@@ -798,7 +798,7 @@ static int mirror_exit_common(Job *job)
block_job_remove_all_bdrv(bjob);
bdrv_graph_wrlock(mirror_top_bs);
bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(mirror_top_bs);
bdrv_drained_end(target_bs);
bdrv_unref(target_bs);
@@ -1920,7 +1920,7 @@ static BlockJob *mirror_start_job(
BLK_PERM_CONSISTENT_READ,
errp);
if (ret < 0) {
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(bs);
goto fail;
}
@@ -1965,17 +1965,17 @@ static BlockJob *mirror_start_job(
ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
iter_shared_perms, errp);
if (ret < 0) {
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(bs);
goto fail;
}
}
if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(bs);
goto fail;
}
}
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(bs);
QTAILQ_INIT(&s->ops_in_flight);
@@ -2006,7 +2006,7 @@ fail:
bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
&error_abort);
bdrv_replace_node(mirror_top_bs, bs, &error_abort);
- bdrv_graph_wrunlock();
+ bdrv_graph_wrunlock(bs);
bdrv_drained_end(bs);
bdrv_unref(mirror_top_bs);