author     Kevin Wolf <kwolf@redhat.com>  2023-11-15 18:20:10 +0100
committer  Kevin Wolf <kwolf@redhat.com>  2023-11-21 12:45:21 +0100
commit     6bc0bcc89f847839cf3d459a55290dda8801d9d3
tree       8e37ecf19e54f557f80666bb69d64f8df6a0803d /block.c
parent     bb092d6d8f53b9a5ce76e3f5a5a66e6a0475295f
block: Fix deadlocks in bdrv_graph_wrunlock()
bdrv_graph_wrunlock() calls aio_poll(), which may run callbacks that
have a nested event loop. Nested event loops can depend on other
iothreads making progress, so bdrv_graph_wrunlock() must not hold the
AioContext lock of another thread while calling aio_poll().
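
Schematically, the fix makes the unlock path release the caller's
AioContext lock around its polling. The sketch below is illustrative
only, assuming QEMU's AioContext API as of this commit: the real
bdrv_graph_wrunlock_ctx() also does the reader/writer bookkeeping of
the graph lock, which is omitted here, and wrunlock_sketch() is a
made-up name.

    #include "qemu/osdep.h"
    #include "block/aio.h"

    /* Sketch: never call into the event loop while holding another
     * thread's AioContext lock, or nested event loops that wait for
     * that iothread to make progress will deadlock. */
    static void wrunlock_sketch(AioContext *ctx)
    {
        if (ctx) {
            aio_context_release(ctx);   /* drop the caller's lock first */
        }

        /* May run callbacks that start nested event loops. */
        aio_bh_poll(qemu_get_aio_context());

        if (ctx) {
            aio_context_acquire(ctx);   /* restore the caller's locking state */
        }
    }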
This introduces a @bs parameter to bdrv_graph_wrunlock() whose
AioContext lock is temporarily dropped (matching bdrv_graph_wrlock()),
and a bdrv_graph_wrunlock_ctx() variant that can be used when the
BlockDriverState may no longer exist at unlock time.
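
In caller code the two variants look like this; the snippet reuses the
patterns from the hunks in the diff below rather than showing any new
behaviour:

    /* Common case: the node still exists, so pass it and let its
     * AioContext lock be dropped around the internal polling. */
    bdrv_graph_wrlock(bs);
    ret = bdrv_replace_node_common(bs, child_bs, true, true, errp);
    bdrv_graph_wrunlock(bs);

    /* If the BlockDriverState may already be gone at unlock time,
     * hand over its AioContext directly instead. */
    bdrv_graph_wrunlock_ctx(ctx);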
This also requires a change to bdrv_schedule_unref(), which was relying
on the incorrectly taken lock. It needs to take the lock itself now.
While this is a separate bug, it can't be fixed in a separate patch because
otherwise the intermediate state would either deadlock or try to release
a lock that we don't even hold.
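
Concretely, the deferred unref now goes through a bottom half that
takes the node's AioContext on its own; this is the helper added in the
final hunks of the diff below:

    static void bdrv_schedule_unref_bh(void *opaque)
    {
        BlockDriverState *bs = opaque;
        AioContext *ctx = bdrv_get_aio_context(bs);

        /* The BH runs in the main loop without the caller's (previously
         * mis-held) lock, so it must acquire the node's AioContext itself. */
        aio_context_acquire(ctx);
        bdrv_unref(bs);
        aio_context_release(ctx);
    }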
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Message-ID: <20231115172012.112727-3-kwolf@redhat.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
[kwolf: Fixed up bdrv_schedule_unref()]
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'block.c')
-rw-r--r--  block.c | 39 ++++++++++++++++++++++++---------------
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/block.c b/block.c
--- a/block.c
+++ b/block.c
@@ -1713,7 +1713,7 @@ open_failed:
         bdrv_unref_child(bs, bs->file);
         assert(!bs->file);
     }
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(NULL);
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@@ -3577,7 +3577,7 @@ int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd,
     bdrv_drained_begin(drain_bs);
     bdrv_graph_wrlock(backing_hd);
     ret = bdrv_set_backing_hd_drained(bs, backing_hd, errp);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(backing_hd);
     bdrv_drained_end(drain_bs);
 
     bdrv_unref(drain_bs);
@@ -3796,7 +3796,7 @@ BdrvChild *bdrv_open_child(const char *filename,
     child = bdrv_attach_child(parent, bs, bdref_key, child_class, child_role,
                               errp);
     aio_context_release(ctx);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(NULL);
 
     return child;
 }
@@ -4652,7 +4652,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 
     bdrv_graph_wrlock(NULL);
     tran_commit(tran);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(NULL);
 
     QTAILQ_FOREACH_REVERSE(bs_entry, bs_queue, entry) {
         BlockDriverState *bs = bs_entry->state.bs;
@@ -4671,7 +4671,7 @@ int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp)
 abort:
     bdrv_graph_wrlock(NULL);
     tran_abort(tran);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(NULL);
 
     QTAILQ_FOREACH_SAFE(bs_entry, bs_queue, entry, next) {
         if (bs_entry->prepared) {
@@ -4857,7 +4857,7 @@ bdrv_reopen_parse_file_or_backing(BDRVReopenState *reopen_state,
 
     ret = bdrv_set_file_or_backing_noperm(bs, new_child_bs, is_backing,
                                           tran, errp);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock_ctx(ctx);
 
     if (old_ctx != ctx) {
         aio_context_release(ctx);
@@ -5216,7 +5216,7 @@ static void bdrv_close(BlockDriverState *bs)
 
     assert(!bs->backing);
     assert(!bs->file);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(bs);
 
     g_free(bs->opaque);
     bs->opaque = NULL;
@@ -5511,7 +5511,7 @@ int bdrv_drop_filter(BlockDriverState *bs, Error **errp)
     bdrv_drained_begin(child_bs);
     bdrv_graph_wrlock(bs);
     ret = bdrv_replace_node_common(bs, child_bs, true, true, errp);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(bs);
     bdrv_drained_end(child_bs);
 
     return ret;
@@ -5593,7 +5593,7 @@ out:
     tran_finalize(tran, ret);
 
     bdrv_refresh_limits(bs_top, NULL, NULL);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(bs_top);
 
     bdrv_drained_end(bs_top);
     bdrv_drained_end(bs_new);
@@ -5631,7 +5631,7 @@ int bdrv_replace_child_bs(BdrvChild *child, BlockDriverState *new_bs,
 
     tran_finalize(tran, ret);
 
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(new_bs);
     bdrv_drained_end(old_bs);
     bdrv_drained_end(new_bs);
     bdrv_unref(old_bs);
@@ -5720,7 +5720,7 @@ BlockDriverState *bdrv_insert_node(BlockDriverState *bs, QDict *options,
     bdrv_drained_begin(new_node_bs);
     bdrv_graph_wrlock(new_node_bs);
     ret = bdrv_replace_node(bs, new_node_bs, errp);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(new_node_bs);
     bdrv_drained_end(new_node_bs);
     bdrv_drained_end(bs);
     bdrv_unref(bs);
@@ -6015,7 +6015,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
      * That's a FIXME.
      */
     bdrv_replace_node_common(top, base, false, false, &local_err);
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(base);
 
     if (local_err) {
         error_report_err(local_err);
@@ -6052,7 +6052,7 @@ int bdrv_drop_intermediate(BlockDriverState *top, BlockDriverState *base,
     goto exit;
 
 exit_wrlock:
-    bdrv_graph_wrunlock();
+    bdrv_graph_wrunlock(base);
 exit:
     bdrv_drained_end(base);
     bdrv_unref(top);
@@ -7254,6 +7254,16 @@ void bdrv_unref(BlockDriverState *bs)
     }
 }
 
+static void bdrv_schedule_unref_bh(void *opaque)
+{
+    BlockDriverState *bs = opaque;
+    AioContext *ctx = bdrv_get_aio_context(bs);
+
+    aio_context_acquire(ctx);
+    bdrv_unref(bs);
+    aio_context_release(ctx);
+}
+
 /*
  * Release a BlockDriverState reference while holding the graph write lock.
  *
@@ -7267,8 +7277,7 @@ void bdrv_schedule_unref(BlockDriverState *bs)
     if (!bs) {
         return;
     }
-    aio_bh_schedule_oneshot(qemu_get_aio_context(),
-                            (QEMUBHFunc *) bdrv_unref, bs);
+    aio_bh_schedule_oneshot(qemu_get_aio_context(), bdrv_schedule_unref_bh, bs);
 }
 
 struct BdrvOpBlocker {