From 4db7ba3b87447fd06cd7e23dab69fdae6011496d Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Wed, 10 May 2023 22:35:54 +0200
Subject: block: Call .bdrv_co_create(_opts) unlocked

These are functions that modify the graph, so they must be able to take
a writer lock. This is impossible if they already hold the reader lock.
If they need a reader lock for some of their operations, they should
take it internally.

Many of them go through blk_*(), which will always take the lock itself.
Direct calls of bdrv_*() need to take the reader lock. Note that while
locking for bdrv_co_*() calls is checked by TSA, this is not the case
for the mixed_coroutine_fns bdrv_*(). Holding the lock is still required
when they are called from coroutine context like here!

This effectively reverts 4ec8df0183, but adds some internal locking
instead.

Signed-off-by: Kevin Wolf
Message-Id: <20230510203601.418015-2-kwolf@redhat.com>
Reviewed-by: Eric Blake
Signed-off-by: Kevin Wolf
---
 include/block/block-global-state.h | 8 ++++----
 include/block/block_int-common.h   | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)

(limited to 'include')

diff --git a/include/block/block-global-state.h b/include/block/block-global-state.h
index 2d93423d35..f347199bff 100644
--- a/include/block/block-global-state.h
+++ b/include/block/block-global-state.h
@@ -58,14 +58,14 @@ BlockDriver *bdrv_find_protocol(const char *filename,
                                 Error **errp);
 BlockDriver *bdrv_find_format(const char *format_name);
 
-int coroutine_fn GRAPH_RDLOCK
+int coroutine_fn GRAPH_UNLOCKED
 bdrv_co_create(BlockDriver *drv, const char *filename, QemuOpts *opts,
                Error **errp);
 
-int co_wrapper_bdrv_rdlock bdrv_create(BlockDriver *drv, const char *filename,
-                                       QemuOpts *opts, Error **errp);
+int co_wrapper bdrv_create(BlockDriver *drv, const char *filename,
+                           QemuOpts *opts, Error **errp);
 
-int coroutine_fn GRAPH_RDLOCK
+int coroutine_fn GRAPH_UNLOCKED
 bdrv_co_create_file(const char *filename, QemuOpts *opts, Error **errp);
 
 BlockDriverState *bdrv_new(void);
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index dbec0e3bb4..6492a1e538 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -250,10 +250,10 @@ struct BlockDriver {
         BlockDriverState *bs, QDict *options, int flags, Error **errp);
     void (*bdrv_close)(BlockDriverState *bs);
 
-    int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_create)(
+    int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create)(
         BlockdevCreateOptions *opts, Error **errp);
 
-    int coroutine_fn GRAPH_RDLOCK_PTR (*bdrv_co_create_opts)(
+    int coroutine_fn GRAPH_UNLOCKED_PTR (*bdrv_co_create_opts)(
         BlockDriver *drv, const char *filename, QemuOpts *opts, Error **errp);
 
     int (*bdrv_amend_options)(BlockDriverState *bs,
--
cgit v1.2.3

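As a rough sketch of what the unlocked convention means for a
.bdrv_co_create_opts implementation, not taken from this series: the
callback now runs GRAPH_UNLOCKED, blk_*() helpers take the graph lock
internally where they need it, and only direct bdrv_*() calls require an
explicit reader lock. The driver name, the fixed 64 MiB size and the
option handling below are made up for illustration; only the locking
pattern is the point.

    static int coroutine_fn GRAPH_UNLOCKED
    example_co_create_opts(BlockDriver *drv, const char *filename,
                           QemuOpts *opts, Error **errp)
    {
        BlockBackend *blk;
        int ret;

        /* Create the protocol-level file; GRAPH_UNLOCKED after this patch */
        ret = bdrv_co_create_file(filename, opts, errp);
        if (ret < 0) {
            return ret;
        }

        /* blk_*() helpers take the graph lock internally as needed */
        blk = blk_co_new_open(filename, NULL, NULL,
                              BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                              errp);
        if (!blk) {
            return -EIO;
        }

        /* A real driver would take the image size etc. from @opts */
        ret = blk_co_truncate(blk, 64 * MiB, false, PREALLOC_MODE_OFF, 0, errp);
        if (ret == 0) {
            /* A direct bdrv_*() call, by contrast, needs the reader lock */
            bdrv_graph_co_rdlock();
            ret = bdrv_co_flush(blk_bs(blk));
            bdrv_graph_co_rdunlock();
        }

        blk_co_unref(blk);
        return ret;
    }
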
From 018e5987b57589e6e9089c2d2ef31db4e7519fd5 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Wed, 10 May 2023 22:36:00 +0200
Subject: blockjob: Adhere to rate limit even when reentered early

When jobs are sleeping, for example to enforce a given rate limit, they
can be reentered early, in particular in order to get paused, to update
the rate limit or to get cancelled.

Before this patch, they behave in this case as if they had fully
completed their rate limiting delay. This means that requests are sped
up beyond their limit, violating the constraints that the user gave us.

Change the block jobs to sleep in a loop until the necessary delay is
completed, while still allowing cancelling them immediately as well as
pausing (handled by the pause point in job_sleep_ns()) and updating the
rate limit.

This change is also motivated by iotests cases being prone to fail
because drain operations pause and unpause them so often that block
jobs complete earlier than they are supposed to. In particular, the
next commit would fail iotests 030 without this change.

Signed-off-by: Kevin Wolf
Message-Id: <20230510203601.418015-8-kwolf@redhat.com>
Reviewed-by: Eric Blake
Signed-off-by: Kevin Wolf
---
 include/block/blockjob_int.h | 14 ++++++++++----
 1 file changed, 10 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/include/block/blockjob_int.h b/include/block/blockjob_int.h
index f008446285..104824040c 100644
--- a/include/block/blockjob_int.h
+++ b/include/block/blockjob_int.h
@@ -126,12 +126,18 @@ void block_job_user_resume(Job *job);
  */
 
 /**
- * block_job_ratelimit_get_delay:
+ * block_job_ratelimit_processed_bytes:
  *
- * Calculate and return delay for the next request in ns. See the documentation
- * of ratelimit_calculate_delay() for details.
+ * To be called after some work has been done. Adjusts the delay for the next
+ * request. See the documentation of ratelimit_calculate_delay() for details.
  */
-int64_t block_job_ratelimit_get_delay(BlockJob *job, uint64_t n);
+void block_job_ratelimit_processed_bytes(BlockJob *job, uint64_t n);
+
+/**
+ * Put the job to sleep (assuming that it wasn't canceled) to throttle it to the
+ * right speed according to its rate limiting.
+ */
+void block_job_ratelimit_sleep(BlockJob *job);
 
 /**
  * block_job_error_action:
--
cgit v1.2.3


From 7c1f51bf38de8cea4ed5030467646c37b46edeb7 Mon Sep 17 00:00:00 2001
From: Kevin Wolf
Date: Wed, 17 May 2023 17:28:33 +0200
Subject: nbd/server: Fix drained_poll to wake coroutine in right AioContext

nbd_drained_poll() generally runs in the main thread, not whatever
iothread the NBD server coroutine is meant to run in, so it can't
directly reenter the coroutines to wake them up.

The code seems to have the right intention: it specifies the correct
AioContext when it calls qemu_aio_coroutine_enter(). However, this
function doesn't schedule the coroutine to run in that AioContext, but
it assumes it is already called in the home thread of the AioContext.

To fix this, add a new thread-safe qio_channel_wake_read() that can be
called in the main thread to wake up the coroutine in its AioContext,
and use this in nbd_drained_poll().

Cc: qemu-stable@nongnu.org
Signed-off-by: Kevin Wolf
Message-Id: <20230517152834.277483-3-kwolf@redhat.com>
Reviewed-by: Eric Blake
Signed-off-by: Kevin Wolf
---
 include/io/channel.h | 10 ++++++++++
 1 file changed, 10 insertions(+)

(limited to 'include')

diff --git a/include/io/channel.h b/include/io/channel.h
index 446a566e5e..229bf36910 100644
--- a/include/io/channel.h
+++ b/include/io/channel.h
@@ -757,6 +757,16 @@ void qio_channel_detach_aio_context(QIOChannel *ioc);
 void coroutine_fn qio_channel_yield(QIOChannel *ioc,
                                     GIOCondition condition);
 
+/**
+ * qio_channel_wake_read:
+ * @ioc: the channel object
+ *
+ * If qio_channel_yield() is currently waiting for the channel to become
+ * readable, interrupt it and reenter immediately. This function is safe to call
+ * from any thread.
+ */
+void qio_channel_wake_read(QIOChannel *ioc);
+
 /**
  * qio_channel_wait:
  * @ioc: the channel object
--
cgit v1.2.3
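
To illustrate the "sleep in a loop until the necessary delay is
completed" behaviour from the blockjob patch above, here is a minimal
sketch of what the new block_job_ratelimit_sleep() helper could look
like. It is inferred from the commit message and the header declaration
(using the existing RateLimit in BlockJob and job_sleep_ns()'s built-in
pause point), not necessarily the exact blockjob.c code:

    /* Assumed sketch of the loop described in the commit message */
    void block_job_ratelimit_sleep(BlockJob *job)
    {
        int64_t delay_ns;

        /*
         * Sleep at least once. If the job is reentered early (pause, speed
         * change, ...), ask the rate limiter again how much delay is still
         * owed and keep sleeping, unless the job was cancelled in the
         * meantime. This is what keeps early wakeups from speeding the job
         * up beyond its configured limit.
         */
        do {
            delay_ns = ratelimit_calculate_delay(&job->limit, 0);
            job_sleep_ns(&job->job, delay_ns);
        } while (delay_ns && !job_is_cancelled(&job->job));
    }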