author     Chao Gao <chao.gao@intel.com>            2022-07-10 20:08:49 +0800
committer  Stefan Hajnoczi <stefanha@redhat.com>    2023-01-23 15:01:22 -0500
commit     816a430c517eae48da5a31207ca43151df3203b0
tree       b11ef6a89c70073c8dc92cfa7fb93078bbc005a5 /util/aio-posix.c
parent     00b1faea41d283e931256aa78aa975a369ec3ae6
util/aio: Defer disabling poll mode as long as possible
When we measure FIO read performance (cache=writethrough, bs=4k, iodepth=64) in VMs, ~80K/s notifications (e.g., EPT_MISCONFIG) are observed from guest to qemu.

It turns out those frequent notifications are caused by interference from worker threads. Worker threads queue bottom halves after completing IO requests. Pending bottom halves lead either to aio_compute_timeout() returning a zero timeout that is passed to try_poll_mode(), or to run_poll_handlers() reporting no progress after noticing pending aio_notify() events. Both cause try_poll_mode() to call poll_set_started(false) to disable poll mode. However, in both cases the timeout is already zero, so the event loop (i.e., aio_poll()) just processes the bottom halves and starts the next iteration. Disabling poll mode therefore adds no value; it only causes unnecessary notifications from the guest.

To minimize unnecessary notifications from the guest, defer disabling poll mode until the event loop is about to block.

With this patch applied, FIO seq-read performance (bs=4k, iodepth=64, cache=writethrough) in VMs increases from 330K to 413K IOPS.

Suggested-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Chao Gao <chao.gao@intel.com>
Message-id: 20220710120849.63086-1-chao.gao@intel.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
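To make the control-flow change easier to follow, here is a minimal, self-contained C model of the idea: keep poll mode enabled across iterations that will not block, and disable it only right before actually waiting. This is not QEMU code; the struct, the pending_bh counter, and the printouts are invented for illustration, and only the shape of the poll_set_started() decision mirrors the patch.

/*
 * Standalone model (not QEMU code) of "defer disabling poll mode".
 * Build with: cc -o poll-model poll-model.c
 */
#include <stdbool.h>
#include <stdio.h>

struct loop {
    bool poll_started;   /* while true, the guest may skip notifications */
    int pending_bh;      /* bottom halves queued by worker threads */
};

/* Toggle poll mode; returns true only if the state actually changed. */
static bool poll_set_started(struct loop *l, bool started)
{
    if (l->poll_started == started) {
        return false;
    }
    l->poll_started = started;
    printf("poll mode %s\n", started ? "enabled" : "disabled");
    return true;
}

static void one_iteration(struct loop *l)
{
    /* Pending bottom halves force a zero timeout, as aio_compute_timeout()
     * would, so this iteration must not block. */
    int timeout_ms = l->pending_bh ? 0 : 10;

    /* Like try_poll_mode() after the patch: enable polling eagerly and
     * never disable it here. */
    poll_set_started(l, true);

    if (timeout_ms) {
        /* Disable polling only right before blocking; if the call changed
         * anything, treat it as progress and make the wait non-blocking. */
        if (poll_set_started(l, false)) {
            timeout_ms = 0;
        }
        printf("waiting for events (timeout=%d ms)\n", timeout_ms);
    } else {
        printf("running %d bottom halves without blocking\n", l->pending_bh);
        l->pending_bh = 0;
    }
}

int main(void)
{
    struct loop l = { .poll_started = false, .pending_bh = 3 };

    one_iteration(&l);   /* work pending: polling stays on, no blocking */
    one_iteration(&l);   /* idle: polling is disabled, then the loop waits */
    return 0;
}

In the first iteration pending work keeps the timeout at zero, so polling stays enabled and the guest never has to notify; only the second, idle iteration pays the cost of disabling polling before it waits, which is exactly when the notification is actually needed.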
Diffstat (limited to 'util/aio-posix.c')
-rw-r--r--  util/aio-posix.c  |  21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 731f3826c0..6cc6256d53 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -585,18 +585,16 @@ static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
 
     max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
     if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Enable poll mode. It pairs with the poll_set_started() in
+         * aio_poll() which disables poll mode.
+         */
         poll_set_started(ctx, ready_list, true);
 
         if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
             return true;
         }
     }
 
-    if (poll_set_started(ctx, ready_list, false)) {
-        *timeout = 0;
-        return true;
-    }
-
     return false;
 }
 
@@ -657,6 +655,17 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * system call---a single round of run_poll_handlers_once suffices.
      */
     if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
+        /*
+         * Disable poll mode. poll mode should be disabled before the call
+         * of ctx->fdmon_ops->wait() so that guest's notification can wake
+         * up IO threads when some work becomes pending. It is essential to
+         * avoid hangs or unnecessary latency.
+         */
+        if (poll_set_started(ctx, &ready_list, false)) {
+            timeout = 0;
+            progress = true;
+        }
+
         ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
     }
 