author     Peter Maydell <peter.maydell@linaro.org>    2020-03-11 14:41:27 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2020-03-11 14:41:27 +0000
commit     6e8a73e911f066527e775e04b98f31ebd19db600 (patch)
tree       e7e14f8a09d10c275e4d8164b8b3091fedcdbc46 /util
parent     ba29883206d92a29ad5a466e679ccfc2ee6132ef (diff)
parent     d37d0e365afb6825a90d8356fc6adcc1f58f40f3 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request

# gpg: Signature made Wed 11 Mar 2020 12:40:36 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request:
  aio-posix: remove idle poll handlers to improve scalability
  aio-posix: support userspace polling of fd monitoring
  aio-posix: add io_uring fd monitoring implementation
  aio-posix: simplify FDMonOps->update() prototype
  aio-posix: extract ppoll(2) and epoll(7) fd monitoring
  aio-posix: move RCU_READ_LOCK() into run_poll_handlers()
  aio-posix: completely stop polling when disabled
  aio-posix: remove confusing QLIST_SAFE_REMOVE()
  qemu/queue.h: clear linked list pointers on remove

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
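This series replaces the hard-coded ppoll(2)/epoll(7) logic in util/aio-posix.c with a pluggable FDMonOps file descriptor monitoring interface. FDMonOps itself lives in include/block/aio.h and is not part of this diff; a rough sketch of its shape, inferred from the fdmon_poll_ops, fdmon_epoll_ops and fdmon_io_uring_ops definitions below (the member comments are assumptions, not the upstream documentation):

    typedef struct {
        /* Tell the implementation that a handler was added, changed or removed */
        void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

        /* Wait up to @timeout ns and fill @ready_list with handlers that became
         * ready; returns the number of ready handlers */
        int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

        /* Return true when ->wait() must be called because userspace polling
         * alone cannot make progress (e.g. polling disabled, cqes pending) */
        bool (*need_wait)(AioContext *ctx);
    } FDMonOps;

aio_context_setup() picks the implementation at AioContext creation time: io_uring when available, otherwise plain poll(2) with an upgrade to epoll(7) once the fd count crosses EPOLL_ENABLE_THRESHOLD.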
Diffstat (limited to 'util')
-rw-r--r--  util/Makefile.objs      3
-rw-r--r--  util/aio-posix.c      451
-rw-r--r--  util/aio-posix.h       81
-rw-r--r--  util/fdmon-epoll.c    155
-rw-r--r--  util/fdmon-io_uring.c 332
-rw-r--r--  util/fdmon-poll.c     107
-rw-r--r--  util/trace-events       2

7 files changed, 824 insertions, 307 deletions
diff --git a/util/Makefile.objs b/util/Makefile.objs
index 6b38b67cf1..6718a38b61 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -5,6 +5,9 @@ util-obj-y += aiocb.o async.o aio-wait.o thread-pool.o qemu-timer.o
util-obj-y += main-loop.o
util-obj-$(call lnot,$(CONFIG_ATOMIC64)) += atomic64.o
util-obj-$(CONFIG_POSIX) += aio-posix.o
+util-obj-$(CONFIG_POSIX) += fdmon-poll.o
+util-obj-$(CONFIG_EPOLL_CREATE1) += fdmon-epoll.o
+util-obj-$(CONFIG_LINUX_IO_URING) += fdmon-io_uring.o
util-obj-$(CONFIG_POSIX) += compatfd.o
util-obj-$(CONFIG_POSIX) += event_notifier-posix.o
util-obj-$(CONFIG_POSIX) += mmap-alloc.o
diff --git a/util/aio-posix.c b/util/aio-posix.c
index 9e1befc0c0..cd6cf0a4a9 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -20,191 +20,25 @@
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
-#ifdef CONFIG_EPOLL_CREATE1
-#include <sys/epoll.h>
-#endif
+#include "aio-posix.h"
-struct AioHandler
-{
- GPollFD pfd;
- IOHandler *io_read;
- IOHandler *io_write;
- AioPollFn *io_poll;
- IOHandler *io_poll_begin;
- IOHandler *io_poll_end;
- void *opaque;
- bool is_external;
- QLIST_ENTRY(AioHandler) node;
- QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
- QLIST_ENTRY(AioHandler) node_deleted;
-};
-
-/* Add a handler to a ready list */
-static void add_ready_handler(AioHandlerList *ready_list,
- AioHandler *node,
- int revents)
-{
- QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
- node->pfd.revents = revents;
- QLIST_INSERT_HEAD(ready_list, node, node_ready);
-}
-
-#ifdef CONFIG_EPOLL_CREATE1
-
-/* The fd number threshold to switch to epoll */
-#define EPOLL_ENABLE_THRESHOLD 64
-
-static void aio_epoll_disable(AioContext *ctx)
-{
- ctx->epoll_enabled = false;
- if (!ctx->epoll_available) {
- return;
- }
- ctx->epoll_available = false;
- close(ctx->epollfd);
-}
-
-static inline int epoll_events_from_pfd(int pfd_events)
-{
- return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
- (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
- (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
- (pfd_events & G_IO_ERR ? EPOLLERR : 0);
-}
-
-static bool aio_epoll_try_enable(AioContext *ctx)
-{
- AioHandler *node;
- struct epoll_event event;
-
- QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
- int r;
- if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
- continue;
- }
- event.events = epoll_events_from_pfd(node->pfd.events);
- event.data.ptr = node;
- r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
- if (r) {
- return false;
- }
- }
- ctx->epoll_enabled = true;
- return true;
-}
-
-static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
-{
- struct epoll_event event;
- int r;
- int ctl;
-
- if (!ctx->epoll_enabled) {
- return;
- }
- if (!node->pfd.events) {
- ctl = EPOLL_CTL_DEL;
- } else {
- event.data.ptr = node;
- event.events = epoll_events_from_pfd(node->pfd.events);
- ctl = is_new ? EPOLL_CTL_ADD : EPOLL_CTL_MOD;
- }
-
- r = epoll_ctl(ctx->epollfd, ctl, node->pfd.fd, &event);
- if (r) {
- aio_epoll_disable(ctx);
- }
-}
-
-static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
- int64_t timeout)
-{
- GPollFD pfd = {
- .fd = ctx->epollfd,
- .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
- };
- AioHandler *node;
- int i, ret = 0;
- struct epoll_event events[128];
-
- if (timeout > 0) {
- ret = qemu_poll_ns(&pfd, 1, timeout);
- if (ret > 0) {
- timeout = 0;
- }
- }
- if (timeout <= 0 || ret > 0) {
- ret = epoll_wait(ctx->epollfd, events,
- ARRAY_SIZE(events),
- timeout);
- if (ret <= 0) {
- goto out;
- }
- for (i = 0; i < ret; i++) {
- int ev = events[i].events;
- int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
- (ev & EPOLLOUT ? G_IO_OUT : 0) |
- (ev & EPOLLHUP ? G_IO_HUP : 0) |
- (ev & EPOLLERR ? G_IO_ERR : 0);
-
- node = events[i].data.ptr;
- add_ready_handler(ready_list, node, revents);
- }
- }
-out:
- return ret;
-}
+/* Stop userspace polling on a handler if it isn't active for some time */
+#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)
-static bool aio_epoll_enabled(AioContext *ctx)
+bool aio_poll_disabled(AioContext *ctx)
{
- /* Fall back to ppoll when external clients are disabled. */
- return !aio_external_disabled(ctx) && ctx->epoll_enabled;
+ return atomic_read(&ctx->poll_disable_cnt);
}
-static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
- unsigned npfd, int64_t timeout)
+void aio_add_ready_handler(AioHandlerList *ready_list,
+ AioHandler *node,
+ int revents)
{
- if (!ctx->epoll_available) {
- return false;
- }
- if (aio_epoll_enabled(ctx)) {
- return true;
- }
- if (npfd >= EPOLL_ENABLE_THRESHOLD) {
- if (aio_epoll_try_enable(ctx)) {
- return true;
- } else {
- aio_epoll_disable(ctx);
- }
- }
- return false;
-}
-
-#else
-
-static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
-{
-}
-
-static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
- int64_t timeout)
-{
- assert(false);
-}
-
-static bool aio_epoll_enabled(AioContext *ctx)
-{
- return false;
-}
-
-static bool aio_epoll_check_poll(AioContext *ctx, GPollFD *pfds,
- unsigned npfd, int64_t timeout)
-{
- return false;
+ QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
+ node->pfd.revents = revents;
+ QLIST_INSERT_HEAD(ready_list, node, node_ready);
}
-#endif
-
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
AioHandler *node;
@@ -231,16 +65,23 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
g_source_remove_poll(&ctx->source, &node->pfd);
}
+ node->pfd.revents = 0;
+
+ /* If the fd monitor has already marked it deleted, leave it alone */
+ if (QLIST_IS_INSERTED(node, node_deleted)) {
+ return false;
+ }
+
/* If a read is in progress, just mark the node as deleted */
if (qemu_lockcnt_count(&ctx->list_lock)) {
QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
- node->pfd.revents = 0;
return false;
}
/* Otherwise, delete it for real. We can't just mark it as
* deleted because deleted nodes are only cleaned up while
* no one is walking the handlers list.
*/
+ QLIST_SAFE_REMOVE(node, node_poll);
QLIST_REMOVE(node, node);
return true;
}
@@ -300,9 +141,6 @@ void aio_set_fd_handler(AioContext *ctx,
QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
}
- if (node) {
- deleted = aio_remove_fd_handler(ctx, node);
- }
/* No need to order poll_disable_cnt writes against other updates;
* the counter is only used to avoid wasting time and latency on
@@ -313,11 +151,9 @@ void aio_set_fd_handler(AioContext *ctx,
atomic_set(&ctx->poll_disable_cnt,
atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
- if (new_node) {
- aio_epoll_update(ctx, new_node, is_new);
- } else if (node) {
- /* Unregister deleted fd_handler */
- aio_epoll_update(ctx, node, false);
+ ctx->fdmon_ops->update(ctx, node, new_node);
+ if (node) {
+ deleted = aio_remove_fd_handler(ctx, node);
}
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
@@ -361,18 +197,19 @@ void aio_set_event_notifier_poll(AioContext *ctx,
(IOHandler *)io_poll_end);
}
-static void poll_set_started(AioContext *ctx, bool started)
+static bool poll_set_started(AioContext *ctx, bool started)
{
AioHandler *node;
+ bool progress = false;
if (started == ctx->poll_started) {
- return;
+ return false;
}
ctx->poll_started = started;
qemu_lockcnt_inc(&ctx->list_lock);
- QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
IOHandler *fn;
if (QLIST_IS_INSERTED(node, node_deleted)) {
@@ -388,8 +225,15 @@ static void poll_set_started(AioContext *ctx, bool started)
if (fn) {
fn(node->opaque);
}
+
+ /* Poll one last time in case ->io_poll_end() raced with the event */
+ if (!started) {
+ progress = node->io_poll(node->opaque) || progress;
+ }
}
qemu_lockcnt_dec(&ctx->list_lock);
+
+ return progress;
}
@@ -446,6 +290,7 @@ static void aio_free_deleted_handlers(AioContext *ctx)
while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
QLIST_REMOVE(node, node);
QLIST_REMOVE(node, node_deleted);
+ QLIST_SAFE_REMOVE(node, node_poll);
g_free(node);
}
@@ -460,6 +305,22 @@ static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
revents = node->pfd.revents & node->pfd.events;
node->pfd.revents = 0;
+ /*
+ * Start polling AioHandlers when they become ready because activity is
+ * likely to continue. Note that starvation is theoretically possible when
+ * fdmon_supports_polling(), but only until the fd fires for the first
+ * time.
+ */
+ if (!QLIST_IS_INSERTED(node, node_deleted) &&
+ !QLIST_IS_INSERTED(node, node_poll) &&
+ node->io_poll) {
+ trace_poll_add(ctx, node, node->pfd.fd, revents);
+ if (ctx->poll_started && node->io_poll_begin) {
+ node->io_poll_begin(node->opaque);
+ }
+ QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
+ }
+
if (!QLIST_IS_INSERTED(node, node_deleted) &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
@@ -493,7 +354,7 @@ static bool aio_dispatch_ready_handlers(AioContext *ctx,
AioHandler *node;
while ((node = QLIST_FIRST(ready_list))) {
- QLIST_SAFE_REMOVE(node, node_ready);
+ QLIST_REMOVE(node, node_ready);
progress = aio_dispatch_handler(ctx, node) || progress;
}
@@ -524,71 +385,19 @@ void aio_dispatch(AioContext *ctx)
timerlistgroup_run_timers(&ctx->tlg);
}
-/* These thread-local variables are used only in a small part of aio_poll
- * around the call to the poll() system call. In particular they are not
- * used while aio_poll is performing callbacks, which makes it much easier
- * to think about reentrancy!
- *
- * Stack-allocated arrays would be perfect but they have size limitations;
- * heap allocation is expensive enough that we want to reuse arrays across
- * calls to aio_poll(). And because poll() has to be called without holding
- * any lock, the arrays cannot be stored in AioContext. Thread-local data
- * has none of the disadvantages of these three options.
- */
-static __thread GPollFD *pollfds;
-static __thread AioHandler **nodes;
-static __thread unsigned npfd, nalloc;
-static __thread Notifier pollfds_cleanup_notifier;
-
-static void pollfds_cleanup(Notifier *n, void *unused)
-{
- g_assert(npfd == 0);
- g_free(pollfds);
- g_free(nodes);
- nalloc = 0;
-}
-
-static void add_pollfd(AioHandler *node)
-{
- if (npfd == nalloc) {
- if (nalloc == 0) {
- pollfds_cleanup_notifier.notify = pollfds_cleanup;
- qemu_thread_atexit_add(&pollfds_cleanup_notifier);
- nalloc = 8;
- } else {
- g_assert(nalloc <= INT_MAX);
- nalloc *= 2;
- }
- pollfds = g_renew(GPollFD, pollfds, nalloc);
- nodes = g_renew(AioHandler *, nodes, nalloc);
- }
- nodes[npfd] = node;
- pollfds[npfd] = (GPollFD) {
- .fd = node->pfd.fd,
- .events = node->pfd.events,
- };
- npfd++;
-}
-
-static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
+static bool run_poll_handlers_once(AioContext *ctx,
+ int64_t now,
+ int64_t *timeout)
{
bool progress = false;
AioHandler *node;
+ AioHandler *tmp;
- /*
- * Optimization: ->io_poll() handlers often contain RCU read critical
- * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
- * -> rcu_read_lock() -> ... sequences with expensive memory
- * synchronization primitives. Make the entire polling loop an RCU
- * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
- * are cheap.
- */
- RCU_READ_LOCK_GUARD();
-
- QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
- if (!QLIST_IS_INSERTED(node, node_deleted) && node->io_poll &&
- aio_node_check(ctx, node->is_external) &&
+ QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
+ if (aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque)) {
+ node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
+
/*
* Polling was successful, exit try_poll_mode immediately
* to adjust the next polling time.
@@ -605,6 +414,50 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
return progress;
}
+static bool fdmon_supports_polling(AioContext *ctx)
+{
+ return ctx->fdmon_ops->need_wait != aio_poll_disabled;
+}
+
+static bool remove_idle_poll_handlers(AioContext *ctx, int64_t now)
+{
+ AioHandler *node;
+ AioHandler *tmp;
+ bool progress = false;
+
+ /*
+ * File descriptor monitoring implementations without userspace polling
+ * support suffer from starvation when a subset of handlers is polled
+ * because fds will not be processed in a timely fashion. Don't remove
+ * idle poll handlers.
+ */
+ if (!fdmon_supports_polling(ctx)) {
+ return false;
+ }
+
+ QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
+ if (node->poll_idle_timeout == 0LL) {
+ node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
+ } else if (now >= node->poll_idle_timeout) {
+ trace_poll_remove(ctx, node, node->pfd.fd);
+ node->poll_idle_timeout = 0LL;
+ QLIST_SAFE_REMOVE(node, node_poll);
+ if (ctx->poll_started && node->io_poll_end) {
+ node->io_poll_end(node->opaque);
+
+ /*
+ * Final poll in case ->io_poll_end() races with an event.
+ * Never mind re-adding the handler in the rare case where
+ * this causes progress.
+ */
+ progress = node->io_poll(node->opaque) || progress;
+ }
+ }
+ }
+
+ return progress;
+}
+
/* run_poll_handlers:
* @ctx: the AioContext
* @max_ns: maximum time to poll for, in nanoseconds
@@ -628,13 +481,28 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
trace_run_poll_handlers_begin(ctx, max_ns, *timeout);
+ /*
+ * Optimization: ->io_poll() handlers often contain RCU read critical
+ * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
+ * -> rcu_read_lock() -> ... sequences with expensive memory
+ * synchronization primitives. Make the entire polling loop an RCU
+ * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
+ * are cheap.
+ */
+ RCU_READ_LOCK_GUARD();
+
start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
do {
- progress = run_poll_handlers_once(ctx, timeout);
+ progress = run_poll_handlers_once(ctx, start_time, timeout);
elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
max_ns = qemu_soonest_timeout(*timeout, max_ns);
assert(!(max_ns && progress));
- } while (elapsed_time < max_ns && !atomic_read(&ctx->poll_disable_cnt));
+ } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));
+
+ if (remove_idle_poll_handlers(ctx, start_time + elapsed_time)) {
+ *timeout = 0;
+ progress = true;
+ }
/* If time has passed with no successful polling, adjust *timeout to
* keep the same ending time.
@@ -660,9 +528,14 @@ static bool run_poll_handlers(AioContext *ctx, int64_t max_ns, int64_t *timeout)
*/
static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
{
- int64_t max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+ int64_t max_ns;
- if (max_ns && !atomic_read(&ctx->poll_disable_cnt)) {
+ if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
+ return false;
+ }
+
+ max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
+ if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
poll_set_started(ctx, true);
if (run_poll_handlers(ctx, max_ns, timeout)) {
@@ -670,19 +543,17 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
}
}
- poll_set_started(ctx, false);
+ if (poll_set_started(ctx, false)) {
+ *timeout = 0;
+ return true;
+ }
- /* Even if we don't run busy polling, try polling once in case it can make
- * progress and the caller will be able to avoid ppoll(2)/epoll_wait(2).
- */
- return run_poll_handlers_once(ctx, timeout);
+ return false;
}
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
- AioHandler *node;
- int i;
int ret = 0;
bool progress;
int64_t timeout;
@@ -714,27 +585,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* If polling is allowed, non-blocking aio_poll does not need the
* system call---a single round of run_poll_handlers_once suffices.
*/
- if (timeout || atomic_read(&ctx->poll_disable_cnt)) {
- assert(npfd == 0);
-
- /* fill pollfds */
-
- if (!aio_epoll_enabled(ctx)) {
- QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
- if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
- && aio_node_check(ctx, node->is_external)) {
- add_pollfd(node);
- }
- }
- }
-
- /* wait until next event */
- if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
- npfd = 0; /* pollfds[] is not being used */
- ret = aio_epoll(ctx, &ready_list, timeout);
- } else {
- ret = qemu_poll_ns(pollfds, npfd, timeout);
- }
+ if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
+ ret = ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
}
if (blocking) {
@@ -783,19 +635,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
}
}
- /* if we have any readable fds, dispatch event */
- if (ret > 0) {
- for (i = 0; i < npfd; i++) {
- int revents = pollfds[i].revents;
-
- if (revents) {
- add_ready_handler(&ready_list, nodes[i], revents);
- }
- }
- }
-
- npfd = 0;
-
progress |= aio_bh_poll(ctx);
if (ret > 0) {
@@ -813,23 +652,21 @@ bool aio_poll(AioContext *ctx, bool blocking)
void aio_context_setup(AioContext *ctx)
{
-#ifdef CONFIG_EPOLL_CREATE1
- assert(!ctx->epollfd);
- ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
- if (ctx->epollfd == -1) {
- fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
- ctx->epoll_available = false;
- } else {
- ctx->epoll_available = true;
+ ctx->fdmon_ops = &fdmon_poll_ops;
+ ctx->epollfd = -1;
+
+ /* Use the fastest fd monitoring implementation if available */
+ if (fdmon_io_uring_setup(ctx)) {
+ return;
}
-#endif
+
+ fdmon_epoll_setup(ctx);
}
void aio_context_destroy(AioContext *ctx)
{
-#ifdef CONFIG_EPOLL_CREATE1
- aio_epoll_disable(ctx);
-#endif
+ fdmon_io_uring_destroy(ctx);
+ fdmon_epoll_disable(ctx);
}
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
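The AioHandlers managed above are registered by event-loop users through aio_set_fd_handler(), whose prototype is declared in include/block/aio.h rather than in this diff. A hedged usage sketch, with made-up callback names and a parameter order reconstructed from the QEMU 4.2/5.0-era API (it may differ in detail):

    #include "block/aio.h"

    static void my_read_cb(void *opaque)
    {
        /* fd became readable: consume the data */
    }

    static bool my_poll_cb(void *opaque)
    {
        /* userspace polling hook driven by run_poll_handlers_once();
         * return true if progress was made without blocking on the fd */
        return false;
    }

    static void my_register(AioContext *ctx, int fd, void *opaque)
    {
        aio_set_fd_handler(ctx, fd, true /* is_external */,
                           my_read_cb, NULL /* io_write */,
                           my_poll_cb, opaque);
    }

Handlers that supply an io_poll callback are the ones placed on ctx->poll_aio_handlers and polled in userspace until they stay idle for POLL_IDLE_INTERVAL_NS.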
diff --git a/util/aio-posix.h b/util/aio-posix.h
new file mode 100644
index 0000000000..c80c04506a
--- /dev/null
+++ b/util/aio-posix.h
@@ -0,0 +1,81 @@
+/*
+ * AioContext POSIX event loop implementation internal APIs
+ *
+ * Copyright IBM, Corp. 2008
+ * Copyright Red Hat, Inc. 2020
+ *
+ * Authors:
+ * Anthony Liguori <aliguori@us.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#ifndef AIO_POSIX_H
+#define AIO_POSIX_H
+
+#include "block/aio.h"
+
+struct AioHandler {
+ GPollFD pfd;
+ IOHandler *io_read;
+ IOHandler *io_write;
+ AioPollFn *io_poll;
+ IOHandler *io_poll_begin;
+ IOHandler *io_poll_end;
+ void *opaque;
+ QLIST_ENTRY(AioHandler) node;
+ QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
+ QLIST_ENTRY(AioHandler) node_deleted;
+ QLIST_ENTRY(AioHandler) node_poll;
+#ifdef CONFIG_LINUX_IO_URING
+ QSLIST_ENTRY(AioHandler) node_submitted;
+ unsigned flags; /* see fdmon-io_uring.c */
+#endif
+ int64_t poll_idle_timeout; /* when to stop userspace polling */
+ bool is_external;
+};
+
+/* Add a handler to a ready list */
+void aio_add_ready_handler(AioHandlerList *ready_list, AioHandler *node,
+ int revents);
+
+extern const FDMonOps fdmon_poll_ops;
+
+#ifdef CONFIG_EPOLL_CREATE1
+bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd);
+void fdmon_epoll_setup(AioContext *ctx);
+void fdmon_epoll_disable(AioContext *ctx);
+#else
+static inline bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
+{
+ return false;
+}
+
+static inline void fdmon_epoll_setup(AioContext *ctx)
+{
+}
+
+static inline void fdmon_epoll_disable(AioContext *ctx)
+{
+}
+#endif /* !CONFIG_EPOLL_CREATE1 */
+
+#ifdef CONFIG_LINUX_IO_URING
+bool fdmon_io_uring_setup(AioContext *ctx);
+void fdmon_io_uring_destroy(AioContext *ctx);
+#else
+static inline bool fdmon_io_uring_setup(AioContext *ctx)
+{
+ return false;
+}
+
+static inline void fdmon_io_uring_destroy(AioContext *ctx)
+{
+}
+#endif /* !CONFIG_LINUX_IO_URING */
+
+#endif /* AIO_POSIX_H */
diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c
new file mode 100644
index 0000000000..fcd989d47d
--- /dev/null
+++ b/util/fdmon-epoll.c
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * epoll(7) file descriptor monitoring
+ */
+
+#include "qemu/osdep.h"
+#include <sys/epoll.h>
+#include "qemu/rcu_queue.h"
+#include "aio-posix.h"
+
+/* The fd number threshold to switch to epoll */
+#define EPOLL_ENABLE_THRESHOLD 64
+
+void fdmon_epoll_disable(AioContext *ctx)
+{
+ if (ctx->epollfd >= 0) {
+ close(ctx->epollfd);
+ ctx->epollfd = -1;
+ }
+
+ /* Switch back */
+ ctx->fdmon_ops = &fdmon_poll_ops;
+}
+
+static inline int epoll_events_from_pfd(int pfd_events)
+{
+ return (pfd_events & G_IO_IN ? EPOLLIN : 0) |
+ (pfd_events & G_IO_OUT ? EPOLLOUT : 0) |
+ (pfd_events & G_IO_HUP ? EPOLLHUP : 0) |
+ (pfd_events & G_IO_ERR ? EPOLLERR : 0);
+}
+
+static void fdmon_epoll_update(AioContext *ctx,
+ AioHandler *old_node,
+ AioHandler *new_node)
+{
+ struct epoll_event event = {
+ .data.ptr = new_node,
+ .events = new_node ? epoll_events_from_pfd(new_node->pfd.events) : 0,
+ };
+ int r;
+
+ if (!new_node) {
+ r = epoll_ctl(ctx->epollfd, EPOLL_CTL_DEL, old_node->pfd.fd, &event);
+ } else if (!old_node) {
+ r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, new_node->pfd.fd, &event);
+ } else {
+ r = epoll_ctl(ctx->epollfd, EPOLL_CTL_MOD, new_node->pfd.fd, &event);
+ }
+
+ if (r) {
+ fdmon_epoll_disable(ctx);
+ }
+}
+
+static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t timeout)
+{
+ GPollFD pfd = {
+ .fd = ctx->epollfd,
+ .events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
+ };
+ AioHandler *node;
+ int i, ret = 0;
+ struct epoll_event events[128];
+
+ /* Fall back while external clients are disabled */
+ if (atomic_read(&ctx->external_disable_cnt)) {
+ return fdmon_poll_ops.wait(ctx, ready_list, timeout);
+ }
+
+ if (timeout > 0) {
+ ret = qemu_poll_ns(&pfd, 1, timeout);
+ if (ret > 0) {
+ timeout = 0;
+ }
+ }
+ if (timeout <= 0 || ret > 0) {
+ ret = epoll_wait(ctx->epollfd, events,
+ ARRAY_SIZE(events),
+ timeout);
+ if (ret <= 0) {
+ goto out;
+ }
+ for (i = 0; i < ret; i++) {
+ int ev = events[i].events;
+ int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
+ (ev & EPOLLOUT ? G_IO_OUT : 0) |
+ (ev & EPOLLHUP ? G_IO_HUP : 0) |
+ (ev & EPOLLERR ? G_IO_ERR : 0);
+
+ node = events[i].data.ptr;
+ aio_add_ready_handler(ready_list, node, revents);
+ }
+ }
+out:
+ return ret;
+}
+
+static const FDMonOps fdmon_epoll_ops = {
+ .update = fdmon_epoll_update,
+ .wait = fdmon_epoll_wait,
+ .need_wait = aio_poll_disabled,
+};
+
+static bool fdmon_epoll_try_enable(AioContext *ctx)
+{
+ AioHandler *node;
+ struct epoll_event event;
+
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ int r;
+ if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
+ continue;
+ }
+ event.events = epoll_events_from_pfd(node->pfd.events);
+ event.data.ptr = node;
+ r = epoll_ctl(ctx->epollfd, EPOLL_CTL_ADD, node->pfd.fd, &event);
+ if (r) {
+ return false;
+ }
+ }
+
+ ctx->fdmon_ops = &fdmon_epoll_ops;
+ return true;
+}
+
+bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
+{
+ if (ctx->epollfd < 0) {
+ return false;
+ }
+
+ /* Do not upgrade while external clients are disabled */
+ if (atomic_read(&ctx->external_disable_cnt)) {
+ return false;
+ }
+
+ if (npfd >= EPOLL_ENABLE_THRESHOLD) {
+ if (fdmon_epoll_try_enable(ctx)) {
+ return true;
+ } else {
+ fdmon_epoll_disable(ctx);
+ }
+ }
+ return false;
+}
+
+void fdmon_epoll_setup(AioContext *ctx)
+{
+ ctx->epollfd = epoll_create1(EPOLL_CLOEXEC);
+ if (ctx->epollfd == -1) {
+ fprintf(stderr, "Failed to create epoll instance: %s", strerror(errno));
+ }
+}
diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
new file mode 100644
index 0000000000..893b79b622
--- /dev/null
+++ b/util/fdmon-io_uring.c
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Linux io_uring file descriptor monitoring
+ *
+ * The Linux io_uring API supports file descriptor monitoring with a few
+ * advantages over existing APIs like poll(2) and epoll(7):
+ *
+ * 1. Userspace polling of events is possible because the completion queue (cq
+ * ring) is shared between the kernel and userspace. This allows
+ * applications that rely on userspace polling to also monitor file
+ * descriptors in the same userspace polling loop.
+ *
+ * 2. Submission and completion is batched and done together in a single system
+ * call. This minimizes the number of system calls.
+ *
+ * 3. File descriptor monitoring is O(1) like epoll(7) so it scales better than
+ * poll(2).
+ *
+ * 4. Nanosecond timeouts are supported so it requires fewer syscalls than
+ * epoll(7).
+ *
+ * This code only monitors file descriptors and does not do asynchronous disk
+ * I/O. Implementing disk I/O efficiently has other requirements and should
+ * use a separate io_uring so it does not make sense to unify the code.
+ *
+ * File descriptor monitoring is implemented using the following operations:
+ *
+ * 1. IORING_OP_POLL_ADD - adds a file descriptor to be monitored.
+ * 2. IORING_OP_POLL_REMOVE - removes a file descriptor being monitored. When
+ * the poll mask changes for a file descriptor it is first removed and then
+ * re-added with the new poll mask, so this operation is also used as part
+ * of modifying an existing monitored file descriptor.
+ * 3. IORING_OP_TIMEOUT - added every time a blocking syscall is made to wait
+ * for events. This operation self-cancels if another event completes
+ * before the timeout.
+ *
+ * io_uring calls the submission queue the "sq ring" and the completion queue
+ * the "cq ring". Ring entries are called "sqe" and "cqe", respectively.
+ *
+ * The code is structured so that sq/cq rings are only modified within
+ * fdmon_io_uring_wait(). Changes to AioHandlers are made by enqueuing them on
+ * ctx->submit_list so that fdmon_io_uring_wait() can submit IORING_OP_POLL_ADD
+ * and/or IORING_OP_POLL_REMOVE sqes for them.
+ */
+
+#include "qemu/osdep.h"
+#include <poll.h>
+#include "qemu/rcu_queue.h"
+#include "aio-posix.h"
+
+enum {
+ FDMON_IO_URING_ENTRIES = 128, /* sq/cq ring size */
+
+ /* AioHandler::flags */
+ FDMON_IO_URING_PENDING = (1 << 0),
+ FDMON_IO_URING_ADD = (1 << 1),
+ FDMON_IO_URING_REMOVE = (1 << 2),
+};
+
+static inline int poll_events_from_pfd(int pfd_events)
+{
+ return (pfd_events & G_IO_IN ? POLLIN : 0) |
+ (pfd_events & G_IO_OUT ? POLLOUT : 0) |
+ (pfd_events & G_IO_HUP ? POLLHUP : 0) |
+ (pfd_events & G_IO_ERR ? POLLERR : 0);
+}
+
+static inline int pfd_events_from_poll(int poll_events)
+{
+ return (poll_events & POLLIN ? G_IO_IN : 0) |
+ (poll_events & POLLOUT ? G_IO_OUT : 0) |
+ (poll_events & POLLHUP ? G_IO_HUP : 0) |
+ (poll_events & POLLERR ? G_IO_ERR : 0);
+}
+
+/*
+ * Returns an sqe for submitting a request. May only be called within
+ * fdmon_io_uring_wait().
+ */
+static struct io_uring_sqe *get_sqe(AioContext *ctx)
+{
+ struct io_uring *ring = &ctx->fdmon_io_uring;
+ struct io_uring_sqe *sqe = io_uring_get_sqe(ring);
+ int ret;
+
+ if (likely(sqe)) {
+ return sqe;
+ }
+
+ /* No free sqes left, submit pending sqes first */
+ ret = io_uring_submit(ring);
+ assert(ret > 1);
+ sqe = io_uring_get_sqe(ring);
+ assert(sqe);
+ return sqe;
+}
+
+/* Atomically enqueue an AioHandler for sq ring submission */
+static void enqueue(AioHandlerSList *head, AioHandler *node, unsigned flags)
+{
+ unsigned old_flags;
+
+ old_flags = atomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags);
+ if (!(old_flags & FDMON_IO_URING_PENDING)) {
+ QSLIST_INSERT_HEAD_ATOMIC(head, node, node_submitted);
+ }
+}
+
+/* Dequeue an AioHandler for sq ring submission. Called by fill_sq_ring(). */
+static AioHandler *dequeue(AioHandlerSList *head, unsigned *flags)
+{
+ AioHandler *node = QSLIST_FIRST(head);
+
+ if (!node) {
+ return NULL;
+ }
+
+ /* Doesn't need to be atomic since fill_sq_ring() moves the list */
+ QSLIST_REMOVE_HEAD(head, node_submitted);
+
+ /*
+ * Don't clear FDMON_IO_URING_REMOVE. It's sticky so it can serve two
+ * purposes: telling fill_sq_ring() to submit IORING_OP_POLL_REMOVE and
+ * telling process_cqe() to delete the AioHandler when its
+ * IORING_OP_POLL_ADD completes.
+ */
+ *flags = atomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING |
+ FDMON_IO_URING_ADD));
+ return node;
+}
+
+static void fdmon_io_uring_update(AioContext *ctx,
+ AioHandler *old_node,
+ AioHandler *new_node)
+{
+ if (new_node) {
+ enqueue(&ctx->submit_list, new_node, FDMON_IO_URING_ADD);
+ }
+
+ if (old_node) {
+ /*
+ * Deletion is tricky because IORING_OP_POLL_ADD and
+ * IORING_OP_POLL_REMOVE are async. We need to wait for the original
+ * IORING_OP_POLL_ADD to complete before this handler can be freed
+ * safely.
+ *
+ * It's possible that the file descriptor becomes ready and the
+ * IORING_OP_POLL_ADD cqe is enqueued before IORING_OP_POLL_REMOVE is
+ * submitted, too.
+ *
+ * Mark this handler deleted right now but don't place it on
+ * ctx->deleted_aio_handlers yet. Instead, manually fudge the list
+ * entry to make QLIST_IS_INSERTED() think this handler has been
+ * inserted and other code recognizes this AioHandler as deleted.
+ *
+ * Once the original IORING_OP_POLL_ADD completes we enqueue the
+ * handler on the real ctx->deleted_aio_handlers list to be freed.
+ */
+ assert(!QLIST_IS_INSERTED(old_node, node_deleted));
+ old_node->node_deleted.le_prev = &old_node->node_deleted.le_next;
+
+ enqueue(&ctx->submit_list, old_node, FDMON_IO_URING_REMOVE);
+ }
+}
+
+static void add_poll_add_sqe(AioContext *ctx, AioHandler *node)
+{
+ struct io_uring_sqe *sqe = get_sqe(ctx);
+ int events = poll_events_from_pfd(node->pfd.events);
+
+ io_uring_prep_poll_add(sqe, node->pfd.fd, events);
+ io_uring_sqe_set_data(sqe, node);
+}
+
+static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node)
+{
+ struct io_uring_sqe *sqe = get_sqe(ctx);
+
+ io_uring_prep_poll_remove(sqe, node);
+}
+
+/* Add a timeout that self-cancels when another cqe becomes ready */
+static void add_timeout_sqe(AioContext *ctx, int64_t ns)
+{
+ struct io_uring_sqe *sqe;
+ struct __kernel_timespec ts = {
+ .tv_sec = ns / NANOSECONDS_PER_SECOND,
+ .tv_nsec = ns % NANOSECONDS_PER_SECOND,
+ };
+
+ sqe = get_sqe(ctx);
+ io_uring_prep_timeout(sqe, &ts, 1, 0);
+}
+
+/* Add sqes from ctx->submit_list for submission */
+static void fill_sq_ring(AioContext *ctx)
+{
+ AioHandlerSList submit_list;
+ AioHandler *node;
+ unsigned flags;
+
+ QSLIST_MOVE_ATOMIC(&submit_list, &ctx->submit_list);
+
+ while ((node = dequeue(&submit_list, &flags))) {
+ /* Order matters, just in case both flags were set */
+ if (flags & FDMON_IO_URING_ADD) {
+ add_poll_add_sqe(ctx, node);
+ }
+ if (flags & FDMON_IO_URING_REMOVE) {
+ add_poll_remove_sqe(ctx, node);
+ }
+ }
+}
+
+/* Returns true if a handler became ready */
+static bool process_cqe(AioContext *ctx,
+ AioHandlerList *ready_list,
+ struct io_uring_cqe *cqe)
+{
+ AioHandler *node = io_uring_cqe_get_data(cqe);
+ unsigned flags;
+
+ /* poll_timeout and poll_remove have a zero user_data field */
+ if (!node) {
+ return false;
+ }
+
+ /*
+ * Deletion can only happen when IORING_OP_POLL_ADD completes. If we race
+ * with enqueue() here then we can safely clear the FDMON_IO_URING_REMOVE
+ * bit before IORING_OP_POLL_REMOVE is submitted.
+ */
+ flags = atomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE);
+ if (flags & FDMON_IO_URING_REMOVE) {
+ QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
+ return false;
+ }
+
+ aio_add_ready_handler(ready_list, node, pfd_events_from_poll(cqe->res));
+
+ /* IORING_OP_POLL_ADD is one-shot so we must re-arm it */
+ add_poll_add_sqe(ctx, node);
+ return true;
+}
+
+static int process_cq_ring(AioContext *ctx, AioHandlerList *ready_list)
+{
+ struct io_uring *ring = &ctx->fdmon_io_uring;
+ struct io_uring_cqe *cqe;
+ unsigned num_cqes = 0;
+ unsigned num_ready = 0;
+ unsigned head;
+
+ io_uring_for_each_cqe(ring, head, cqe) {
+ if (process_cqe(ctx, ready_list, cqe)) {
+ num_ready++;
+ }
+
+ num_cqes++;
+ }
+
+ io_uring_cq_advance(ring, num_cqes);
+ return num_ready;
+}
+
+static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t timeout)
+{
+ unsigned wait_nr = 1; /* block until at least one cqe is ready */
+ int ret;
+
+ /* Fall back while external clients are disabled */
+ if (atomic_read(&ctx->external_disable_cnt)) {
+ return fdmon_poll_ops.wait(ctx, ready_list, timeout);
+ }
+
+ if (timeout == 0) {
+ wait_nr = 0; /* non-blocking */
+ } else if (timeout > 0) {
+ add_timeout_sqe(ctx, timeout);
+ }
+
+ fill_sq_ring(ctx);
+
+ ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+ assert(ret >= 0);
+
+ return process_cq_ring(ctx, ready_list);
+}
+
+static bool fdmon_io_uring_need_wait(AioContext *ctx)
+{
+ return io_uring_cq_ready(&ctx->fdmon_io_uring);
+}
+
+static const FDMonOps fdmon_io_uring_ops = {
+ .update = fdmon_io_uring_update,
+ .wait = fdmon_io_uring_wait,
+ .need_wait = fdmon_io_uring_need_wait,
+};
+
+bool fdmon_io_uring_setup(AioContext *ctx)
+{
+ int ret;
+
+ ret = io_uring_queue_init(FDMON_IO_URING_ENTRIES, &ctx->fdmon_io_uring, 0);
+ if (ret != 0) {
+ return false;
+ }
+
+ QSLIST_INIT(&ctx->submit_list);
+ ctx->fdmon_ops = &fdmon_io_uring_ops;
+ return true;
+}
+
+void fdmon_io_uring_destroy(AioContext *ctx)
+{
+ if (ctx->fdmon_ops == &fdmon_io_uring_ops) {
+ AioHandler *node;
+
+ io_uring_queue_exit(&ctx->fdmon_io_uring);
+
+ /* No need to submit these anymore, just free them. */
+ while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) {
+ QSLIST_REMOVE_HEAD_RCU(&ctx->submit_list, node_submitted);
+ QLIST_REMOVE(node, node);
+ g_free(node);
+ }
+
+ ctx->fdmon_ops = &fdmon_poll_ops;
+ }
+}
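The approach described in the fdmon-io_uring.c header (one-shot IORING_OP_POLL_ADD requests plus a self-cancelling IORING_OP_TIMEOUT, submitted and reaped in a single syscall) can be exercised outside QEMU with plain liburing. A minimal standalone sketch, assuming liburing is installed; nothing here is part of the series:

    /* poll_add_timeout_demo.c: gcc poll_add_timeout_demo.c -luring */
    #include <liburing.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        struct io_uring ring;
        struct io_uring_sqe *sqe;
        struct io_uring_cqe *cqe;
        struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };
        int fds[2];

        if (io_uring_queue_init(8, &ring, 0) < 0 || pipe(fds) < 0) {
            return 1;
        }

        /* One-shot poll on the pipe's read end, like add_poll_add_sqe() */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_poll_add(sqe, fds[0], POLLIN);
        io_uring_sqe_set_data(sqe, (void *)1);

        /* Timeout that fires after 1s or self-cancels once one cqe completes,
         * like add_timeout_sqe() */
        sqe = io_uring_get_sqe(&ring);
        io_uring_prep_timeout(sqe, &ts, 1, 0);

        (void)write(fds[1], "x", 1);          /* make the fd readable */
        io_uring_submit_and_wait(&ring, 1);   /* batched submit + wait */

        if (io_uring_wait_cqe(&ring, &cqe) == 0) {
            printf("user_data=%llu res=%d\n",
                   (unsigned long long)cqe->user_data, cqe->res);
            io_uring_cqe_seen(&ring, cqe);
        }

        io_uring_queue_exit(&ring);
        return 0;
    }

In the QEMU code above the same cycle is driven by fdmon_io_uring_wait(): fill_sq_ring() turns queued AioHandler changes into sqes, io_uring_submit_and_wait() batches them with the wait, and process_cq_ring() re-arms each completed IORING_OP_POLL_ADD because it is one-shot.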
diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c
new file mode 100644
index 0000000000..488067b679
--- /dev/null
+++ b/util/fdmon-poll.c
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * poll(2) file descriptor monitoring
+ *
+ * Uses ppoll(2) when available, g_poll() otherwise.
+ */
+
+#include "qemu/osdep.h"
+#include "aio-posix.h"
+#include "qemu/rcu_queue.h"
+
+/*
+ * These thread-local variables are used only in fdmon_poll_wait() around the
+ * call to the poll() system call. In particular they are not used while
+ * aio_poll is performing callbacks, which makes it much easier to think about
+ * reentrancy!
+ *
+ * Stack-allocated arrays would be perfect but they have size limitations;
+ * heap allocation is expensive enough that we want to reuse arrays across
+ * calls to aio_poll(). And because poll() has to be called without holding
+ * any lock, the arrays cannot be stored in AioContext. Thread-local data
+ * has none of the disadvantages of these three options.
+ */
+static __thread GPollFD *pollfds;
+static __thread AioHandler **nodes;
+static __thread unsigned npfd, nalloc;
+static __thread Notifier pollfds_cleanup_notifier;
+
+static void pollfds_cleanup(Notifier *n, void *unused)
+{
+ g_assert(npfd == 0);
+ g_free(pollfds);
+ g_free(nodes);
+ nalloc = 0;
+}
+
+static void add_pollfd(AioHandler *node)
+{
+ if (npfd == nalloc) {
+ if (nalloc == 0) {
+ pollfds_cleanup_notifier.notify = pollfds_cleanup;
+ qemu_thread_atexit_add(&pollfds_cleanup_notifier);
+ nalloc = 8;
+ } else {
+ g_assert(nalloc <= INT_MAX);
+ nalloc *= 2;
+ }
+ pollfds = g_renew(GPollFD, pollfds, nalloc);
+ nodes = g_renew(AioHandler *, nodes, nalloc);
+ }
+ nodes[npfd] = node;
+ pollfds[npfd] = (GPollFD) {
+ .fd = node->pfd.fd,
+ .events = node->pfd.events,
+ };
+ npfd++;
+}
+
+static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
+ int64_t timeout)
+{
+ AioHandler *node;
+ int ret;
+
+ assert(npfd == 0);
+
+ QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
+ if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
+ && aio_node_check(ctx, node->is_external)) {
+ add_pollfd(node);
+ }
+ }
+
+ /* epoll(7) is faster above a certain number of fds */
+ if (fdmon_epoll_try_upgrade(ctx, npfd)) {
+ return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
+ }
+
+ ret = qemu_poll_ns(pollfds, npfd, timeout);
+ if (ret > 0) {
+ int i;
+
+ for (i = 0; i < npfd; i++) {
+ int revents = pollfds[i].revents;
+
+ if (revents) {
+ aio_add_ready_handler(ready_list, nodes[i], revents);
+ }
+ }
+ }
+
+ npfd = 0;
+ return ret;
+}
+
+static void fdmon_poll_update(AioContext *ctx,
+ AioHandler *old_node,
+ AioHandler *new_node)
+{
+ /* Do nothing, AioHandler already contains the state we'll need */
+}
+
+const FDMonOps fdmon_poll_ops = {
+ .update = fdmon_poll_update,
+ .wait = fdmon_poll_wait,
+ .need_wait = aio_poll_disabled,
+};
diff --git a/util/trace-events b/util/trace-events
index 83b6639018..0ce42822eb 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -5,6 +5,8 @@ run_poll_handlers_begin(void *ctx, int64_t max_ns, int64_t timeout) "ctx %p max_
run_poll_handlers_end(void *ctx, bool progress, int64_t timeout) "ctx %p progress %d new timeout %"PRId64
poll_shrink(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
poll_grow(void *ctx, int64_t old, int64_t new) "ctx %p old %"PRId64" new %"PRId64
+poll_add(void *ctx, void *node, int fd, unsigned revents) "ctx %p node %p fd %d revents 0x%x"
+poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d"
# async.c
aio_co_schedule(void *ctx, void *co) "ctx %p co %p"