Diffstat (limited to 'include')
-rw-r--r--   include/block/aio.h         17
-rw-r--r--   include/block/blockjob.h     4
-rw-r--r--   include/block/coroutine.h    2
3 files changed, 5 insertions, 18 deletions
diff --git a/include/block/aio.h b/include/block/aio.h
index a92511bd3b..d81250cf2b 100644
--- a/include/block/aio.h
+++ b/include/block/aio.h
@@ -220,7 +220,7 @@ bool aio_poll(AioContext *ctx, bool blocking);
#ifdef CONFIG_POSIX
/* Register a file descriptor and associated callbacks. Behaves very similarly
* to qemu_set_fd_handler2. Unlike qemu_set_fd_handler2, these callbacks will
- * be invoked when using qemu_aio_wait().
+ * be invoked when using aio_poll().
*
* Code that invokes AIO completion functions should rely on this function
* instead of qemu_set_fd_handler[2].
@@ -234,7 +234,7 @@ void aio_set_fd_handler(AioContext *ctx,
/* Register an event notifier and associated callbacks. Behaves very similarly
* to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
- * will be invoked when using qemu_aio_wait().
+ * will be invoked when using aio_poll().
*
* Code that invokes AIO completion functions should rely on this function
* instead of event_notifier_set_handler.
@@ -251,19 +251,6 @@ GSource *aio_get_g_source(AioContext *ctx);
/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);
-/* Functions to operate on the main QEMU AioContext. */
-
-bool qemu_aio_wait(void);
-void qemu_aio_set_event_notifier(EventNotifier *notifier,
- EventNotifierHandler *io_read);
-
-#ifdef CONFIG_POSIX
-void qemu_aio_set_fd_handler(int fd,
- IOHandler *io_read,
- IOHandler *io_write,
- void *opaque);
-#endif
-
/**
* aio_timer_new:
* @ctx: the aio context
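The comments updated above describe handlers that are registered on a specific AioContext and dispatched by aio_poll() on that context. A minimal sketch of that pattern, assuming the five-argument aio_set_fd_handler() declared in this header under CONFIG_POSIX; FdExample, fd_read_ready and fd_example are hypothetical names used only for illustration:

#include "block/aio.h"

/* Hypothetical per-descriptor state for this sketch. */
typedef struct {
    int fd;
    bool done;
} FdExample;

static void fd_read_ready(void *opaque)
{
    FdExample *ex = opaque;

    /* ... read from ex->fd; set ex->done once the transfer finishes ... */
    ex->done = true;
}

void fd_example(AioContext *ctx, FdExample *ex)
{
    /* Register a read handler; NULL means no write handler is wanted. */
    aio_set_fd_handler(ctx, ex->fd, fd_read_ready, NULL, ex);

    /* Completions are driven by aio_poll() rather than qemu_aio_wait();
     * blocking=true sleeps until an event is ready to dispatch. */
    while (!ex->done) {
        aio_poll(ctx, true);
    }

    /* Passing NULL callbacks unregisters the descriptor again. */
    aio_set_fd_handler(ctx, ex->fd, NULL, NULL, NULL);
}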
diff --git a/include/block/blockjob.h b/include/block/blockjob.h
index f3cf63f9f5..60aa835ec0 100644
--- a/include/block/blockjob.h
+++ b/include/block/blockjob.h
@@ -74,7 +74,7 @@ struct BlockJob {
* Set to true if the job should cancel itself. The flag must
* always be tested just before toggling the busy flag from false
* to true. After a job has been cancelled, it should only yield
- * if #qemu_aio_wait will ("sooner or later") reenter the coroutine.
+ * if #aio_poll will ("sooner or later") reenter the coroutine.
*/
bool cancelled;
@@ -87,7 +87,7 @@ struct BlockJob {
/**
* Set to false by the job while it is in a quiescent state, where
* no I/O is pending and the job has yielded on any condition
- * that is not detected by #qemu_aio_wait, such as a timer.
+ * that is not detected by #aio_poll, such as a timer.
*/
bool busy;
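The two comments above encode an ordering rule between the cancelled and busy flags: test cancelled before entering a quiescent period, because a cancelled job may only yield on conditions that aio_poll() will eventually notice. A minimal sketch of a helper honouring that rule, loosely modelled on block_job_sleep_ns() in blockjob.c; example_job_sleep is a hypothetical name:

#include <assert.h>
#include "block/blockjob.h"
#include "block/coroutine.h"

static void coroutine_fn example_job_sleep(BlockJob *job, QEMUClockType type,
                                           int64_t ns)
{
    assert(job->busy);

    /* Test cancelled before going quiescent: once cancelled, the job must
     * not yield on a condition that aio_poll() cannot detect. */
    if (block_job_is_cancelled(job)) {
        return;
    }

    job->busy = false;          /* quiescent: no I/O pending */
    co_sleep_ns(type, ns);      /* main-loop timer, not visible to aio_poll() */
    job->busy = true;
}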
diff --git a/include/block/coroutine.h b/include/block/coroutine.h
index a1797ae3d8..b408f96b82 100644
--- a/include/block/coroutine.h
+++ b/include/block/coroutine.h
@@ -212,7 +212,7 @@ void coroutine_fn co_sleep_ns(QEMUClockType type, int64_t ns);
* Yield the coroutine for a given duration
*
* Behaves similarly to co_sleep_ns(), but the sleeping coroutine will be
- * resumed when using qemu_aio_wait().
+ * resumed when using aio_poll().
*/
void coroutine_fn co_aio_sleep_ns(AioContext *ctx, QEMUClockType type,
int64_t ns);
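With co_aio_sleep_ns() the sleeping coroutine is tied to an explicit AioContext, so a caller can resume it by running aio_poll() on that context alone. A minimal sketch, assuming the coroutine API of this era (qemu_coroutine_create() takes only the entry point, qemu_coroutine_enter() takes the opaque argument); SleepExample, sleep_entry and sleep_example are hypothetical names:

#include "block/aio.h"
#include "block/coroutine.h"

/* Hypothetical state shared between the caller and the coroutine. */
typedef struct {
    AioContext *ctx;
    bool done;
} SleepExample;

static void coroutine_fn sleep_entry(void *opaque)
{
    SleepExample *ex = opaque;

    /* Yield for 10 ms; aio_poll(ex->ctx, ...) fires the timer and
     * reenters the coroutine when the deadline passes. */
    co_aio_sleep_ns(ex->ctx, QEMU_CLOCK_REALTIME, 10 * 1000 * 1000);
    ex->done = true;
}

void sleep_example(AioContext *ctx)
{
    SleepExample ex = { .ctx = ctx, .done = false };
    Coroutine *co = qemu_coroutine_create(sleep_entry);

    qemu_coroutine_enter(co, &ex);  /* runs until co_aio_sleep_ns() yields */

    while (!ex.done) {
        aio_poll(ctx, true);        /* dispatches the timer, resumes co */
    }
}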