author     Peter Maydell <peter.maydell@linaro.org>   2014-08-29 18:40:04 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2014-08-29 18:40:04 +0100
commit     988f46361465db0d4fce50e71fa0ff8f9d20483e
tree       fa637b2f366d47581d9fc6dc01a0ef722e906d9a /aio-posix.c
parent     8b3030114a449e66c68450acaac4b66f26d91416
parent     8df3abfceef557551f00adac1618ddd6fe46f85c
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Block pull request
# gpg: Signature made Fri 29 Aug 2014 17:25:58 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>"
* remotes/stefanha/tags/block-pull-request: (35 commits)
quorum: Fix leak of opts in quorum_open
blkverify: Fix leak of opts in blkverify_open
nfs: Fix leak of opts in nfs_file_open
curl: Don't deref NULL pointer in call to aio_poll.
curl: Allow a cookie or cookies to be sent with http/https requests.
virtio-blk: allow drive_del with dataplane
block: acquire AioContext in do_drive_del()
linux-aio: avoid deadlock in nested aio_poll() calls
qemu-iotests: add multiwrite test cases
block: fix overlapping multiwrite requests
nbd: Follow the BDS' AIO context
block: Add AIO context notifiers
nbd: Drop nbd_can_read()
sheepdog: fix a core dump while do auto-reconnecting
aio-win32: add support for sockets
qemu-coroutine-io: fix for Win32
AioContext: introduce aio_prepare
aio-win32: add aio_set_dispatching optimization
test-aio: test timers on Windows too
AioContext: export and use aio_dispatch
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
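
For context on the drive_del items in the list above ("block: acquire AioContext in do_drive_del()", "virtio-blk: allow drive_del with dataplane"): code running in the main loop takes a BlockDriverState's AioContext lock before touching it, so a dataplane IOThread cannot run handlers on the same context concurrently. A minimal sketch of that pattern follows, assuming the usual bdrv_get_aio_context()/aio_context_acquire() API; the wrapper function, the bs variable and the header paths are illustrative assumptions, not code from this pull request.

    #include "block/aio.h"      /* aio_context_acquire(), aio_context_release() -- path assumed */
    #include "block/block.h"    /* BlockDriverState, bdrv_get_aio_context() -- path assumed */

    /* Illustrative only: serialize with the IOThread that owns bs's AioContext. */
    static void operate_on_bs(BlockDriverState *bs)
    {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        aio_context_acquire(aio_context);
        /* ... modify or drain bs while its handlers cannot run in another thread ... */
        aio_context_release(aio_context);
    }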
Diffstat (limited to 'aio-posix.c')
-rw-r--r--   aio-posix.c   58
1 file changed, 19 insertions(+), 39 deletions(-)
diff --git a/aio-posix.c b/aio-posix.c
index 2eada2e049..d3ac06e238 100644
--- a/aio-posix.c
+++ b/aio-posix.c
@@ -100,6 +100,11 @@ void aio_set_event_notifier(AioContext *ctx,
                        (IOHandler *)io_read, NULL, notifier);
 }
 
+bool aio_prepare(AioContext *ctx)
+{
+    return false;
+}
+
 bool aio_pending(AioContext *ctx)
 {
     AioHandler *node;
@@ -119,12 +124,21 @@ bool aio_pending(AioContext *ctx)
     return false;
 }
 
-static bool aio_dispatch(AioContext *ctx)
+bool aio_dispatch(AioContext *ctx)
 {
     AioHandler *node;
     bool progress = false;
 
     /*
+     * If there are callbacks left that have been queued, we need to call them.
+     * Do not call select in this case, because it is possible that the caller
+     * does not need a complete flush (as is the case for aio_poll loops).
+     */
+    if (aio_bh_poll(ctx)) {
+        progress = true;
+    }
+
+    /*
      * We have to walk very carefully in case aio_set_fd_handler is
      * called while we're walking.
      */
@@ -184,22 +198,9 @@ bool aio_poll(AioContext *ctx, bool blocking)
 
     /* aio_notify can avoid the expensive event_notifier_set if
      * everything (file descriptors, bottom halves, timers) will
-     * be re-evaluated before the next blocking poll(). This happens
-     * in two cases:
-     *
-     * 1) when aio_poll is called with blocking == false
-     *
-     * 2) when we are called after poll(). If we are called before
-     *    poll(), bottom halves will not be re-evaluated and we need
-     *    aio_notify() if blocking == true.
-     *
-     * The first aio_dispatch() only does something when AioContext is
-     * running as a GSource, and in that case aio_poll is used only
-     * with blocking == false, so this optimization is already quite
-     * effective. However, the code is ugly and should be restructured
-     * to have a single aio_dispatch() call. To do this, we need to
-     * reorganize aio_poll into a prepare/poll/dispatch model like
-     * glib's.
+     * be re-evaluated before the next blocking poll(). This is
+     * already true when aio_poll is called with blocking == false;
+     * if blocking == true, it is only true after poll() returns.
      *
      * If we're in a nested event loop, ctx->dispatching might be true.
      * In that case we can restore it just before returning, but we
@@ -207,26 +208,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
      */
     aio_set_dispatching(ctx, !blocking);
 
-    /*
-     * If there are callbacks left that have been queued, we need to call them.
-     * Do not call select in this case, because it is possible that the caller
-     * does not need a complete flush (as is the case for aio_poll loops).
-     */
-    if (aio_bh_poll(ctx)) {
-        blocking = false;
-        progress = true;
-    }
-
-    /* Re-evaluate condition (1) above. */
-    aio_set_dispatching(ctx, !blocking);
-    if (aio_dispatch(ctx)) {
-        progress = true;
-    }
-
-    if (progress && !blocking) {
-        goto out;
-    }
-
     ctx->walking_handlers++;
 
     g_array_set_size(ctx->pollfds, 0);
@@ -249,7 +230,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
     /* wait until next event */
     ret = qemu_poll_ns((GPollFD *)ctx->pollfds->data,
                        ctx->pollfds->len,
-                       blocking ? timerlistgroup_deadline_ns(&ctx->tlg) : 0);
+                       blocking ? aio_compute_timeout(ctx) : 0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
@@ -268,7 +249,6 @@ bool aio_poll(AioContext *ctx, bool blocking)
         progress = true;
     }
 
-out:
     aio_set_dispatching(ctx, was_dispatching);
     return progress;
 }
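
Taken together, the hunks above move bottom-half processing into aio_dispatch() and delete aio_poll()'s early-return path, without changing how callers drive the loop. A minimal caller-side sketch of the two usual idioms (blocking until a completion flag flips, and non-blocking draining); wait_for_completion(), drain_pending() and the done flag are illustrative, not part of this patch.

    #include <stdbool.h>
    #include "block/aio.h"   /* AioContext, aio_poll() -- path assumed */

    /* Illustrative only: block in poll() until a completion callback
     * registered elsewhere sets *done. */
    static void wait_for_completion(AioContext *ctx, bool *done)
    {
        while (!*done) {
            aio_poll(ctx, true);
        }
    }

    /* Illustrative only: dispatch whatever is already pending without
     * sleeping; aio_poll() returns true as long as it makes progress. */
    static void drain_pending(AioContext *ctx)
    {
        while (aio_poll(ctx, false)) {
            /* each iteration ran at least one handler, bottom half or timer */
        }
    }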