author | Peter Maydell <peter.maydell@linaro.org> | 2015-07-22 12:52:34 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2015-07-22 12:52:34 +0100
commit | dc94bd9166af5236a56bd5bb06845911915a925c (patch)
tree | da7f57b66fdc24f5904dc64e2def74de296a9987 /async.c
parent | b9c46307996856d03ddc1527468ff5401ac03a79 (diff)
parent | 05e514b1d4d5bd4209e2c8bbc76ff05c85a235f3 (diff)
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
# gpg: Signature made Wed Jul 22 12:43:35 2015 BST using RSA key ID 81AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>"
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>"
* remotes/stefanha/tags/block-pull-request:
AioContext: optimize clearing the EventNotifier
AioContext: fix broken placement of event_notifier_test_and_clear
AioContext: fix broken ctx->dispatching optimization
aio-win32: reorganize polling loop
tests: remove irrelevant assertions from test-aio
qemu-timer: initialize "timers_done_ev" to set
mirror: Speed up bitmap initial scanning
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'async.c')
-rw-r--r-- | async.c | 35
1 file changed, 21 insertions, 14 deletions
@@ -184,6 +184,8 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
+    atomic_or(&ctx->notify_me, 1);
+
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
 
@@ -200,6 +202,9 @@ aio_ctx_check(GSource *source)
     AioContext *ctx = (AioContext *) source;
     QEMUBH *bh;
 
+    atomic_and(&ctx->notify_me, ~1);
+    aio_notify_accept(ctx);
+
     for (bh = ctx->first_bh; bh; bh = bh->next) {
         if (!bh->deleted && bh->scheduled) {
             return true;
@@ -254,24 +259,22 @@ ThreadPool *aio_get_thread_pool(AioContext *ctx)
     return ctx->thread_pool;
 }
 
-void aio_set_dispatching(AioContext *ctx, bool dispatching)
+void aio_notify(AioContext *ctx)
 {
-    ctx->dispatching = dispatching;
-    if (!dispatching) {
-        /* Write ctx->dispatching before reading e.g. bh->scheduled.
-         * Optimization: this is only needed when we're entering the "unsafe"
-         * phase where other threads must call event_notifier_set.
-         */
-        smp_mb();
+    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
+     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     */
+    smp_mb();
+    if (ctx->notify_me) {
+        event_notifier_set(&ctx->notifier);
+        atomic_mb_set(&ctx->notified, true);
     }
 }
 
-void aio_notify(AioContext *ctx)
+void aio_notify_accept(AioContext *ctx)
 {
-    /* Write e.g. bh->scheduled before reading ctx->dispatching. */
-    smp_mb();
-    if (!ctx->dispatching) {
-        event_notifier_set(&ctx->notifier);
+    if (atomic_xchg(&ctx->notified, false)) {
+        event_notifier_test_and_clear(&ctx->notifier);
     }
 }
 
@@ -286,6 +289,10 @@ static void aio_rfifolock_cb(void *opaque)
     aio_notify(opaque);
 }
 
+static void event_notifier_dummy_cb(EventNotifier *e)
+{
+}
+
 AioContext *aio_context_new(Error **errp)
 {
     int ret;
@@ -300,7 +307,7 @@ AioContext *aio_context_new(Error **errp)
     g_source_set_can_recurse(&ctx->source, true);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
-                           event_notifier_test_and_clear);
+                           event_notifier_dummy_cb);
     ctx->thread_pool = NULL;
     qemu_mutex_init(&ctx->bh_lock);
     rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
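For readers tracing the race these patches close, below is a minimal, self-contained C11 sketch of the notify_me/notified handshake the hunks above introduce. It is an illustration only: the names (Ctx, poller_prepare, poller_check, notify, kick) are invented for the example, and C11 atomics stand in for QEMU's atomic_*() macros and smp_mb(); it is not QEMU API.

```c
/* Sketch of the notify_me/notified handshake, under the assumptions
 * stated above.  C11 atomics replace QEMU's atomic_*() and smp_mb(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    atomic_int  notify_me;   /* nonzero while a poller may block */
    atomic_bool notified;    /* latched "wakeup pending" flag */
} Ctx;

static void kick(void)
{
    /* stands in for event_notifier_set(): wake the blocked poller */
    puts("kick");
}

/* Poller side, like aio_ctx_prepare(): advertise that we may block.
 * The seq_cst read-modify-write also provides the barrier the patch
 * relies on. */
static void poller_prepare(Ctx *ctx)
{
    atomic_fetch_or(&ctx->notify_me, 1);
}

/* Poller side, like aio_ctx_check() followed by aio_notify_accept(). */
static void poller_check(Ctx *ctx)
{
    atomic_fetch_and(&ctx->notify_me, ~1);      /* done blocking */
    if (atomic_exchange(&ctx->notified, false)) {
        /* consume the pending wakeup; in the patch this is
         * event_notifier_test_and_clear() */
        puts("wakeup consumed");
    }
}

/* Notifier side, like the new aio_notify(): only pay for the kick
 * when a poller might actually be blocked. */
static void notify(Ctx *ctx)
{
    /* the caller has already published work, e.g. set bh->scheduled */
    atomic_thread_fence(memory_order_seq_cst);  /* the smp_mb() above */
    if (atomic_load(&ctx->notify_me)) {
        kick();
        atomic_store(&ctx->notified, true);
    }
}

int main(void)
{
    Ctx ctx;
    atomic_init(&ctx.notify_me, 0);
    atomic_init(&ctx.notified, false);

    poller_prepare(&ctx);   /* poller announces it may block */
    notify(&ctx);           /* another thread queues work and kicks */
    poller_check(&ctx);     /* poller wakes and consumes the wakeup */
    return 0;
}
```

The point of the pairing: the notifier's fence and the poller's seq_cst RMW on notify_me ensure that either the notifier sees notify_me set and kicks, or the poller sees the newly scheduled work before it blocks; there is no interleaving in which the work is published but the poller sleeps without a wakeup, which is the bug the "fix broken ctx->dispatching optimization" commit addresses.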