| author | Peter Maydell <peter.maydell@linaro.org> | 2020-04-09 19:00:41 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2020-04-09 19:00:41 +0100 |
| commit | 17e1e49814096a3daaa8e5a73acd56a0f30bdc18 (patch) | |
| tree | a652eb8d95fe90b4e9ba8056ca5afadec87f89dc /util | |
| parent | 8bac3ba57eecc466b7e73dabf7d19328a59f684e (diff) | |
| parent | 5710a3e09f9b85801e5ce70797a4a511e5fc9e2c (diff) | |
Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging
Pull request
Fixes for QEMU on aarch64 ARM hosts and fdmon-io_uring.
# gpg: Signature made Thu 09 Apr 2020 18:42:01 BST
# gpg: using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg: aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35 775A 9CA4 ABB3 81AB 73C8
* remotes/stefanha/tags/block-pull-request:
async: use explicit memory barriers
aio-wait: delegate polling of main AioContext if BQL not held
aio-posix: signal-proof fdmon-io_uring
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
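
The "async: use explicit memory barriers" change replaces implicit ordering around ctx->notify_me with explicit smp_mb() calls and avoids atomic read-modify-write instructions, which are slow on aarch64 hosts. Below is a minimal, self-contained sketch of the Dekker-style protocol the diff's comments describe, written in portable C11 atomics. This is NOT QEMU code: notify_me, work_pending, and wake_event are stand-in names for ctx->notify_me, bh->scheduled, and the AioContext event notifier. Each side writes its flag, issues a full barrier, then reads the other side's flag, so at least one of the two threads is guaranteed to observe the other.

```c
/* Sketch only -- build with: cc -std=c11 sketch.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  notify_me;     /* poller is about to block */
static atomic_bool work_pending;  /* stands in for bh->scheduled */
static atomic_bool wake_event;    /* stands in for event_notifier_set() */

/* Poller side, shaped like aio_poll()/aio_ctx_prepare(). */
static bool poll_once(void)
{
    /* Only this thread updates notify_me, so a plain read-modify-write
     * plus an explicit barrier replaces the costly atomic RMW. */
    atomic_store(&notify_me, atomic_load(&notify_me) + 2);

    /* Write notify_me before reading work_pending.
     * Pairs with the fence in notify() below. */
    atomic_thread_fence(memory_order_seq_cst);

    bool have_work = atomic_load(&work_pending);
    /* ... block only if !have_work; a racing notify() is now
     * guaranteed to see notify_me != 0 and set wake_event ... */

    /* Finish polling before clearing the flag (release ordering). */
    atomic_store_explicit(&notify_me, atomic_load(&notify_me) - 2,
                          memory_order_release);
    return have_work;
}

/* Notifier side, shaped like aio_notify(). */
static void notify(void)
{
    atomic_store(&work_pending, true);

    /* Write work_pending before reading notify_me.
     * Pairs with the fence in poll_once(). */
    atomic_thread_fence(memory_order_seq_cst);

    if (atomic_load(&notify_me)) {
        atomic_store(&wake_event, true);   /* wake the poller */
    }
}

int main(void)
{
    notify();
    printf("poller saw work: %s\n", poll_once() ? "yes" : "no");
    return 0;
}
```

The release store when clearing notify_me mirrors atomic_store_release() in the patch: the results of the poll must be visible before a concurrent notifier can decide that no wakeup is needed.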
Diffstat (limited to 'util')

| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | util/aio-posix.c | 16 |
| -rw-r--r-- | util/aio-win32.c | 17 |
| -rw-r--r-- | util/async.c | 16 |
| -rw-r--r-- | util/fdmon-io_uring.c | 10 |

4 files changed, 48 insertions, 11 deletions
```diff
diff --git a/util/aio-posix.c b/util/aio-posix.c
index cd6cf0a4a9..c3613d299e 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -559,6 +559,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int64_t timeout;
     int64_t start = 0;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
     assert(in_aio_context_home_thread(ctx));
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -569,7 +574,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.).  Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -590,7 +601,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
     }
 
     if (blocking) {
-        atomic_sub(&ctx->notify_me, 2);
+        /* Finish the poll before clearing the flag.  */
+        atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
         aio_notify_accept(ctx);
     }
 
diff --git a/util/aio-win32.c b/util/aio-win32.c
index a23b9c364d..729d533faf 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -321,6 +321,12 @@ bool aio_poll(AioContext *ctx, bool blocking)
     int count;
     int timeout;
 
+    /*
+     * There cannot be two concurrent aio_poll calls for the same AioContext (or
+     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
+     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
+     */
+    assert(in_aio_context_home_thread(ctx));
     progress = false;
 
     /* aio_notify can avoid the expensive event_notifier_set if
@@ -331,7 +337,13 @@ bool aio_poll(AioContext *ctx, bool blocking)
      * so disable the optimization now.
      */
     if (blocking) {
-        atomic_add(&ctx->notify_me, 2);
+        atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+        /*
+         * Write ctx->notify_me before computing the timeout
+         * (reading bottom half flags, etc.).  Pairs with
+         * smp_mb in aio_notify().
+         */
+        smp_mb();
     }
 
     qemu_lockcnt_inc(&ctx->list_lock);
@@ -364,8 +376,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
         ret = WaitForMultipleObjects(count, events, FALSE, timeout);
         if (blocking) {
             assert(first);
-            assert(in_aio_context_home_thread(ctx));
-            atomic_sub(&ctx->notify_me, 2);
+            atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
             aio_notify_accept(ctx);
         }
 
diff --git a/util/async.c b/util/async.c
index b94518b948..3165a28f2f 100644
--- a/util/async.c
+++ b/util/async.c
@@ -249,7 +249,14 @@ aio_ctx_prepare(GSource *source, gint *timeout)
 {
     AioContext *ctx = (AioContext *) source;
 
-    atomic_or(&ctx->notify_me, 1);
+    atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+
+    /*
+     * Write ctx->notify_me before computing the timeout
+     * (reading bottom half flags, etc.).  Pairs with
+     * smp_mb in aio_notify().
+     */
+    smp_mb();
 
     /* We assume there is no timeout already supplied */
     *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));
@@ -268,7 +275,8 @@ aio_ctx_check(GSource *source)
     QEMUBH *bh;
     BHListSlice *s;
 
-    atomic_and(&ctx->notify_me, ~1);
+    /* Finish computing the timeout before clearing the flag.  */
+    atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
     aio_notify_accept(ctx);
 
     QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -411,10 +419,10 @@ LuringState *aio_get_linux_io_uring(AioContext *ctx)
 void aio_notify(AioContext *ctx)
 {
     /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
-     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
+     * with smp_mb in aio_ctx_prepare or aio_poll.
      */
     smp_mb();
-    if (ctx->notify_me) {
+    if (atomic_read(&ctx->notify_me)) {
         event_notifier_set(&ctx->notifier);
         atomic_mb_set(&ctx->notified, true);
     }
diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
index b4d6109f20..d5a80ed6fb 100644
--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c
@@ -88,7 +88,10 @@ static struct io_uring_sqe *get_sqe(AioContext *ctx)
     }
 
     /* No free sqes left, submit pending sqes first */
-    ret = io_uring_submit(ring);
+    do {
+        ret = io_uring_submit(ring);
+    } while (ret == -EINTR);
+
     assert(ret > 1);
     sqe = io_uring_get_sqe(ring);
     assert(sqe);
@@ -282,7 +285,10 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
 
     fill_sq_ring(ctx);
 
-    ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    do {
+        ret = io_uring_submit_and_wait(&ctx->fdmon_io_uring, wait_nr);
+    } while (ret == -EINTR);
+
     assert(ret >= 0);
 
     return process_cq_ring(ctx, ready_list);
```
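
The fdmon-io_uring hunks apply a common liburing idiom: io_uring_submit() and io_uring_submit_and_wait() return -EINTR when a signal interrupts them, so the call is retried rather than treated as a failure, and the assertions then only fire on real errors. The standalone program below illustrates the same retry pattern; it is a demonstration under the assumption that liburing is installed (link with -luring), not the QEMU code itself.

```c
#include <errno.h>
#include <liburing.h>
#include <stdio.h>
#include <string.h>

/* Retry submission until it is not interrupted by a signal,
 * the same shape as the patch's do/while loops. */
static int submit_retry(struct io_uring *ring)
{
    int ret;

    do {
        ret = io_uring_submit(ring);
    } while (ret == -EINTR);

    return ret;
}

int main(void)
{
    struct io_uring ring;
    struct io_uring_sqe *sqe;
    struct io_uring_cqe *cqe;
    int ret;

    ret = io_uring_queue_init(8, &ring, 0);
    if (ret < 0) {
        fprintf(stderr, "queue_init: %s\n", strerror(-ret));
        return 1;
    }

    sqe = io_uring_get_sqe(&ring);  /* cannot fail here: ring is empty */
    io_uring_prep_nop(sqe);         /* harmless no-op request */

    ret = submit_retry(&ring);
    printf("submitted %d sqe(s)\n", ret);

    do {                            /* waiting is interruptible too */
        ret = io_uring_wait_cqe(&ring, &cqe);
    } while (ret == -EINTR);
    if (ret == 0) {
        io_uring_cqe_seen(&ring, cqe);
    }

    io_uring_queue_exit(&ring);
    return 0;
}
```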