author     Paolo Bonzini <pbonzini@redhat.com>       2017-02-13 19:12:39 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>     2017-02-21 11:39:40 +0000
commit     fed20a70e39bb9385020bdc4e8839d95326df8e2 (patch)
tree       07af96e10e918b1393a1ea94205f8802f4a0d1df /util
parent     91bcea4899017891983b9149bd50cb283e78dfc0 (diff)
coroutine-lock: make CoMutex thread-safe
This uses the lock-free mutex described in the paper '"Blocking without Locking", or LFTHREADS: A lock-free thread library' by Gidenstam and Papatriantafilou. The same technique is used in OSv, and in fact the code is essentially a conversion to C of OSv's code.

[Added missing coroutine_fn in tests/test-aio-multithread.c. --Stefan]

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-id: 20170213181244.16297-2-pbonzini@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
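For orientation, this is how the CoMutex API being made thread-safe is consumed from coroutine context. A usage sketch only, not part of the patch; MyState and my_inc are invented names:

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

typedef struct MyState {
    CoMutex lock;       /* protects counter */
    int counter;
} MyState;

static void my_state_init(MyState *s)
{
    qemu_co_mutex_init(&s->lock);
    s->counter = 0;
}

/* Must run in coroutine context: taking a contended CoMutex yields
 * the coroutine instead of blocking the thread. */
static void coroutine_fn my_inc(MyState *s)
{
    qemu_co_mutex_lock(&s->lock);
    s->counter++;
    qemu_co_mutex_unlock(&s->lock);
}

With this patch, coroutines running my_inc may contend on s->lock from different threads; previously CoMutex was only safe when all users ran in the same AioContext.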
Diffstat (limited to 'util')
-rw-r--r--    util/qemu-coroutine-lock.c    153
-rw-r--r--    util/trace-events               1
2 files changed, 144 insertions(+), 10 deletions(-)
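The heart of the change, visible in the big hunk below, is that CoMutex->locked becomes a counter: the lock fast path is a single atomic increment and the unlock fast path a single atomic decrement. A standalone C11 model of just that counter trick (invented names; QEMU uses its own atomic_fetch_inc/atomic_fetch_dec wrappers):

/* 'locked' counts the holder plus any waiters.  fetch-and-inc
 * returning 0 means the lock was free; fetch-and-dec returning 1
 * means nobody else was waiting. */
#include <stdatomic.h>
#include <assert.h>

static atomic_uint locked;

static int fast_lock_uncontended(void)
{
    return atomic_fetch_add(&locked, 1) == 0;
}

static int fast_unlock_no_waiters(void)
{
    return atomic_fetch_sub(&locked, 1) == 1;
}

int main(void)
{
    assert(fast_lock_uncontended());    /* 0 -> 1: free, taken          */
    assert(!fast_lock_uncontended());   /* 1 -> 2: contended, slow path */
    assert(!fast_unlock_no_waiters());  /* 2 -> 1: must wake or hand off */
    assert(fast_unlock_no_waiters());   /* 1 -> 0: nothing to do        */
    return 0;
}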
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index e6afd1aa6c..25da9fa8d0 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -20,6 +20,10 @@
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
+ *
+ * The lock-free mutex implementation is based on OSv
+ * (core/lfmutex.cc, include/lockfree/mutex.hh).
+ * Copyright (C) 2013 Cloudius Systems, Ltd.
*/
#include "qemu/osdep.h"
@@ -111,27 +115,119 @@ bool qemu_co_queue_empty(CoQueue *queue)
return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}
+/* The wait records are handled with a multiple-producer, single-consumer
+ * lock-free queue. There cannot be two concurrent pop_waiter() calls
+ * because pop_waiter() can only be called while mutex->handoff is zero.
+ * This can happen in three cases:
+ * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
+ * In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
+ * not take part in the handoff.
+ * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
+ * qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
+ * the cmpxchg (it will see either 0 or the next sequence value) and
+ * exit. The next hand-off cannot begin until qemu_co_mutex_lock has
+ * woken up someone.
+ * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
+ * In this case another iteration starts with mutex->handoff == 0;
+ * a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
+ * qemu_co_mutex_unlock will go back to case (1).
+ *
+ * The following functions manage this queue.
+ */
+typedef struct CoWaitRecord {
+ Coroutine *co;
+ QSLIST_ENTRY(CoWaitRecord) next;
+} CoWaitRecord;
+
+static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
+{
+ w->co = qemu_coroutine_self();
+ QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
+}
+
+static void move_waiters(CoMutex *mutex)
+{
+ QSLIST_HEAD(, CoWaitRecord) reversed;
+ QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
+ while (!QSLIST_EMPTY(&reversed)) {
+ CoWaitRecord *w = QSLIST_FIRST(&reversed);
+ QSLIST_REMOVE_HEAD(&reversed, next);
+ QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
+ }
+}
+
+static CoWaitRecord *pop_waiter(CoMutex *mutex)
+{
+ CoWaitRecord *w;
+
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
+ move_waiters(mutex);
+ if (QSLIST_EMPTY(&mutex->to_pop)) {
+ return NULL;
+ }
+ }
+ w = QSLIST_FIRST(&mutex->to_pop);
+ QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
+ return w;
+}
+
+static bool has_waiters(CoMutex *mutex)
+{
+ return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
+}
+
void qemu_co_mutex_init(CoMutex *mutex)
{
memset(mutex, 0, sizeof(*mutex));
- qemu_co_queue_init(&mutex->queue);
}
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
+static void coroutine_fn qemu_co_mutex_lock_slowpath(CoMutex *mutex)
{
Coroutine *self = qemu_coroutine_self();
+ CoWaitRecord w;
+ unsigned old_handoff;
trace_qemu_co_mutex_lock_entry(mutex, self);
+ w.co = self;
+ push_waiter(mutex, &w);
+
+ /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
+ * a concurrent unlock() the responsibility of waking somebody up.
+ */
+ old_handoff = atomic_mb_read(&mutex->handoff);
+ if (old_handoff &&
+ has_waiters(mutex) &&
+ atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
+ /* There can be no concurrent pops, because there can be only
+ * one active handoff at a time.
+ */
+ CoWaitRecord *to_wake = pop_waiter(mutex);
+ Coroutine *co = to_wake->co;
+ if (co == self) {
+ /* We got the lock ourselves! */
+ assert(to_wake == &w);
+ return;
+ }
- while (mutex->locked) {
- qemu_co_queue_wait(&mutex->queue);
+ aio_co_wake(co);
}
- mutex->locked = true;
+ qemu_coroutine_yield();
+ trace_qemu_co_mutex_lock_return(mutex, self);
+}
+
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
+{
+ Coroutine *self = qemu_coroutine_self();
+
+ if (atomic_fetch_inc(&mutex->locked) == 0) {
+ /* Uncontended. */
+ trace_qemu_co_mutex_lock_uncontended(mutex, self);
+ } else {
+ qemu_co_mutex_lock_slowpath(mutex);
+ }
mutex->holder = self;
self->locks_held++;
-
- trace_qemu_co_mutex_lock_return(mutex, self);
}
void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
@@ -140,14 +236,51 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
trace_qemu_co_mutex_unlock_entry(mutex, self);
- assert(mutex->locked == true);
+ assert(mutex->locked);
assert(mutex->holder == self);
assert(qemu_in_coroutine());
- mutex->locked = false;
mutex->holder = NULL;
self->locks_held--;
- qemu_co_queue_next(&mutex->queue);
+ if (atomic_fetch_dec(&mutex->locked) == 1) {
+ /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
+ return;
+ }
+
+ for (;;) {
+ CoWaitRecord *to_wake = pop_waiter(mutex);
+ unsigned our_handoff;
+
+ if (to_wake) {
+ Coroutine *co = to_wake->co;
+ aio_co_wake(co);
+ break;
+ }
+
+ /* Some concurrent lock() is in progress (we know this because
+ * mutex->locked was >1) but it hasn't yet put itself on the wait
+ * queue. Pick a sequence number for the handoff protocol (not 0).
+ */
+ if (++mutex->sequence == 0) {
+ mutex->sequence = 1;
+ }
+
+ our_handoff = mutex->sequence;
+ atomic_mb_set(&mutex->handoff, our_handoff);
+ if (!has_waiters(mutex)) {
+ /* The concurrent lock has not added itself yet, so it
+ * will be able to pick our handoff.
+ */
+ break;
+ }
+
+ /* Try to do the handoff protocol ourselves; if somebody else has
+ * already taken it, however, we're done and they're responsible.
+ */
+ if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
+ break;
+ }
+ }
trace_qemu_co_mutex_unlock_return(mutex, self);
}
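The wait records above use a classic multiple-producer, single-consumer scheme: producers push onto an atomic LIFO head (QSLIST_INSERT_HEAD_ATOMIC), and the single consumer grabs the whole chain in one shot (QSLIST_MOVE_ATOMIC) and reverses it into a private list to recover FIFO order. A standalone C11 sketch of the same scheme; Waiter and the function names here are illustrative, not QEMU's:

#include <stdatomic.h>
#include <stdio.h>

typedef struct Waiter {
    int id;
    struct Waiter *next;
} Waiter;

static _Atomic(Waiter *) from_push;  /* MPSC input stack (LIFO) */
static Waiter *to_pop;               /* consumer-private, FIFO  */

static void push_waiter(Waiter *w)   /* safe from any thread */
{
    Waiter *old = atomic_load(&from_push);
    do {
        w->next = old;
    } while (!atomic_compare_exchange_weak(&from_push, &old, w));
}

static Waiter *pop_waiter(void)      /* single consumer only */
{
    if (!to_pop) {
        /* Take the whole input stack at once, then reverse it. */
        Waiter *w = atomic_exchange(&from_push, NULL);
        while (w) {
            Waiter *next = w->next;
            w->next = to_pop;
            to_pop = w;
            w = next;
        }
    }
    Waiter *w = to_pop;
    if (w) {
        to_pop = w->next;
    }
    return w;
}

int main(void)
{
    Waiter a = {1}, b = {2}, c = {3};
    push_waiter(&a);
    push_waiter(&b);
    push_waiter(&c);
    for (Waiter *w; (w = pop_waiter()); ) {
        printf("woke waiter %d\n", w->id);  /* 1, 2, 3: FIFO order */
    }
    return 0;
}

The single-consumer requirement is exactly what the comment in the patch establishes: the hand-off protocol guarantees there is never more than one concurrent pop_waiter() call.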
diff --git a/util/trace-events b/util/trace-events
index 65c978715a..ac27d94a97 100644
--- a/util/trace-events
+++ b/util/trace-events
@@ -28,6 +28,7 @@ qemu_coroutine_terminate(void *co) "self %p"
# util/qemu-coroutine-lock.c
qemu_co_queue_run_restart(void *co) "co %p"
+qemu_co_mutex_lock_uncontended(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_entry(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_lock_return(void *mutex, void *self) "mutex %p self %p"
qemu_co_mutex_unlock_entry(void *mutex, void *self) "mutex %p self %p"
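The new trace point is enabled like the existing qemu_co_mutex_* events. For example, assuming a build with the "log" trace backend (the exact option syntax depends on the build; see docs/tracing.txt):

$ qemu-system-x86_64 -trace "qemu_co_mutex_*" ... 2>mutex-trace.log

Each hit then emits a line built from the format string above, along the lines of "qemu_co_mutex_lock_uncontended mutex 0x... self 0x...".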