Diffstat (limited to 'util/qemu-coroutine-lock.c')
-rw-r--r-- | util/qemu-coroutine-lock.c | 252
1 file changed, 227 insertions(+), 25 deletions(-)
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 14cf9ce458..6328eed26b 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -20,13 +20,19 @@
  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  * THE SOFTWARE.
+ *
+ * The lock-free mutex implementation is based on OSv
+ * (core/lfmutex.cc, include/lockfree/mutex.hh).
+ * Copyright (C) 2013 Cloudius Systems, Ltd.
  */
 
 #include "qemu/osdep.h"
 #include "qemu-common.h"
 #include "qemu/coroutine.h"
 #include "qemu/coroutine_int.h"
+#include "qemu/processor.h"
 #include "qemu/queue.h"
+#include "block/aio.h"
 #include "trace.h"
 
 void qemu_co_queue_init(CoQueue *queue)
@@ -34,12 +40,30 @@ void qemu_co_queue_init(CoQueue *queue)
     QSIMPLEQ_INIT(&queue->entries);
 }
 
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue)
+void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
 {
     Coroutine *self = qemu_coroutine_self();
     QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
+
+    if (mutex) {
+        qemu_co_mutex_unlock(mutex);
+    }
+
+    /* There is no race condition here. Other threads will call
+     * aio_co_schedule on our AioContext, which can reenter this
+     * coroutine but only after this yield and after the main loop
+     * has gone through the next iteration.
+     */
     qemu_coroutine_yield();
     assert(qemu_in_coroutine());
+
+    /* TODO: OSv implements wait morphing here, where the wakeup
+     * primitive automatically places the woken coroutine on the
+     * mutex's queue. This avoids the thundering herd effect.
+     */
+    if (mutex) {
+        qemu_co_mutex_lock(mutex);
+    }
 }
 
 /**
@@ -63,7 +87,6 @@ void qemu_co_queue_run_restart(Coroutine *co)
 
 static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
 {
-    Coroutine *self = qemu_coroutine_self();
     Coroutine *next;
 
     if (QSIMPLEQ_EMPTY(&queue->entries)) {
@@ -72,8 +95,7 @@ static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
 
     while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
         QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
-        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, next, co_queue_next);
-        trace_qemu_co_queue_next(next);
+        aio_co_wake(next);
         if (single) {
             break;
         }
@@ -112,27 +134,157 @@ bool qemu_co_queue_empty(CoQueue *queue)
     return QSIMPLEQ_FIRST(&queue->entries) == NULL;
 }
 
+/* The wait records are handled with a multiple-producer, single-consumer
+ * lock-free queue. There cannot be two concurrent pop_waiter() calls
+ * because pop_waiter() can only be called while mutex->handoff is zero.
+ * This can happen in three cases:
+ * - in qemu_co_mutex_unlock, before the hand-off protocol has started.
+ *   In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
+ *   not take part in the handoff.
+ * - in qemu_co_mutex_lock, if it steals the hand-off responsibility from
+ *   qemu_co_mutex_unlock. In this case, qemu_co_mutex_unlock will fail
+ *   the cmpxchg (it will see either 0 or the next sequence value) and
+ *   exit. The next hand-off cannot begin until qemu_co_mutex_lock has
+ *   woken up someone.
+ * - in qemu_co_mutex_unlock, if it takes the hand-off token itself.
+ *   In this case another iteration starts with mutex->handoff == 0;
+ *   a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
+ *   qemu_co_mutex_unlock will go back to case (1).
+ *
+ * The following functions manage this queue.
+ */
+typedef struct CoWaitRecord {
+    Coroutine *co;
+    QSLIST_ENTRY(CoWaitRecord) next;
+} CoWaitRecord;
+
+static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
+{
+    w->co = qemu_coroutine_self();
+    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
+}
+
+static void move_waiters(CoMutex *mutex)
+{
+    QSLIST_HEAD(, CoWaitRecord) reversed;
+    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
+    while (!QSLIST_EMPTY(&reversed)) {
+        CoWaitRecord *w = QSLIST_FIRST(&reversed);
+        QSLIST_REMOVE_HEAD(&reversed, next);
+        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
+    }
+}
+
+static CoWaitRecord *pop_waiter(CoMutex *mutex)
+{
+    CoWaitRecord *w;
+
+    if (QSLIST_EMPTY(&mutex->to_pop)) {
+        move_waiters(mutex);
+        if (QSLIST_EMPTY(&mutex->to_pop)) {
+            return NULL;
+        }
+    }
+    w = QSLIST_FIRST(&mutex->to_pop);
+    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
+    return w;
+}
+
+static bool has_waiters(CoMutex *mutex)
+{
+    return QSLIST_EMPTY(&mutex->to_pop) || QSLIST_EMPTY(&mutex->from_push);
+}
+
 void qemu_co_mutex_init(CoMutex *mutex)
 {
     memset(mutex, 0, sizeof(*mutex));
-    qemu_co_queue_init(&mutex->queue);
 }
 
-void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
+static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
+{
+    /* Read co before co->ctx; pairs with smp_wmb() in
+     * qemu_coroutine_enter().
+     */
+    smp_read_barrier_depends();
+    mutex->ctx = co->ctx;
+    aio_co_wake(co);
+}
+
+static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
+                                                     CoMutex *mutex)
 {
     Coroutine *self = qemu_coroutine_self();
+    CoWaitRecord w;
+    unsigned old_handoff;
 
     trace_qemu_co_mutex_lock_entry(mutex, self);
+    w.co = self;
+    push_waiter(mutex, &w);
 
+    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
+     * a concurrent unlock() the responsibility of waking somebody up.
+     */
+    old_handoff = atomic_mb_read(&mutex->handoff);
+    if (old_handoff &&
+        has_waiters(mutex) &&
+        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
+        /* There can be no concurrent pops, because there can be only
+         * one active handoff at a time.
+         */
+        CoWaitRecord *to_wake = pop_waiter(mutex);
+        Coroutine *co = to_wake->co;
+        if (co == self) {
+            /* We got the lock ourselves! */
+            assert(to_wake == &w);
+            mutex->ctx = ctx;
+            return;
+        }
+
+        qemu_co_mutex_wake(mutex, co);
+    }
 
-    while (mutex->locked) {
-        qemu_co_queue_wait(&mutex->queue);
+    qemu_coroutine_yield();
+    trace_qemu_co_mutex_lock_return(mutex, self);
+}
+
+void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
+{
+    AioContext *ctx = qemu_get_current_aio_context();
+    Coroutine *self = qemu_coroutine_self();
+    int waiters, i;
+
+    /* Running a very small critical section on pthread_mutex_t and CoMutex
+     * shows that pthread_mutex_t is much faster because it doesn't actually
+     * go to sleep. What happens is that the critical section is shorter
+     * than the latency of entering the kernel and thus FUTEX_WAIT always
+     * fails. With CoMutex there is no such latency but you still want to
+     * avoid wait and wakeup. So introduce it artificially.
+     */
+    i = 0;
+retry_fast_path:
+    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
+    if (waiters != 0) {
+        while (waiters == 1 && ++i < 1000) {
+            if (atomic_read(&mutex->ctx) == ctx) {
+                break;
+            }
+            if (atomic_read(&mutex->locked) == 0) {
+                goto retry_fast_path;
+            }
+            cpu_relax();
+        }
+        waiters = atomic_fetch_inc(&mutex->locked);
     }
-    mutex->locked = true;
+
+    if (waiters == 0) {
+        /* Uncontended. */
+        trace_qemu_co_mutex_lock_uncontended(mutex, self);
+        mutex->ctx = ctx;
+    } else {
+        qemu_co_mutex_lock_slowpath(ctx, mutex);
+    }
     mutex->holder = self;
     self->locks_held++;
-
-    trace_qemu_co_mutex_lock_return(mutex, self);
 }
 
 void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
@@ -141,14 +293,51 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
     trace_qemu_co_mutex_unlock_entry(mutex, self);
 
-    assert(mutex->locked == true);
+    assert(mutex->locked);
     assert(mutex->holder == self);
     assert(qemu_in_coroutine());
 
-    mutex->locked = false;
+    mutex->ctx = NULL;
     mutex->holder = NULL;
     self->locks_held--;
-    qemu_co_queue_next(&mutex->queue);
+    if (atomic_fetch_dec(&mutex->locked) == 1) {
+        /* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
+        return;
+    }
+
+    for (;;) {
+        CoWaitRecord *to_wake = pop_waiter(mutex);
+        unsigned our_handoff;
+
+        if (to_wake) {
+            qemu_co_mutex_wake(mutex, to_wake->co);
+            break;
+        }
+
+        /* Some concurrent lock() is in progress (we know this because
+         * mutex->locked was >1) but it hasn't yet put itself on the wait
+         * queue. Pick a sequence number for the handoff protocol (not 0).
+         */
+        if (++mutex->sequence == 0) {
+            mutex->sequence = 1;
+        }
+
+        our_handoff = mutex->sequence;
+        atomic_mb_set(&mutex->handoff, our_handoff);
+        if (!has_waiters(mutex)) {
+            /* The concurrent lock has not added itself yet, so it
+             * will be able to pick our handoff.
+             */
+            break;
+        }
+
+        /* Try to do the handoff protocol ourselves; if somebody else has
+         * already taken it, however, we're done and they're responsible.
+         */
+        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
+            break;
+        }
+    }
 
     trace_qemu_co_mutex_unlock_return(mutex, self);
 }
@@ -157,16 +346,22 @@ void qemu_co_rwlock_init(CoRwlock *lock)
 {
     memset(lock, 0, sizeof(*lock));
     qemu_co_queue_init(&lock->queue);
+    qemu_co_mutex_init(&lock->mutex);
 }
 
 void qemu_co_rwlock_rdlock(CoRwlock *lock)
 {
     Coroutine *self = qemu_coroutine_self();
 
-    while (lock->writer) {
-        qemu_co_queue_wait(&lock->queue);
+    qemu_co_mutex_lock(&lock->mutex);
+    /* For fairness, wait if a writer is in line. */
+    while (lock->pending_writer) {
+        qemu_co_queue_wait(&lock->queue, &lock->mutex);
     }
     lock->reader++;
+    qemu_co_mutex_unlock(&lock->mutex);
+
+    /* The rest of the read-side critical section is run without the mutex. */
     self->locks_held++;
 }
 
@@ -175,10 +370,13 @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
     Coroutine *self = qemu_coroutine_self();
 
     assert(qemu_in_coroutine());
-    if (lock->writer) {
-        lock->writer = false;
+    if (!lock->reader) {
+        /* The critical section started in qemu_co_rwlock_wrlock. */
         qemu_co_queue_restart_all(&lock->queue);
     } else {
+        self->locks_held--;
+
+        qemu_co_mutex_lock(&lock->mutex);
         lock->reader--;
         assert(lock->reader >= 0);
         /* Wakeup only one waiting writer */
@@ -186,16 +384,20 @@ void qemu_co_rwlock_unlock(CoRwlock *lock)
             qemu_co_queue_next(&lock->queue);
         }
     }
-    self->locks_held--;
+    qemu_co_mutex_unlock(&lock->mutex);
 }
 
 void qemu_co_rwlock_wrlock(CoRwlock *lock)
 {
-    Coroutine *self = qemu_coroutine_self();
-
-    while (lock->writer || lock->reader) {
-        qemu_co_queue_wait(&lock->queue);
+    qemu_co_mutex_lock(&lock->mutex);
+    lock->pending_writer++;
+    while (lock->reader) {
+        qemu_co_queue_wait(&lock->queue, &lock->mutex);
     }
-    lock->writer = true;
-    self->locks_held++;
+    lock->pending_writer--;
+
+    /* The rest of the write-side critical section is run with
+     * the mutex taken, so that lock->reader remains zero.
+     * There is no need to update self->locks_held.
+     */
 }
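A few sketches of the techniques used in this patch follow. First, the new two-argument qemu_co_queue_wait() turns a CoQueue into the coroutine counterpart of a condition variable: the CoMutex is dropped while the coroutine sleeps and re-acquired before the call returns, much like pthread_cond_wait() does with a pthread_mutex_t. Below is a minimal usage sketch against the in-tree API, assuming coroutine context throughout; the Fetcher type, its fields and the "busy" condition are invented for illustration, only the qemu_co_* calls come from this file:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    /* Hypothetical shared state, not part of the patch. */
    typedef struct Fetcher {
        CoMutex lock;      /* protects @busy */
        CoQueue waiters;   /* coroutines waiting for @busy to clear */
        bool busy;
    } Fetcher;

    static void fetcher_init(Fetcher *f)
    {
        qemu_co_mutex_init(&f->lock);
        qemu_co_queue_init(&f->waiters);
        f->busy = false;
    }

    static void coroutine_fn fetcher_acquire(Fetcher *f)
    {
        qemu_co_mutex_lock(&f->lock);
        while (f->busy) {
            /* Sleeps with @lock released; owns @lock again on return. */
            qemu_co_queue_wait(&f->waiters, &f->lock);
        }
        f->busy = true;
        qemu_co_mutex_unlock(&f->lock);
    }

    static void coroutine_fn fetcher_release(Fetcher *f)
    {
        qemu_co_mutex_lock(&f->lock);
        f->busy = false;
        /* Woken coroutines re-take @lock inside qemu_co_queue_wait(). */
        qemu_co_queue_restart_all(&f->waiters);
        qemu_co_mutex_unlock(&f->lock);
    }

The TODO about wait morphing in the patch refers to exactly this pattern: instead of waking every waiter only to have each one re-contend for the CoMutex, the wakeup could move waiters straight onto the mutex's wait list.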
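push_waiter() and pop_waiter() are an instance of the classic two-list multiple-producer, single-consumer queue: producers push onto a shared LIFO with a compare-and-swap, and the single consumer drains the whole LIFO with one atomic exchange, reversing the nodes into a private list so that they pop in FIFO order. A self-contained C11 sketch of the same idea, without QEMU's QSLIST macros (all names here are invented):

    #include <stdatomic.h>
    #include <stddef.h>

    typedef struct Waiter {
        struct Waiter *next;
        /* payload, e.g. a coroutine pointer, would go here */
    } Waiter;

    typedef struct WaitQueue {
        _Atomic(Waiter *) from_push;   /* LIFO, shared with all producers */
        Waiter *to_pop;                /* FIFO, owned by the single consumer */
    } WaitQueue;

    /* Any thread: O(1) lock-free push (like QSLIST_INSERT_HEAD_ATOMIC). */
    static void wq_push(WaitQueue *q, Waiter *w)
    {
        Waiter *old = atomic_load_explicit(&q->from_push, memory_order_relaxed);
        do {
            w->next = old;
        } while (!atomic_compare_exchange_weak_explicit(&q->from_push, &old, w,
                                                        memory_order_release,
                                                        memory_order_relaxed));
    }

    /* Single consumer, called only with to_pop empty: grab everything pushed
     * so far and reverse it into FIFO order (like move_waiters()). */
    static void wq_refill(WaitQueue *q)
    {
        Waiter *w = atomic_exchange_explicit(&q->from_push, NULL,
                                             memory_order_acquire);
        while (w) {
            Waiter *next = w->next;
            w->next = q->to_pop;
            q->to_pop = w;
            w = next;
        }
    }

    /* Single consumer only: pop the oldest waiter, or NULL if none. */
    static Waiter *wq_pop(WaitQueue *q)
    {
        if (!q->to_pop) {
            wq_refill(q);
            if (!q->to_pop) {
                return NULL;
            }
        }
        Waiter *w = q->to_pop;
        q->to_pop = w->next;
        return w;
    }

QSLIST_INSERT_HEAD_ATOMIC and QSLIST_MOVE_ATOMIC wrap the same compare-and-swap and atomic-exchange operations behind QEMU's list macros.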
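The bounded spin added to qemu_co_mutex_lock() is the usual spin-before-sleep optimization: when critical sections are shorter than a wait/wakeup round trip, retrying for a little while wins. Here is a self-contained C11 stand-in for the shape of that fast path; it leaves out the patch's extra exits (another waiter appearing, the owner running in a different AioContext) and replaces the wait-record slow path with a plain yield loop, so it is only a sketch of the structure:

    #include <stdatomic.h>
    #include <sched.h>

    static atomic_uint lock_word;   /* 0 = free, 1 = taken */

    static void spin_then_yield_lock(void)
    {
        unsigned seen;
        int i = 0;

    retry_fast_path:
        seen = 0;
        if (atomic_compare_exchange_strong(&lock_word, &seen, 1)) {
            return;                     /* uncontended: no wait, no wakeup */
        }

        /* Bounded spin: hope the owner's critical section is shorter
         * than a sleep/wakeup cycle. */
        while (++i < 1000) {
            if (atomic_load(&lock_word) == 0) {
                goto retry_fast_path;   /* freed while we were spinning */
            }
            sched_yield();              /* stand-in for cpu_relax() */
        }

        /* Spinning did not pay off; the patch would now queue a
         * CoWaitRecord and rely on the hand-off protocol.  Keep yielding
         * instead so that this sketch stays self-contained. */
        for (;;) {
            seen = 0;
            if (atomic_compare_exchange_strong(&lock_word, &seen, 1)) {
                return;
            }
            sched_yield();
        }
    }

    static void spin_then_yield_unlock(void)
    {
        atomic_store(&lock_word, 0);
    }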
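The mutex->locked counter itself follows the "benaphore" pattern: lock is a single atomic increment, and unlock only has to wake someone when its decrement reveals that a waiter arrived in the meantime. The sketch below uses a POSIX semaphore as the sleeping primitive to show that counting logic in isolation; a coroutine must never block its thread the way sem_wait() does, which is exactly why the patch replaces the semaphore with the CoWaitRecord queue and the responsibility hand-off protocol described in its comments:

    #include <semaphore.h>
    #include <stdatomic.h>

    typedef struct Benaphore {
        atomic_int count;   /* 0 = free, 1 = taken, >1 = taken with waiters */
        sem_t sem;          /* waiters sleep here; starts at 0 */
    } Benaphore;

    static void benaphore_init(Benaphore *b)
    {
        atomic_init(&b->count, 0);
        sem_init(&b->sem, 0, 0);
    }

    static void benaphore_lock(Benaphore *b)
    {
        /* Uncontended case: 0 -> 1, no system call at all. */
        if (atomic_fetch_add(&b->count, 1) > 0) {
            /* Someone holds the lock; wait until the unlocker posts. */
            sem_wait(&b->sem);
        }
    }

    static void benaphore_unlock(Benaphore *b)
    {
        /* If the count was > 1, at least one locker is (or soon will be)
         * sleeping in sem_wait(); hand the lock directly to it. */
        if (atomic_fetch_sub(&b->count, 1) > 1) {
            sem_post(&b->sem);
        }
    }

The hand-off protocol fills the gap the semaphore fills here: it lets an unlock that saw "count > 1" and a lock that has not yet queued its wait record agree on who is responsible for the wakeup.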
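Finally, the reworked CoRwlock keeps its external API: readers may run concurrently, and a queued writer blocks new readers through pending_writer so it cannot be starved. A short usage sketch from coroutine context; table_lock, table_entries and both functions are invented, only the qemu_co_rwlock_* calls are from this file:

    #include "qemu/osdep.h"
    #include "qemu/coroutine.h"

    static CoRwlock table_lock;   /* assume qemu_co_rwlock_init() was called */
    static int table_entries;     /* hypothetical data protected by the lock */

    static int coroutine_fn table_count(void)
    {
        int n;

        qemu_co_rwlock_rdlock(&table_lock);   /* many readers may enter */
        n = table_entries;
        qemu_co_rwlock_unlock(&table_lock);
        return n;
    }

    static void coroutine_fn table_add(int n)
    {
        qemu_co_rwlock_wrlock(&table_lock);   /* waits for readers to drain */
        table_entries += n;
        qemu_co_rwlock_unlock(&table_lock);   /* wakes queued readers/writer */
    }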