| author | Stefan Hajnoczi <stefanha@redhat.com> | 2014-03-03 11:30:03 +0100 |
| --- | --- | --- |
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2014-03-13 14:42:21 +0100 |
| commit | 2da61b671eb89fcaa306738f44eed472977d6587 (patch) | |
| tree | 61673e48ab889a6ea113ff4d2be2d48d7108d1a1 /util/rfifolock.c | |
| parent | 11f590b1a242492a0108da42f40f0e2b20f0a778 (diff) | |
rfifolock: add recursive FIFO lock
QemuMutex does not guarantee fairness and cannot be acquired
recursively:
Fairness means each locker gets a turn and the scheduler cannot cause
starvation.
Recursive locking is useful for composition: it allows a sequence of
operations that each take the lock to be invoked atomically by holding
the lock around the whole sequence.
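
As an illustration of the composition point, here is a minimal sketch using the
RFifoLock API introduced below; the `account_lock`, `debit()`, `credit()` and
`do_transfer()` names are hypothetical and not part of this patch. With a
non-recursive mutex the outer acquisition would deadlock when the helpers try
to take the lock again.

```c
#include "qemu/rfifolock.h"

/* Hypothetical lock protecting both balances; assumes
 * rfifolock_init(&account_lock, NULL, NULL) ran at startup. */
static RFifoLock account_lock;
static long balance_a, balance_b;

static void debit(long *balance, long amount)
{
    rfifolock_lock(&account_lock);   /* nested acquire: same owner, no deadlock */
    *balance -= amount;
    rfifolock_unlock(&account_lock);
}

static void credit(long *balance, long amount)
{
    rfifolock_lock(&account_lock);
    *balance += amount;
    rfifolock_unlock(&account_lock);
}

/* Composition: holding the lock around both helpers makes the whole
 * transfer atomic with respect to other threads, while each helper
 * remains independently usable. */
static void do_transfer(long amount)
{
    rfifolock_lock(&account_lock);
    debit(&balance_a, amount);
    credit(&balance_b, amount);
    rfifolock_unlock(&account_lock);
}
```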
This patch adds RFifoLock, a recursive lock that guarantees FIFO order.
Its first user is added in the next patch.
RFifoLock has one additional feature: it can be initialized with an
optional contention callback. The callback is invoked whenever a thread
must wait for the lock. For example, it can be used to poke the current
owner so that they release the lock soon.
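
A sketch of how such a callback might be registered (only `rfifolock_init()`
and its callback signature come from this patch; `MyContext` and
`nudge_owner()` are hypothetical, and the actual wake-up mechanism is left
abstract):

```c
#include "qemu/rfifolock.h"

typedef struct {
    RFifoLock lock;
    /* ... state owned by the thread that usually holds the lock ... */
} MyContext;

/* Invoked by rfifolock_lock() each time a waiter is about to sleep,
 * with RFifoLock's internal mutex held, so it must not block.  Its job
 * is only to ask the current owner to drop the lock soon, e.g. by
 * kicking the owner's event loop out of a blocking poll. */
static void nudge_owner(void *opaque)
{
    MyContext *ctx = opaque;
    (void)ctx;  /* hypothetical: wake ctx's owner thread here */
}

static void my_context_init(MyContext *ctx)
{
    rfifolock_init(&ctx->lock, nudge_owner, ctx);
}
```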
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'util/rfifolock.c')
-rw-r--r-- | util/rfifolock.c | 78
1 file changed, 78 insertions, 0 deletions
```diff
diff --git a/util/rfifolock.c b/util/rfifolock.c
new file mode 100644
index 0000000000..afbf7488df
--- /dev/null
+++ b/util/rfifolock.c
@@ -0,0 +1,78 @@
+/*
+ * Recursive FIFO lock
+ *
+ * Copyright Red Hat, Inc. 2013
+ *
+ * Authors:
+ *  Stefan Hajnoczi <stefanha@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU LGPL, version 2 or later.
+ * See the COPYING.LIB file in the top-level directory.
+ *
+ */
+
+#include <assert.h>
+#include "qemu/rfifolock.h"
+
+void rfifolock_init(RFifoLock *r, void (*cb)(void *), void *opaque)
+{
+    qemu_mutex_init(&r->lock);
+    r->head = 0;
+    r->tail = 0;
+    qemu_cond_init(&r->cond);
+    r->nesting = 0;
+    r->cb = cb;
+    r->cb_opaque = opaque;
+}
+
+void rfifolock_destroy(RFifoLock *r)
+{
+    qemu_cond_destroy(&r->cond);
+    qemu_mutex_destroy(&r->lock);
+}
+
+/*
+ * Theory of operation:
+ *
+ * In order to ensure FIFO ordering, implement a ticketlock.  Threads acquiring
+ * the lock enqueue themselves by incrementing the tail index.  When the lock
+ * is unlocked, the head is incremented and waiting threads are notified.
+ *
+ * Recursive locking does not take a ticket since the head is only incremented
+ * when the outermost recursive caller unlocks.
+ */
+void rfifolock_lock(RFifoLock *r)
+{
+    qemu_mutex_lock(&r->lock);
+
+    /* Take a ticket */
+    unsigned int ticket = r->tail++;
+
+    if (r->nesting > 0 && qemu_thread_is_self(&r->owner_thread)) {
+        r->tail--; /* put ticket back, we're nesting */
+    } else {
+        while (ticket != r->head) {
+            /* Invoke optional contention callback */
+            if (r->cb) {
+                r->cb(r->cb_opaque);
+            }
+            qemu_cond_wait(&r->cond, &r->lock);
+        }
+    }
+
+    qemu_thread_get_self(&r->owner_thread);
+    r->nesting++;
+    qemu_mutex_unlock(&r->lock);
+}
+
+void rfifolock_unlock(RFifoLock *r)
+{
+    qemu_mutex_lock(&r->lock);
+    assert(r->nesting > 0);
+    assert(qemu_thread_is_self(&r->owner_thread));
+    if (--r->nesting == 0) {
+        r->head++;
+        qemu_cond_broadcast(&r->cond);
+    }
+    qemu_mutex_unlock(&r->lock);
+}
```
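
Not part of the patch, but a minimal single-threaded usage sketch of the
resulting API, showing the recursive acquire/release pairing (assumes it is
compiled inside the QEMU tree so that `qemu/rfifolock.h` and the QemuThread
helpers are available):

```c
#include "qemu/rfifolock.h"

int main(void)
{
    RFifoLock lock;

    rfifolock_init(&lock, NULL, NULL);   /* no contention callback */

    rfifolock_lock(&lock);               /* takes ticket 0, becomes owner */
    rfifolock_lock(&lock);               /* recursive: no new ticket, nesting == 2 */
    rfifolock_unlock(&lock);             /* still held, nesting == 1 */
    rfifolock_unlock(&lock);             /* nesting == 0: head advances, waiters woken */

    rfifolock_destroy(&lock);
    return 0;
}
```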