Diffstat (limited to 'util/qemu-coroutine-lock.c')
-rw-r--r--  util/qemu-coroutine-lock.c | 22
1 file changed, 15 insertions(+), 7 deletions(-)
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 846ff9167f..78fb79acf8 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -40,13 +40,13 @@ void qemu_co_queue_init(CoQueue *queue)
     QSIMPLEQ_INIT(&queue->entries);
 }
 
-void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
+void coroutine_fn qemu_co_queue_wait_impl(CoQueue *queue, QemuLockable *lock)
 {
     Coroutine *self = qemu_coroutine_self();
     QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);
 
-    if (mutex) {
-        qemu_co_mutex_unlock(mutex);
+    if (lock) {
+        qemu_lockable_unlock(lock);
     }
 
     /* There is no race condition here. Other threads will call
@@ -60,9 +60,11 @@ void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
     /* TODO: OSv implements wait morphing here, where the wakeup
      * primitive automatically places the woken coroutine on the
      * mutex's queue. This avoids the thundering herd effect.
+     * This could be implemented for CoMutexes, but not really for
+     * other cases of QemuLockable.
      */
-    if (mutex) {
-        qemu_co_mutex_lock(mutex);
+    if (lock) {
+        qemu_lockable_lock(lock);
     }
 }
 
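The new qemu_co_queue_wait_impl() drops whatever QemuLockable it is given before yielding and takes it back on wakeup, so callers keep the usual condition-variable pattern. A minimal caller sketch follows, assuming it is built inside the QEMU tree and that a qemu_co_queue_wait() wrapper macro (from include/qemu/coroutine.h, not part of this diff) forwards the lock through QEMU_MAKE_LOCKABLE(); the consumer_co() and data_ready names are invented for illustration.

/* Hypothetical caller sketch (not part of this patch): a coroutine that
 * sleeps on a CoQueue while holding a CoMutex, releasing the mutex across
 * the yield exactly as qemu_co_queue_wait_impl() does with its lock. */
#include "qemu/osdep.h"
#include "qemu/coroutine.h"

static CoMutex mutex;
static CoQueue queue;
static bool data_ready;

static void coroutine_fn consumer_co(void *opaque)
{
    qemu_co_mutex_lock(&mutex);
    while (!data_ready) {
        /* Unlocks `mutex`, yields, and re-locks it after being woken. */
        qemu_co_queue_wait(&queue, &mutex);
    }
    data_ready = false;
    qemu_co_mutex_unlock(&mutex);
}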
@@ -130,7 +132,7 @@ void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
     qemu_co_queue_do_restart(queue, false);
 }
 
-bool qemu_co_enter_next(CoQueue *queue)
+bool qemu_co_enter_next_impl(CoQueue *queue, QemuLockable *lock)
 {
     Coroutine *next;
 
@@ -140,7 +142,13 @@ bool qemu_co_enter_next(CoQueue *queue)
     }
 
     QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
-    qemu_coroutine_enter(next);
+    if (lock) {
+        qemu_lockable_unlock(lock);
+    }
+    aio_co_wake(next);
+    if (lock) {
+        qemu_lockable_lock(lock);
+    }
     return true;
 }
 
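On the wake-up side, qemu_co_enter_next_impl() now temporarily releases the caller's lock around aio_co_wake(), so the woken coroutine can acquire that same lock without deadlocking against the waker. Below is a rough producer-side sketch under the same assumptions as above (QEMU-internal build); the kick_one_waiter() and plain_lock names are invented here, and QEMU_MAKE_LOCKABLE() is the generic converter from include/qemu/lockable.h.

/* Hypothetical waker sketch (not part of this patch): wake one queued
 * coroutine while holding a plain QemuMutex. */
#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/lockable.h"

static QemuMutex plain_lock;
static CoQueue queue;

static void kick_one_waiter(void)
{
    qemu_mutex_lock(&plain_lock);
    /* The helper drops plain_lock around aio_co_wake() and re-takes it,
     * so the woken coroutine can grab the lock right away. */
    qemu_co_enter_next_impl(&queue, QEMU_MAKE_LOCKABLE(&plain_lock));
    qemu_mutex_unlock(&plain_lock);
}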