Diffstat (limited to 'util/thread-pool.c')
-rw-r--r--  util/thread-pool.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/util/thread-pool.c b/util/thread-pool.c
index 31113b5860..a70abb8a59 100644
--- a/util/thread-pool.c
+++ b/util/thread-pool.c
@@ -48,7 +48,7 @@ struct ThreadPoolElement {
/* Access to this list is protected by lock. */
QTAILQ_ENTRY(ThreadPoolElement) reqs;
- /* Access to this list is protected by the global mutex. */
+ /* This list is only written by the thread pool's mother thread. */
QLIST_ENTRY(ThreadPoolElement) all;
};
@@ -175,7 +175,6 @@ static void thread_pool_completion_bh(void *opaque)
ThreadPool *pool = opaque;
ThreadPoolElement *elem, *next;
- aio_context_acquire(pool->ctx);
restart:
QLIST_FOREACH_SAFE(elem, &pool->head, all, next) {
if (elem->state != THREAD_DONE) {
@@ -195,9 +194,7 @@ restart:
*/
qemu_bh_schedule(pool->completion_bh);
- aio_context_release(pool->ctx);
elem->common.cb(elem->common.opaque, elem->ret);
- aio_context_acquire(pool->ctx);
/* We can safely cancel the completion_bh here regardless of someone
* else having scheduled it meanwhile because we reenter the
@@ -211,7 +208,6 @@ restart:
qemu_aio_unref(elem);
}
}
- aio_context_release(pool->ctx);
}
static void thread_pool_cancel(BlockAIOCB *acb)
@@ -251,6 +247,9 @@ BlockAIOCB *thread_pool_submit_aio(ThreadPool *pool,
{
ThreadPoolElement *req;
+ /* Assert that the thread submitting work is the same running the pool */
+ assert(pool->ctx == qemu_get_current_aio_context());
+
req = qemu_aio_get(&thread_pool_aiocb_info, NULL, cb, opaque);
req->func = func;
req->arg = arg;
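
The patch drops the aio_context_acquire()/aio_context_release() pair around the completion callback and instead asserts that submissions come from the thread running the pool's AioContext, so the completion BH and the submit path always execute in the same thread. Below is a minimal caller sketch, not part of the patch, assuming QEMU's internal headers; the names worker_fn, worker_done and submit_from_home_ctx are hypothetical and only illustrate the invariant the new assert enforces.

/*
 * Hypothetical caller sketch (not part of the patch).  It uses the
 * thread_pool_submit_aio() signature visible in this diff plus
 * aio_get_thread_pool() and qemu_get_current_aio_context() from QEMU's
 * AioContext API; worker_fn/worker_done/submit_from_home_ctx are made up.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "block/thread-pool.h"

static int worker_fn(void *opaque)
{
    /* Runs in a pool worker thread; the return value becomes 'ret' below. */
    return 0;
}

static void worker_done(void *opaque, int ret)
{
    /*
     * After this patch the callback is invoked from thread_pool_completion_bh
     * without the AioContext lock, so it should only touch state owned by the
     * pool's home AioContext thread.
     */
}

static BlockAIOCB *submit_from_home_ctx(AioContext *ctx, void *arg)
{
    ThreadPool *pool = aio_get_thread_pool(ctx);

    /* Caller must already be running in ctx, or the new assert fires. */
    assert(qemu_get_current_aio_context() == ctx);
    return thread_pool_submit_aio(pool, worker_fn, arg, worker_done, NULL);
}

Under that reading, the 'all' list is written only from the pool's home thread (per the updated comment in struct ThreadPoolElement), which is what makes it safe to run the completion loop without acquiring the AioContext lock.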