author     Paolo Bonzini <pbonzini@redhat.com>  2023-03-03 12:51:33 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2023-05-25 10:18:33 +0200
commit     0ff25537018c0939919a35886265c38db28b2a8a (patch)
tree       3cc80836b6b28de353bffe206a05235a65e92737
parent     3e6bed619a1d13858e540e01aae275abdf9146ae (diff)
monitor: cleanup fetching of QMP requests
Use a continue statement so that "after going to sleep" is treated the
same way as "after processing a request". Pull the monitor_lock critical
section out of monitor_qmp_requests_pop_any_with_lock() and protect
qmp_dispatcher_co_shutdown with the monitor_lock.

The two changes are complex to separate because monitor_qmp_dispatcher_co()
previously had complicated logic to check for shutdown both before and
after going to sleep.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
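In outline, the dispatcher loop after this change looks roughly like the following. This is a condensed sketch of the hunks below, not the literal patch; tracing, out-of-band handling and the dispatch of the request itself are elided:

    while (true) {
        /* Cleared here; whoever wakes the coroutine sets it back to true. */
        qatomic_mb_set(&qmp_dispatcher_co_busy, false);

        WITH_QEMU_LOCK_GUARD(&monitor_lock) {
            /* qmp_dispatcher_co_shutdown is now only read under monitor_lock */
            if (qmp_dispatcher_co_shutdown) {
                return;
            }
            req_obj = monitor_qmp_requests_pop_any_with_lock();
        }

        if (!req_obj) {
            qemu_coroutine_yield();
            continue;   /* woken up: either a new request or shutdown */
        }

        /* ... dispatch req_obj ... */
    }

The single yield followed by continue replaces the old nested while loop, so the shutdown flag is checked in exactly one place, under monitor_lock, whether the coroutine just woke up or just finished a request.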
-rw-r--r--   monitor/monitor.c    9
-rw-r--r--   monitor/qmp.c       40
2 files changed, 22 insertions, 27 deletions
diff --git a/monitor/monitor.c b/monitor/monitor.c
index c4ed2547c2..042a1ab918 100644
--- a/monitor/monitor.c
+++ b/monitor/monitor.c
@@ -56,7 +56,10 @@ IOThread *mon_iothread;
 /* Coroutine to dispatch the requests received from I/O thread */
 Coroutine *qmp_dispatcher_co;
 
-/* Set to true when the dispatcher coroutine should terminate */
+/*
+ * Set to true when the dispatcher coroutine should terminate. Protected
+ * by monitor_lock.
+ */
 bool qmp_dispatcher_co_shutdown;
 
 /*
@@ -679,7 +682,9 @@ void monitor_cleanup(void)
      * we'll just leave them in the queue without sending a response
      * and monitor_data_destroy() will free them.
      */
-    qmp_dispatcher_co_shutdown = true;
+    WITH_QEMU_LOCK_GUARD(&monitor_lock) {
+        qmp_dispatcher_co_shutdown = true;
+    }
     if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
         aio_co_wake(qmp_dispatcher_co);
     }
diff --git a/monitor/qmp.c b/monitor/qmp.c
index f0cc6dc886..dfc2156328 100644
--- a/monitor/qmp.c
+++ b/monitor/qmp.c
@@ -178,8 +178,6 @@ static QMPRequest *monitor_qmp_requests_pop_any_with_lock(void)
     Monitor *mon;
     MonitorQMP *qmp_mon;
 
-    QEMU_LOCK_GUARD(&monitor_lock);
-
     QTAILQ_FOREACH(mon, &mon_list, entry) {
         if (!monitor_is_qmp(mon)) {
             continue;
@@ -215,6 +213,10 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
     MonitorQMP *mon;
 
     while (true) {
+        /*
+         * busy must be set to true again by whoever
+         * rescheduled us to avoid double scheduling
+         */
         assert(qatomic_mb_read(&qmp_dispatcher_co_busy) == true);
 
         /*
@@ -224,36 +226,23 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
          */
         qatomic_mb_set(&qmp_dispatcher_co_busy, false);
 
-        /* On shutdown, don't take any more requests from the queue */
-        if (qmp_dispatcher_co_shutdown) {
-            qatomic_set(&qmp_dispatcher_co, NULL);
-            return;
+        WITH_QEMU_LOCK_GUARD(&monitor_lock) {
+            /* On shutdown, don't take any more requests from the queue */
+            if (qmp_dispatcher_co_shutdown) {
+                return;
+            }
+
+            req_obj = monitor_qmp_requests_pop_any_with_lock();
         }
 
-        while (!(req_obj = monitor_qmp_requests_pop_any_with_lock())) {
+        if (!req_obj) {
             /*
              * No more requests to process. Wait to be reentered from
              * handle_qmp_command() when it pushes more requests, or
              * from monitor_cleanup() when it requests shutdown.
              */
-            if (!qmp_dispatcher_co_shutdown) {
-                qemu_coroutine_yield();
-
-                /*
-                 * busy must be set to true again by whoever
-                 * rescheduled us to avoid double scheduling
-                 */
-                assert(qatomic_xchg(&qmp_dispatcher_co_busy, false) == true);
-            }
-
-            /*
-             * qmp_dispatcher_co_shutdown may have changed if we
-             * yielded and were reentered from monitor_cleanup()
-             */
-            if (qmp_dispatcher_co_shutdown) {
-                qatomic_set(&qmp_dispatcher_co, NULL);
-                return;
-            }
+            qemu_coroutine_yield();
+            continue;
         }
 
         trace_monitor_qmp_in_band_dequeue(req_obj,
@@ -342,6 +331,7 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data)
         aio_co_schedule(iohandler_get_aio_context(), qmp_dispatcher_co);
         qemu_coroutine_yield();
     }
+    qatomic_set(&qmp_dispatcher_co, NULL);
 }
 
 static void handle_qmp_command(void *opaque, QObject *req, Error *err)
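
A note on the busy flag: the assertion at the top of the dispatcher loop relies on whoever reschedules the coroutine setting qmp_dispatcher_co_busy first. The wake-side of that protocol, as it appears in the monitor_cleanup() hunk above (the dispatcher's comment says handle_qmp_command() reenters it when it pushes more requests, presumably via the same sequence), is:

    /* Wake the dispatcher only if nobody has scheduled it already. */
    if (!qatomic_xchg(&qmp_dispatcher_co_busy, true)) {
        aio_co_wake(qmp_dispatcher_co);
    }

If the xchg returns true, a wake-up is already pending and the coroutine must not be scheduled again; that is the double scheduling the dispatcher's assertion guards against.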