-rw-r--r--  cpu-exec-common.c         2
-rw-r--r--  cpu-exec.c               20
-rw-r--r--  cpus.c                   19
-rw-r--r--  include/exec/exec-all.h   3
4 files changed, 18 insertions, 26 deletions
diff --git a/cpu-exec-common.c b/cpu-exec-common.c
index e2bc053372..0504a9457b 100644
--- a/cpu-exec-common.c
+++ b/cpu-exec-common.c
@@ -23,8 +23,6 @@
#include "exec/exec-all.h"
#include "exec/memory-internal.h"
-bool exit_request;
-
/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
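
The global flag removed above is what the rest of this change replaces with the per-vCPU cpu->exit_request used in the hunks below. A minimal, self-contained sketch of that idea, written with C11 atomics rather than QEMU's atomic_* macros; CPUStateSketch and request_cpu_exit are illustrative stand-ins, not the verbatim QEMU implementation:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Cut-down stand-in for the relevant CPUState fields. */
    typedef struct CPUStateSketch {
        atomic_bool exit_request;  /* checked by the outer execution loop */
        atomic_bool tcg_exit_req;  /* polled by generated code between TBs */
    } CPUStateSketch;

    /* Ask one specific vCPU to leave its execution loop.  The release store
     * orders the two writes: once the loop sees tcg_exit_req, it is also
     * guaranteed to see exit_request. */
    static void request_cpu_exit(CPUStateSketch *cpu)
    {
        atomic_store_explicit(&cpu->exit_request, true, memory_order_relaxed);
        atomic_store_explicit(&cpu->tcg_exit_req, true, memory_order_release);
    }
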
diff --git a/cpu-exec.c b/cpu-exec.c
index 1bd3d72002..85f14d4194 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -568,15 +568,13 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
*tb_exit = ret & TB_EXIT_MASK;
switch (*tb_exit) {
case TB_EXIT_REQUESTED:
- /* Something asked us to stop executing
- * chained TBs; just continue round the main
- * loop. Whatever requested the exit will also
- * have set something else (eg exit_request or
- * interrupt_request) which we will handle
- * next time around the loop. But we need to
- * ensure the zeroing of tcg_exit_req (see cpu_tb_exec)
- * comes before the next read of cpu->exit_request
- * or cpu->interrupt_request.
+ /* Something asked us to stop executing chained TBs; just
+ * continue round the main loop. Whatever requested the exit
+ * will also have set something else (eg interrupt_request)
+ * which we will handle next time around the loop. But we
+ * need to ensure the tcg_exit_req read in generated code
+ * comes before the next read of cpu->exit_request or
+ * cpu->interrupt_request.
*/
smp_mb();
*last_tb = NULL;
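
The reworded comment describes a message-passing pairing: whoever requests the exit publishes its reason (cpu->exit_request or cpu->interrupt_request) before setting tcg_exit_req, and the execution loop must order its read of tcg_exit_req before re-reading those flags, which is what the smp_mb() enforces. A self-contained sketch of the reader side under that assumption, using C11 atomics; struct vcpu_flags and pending_request are illustrative names, not QEMU code:

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative per-vCPU flags (not real QEMU types); a requester sets
     * exit_request or interrupt_request first, then tcg_exit_req. */
    struct vcpu_flags {
        atomic_bool tcg_exit_req;
        atomic_bool exit_request;
        atomic_int  interrupt_request;
    };

    /* Execution-loop side of the pairing on TB_EXIT_REQUESTED: consuming
     * tcg_exit_req with acquire ordering (the role smp_mb() plays above)
     * guarantees the re-reads below see whatever the requester published
     * beforehand, so a freshly posted request cannot be missed. */
    static bool pending_request(struct vcpu_flags *f)
    {
        if (!atomic_exchange_explicit(&f->tcg_exit_req, false,
                                      memory_order_acquire)) {
            return false;
        }
        return atomic_load_explicit(&f->exit_request, memory_order_relaxed)
            || atomic_load_explicit(&f->interrupt_request, memory_order_relaxed) != 0;
    }
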
@@ -630,10 +628,6 @@ int cpu_exec(CPUState *cpu)
rcu_read_lock();
- if (unlikely(atomic_mb_read(&exit_request))) {
- cpu->exit_request = 1;
- }
-
cc->cpu_exec_enter(cpu);
/* Calculate difference between guest clock and host clock.
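
With the seeding from the global gone, nothing writes cpu->exit_request at cpu_exec entry any more; the flag is set directly by whoever wants the vCPU out and consumed inside the loop with a read-and-clear. A hedged sketch of that consumer pattern; struct vcpu_sketch, consume_exit_request and EXCP_INTERRUPT_SKETCH are made-up names, not QEMU's:

    #include <stdatomic.h>
    #include <stdbool.h>

    enum { EXCP_INTERRUPT_SKETCH = -2 };   /* placeholder exception code */

    struct vcpu_sketch {
        atomic_bool exit_request;
        int exception_index;
    };

    /* Read-and-clear the per-vCPU flag: returns true when the execution
     * loop should drop back to the main loop. */
    static bool consume_exit_request(struct vcpu_sketch *cpu)
    {
        if (atomic_exchange_explicit(&cpu->exit_request, false,
                                     memory_order_acquire)) {
            cpu->exception_index = EXCP_INTERRUPT_SKETCH;
            return true;
        }
        return false;
    }
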
diff --git a/cpus.c b/cpus.c
index 0ae8f69be5..e165d18785 100644
--- a/cpus.c
+++ b/cpus.c
@@ -793,7 +793,6 @@ static inline int64_t qemu_tcg_next_kick(void)
static void qemu_cpu_kick_rr_cpu(void)
{
CPUState *cpu;
- atomic_mb_set(&exit_request, 1);
do {
cpu = atomic_mb_read(&tcg_current_rr_cpu);
if (cpu) {
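
Without the broadcast flag, the kick above only has to reach whichever vCPU currently holds the round-robin slot, re-reading tcg_current_rr_cpu in case the scheduler moved on mid-kick. A self-contained sketch of that retry pattern; struct vcpu, current_rr_cpu and kick_one are illustrative stand-ins, not QEMU functions or globals:

    #include <stdatomic.h>
    #include <stdbool.h>

    struct vcpu { atomic_bool exit_request; };

    /* Plays the role of tcg_current_rr_cpu. */
    static _Atomic(struct vcpu *) current_rr_cpu;

    static void kick_one(struct vcpu *cpu)
    {
        atomic_store(&cpu->exit_request, true);
    }

    /* Kick whichever vCPU currently owns the round-robin slot.  Re-check
     * the slot afterwards: if it changed hands while we were kicking, the
     * new owner gets kicked as well, so no vCPU keeps running unkicked. */
    static void kick_rr_cpu(void)
    {
        struct vcpu *cpu;
        do {
            cpu = atomic_load(&current_rr_cpu);
            if (cpu) {
                kick_one(cpu);
            }
        } while (cpu != atomic_load(&current_rr_cpu));
    }
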
@@ -1316,11 +1315,11 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
start_tcg_kick_timer();
- /* process any pending work */
- atomic_mb_set(&exit_request, 1);
-
cpu = first_cpu;
+ /* process any pending work */
+ cpu->exit_request = 1;
+
while (1) {
/* Account partial waits to QEMU_CLOCK_VIRTUAL. */
qemu_account_warp_timer();
@@ -1329,7 +1328,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
cpu = first_cpu;
}
- for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+ while (cpu && !cpu->queued_work_first && !cpu->exit_request) {
+
atomic_mb_set(&tcg_current_rr_cpu, cpu);
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -1349,12 +1349,15 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
break;
}
- } /* for cpu.. */
+ cpu = CPU_NEXT(cpu);
+ } /* while (cpu && !cpu->exit_request).. */
+
/* Does not need atomic_mb_set because a spurious wakeup is okay. */
atomic_set(&tcg_current_rr_cpu, NULL);
- /* Pairs with smp_wmb in qemu_cpu_kick. */
- atomic_mb_set(&exit_request, 0);
+ if (cpu && cpu->exit_request) {
+ atomic_mb_set(&cpu->exit_request, 0);
+ }
handle_icount_deadline();
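
Taken together, these hunks make the single-threaded round-robin loop walk the vCPU list until it reaches a CPU with queued work or a per-CPU exit request, then clear only that CPU's flag instead of a global one. A compact, self-contained sketch of that control flow; struct rr_vcpu, exec_one_slice and run_round_robin are illustrative names, with queued_work and next standing in for queued_work_first and CPU_NEXT():

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative vCPU list node; fields mirror the diff's usage. */
    struct rr_vcpu {
        atomic_bool    exit_request;
        void          *queued_work;   /* non-NULL when work is queued */
        struct rr_vcpu *next;
    };

    /* Stub: run one slice of guest code; false would mean "stop the pass". */
    static bool exec_one_slice(struct rr_vcpu *cpu) { (void)cpu; return true; }

    /* One pass of the round-robin scheduler: run each vCPU in turn, stop
     * early at the first one with queued work or an exit request, and clear
     * only that vCPU's request before the next pass. */
    static struct rr_vcpu *run_round_robin(struct rr_vcpu *first,
                                           struct rr_vcpu *cpu)
    {
        if (!cpu) {
            cpu = first;
        }
        while (cpu && !cpu->queued_work &&
               !atomic_load_explicit(&cpu->exit_request, memory_order_acquire)) {
            if (!exec_one_slice(cpu)) {
                break;
            }
            cpu = cpu->next;
        }
        if (cpu && atomic_load(&cpu->exit_request)) {
            atomic_store(&cpu->exit_request, false);
        }
        return cpu;   /* resume here on the next pass */
    }
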
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 4e34fc4cc1..82f0e12327 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -404,7 +404,4 @@ bool memory_region_is_unassigned(MemoryRegion *mr);
/* vl.c */
extern int singlestep;
-/* cpu-exec.c, accessed with atomic_mb_read/atomic_mb_set */
-extern bool exit_request;
-
#endif