| author    | Alex Bennée <alex.bennee@linaro.org>             | 2017-02-23 18:29:10 +0000 |
| committer | Alex Bennée <alex.bennee@linaro.org>             | 2017-02-24 10:32:45 +0000 |
| commit    | 791158d93b27f22a17c2ada06621831d54f09a2c (patch)  |                           |
| tree      | 7ca792b10da6cfd7970f334436520f4cc2d425dd /cpus.c  |                           |
| parent    | 6546706d28bbcec5c14601b446c0a1cde5256597 (diff)   |                           |
tcg: rename tcg_current_cpu to tcg_current_rr_cpu
...and make the definition local to cpus.c. In preparation for MTTCG, the
concept of a global tcg_current_cpu will no longer make sense. However,
we still need to keep track of it in the single-threaded case to be able
to exit quickly when required.
qemu_cpu_kick_no_halt() moves and is renamed to qemu_cpu_kick_rr_cpu() to
emphasise its use case. qemu_cpu_kick() now kicks the relevant cpu directly
as well as calling qemu_cpu_kick_rr_cpu(), which will become a no-op in MTTCG.
For the time being, the setting of the global exit_request remains.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
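
As a rough illustration of the pattern this patch introduces (not QEMU code: the type names, helpers, and C11 atomics below are stand-ins for QEMU's CPUState, atomic_mb_set()/atomic_mb_read(), and cpu_exit()), here is a minimal self-contained sketch of a round-robin loop that publishes its current vCPU and a kicker that retries until the vCPU it kicked is still the published one:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct ToyCPU {
    int index;
    atomic_bool exit_requested;   /* stand-in for what cpu_exit() would set */
} ToyCPU;

static _Atomic(ToyCPU *) current_rr_cpu;  /* vCPU the RR thread is running now */
static atomic_bool exit_request;          /* global "leave the exec loop" flag */

/* Analogue of the new qemu_cpu_kick_rr_cpu(): keep kicking until the vCPU we
 * kicked is still the one published as current, so a concurrent switch to the
 * next vCPU cannot lose the kick. */
static void kick_rr_cpu(void)
{
    ToyCPU *cpu;
    atomic_store(&exit_request, true);
    do {
        cpu = atomic_load(&current_rr_cpu);
        if (cpu) {
            atomic_store(&cpu->exit_requested, true);
        }
    } while (cpu != atomic_load(&current_rr_cpu));
}

/* Analogue of the single-threaded RR loop: publish each vCPU before running
 * it, clear the pointer once the sweep over all vCPUs is finished. */
static void rr_loop_iteration(ToyCPU *cpus, int ncpus)
{
    for (int i = 0; i < ncpus && !atomic_load(&exit_request); i++) {
        atomic_store(&current_rr_cpu, &cpus[i]);
        /* ... run guest code for cpus[i] until it yields or is kicked ... */
    }
    atomic_store(&current_rr_cpu, NULL);   /* later spurious kicks are harmless */
    atomic_store(&exit_request, false);
}

int main(void)
{
    ToyCPU cpus[2] = { { .index = 0 }, { .index = 1 } };

    atomic_store(&current_rr_cpu, &cpus[0]);
    kick_rr_cpu();                 /* kicks whichever vCPU is currently published */
    printf("cpu0 kicked: %d\n", (int)atomic_load(&cpus[0].exit_requested));

    rr_loop_iteration(cpus, 2);    /* exit_request makes the sweep bail out early */
    return 0;
}
```

The retry loop matters because a kick can race with the round-robin thread advancing to the next vCPU: re-reading the published pointer after the kick ensures that whichever vCPU ends up current has also been kicked.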
Diffstat (limited to 'cpus.c')
-rw-r--r-- | cpus.c | 41 |
1 file changed, 22 insertions, 19 deletions
```diff
@@ -780,8 +780,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
  */
 
 static QEMUTimer *tcg_kick_vcpu_timer;
-
-static void qemu_cpu_kick_no_halt(void);
+static CPUState *tcg_current_rr_cpu;
 
 #define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
 
@@ -790,10 +789,23 @@ static inline int64_t qemu_tcg_next_kick(void)
     return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
 }
 
+/* Kick the currently round-robin scheduled vCPU */
+static void qemu_cpu_kick_rr_cpu(void)
+{
+    CPUState *cpu;
+    atomic_mb_set(&exit_request, 1);
+    do {
+        cpu = atomic_mb_read(&tcg_current_rr_cpu);
+        if (cpu) {
+            cpu_exit(cpu);
+        }
+    } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
+}
+
 static void kick_tcg_thread(void *opaque)
 {
     timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
-    qemu_cpu_kick_no_halt();
+    qemu_cpu_kick_rr_cpu();
 }
 
 static void start_tcg_kick_timer(void)
@@ -813,7 +825,6 @@ static void stop_tcg_kick_timer(void)
     }
 }
 
-
 /***********************************************************/
 void hw_error(const char *fmt, ...)
 {
@@ -1324,6 +1335,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
 
         for (; cpu != NULL && !exit_request; cpu = CPU_NEXT(cpu)) {
+            atomic_mb_set(&tcg_current_rr_cpu, cpu);
 
             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
                               (cpu->singlestep_enabled & SSTEP_NOTIMER) == 0);
@@ -1343,6 +1355,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
                 }
             }
         } /* for cpu.. */
+        /* Does not need atomic_mb_set because a spurious wakeup is okay.  */
+        atomic_set(&tcg_current_rr_cpu, NULL);
 
         /* Pairs with smp_wmb in qemu_cpu_kick.  */
         atomic_mb_set(&exit_request, 0);
@@ -1421,24 +1435,13 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
 #endif
 }
 
-static void qemu_cpu_kick_no_halt(void)
-{
-    CPUState *cpu;
-    /* Ensure whatever caused the exit has reached the CPU threads before
-     * writing exit_request.
-     */
-    atomic_mb_set(&exit_request, 1);
-    cpu = atomic_mb_read(&tcg_current_cpu);
-    if (cpu) {
-        cpu_exit(cpu);
-    }
-}
-
 void qemu_cpu_kick(CPUState *cpu)
 {
     qemu_cond_broadcast(cpu->halt_cond);
     if (tcg_enabled()) {
-        qemu_cpu_kick_no_halt();
+        cpu_exit(cpu);
+        /* Also ensure current RR cpu is kicked */
+        qemu_cpu_kick_rr_cpu();
     } else {
         if (hax_enabled()) {
             /*
@@ -1486,7 +1489,7 @@ void qemu_mutex_lock_iothread(void)
         atomic_dec(&iothread_requesting_mutex);
     } else {
         if (qemu_mutex_trylock(&qemu_global_mutex)) {
-            qemu_cpu_kick_no_halt();
+            qemu_cpu_kick_rr_cpu();
             qemu_mutex_lock(&qemu_global_mutex);
         }
         atomic_dec(&iothread_requesting_mutex);
```
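
One detail worth noting in the hunks above is the asymmetry between atomic_mb_set(&tcg_current_rr_cpu, cpu) when a vCPU is published and the plain atomic_set(&tcg_current_rr_cpu, NULL) when the sweep ends: QEMU's atomic_mb_set()/atomic_mb_read() are roughly sequentially consistent, while atomic_set() is a plain relaxed store, and a stale non-NULL read by the kicker only causes a harmless extra cpu_exit(). A rough C11 analogue of the two stores, purely as illustration (the exact barriers QEMU emits vary by host architecture, so treat the memory orders as an approximation; the names here are stand-ins):

```c
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) published_rr_cpu;   /* stand-in for tcg_current_rr_cpu */

/* Like atomic_mb_set(): a full-barrier store, so the publication is ordered
 * against the RR thread's subsequent work and a kicker reliably observes
 * either this vCPU or a later one. */
static void publish_current(void *cpu)
{
    atomic_store_explicit(&published_rr_cpu, cpu, memory_order_seq_cst);
}

/* Like atomic_set(): a relaxed store; if the kicker still sees the old
 * pointer it merely issues a spurious, harmless kick, so no barrier is
 * required here. */
static void clear_current(void)
{
    atomic_store_explicit(&published_rr_cpu, NULL, memory_order_relaxed);
}
```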