author     Alex Bennée <alex.bennee@linaro.org>   2017-02-23 18:29:09 +0000
committer  Alex Bennée <alex.bennee@linaro.org>   2017-02-24 10:32:45 +0000
commit     6546706d28bbcec5c14601b446c0a1cde5256597
tree       e5297c60dbd9958bb6a5d8223995f689fb5a74cb /cpus.c
parent     8d4e9146b3568022ea5730d92841345d41275d66
tcg: add kick timer for single-threaded vCPU emulation
Currently we rely on the side effect of the main loop grabbing the
iothread_mutex to give any long-running basic block chains a kick and
ensure the next vCPU is scheduled. As this code is being refactored and
rationalised, we now do the kick explicitly here.
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Reviewed-by: Pranith Kumar <bobby.prani@gmail.com>
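The core of the patch is a self-re-arming one-shot timer: the callback
re-arms the timer for the next period before delivering the kick, which
yields a periodic tick without a dedicated thread. Below is a minimal
standalone sketch of that pattern, assuming POSIX timers in place of
QEMU's QEMUTimer API; the names KICK_PERIOD_NS, arm_kick_timer and
kick_cb are invented for illustration, and an atomic counter stands in
for qemu_cpu_kick_no_halt().

/* Sketch of the self-re-arming one-shot timer pattern used by
 * kick_tcg_thread(): the callback first re-arms the timer for the
 * next period, then delivers the "kick".  POSIX timers are an
 * illustrative substitution for QEMU's QEMUTimer. */
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define KICK_PERIOD_NS (1000000000L / 10)   /* 100 ms, like TCG_KICK_PERIOD */

static timer_t kick_timer;
static atomic_int kicks;

static void arm_kick_timer(void)
{
    /* One-shot: it_interval stays zero, so the callback must re-arm. */
    struct itimerspec its = { .it_value.tv_nsec = KICK_PERIOD_NS };
    timer_settime(kick_timer, 0, &its, NULL);
}

static void kick_cb(union sigval sv)
{
    (void)sv;
    arm_kick_timer();             /* re-arm first, as kick_tcg_thread() does */
    atomic_fetch_add(&kicks, 1);  /* stand-in for qemu_cpu_kick_no_halt() */
}

int main(void)
{
    struct sigevent sev = {
        .sigev_notify = SIGEV_THREAD,
        .sigev_notify_function = kick_cb,
    };
    timer_create(CLOCK_MONOTONIC, &sev, &kick_timer);
    arm_kick_timer();

    sleep(1);                     /* let roughly ten kicks arrive */
    printf("kicks delivered: %d\n", atomic_load(&kicks));
    return 0;
}

Build with: cc kick.c -lrt (the -lrt is only required on older glibc).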
Diffstat (limited to 'cpus.c')
-rw-r--r--   cpus.c   61
1 file changed, 61 insertions, 0 deletions
@@ -768,6 +768,53 @@ void configure_icount(QemuOpts *opts, Error **errp)
 }
 
 /***********************************************************/
+/* TCG vCPU kick timer
+ *
+ * The kick timer is responsible for moving single threaded vCPU
+ * emulation on to the next vCPU. If more than one vCPU is running a
+ * timer event will force a cpu->exit so the next vCPU can get
+ * scheduled.
+ *
+ * The timer is removed if all vCPUs are idle and restarted again once
+ * idleness is complete.
+ */
+
+static QEMUTimer *tcg_kick_vcpu_timer;
+
+static void qemu_cpu_kick_no_halt(void);
+
+#define TCG_KICK_PERIOD (NANOSECONDS_PER_SECOND / 10)
+
+static inline int64_t qemu_tcg_next_kick(void)
+{
+    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + TCG_KICK_PERIOD;
+}
+
+static void kick_tcg_thread(void *opaque)
+{
+    timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    qemu_cpu_kick_no_halt();
+}
+
+static void start_tcg_kick_timer(void)
+{
+    if (!tcg_kick_vcpu_timer && CPU_NEXT(first_cpu)) {
+        tcg_kick_vcpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+                                           kick_tcg_thread, NULL);
+        timer_mod(tcg_kick_vcpu_timer, qemu_tcg_next_kick());
+    }
+}
+
+static void stop_tcg_kick_timer(void)
+{
+    if (tcg_kick_vcpu_timer) {
+        timer_del(tcg_kick_vcpu_timer);
+        tcg_kick_vcpu_timer = NULL;
+    }
+}
+
+
+/***********************************************************/
 void hw_error(const char *fmt, ...)
 {
     va_list ap;
@@ -1021,9 +1068,12 @@ static void qemu_wait_io_event_common(CPUState *cpu)
 static void qemu_tcg_wait_io_event(CPUState *cpu)
 {
     while (all_cpu_threads_idle()) {
+        stop_tcg_kick_timer();
         qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
     }
 
+    start_tcg_kick_timer();
+
     while (iothread_requesting_mutex) {
         qemu_cond_wait(&qemu_io_proceeded_cond, &qemu_global_mutex);
     }
@@ -1223,6 +1273,15 @@ static void deal_with_unplugged_cpus(void)
     }
 }
 
+/* Single-threaded TCG
+ *
+ * In the single-threaded case each vCPU is simulated in turn. If
+ * there is more than a single vCPU we create a simple timer to kick
+ * the vCPU and ensure we don't get stuck in a tight loop in one vCPU.
+ * This is done explicitly rather than relying on side-effects
+ * elsewhere.
+ */
+
 static void *qemu_tcg_cpu_thread_fn(void *arg)
 {
     CPUState *cpu = arg;
@@ -1249,6 +1308,8 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
         }
     }
 
+    start_tcg_kick_timer();
+
     /* process any pending work */
     atomic_mb_set(&exit_request, 1);
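To make the interaction between the round-robin loop and the kick
concrete, here is a freestanding toy model of the single-threaded
scheduler. All names (toy_vcpu, run_vcpu, kicker) are invented for
illustration; in QEMU itself the kick arrives via
qemu_cpu_kick_no_halt() forcing the executing vCPU out of
tcg_cpu_exec(), not via the flag polled here.

/* Toy model of the single-threaded round-robin loop above.
 * Compile with: cc -pthread toy.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define NR_VCPUS 2

typedef struct {
    atomic_bool exit_request;   /* set by the "kick" */
    unsigned long icount;       /* work done, for demonstration */
} toy_vcpu;

static toy_vcpu vcpus[NR_VCPUS];
static toy_vcpu *_Atomic current_vcpu;
static atomic_bool shutdown_request;

/* Stand-in for tcg_cpu_exec(): runs until kicked.  A vCPU stuck in a
 * tight guest loop would never return without the kick. */
static void run_vcpu(toy_vcpu *cpu)
{
    while (!atomic_load(&cpu->exit_request)) {
        cpu->icount++;          /* "execute" one instruction */
    }
    atomic_store(&cpu->exit_request, false);
}

/* Stand-in for kick_tcg_thread(): fires periodically and kicks
 * whichever vCPU currently holds the single execution thread. */
static void *kicker(void *arg)
{
    (void)arg;
    while (!atomic_load(&shutdown_request)) {
        usleep(100 * 1000);     /* 100 ms, like TCG_KICK_PERIOD */
        toy_vcpu *cpu = atomic_load(&current_vcpu);
        if (cpu) {
            atomic_store(&cpu->exit_request, true);
        }
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, kicker, NULL);

    /* Single-threaded scheduler: each vCPU is simulated in turn, as in
     * qemu_tcg_cpu_thread_fn().  Without the kicker, vcpus[0] would
     * monopolise the loop forever. */
    for (int round = 0; round < 10; round++) {
        for (int i = 0; i < NR_VCPUS; i++) {
            atomic_store(&current_vcpu, &vcpus[i]);
            run_vcpu(&vcpus[i]);
            atomic_store(&current_vcpu, (toy_vcpu *)NULL);
        }
    }

    atomic_store(&shutdown_request, true);
    pthread_join(tid, NULL);
    for (int i = 0; i < NR_VCPUS; i++) {
        printf("vcpu %d executed %lu toy instructions\n", i, vcpus[i].icount);
    }
    return 0;
}

If the kicker thread is removed, run_vcpu(&vcpus[0]) spins forever and
the second vCPU never runs; that is exactly the starvation the kick
timer prevents. Note also how the patch stops the timer around the idle
wait in qemu_tcg_wait_io_event(), so the host is not woken while every
vCPU is halted, and restarts it once a vCPU has work again.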