| author | Paolo Bonzini <pbonzini@redhat.com> | 2018-09-11 13:15:32 +0200 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-10-02 18:47:55 +0200 |
| commit | 9b4e6f496601d3cd35fd8d09c9e2103999fd5c33 (patch) | |
| tree | ccab9fa8b0930f3cc1bf9e675b721679a0e869d6 /cpus.c | |
| parent | 39fe576c82bc02749410ea16045109f7b7d4af62 (diff) | |
cpus: take seqlock across qemu_icount updates
Even though writes of qemu_icount can safely race with reads in
qemu_icount_raw, qemu_icount is also read by icount_adjust, which
runs in the I/O thread. Therefore, writes do need protection of
the vm_clock_lock; for simplicity, the patch protects it with both
the seqlock and the spinlock, which we already do for hosts that lack 64-bit atomics.
The bug actually predated the introduction of vm_clock_lock;
cpu_update_icount would have needed the BQL before the spinlock was
introduced.
Reported-by: Emilio G. Cota <cota@braap.org>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
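As background for the locking scheme the message describes, below is a minimal, self-contained sketch of the seqlock-plus-lock write pattern in C11, mirroring the shape of the patch: a `_locked` helper that assumes the write side is already held, and a public wrapper that takes the lock and bumps the sequence counter around the update. This is not QEMU code; every name here (demo_timers_state, demo_update_icount, ...) is invented for illustration, and a pthread mutex stands in for the QemuSpin vm_clock_lock.

```c
/*
 * Minimal sketch of a seqlock + lock write side (C11 + pthreads).
 * Not QEMU's seqlock.h; all names are invented for illustration.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

struct demo_timers_state {
    pthread_mutex_t clock_lock;   /* stands in for the vm_clock_lock spinlock */
    atomic_uint     clock_seq;    /* stands in for vm_clock_seqlock; odd = write in progress */
    _Atomic int64_t icount;       /* stands in for qemu_icount */
};

/* Caller must already hold clock_lock and be inside the write section. */
static void demo_update_icount_locked(struct demo_timers_state *ts, int64_t executed)
{
    int64_t cur = atomic_load_explicit(&ts->icount, memory_order_relaxed);
    atomic_store_explicit(&ts->icount, cur + executed, memory_order_relaxed);
}

/* Public wrapper: serialize writers and publish the update via the sequence. */
static void demo_update_icount(struct demo_timers_state *ts, int64_t executed)
{
    pthread_mutex_lock(&ts->clock_lock);

    unsigned seq = atomic_load_explicit(&ts->clock_seq, memory_order_relaxed);
    /* Make the sequence odd; readers that see the data write also see this. */
    atomic_store_explicit(&ts->clock_seq, seq + 1, memory_order_relaxed);
    atomic_thread_fence(memory_order_release);

    demo_update_icount_locked(ts, executed);

    /* Release store: the data write is visible before the sequence is even again. */
    atomic_store_explicit(&ts->clock_seq, seq + 2, memory_order_release);

    pthread_mutex_unlock(&ts->clock_lock);
}
```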
Diffstat (limited to 'cpus.c')
-rw-r--r-- | cpus.c | 22 |
1 file changed, 14 insertions, 8 deletions
```diff
@@ -245,21 +245,27 @@ static int64_t cpu_get_icount_executed(CPUState *cpu)
  * account executed instructions.  This is done by the TCG vCPU
  * thread so the main-loop can see time has moved forward.
  */
-void cpu_update_icount(CPUState *cpu)
+static void cpu_update_icount_locked(CPUState *cpu)
 {
     int64_t executed = cpu_get_icount_executed(cpu);
     cpu->icount_budget -= executed;
 
-#ifndef CONFIG_ATOMIC64
-    seqlock_write_lock(&timers_state.vm_clock_seqlock,
-                       &timers_state.vm_clock_lock);
-#endif
     atomic_set__nocheck(&timers_state.qemu_icount,
                         timers_state.qemu_icount + executed);
-#ifndef CONFIG_ATOMIC64
+}
+
+/*
+ * Update the global shared timer_state.qemu_icount to take into
+ * account executed instructions.  This is done by the TCG vCPU
+ * thread so the main-loop can see time has moved forward.
+ */
+void cpu_update_icount(CPUState *cpu)
+{
+    seqlock_write_lock(&timers_state.vm_clock_seqlock,
+                       &timers_state.vm_clock_lock);
+    cpu_update_icount_locked(cpu);
     seqlock_write_unlock(&timers_state.vm_clock_seqlock,
                          &timers_state.vm_clock_lock);
-#endif
 }
 
 static int64_t cpu_get_icount_raw_locked(void)
@@ -272,7 +278,7 @@ static int64_t cpu_get_icount_raw_locked(void)
             exit(1);
         }
         /* Take into account what has run */
-        cpu_update_icount(cpu);
+        cpu_update_icount_locked(cpu);
     }
     /* The read is protected by the seqlock, so __nocheck is okay.  */
     return atomic_read__nocheck(&timers_state.qemu_icount);
```
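To complete the picture, here is the matching read side for the sketch above (again with invented names; in cpus.c the analogous readers are built on seqlock_read_begin/seqlock_read_retry). The reader never blocks: it simply retries if the sequence was odd or changed while it was reading, which is why the lone atomic_set__nocheck in cpu_update_icount_locked is safe to race with such readers.

```c
/* Read side of the sketch above: retry until a stable, even sequence is seen. */
static int64_t demo_get_icount(struct demo_timers_state *ts)
{
    unsigned seq0, seq1;
    int64_t val;

    do {
        /* Acquire: pairs with the writer's final release store of the sequence. */
        seq0 = atomic_load_explicit(&ts->clock_seq, memory_order_acquire);
        val  = atomic_load_explicit(&ts->icount, memory_order_relaxed);
        /* Acquire fence: if we read data mid-update, the reload below sees the odd value. */
        atomic_thread_fence(memory_order_acquire);
        seq1 = atomic_load_explicit(&ts->clock_seq, memory_order_relaxed);
    } while (seq0 != seq1 || (seq0 & 1));   /* retry on concurrent write */

    return val;
}

/* Tiny usage example; assumes the writer sketch above is in the same file. */
int main(void)
{
    struct demo_timers_state ts = {
        .clock_lock = PTHREAD_MUTEX_INITIALIZER,
        .clock_seq  = 0,
        .icount     = 0,
    };

    demo_update_icount(&ts, 100);
    return demo_get_icount(&ts) == 100 ? 0 : 1;
}
```

In the sketch, as in the patch, the sequence counter guarantees readers a consistent snapshot, while the lock only serializes writers; per the commit message, the point of the change is that the update of qemu_icount must happen inside that write section so readers such as icount_adjust in the I/O thread cannot observe it half-applied relative to the rest of the clock state.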