author     Paolo Bonzini <pbonzini@redhat.com>   2013-10-03 15:17:25 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>   2013-10-17 17:31:00 +0200
commit     17a15f1b768fe2aab8c5f360b05c0daddf0c438b (patch)
tree       b930d26ba7ec4e7923a1abf57154253047c2e6fd
parent     a3270e19ccf05603dfaf09e1f18510f7c93095e0 (diff)
icount: make it thread-safe
This lets threads other than the I/O thread use vm_clock even in
-icount mode.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 cpus.c | 37 +++++++++++++++++++++++++++++++------
 1 file changed, 31 insertions(+), 6 deletions(-)
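
The reader side added by this patch follows the classic seqlock pattern:
take a snapshot of a sequence counter, read the protected fields, and
retry if the counter changed in the meantime.  Below is a minimal,
self-contained sketch of that pattern in plain C11.  All names here
(DemoSeqClock, demo_*) are illustrative, not QEMU's, and the memory
ordering is simplified; the real code uses QEMU's own seqlock API
(seqlock_read_begin/seqlock_read_retry), visible in the new
cpu_get_icount() in the diff below.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative stand-in for timers_state + vm_clock_seqlock.
     * The sequence counter is odd while a write is in progress. */
    typedef struct {
        atomic_uint sequence;
        int64_t     icount_bias;   /* stands in for qemu_icount_bias */
    } DemoSeqClock;

    static DemoSeqClock demo_clock;

    static unsigned demo_read_begin(DemoSeqClock *c)
    {
        unsigned seq;

        /* Spin until no writer is active (even counter value). */
        while ((seq = atomic_load_explicit(&c->sequence,
                                           memory_order_acquire)) & 1) {
            /* busy-wait; writers hold the lock only briefly */
        }
        return seq;
    }

    static bool demo_read_retry(DemoSeqClock *c, unsigned start)
    {
        /* Order the data reads above before re-checking the counter. */
        atomic_thread_fence(memory_order_acquire);
        return atomic_load_explicit(&c->sequence,
                                    memory_order_relaxed) != start;
    }

    /* Reader loop, mirroring the new cpu_get_icount() in this patch. */
    static int64_t demo_get_bias(void)
    {
        unsigned start;
        int64_t bias;

        do {
            start = demo_read_begin(&demo_clock);
            bias = demo_clock.icount_bias;
        } while (demo_read_retry(&demo_clock, start));

        return bias;
    }

The key property is that readers never block: they only loop again if a
writer happened to interleave, which is what makes vm_clock reads safe
from threads other than the I/O thread.
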
diff --git a/cpus.c b/cpus.c
index 6203d98432..398229ecbd 100644
--- a/cpus.c
+++ b/cpus.c
@@ -132,7 +132,7 @@ typedef struct TimersState {
static TimersState timers_state;
/* Return the virtual CPU time, based on the instruction counter. */
-int64_t cpu_get_icount(void)
+static int64_t cpu_get_icount_locked(void)
{
int64_t icount;
CPUState *cpu = current_cpu;
@@ -148,6 +148,19 @@ int64_t cpu_get_icount(void)
return qemu_icount_bias + (icount << icount_time_shift);
}
+int64_t cpu_get_icount(void)
+{
+ int64_t icount;
+ unsigned start;
+
+ do {
+ start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
+ icount = cpu_get_icount_locked();
+ } while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
+
+ return icount;
+}
+
/* return the host CPU cycle counter and handle stop/restart */
/* Caller must hold the BQL */
int64_t cpu_get_ticks(void)
@@ -249,8 +262,9 @@ static void icount_adjust(void)
return;
}
- cur_time = cpu_get_clock();
- cur_icount = cpu_get_icount();
+ seqlock_write_lock(&timers_state.vm_clock_seqlock);
+ cur_time = cpu_get_clock_locked();
+ cur_icount = cpu_get_icount_locked();
delta = cur_icount - cur_time;
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
@@ -268,6 +282,7 @@ static void icount_adjust(void)
}
last_delta = delta;
qemu_icount_bias = cur_icount - (qemu_icount << icount_time_shift);
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock);
}
static void icount_adjust_rt(void *opaque)
@@ -292,10 +307,14 @@ static int64_t qemu_icount_round(int64_t count)
static void icount_warp_rt(void *opaque)
{
- if (vm_clock_warp_start == -1) {
+ /* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
+ * changes from -1 to another value, so the race here is okay.
+ */
+ if (atomic_read(&vm_clock_warp_start) == -1) {
return;
}
+ seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (runstate_is_running()) {
int64_t clock = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
int64_t warp_delta;
@@ -306,14 +325,15 @@ static void icount_warp_rt(void *opaque)
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
* far ahead of real time.
*/
- int64_t cur_time = cpu_get_clock();
- int64_t cur_icount = cpu_get_icount();
+ int64_t cur_time = cpu_get_clock_locked();
+ int64_t cur_icount = cpu_get_icount_locked();
int64_t delta = cur_time - cur_icount;
warp_delta = MIN(warp_delta, delta);
}
qemu_icount_bias += warp_delta;
}
vm_clock_warp_start = -1;
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -327,7 +347,10 @@ void qtest_clock_warp(int64_t dest)
while (clock < dest) {
int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL);
int64_t warp = MIN(dest - clock, deadline);
+ seqlock_write_lock(&timers_state.vm_clock_seqlock);
qemu_icount_bias += warp;
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock);
+
qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
}
@@ -391,9 +414,11 @@ void qemu_clock_warp(QEMUClockType type)
* you will not be sending network packets continuously instead of
* every 100ms.
*/
+ seqlock_write_lock(&timers_state.vm_clock_seqlock);
if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
vm_clock_warp_start = clock;
}
+ seqlock_write_unlock(&timers_state.vm_clock_seqlock);
timer_mod_anticipate(icount_warp_timer, clock + deadline);
} else if (deadline == 0) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
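
On the writer side, every update to qemu_icount_bias and
vm_clock_warp_start is now bracketed by seqlock_write_lock and
seqlock_write_unlock.  Continuing the hedged sketch above (same
illustrative demo_* names, simplified ordering), a writer bumps the
counter to an odd value, updates the fields, and bumps it back to even;
writers need no further synchronization against each other because in
QEMU they are all serialized by the Big QEMU Lock.

    /* Writer side of the sketch above.  Writers are assumed to be
     * serialized externally (in QEMU, by the BQL), so plain increments
     * of the sequence counter are enough. */
    static void demo_write_lock(DemoSeqClock *c)
    {
        /* Counter becomes odd: new readers spin, racing readers retry. */
        atomic_fetch_add_explicit(&c->sequence, 1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);
    }

    static void demo_write_unlock(DemoSeqClock *c)
    {
        /* Counter becomes even again, publishing the update. */
        atomic_fetch_add_explicit(&c->sequence, 1, memory_order_release);
    }

    /* Mirrors the qemu_icount_bias += warp_delta update in
     * icount_warp_rt() in the diff above. */
    static void demo_add_warp(int64_t warp)
    {
        demo_write_lock(&demo_clock);
        demo_clock.icount_bias += warp;
        demo_write_unlock(&demo_clock);
    }

The one reader that deliberately skips the seqlock is the
atomic_read(&vm_clock_warp_start) early exit in icount_warp_rt(): as the
comment added in that hunk explains, a stale -1 is harmless because the
warp timer is rescheduled soon after vm_clock_warp_start changes.
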