Diffstat (limited to 'cpus.c')
 cpus.c | 85
 1 file changed, 66 insertions(+), 19 deletions(-)
diff --git a/cpus.c b/cpus.c
index e8139de534..f99253746a 100644
--- a/cpus.c
+++ b/cpus.c
@@ -120,16 +120,11 @@ static bool all_cpu_threads_idle(void)
/* Protected by TimersState seqlock */
static bool icount_sleep = true;
-static int64_t vm_clock_warp_start = -1;
/* Conversion factor from emulated instructions to virtual clock ticks. */
static int icount_time_shift;
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
#define MAX_ICOUNT_SHIFT 10
-static QEMUTimer *icount_rt_timer;
-static QEMUTimer *icount_vm_timer;
-static QEMUTimer *icount_warp_timer;
-
typedef struct TimersState {
/* Protected by BQL. */
int64_t cpu_ticks_prev;
@@ -147,6 +142,11 @@ typedef struct TimersState {
int64_t qemu_icount_bias;
/* Only written by TCG thread */
int64_t qemu_icount;
+ /* for adjusting icount */
+ int64_t vm_clock_warp_start;
+ QEMUTimer *icount_rt_timer;
+ QEMUTimer *icount_vm_timer;
+ QEMUTimer *icount_warp_timer;
} TimersState;
static TimersState timers_state;
@@ -432,14 +432,14 @@ static void icount_adjust(void)
static void icount_adjust_rt(void *opaque)
{
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
icount_adjust();
}
static void icount_adjust_vm(void *opaque)
{
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
icount_adjust();
@@ -460,7 +460,7 @@ static void icount_warp_rt(void)
*/
do {
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
- warp_start = vm_clock_warp_start;
+ warp_start = timers_state.vm_clock_warp_start;
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
if (warp_start == -1) {
@@ -473,7 +473,7 @@ static void icount_warp_rt(void)
cpu_get_clock_locked());
int64_t warp_delta;
- warp_delta = clock - vm_clock_warp_start;
+ warp_delta = clock - timers_state.vm_clock_warp_start;
if (use_icount == 2) {
/*
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
@@ -485,7 +485,7 @@ static void icount_warp_rt(void)
}
timers_state.qemu_icount_bias += warp_delta;
}
- vm_clock_warp_start = -1;
+ timers_state.vm_clock_warp_start = -1;
seqlock_write_end(&timers_state.vm_clock_seqlock);
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
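For reference, the do/while loop above is the standard seqlock read pattern: snapshot the sequence number, read the protected field, and retry if a writer raced with the read. A minimal sketch, assuming QEMU's seqlock API from include/qemu/seqlock.h and the TimersState definition earlier in this file (the helper name is illustrative):

static int64_t read_warp_start_sketch(TimersState *s)
{
    int64_t warp_start;
    unsigned seq;

    do {
        /* Snapshot the sequence counter before reading the protected field. */
        seq = seqlock_read_begin(&s->vm_clock_seqlock);
        warp_start = s->vm_clock_warp_start;
        /* Retry if a writer updated the state while we were reading. */
    } while (seqlock_read_retry(&s->vm_clock_seqlock, seq));

    return warp_start;
}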
@@ -594,11 +594,13 @@ void qemu_start_warp_timer(void)
* every 100ms.
*/
seqlock_write_begin(&timers_state.vm_clock_seqlock);
- if (vm_clock_warp_start == -1 || vm_clock_warp_start > clock) {
- vm_clock_warp_start = clock;
+ if (timers_state.vm_clock_warp_start == -1
+ || timers_state.vm_clock_warp_start > clock) {
+ timers_state.vm_clock_warp_start = clock;
}
seqlock_write_end(&timers_state.vm_clock_seqlock);
- timer_mod_anticipate(icount_warp_timer, clock + deadline);
+ timer_mod_anticipate(timers_state.icount_warp_timer,
+ clock + deadline);
}
} else if (deadline == 0) {
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
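On the write side, updates to vm_clock_warp_start are bracketed by seqlock_write_begin()/seqlock_write_end(), and the warp timer is rearmed with timer_mod_anticipate(), which only moves an armed timer's expiry earlier, never later. A minimal sketch of that pairing, assuming the same TimersState layout (the helper name is illustrative):

static void arm_warp_timer_sketch(TimersState *s, int64_t clock, int64_t deadline)
{
    /* Writers take the seqlock so concurrent readers retry instead of
     * seeing a half-updated warp start. */
    seqlock_write_begin(&s->vm_clock_seqlock);
    if (s->vm_clock_warp_start == -1 || s->vm_clock_warp_start > clock) {
        s->vm_clock_warp_start = clock;
    }
    seqlock_write_end(&s->vm_clock_seqlock);

    /* Never push an already-armed warp timer further into the future. */
    timer_mod_anticipate(s->icount_warp_timer, clock + deadline);
}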
@@ -623,7 +625,7 @@ static void qemu_account_warp_timer(void)
return;
}
- timer_del(icount_warp_timer);
+ timer_del(timers_state.icount_warp_timer);
icount_warp_rt();
}
@@ -632,6 +634,45 @@ static bool icount_state_needed(void *opaque)
return use_icount;
}
+static bool warp_timer_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_warp_timer != NULL;
+}
+
+static bool adjust_timers_state_needed(void *opaque)
+{
+ TimersState *s = opaque;
+ return s->icount_rt_timer != NULL;
+}
+
+/*
+ * Subsection for warp timer migration is optional, because the warp timer may not be created
+ */
+static const VMStateDescription icount_vmstate_warp_timer = {
+ .name = "timer/icount/warp_timer",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = warp_timer_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT64(vm_clock_warp_start, TimersState),
+ VMSTATE_TIMER_PTR(icount_warp_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static const VMStateDescription icount_vmstate_adjust_timers = {
+ .name = "timer/icount/timers",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = adjust_timers_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_TIMER_PTR(icount_rt_timer, TimersState),
+ VMSTATE_TIMER_PTR(icount_vm_timer, TimersState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
/*
* This is a subsection for icount migration.
*/
@@ -644,6 +685,11 @@ static const VMStateDescription icount_vmstate_timers = {
VMSTATE_INT64(qemu_icount_bias, TimersState),
VMSTATE_INT64(qemu_icount, TimersState),
VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &icount_vmstate_warp_timer,
+ &icount_vmstate_adjust_timers,
+ NULL
}
};
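For context, a subsection is emitted only when its .needed callback returns true at save time; when it is absent from the incoming stream, the destination simply leaves those fields untouched, so streams from sources that never created these timers remain loadable. A minimal, self-contained sketch of the same pattern with hypothetical names (FooState, vmstate_foo, ...):

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "migration/vmstate.h"

typedef struct FooState {
    int64_t counter;
    QEMUTimer *opt_timer;   /* only created when the optional feature is enabled */
} FooState;

static bool foo_opt_needed(void *opaque)
{
    FooState *s = opaque;
    /* Decides at save time whether the subsection is sent at all. */
    return s->opt_timer != NULL;
}

static const VMStateDescription vmstate_foo_opt = {
    .name = "foo/opt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = foo_opt_needed,
    .fields = (VMStateField[]) {
        VMSTATE_TIMER_PTR(opt_timer, FooState),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_foo = {
    .name = "foo",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT64(counter, FooState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_foo_opt,
        NULL    /* the subsection list is NULL-terminated */
    }
};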
@@ -754,7 +800,7 @@ void configure_icount(QemuOpts *opts, Error **errp)
icount_sleep = qemu_opt_get_bool(opts, "sleep", true);
if (icount_sleep) {
- icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
icount_timer_cb, NULL);
}
@@ -788,13 +834,14 @@ void configure_icount(QemuOpts *opts, Error **errp)
the virtual time trigger catches emulated time passing too fast.
Realtime triggers occur even when idle, so use them less frequently
than VM triggers. */
- icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+ timers_state.vm_clock_warp_start = -1;
+ timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
icount_adjust_rt, NULL);
- timer_mod(icount_rt_timer,
+ timer_mod(timers_state.icount_rt_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
- icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
+ timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
icount_adjust_vm, NULL);
- timer_mod(icount_vm_timer,
+ timer_mod(timers_state.icount_vm_timer,
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
NANOSECONDS_PER_SECOND / 10);
}
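The adjustment timers above follow QEMU's usual create-then-rearm pattern: timer_new_ms()/timer_new_ns() attach a callback to a clock, and the callback re-arms itself with timer_mod() each time it fires (as the comment above notes, QEMU_CLOCK_VIRTUAL_RT keeps firing even when the guest is idle, so it is used less frequently than the VM trigger). A minimal sketch of that pattern, with an illustrative callback, opaque pointer, and period:

static void periodic_adjust_sketch(void *opaque)
{
    TimersState *s = opaque;

    /* Re-arm one second ahead on the clock the timer runs on... */
    timer_mod(s->icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
    /* ...then do the periodic work (icount_adjust() in the code above). */
}

static void start_adjust_timer_sketch(TimersState *s)
{
    s->icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                      periodic_adjust_sketch, s);
    timer_mod(s->icount_rt_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
}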