author     Paolo Bonzini <pbonzini@redhat.com>       2013-10-28 17:32:18 +0100
committer  Anthony Liguori <aliguori@amazon.com>     2013-11-06 21:47:05 -0800
commit     5f3e31012e334f3410e04abae7f88565df17c91a (patch)
tree       75930e80e1a4fa885717c3528054d15cbda51704
parent     cd5be5829c1ce87aa6b3a7806524fac07ac9a757 (diff)
timers: fix stop/cont with -icount
Stop/cont commands are broken with -icount due to a deadlock. The
real problem is that the computation of timers_state.cpu_ticks_offset
makes no sense with -icount enabled: we set it to an icount clock value
in cpu_disable_ticks, and subtract a TSC (or similar, whatever
cpu_get_real_ticks happens to return) value in cpu_enable_ticks.
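
To make the mismatch concrete, here is a minimal standalone sketch. It is not the QEMU code: the stub clocks, their values, and the broken_* names are made up purely to show how the old stop path stored one time base while the cont path subtracted another.

    /* Minimal sketch of the old, broken pairing; stub clocks and values
     * are invented for illustration. */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int use_icount = 1;

    /* stand-in for cpu_get_icount(): an instruction-counter-based clock */
    static int64_t icount_clock(void) { return 1000; }
    /* stand-in for cpu_get_real_ticks(): a raw TSC-like counter */
    static int64_t real_ticks(void) { return 5000000000LL; }

    static int64_t ticks_offset;

    static void broken_disable_ticks(void)  /* "stop" */
    {
        /* old code: with -icount this stores an icount clock value... */
        ticks_offset = use_icount ? icount_clock() : real_ticks();
    }

    static void broken_enable_ticks(void)   /* "cont" */
    {
        /* ...but cont always subtracts a TSC-like value, so the offset
         * ends up mixing two unrelated time bases */
        ticks_offset -= real_ticks();
    }

    int main(void)
    {
        broken_disable_ticks();
        broken_enable_ticks();
        printf("offset after stop/cont: %" PRId64 "\n", ticks_offset);
        return 0;
    }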
The fix is simple. timers_state.cpu_ticks_offset is only used
together with cpu_get_real_ticks, so we can use cpu_get_real_ticks
in cpu_disable_ticks. There is no need to update cpu_ticks_prev
at the time cpu_disable_ticks is called; instead, we can do it
the next time cpu_get_ticks is called.
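
A standalone sketch of the fixed pairing follows; it is simplified from cpus.c, with the timers_state fields flattened into plain globals and fake_tsc as a made-up stand-in for cpu_get_real_ticks(). The point is that the offset is now only ever combined with the one clock it is defined against, so the guest clock freezes on stop and resumes seamlessly on cont.

    /* Sketch of the fixed offset handling (simplified, assumed names). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t fake_tsc;                  /* raw, always-running counter */
    static int64_t real_ticks(void) { return fake_tsc; }

    static int64_t ticks_offset;
    static int ticks_enabled = 1;

    static int64_t get_ticks(void)
    {
        int64_t ticks = ticks_offset;
        if (ticks_enabled) {
            ticks += real_ticks();            /* running: offset + raw ticks */
        }
        return ticks;                         /* stopped: frozen at offset */
    }

    static void disable_ticks(void)           /* stop */
    {
        if (ticks_enabled) {
            ticks_offset += real_ticks();     /* same clock the offset pairs with */
            ticks_enabled = 0;
        }
    }

    static void enable_ticks(void)            /* cont */
    {
        if (!ticks_enabled) {
            ticks_offset -= real_ticks();     /* resume from the frozen value */
            ticks_enabled = 1;
        }
    }

    int main(void)
    {
        fake_tsc = 100;
        disable_ticks();                      /* guest clock freezes at 100 */
        fake_tsc = 500;                       /* raw counter keeps running */
        printf("stopped: %" PRId64 "\n", get_ticks());  /* prints 100 */
        enable_ticks();
        fake_tsc = 600;
        printf("resumed: %" PRId64 "\n", get_ticks());  /* prints 200 */
        return 0;
    }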
The change to cpu_disable_ticks is the important part of the patch.
The rest modifies the code to always check timers_state.cpu_ticks_prev,
even when the ticks are not advancing (i.e. the VM is stopped). It also
makes a similar change to cpu_get_clock_locked, so that the code remains
similar for cpu_get_ticks and cpu_get_clock_locked.
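
The guard itself can be illustrated with a standalone sketch, simplified from cpu_get_ticks() with made-up tick values: when the raw counter goes backwards, the offset grows so the reported value never decreases.

    /* Sketch of the non-increasing-ticks guard (simplified, invented values). */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static int64_t ticks_prev;
    static int64_t ticks_offset;

    static int64_t monotonic_ticks(int64_t raw)
    {
        int64_t ticks = ticks_offset + raw;
        if (ticks_prev > ticks) {
            /* e.g. host software suspend made the raw counter go backwards:
             * grow the offset so the reported clock never decreases */
            ticks_offset += ticks_prev - ticks;
            ticks = ticks_prev;
        }
        ticks_prev = ticks;
        return ticks;
    }

    int main(void)
    {
        printf("%" PRId64 "\n", monotonic_ticks(100)); /* 100 */
        printf("%" PRId64 "\n", monotonic_ticks(40));  /* clamped to 100 */
        printf("%" PRId64 "\n", monotonic_ticks(70));  /* 130: moves forward again */
        return 0;
    }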
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-id: 1382977938-13844-1-git-send-email-pbonzini@redhat.com
Signed-off-by: Anthony Liguori <aliguori@amazon.com>
-rw-r--r--  cpus.c | 42 ++++++++++++++++++++++--------------------
1 file changed, 22 insertions(+), 20 deletions(-)
@@ -165,36 +165,38 @@ int64_t cpu_get_icount(void)
 /* return the host CPU cycle counter and handle stop/restart */
 /* Caller must hold the BQL */
 int64_t cpu_get_ticks(void)
 {
+    int64_t ticks;
+
     if (use_icount) {
         return cpu_get_icount();
     }
-    if (!timers_state.cpu_ticks_enabled) {
-        return timers_state.cpu_ticks_offset;
-    } else {
-        int64_t ticks;
-        ticks = cpu_get_real_ticks();
-        if (timers_state.cpu_ticks_prev > ticks) {
-            /* Note: non increasing ticks may happen if the host uses
-               software suspend */
-            timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
-        }
-        timers_state.cpu_ticks_prev = ticks;
-        return ticks + timers_state.cpu_ticks_offset;
+
+    ticks = timers_state.cpu_ticks_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += cpu_get_real_ticks();
+    }
+
+    if (timers_state.cpu_ticks_prev > ticks) {
+        /* Note: non increasing ticks may happen if the host uses
+           software suspend */
+        timers_state.cpu_ticks_offset += timers_state.cpu_ticks_prev - ticks;
+        ticks = timers_state.cpu_ticks_prev;
     }
+
+    timers_state.cpu_ticks_prev = ticks;
+    return ticks;
 }
 
 static int64_t cpu_get_clock_locked(void)
 {
-    int64_t ti;
+    int64_t ticks;
 
-    if (!timers_state.cpu_ticks_enabled) {
-        ti = timers_state.cpu_clock_offset;
-    } else {
-        ti = get_clock();
-        ti += timers_state.cpu_clock_offset;
+    ticks = timers_state.cpu_clock_offset;
+    if (timers_state.cpu_ticks_enabled) {
+        ticks += get_clock();
     }
-    return ti;
+    return ticks;
 }
 
 /* return the host CPU monotonic timer and handle stop/restart */
@@ -235,7 +237,7 @@ void cpu_disable_ticks(void)
     /* Here, the really thing protected by seqlock is cpu_clock_offset. */
     seqlock_write_lock(&timers_state.vm_clock_seqlock);
     if (timers_state.cpu_ticks_enabled) {
-        timers_state.cpu_ticks_offset = cpu_get_ticks();
+        timers_state.cpu_ticks_offset += cpu_get_real_ticks();
         timers_state.cpu_clock_offset = cpu_get_clock_locked();
         timers_state.cpu_ticks_enabled = 0;
     }