-rw-r--r--  hw/i386/kvm/clock.c    | 18
-rw-r--r--  target-i386/kvm.c      | 45
-rw-r--r--  target-i386/kvm_i386.h |  1
3 files changed, 48 insertions, 16 deletions
diff --git a/hw/i386/kvm/clock.c b/hw/i386/kvm/clock.c
index efdf165848..0593a3f1f5 100644
--- a/hw/i386/kvm/clock.c
+++ b/hw/i386/kvm/clock.c
@@ -17,7 +17,7 @@
#include "qemu/host-utils.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
-#include "sysemu/cpus.h"
+#include "kvm_i386.h"
#include "hw/sysbus.h"
#include "hw/kvm/clock.h"
@@ -125,21 +125,7 @@ static void kvmclock_vm_state_change(void *opaque, int running,
            return;
        }

-        cpu_synchronize_all_states();
-        /* In theory, the cpu_synchronize_all_states() call above wouldn't
-         * affect the rest of the code, as the VCPU state inside CPUState
-         * is supposed to always match the VCPU state on the kernel side.
-         *
-         * In practice, calling cpu_synchronize_state() too soon will load the
-         * kernel-side APIC state into X86CPU.apic_state too early, APIC state
-         * won't be reloaded later because CPUState.vcpu_dirty==true, and
-         * outdated APIC state may be migrated to another host.
-         *
-         * The real fix would be to make sure outdated APIC state is read
-         * from the kernel again when necessary. While this is not fixed, we
-         * need the cpu_clean_all_dirty() call below.
-         */
-        cpu_clean_all_dirty();
+        kvm_synchronize_all_tsc();

        ret = kvm_vm_ioctl(kvm_state, KVM_GET_CLOCK, &data);
        if (ret < 0) {
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 64046cb69d..2a9953b2d4 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -111,6 +111,51 @@ bool kvm_allows_irq0_override(void)
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
}

+static int kvm_get_tsc(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+    struct {
+        struct kvm_msrs info;
+        struct kvm_msr_entry entries[1];
+    } msr_data;
+    int ret;
+
+    if (env->tsc_valid) {
+        return 0;
+    }
+
+    msr_data.info.nmsrs = 1;
+    msr_data.entries[0].index = MSR_IA32_TSC;
+    env->tsc_valid = !runstate_is_running();
+
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+    if (ret < 0) {
+        return ret;
+    }
+
+    env->tsc = msr_data.entries[0].data;
+    return 0;
+}
+
+static inline void do_kvm_synchronize_tsc(void *arg)
+{
+    CPUState *cpu = arg;
+
+    kvm_get_tsc(cpu);
+}
+
+void kvm_synchronize_all_tsc(void)
+{
+    CPUState *cpu;
+
+    if (kvm_enabled()) {
+        CPU_FOREACH(cpu) {
+            run_on_cpu(cpu, do_kvm_synchronize_tsc, cpu);
+        }
+    }
+}
+
static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
{
    struct kvm_cpuid2 *cpuid;
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index e557e94f44..c1b312ba2a 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -15,6 +15,7 @@
bool kvm_allows_irq0_override(void);
bool kvm_has_smm(void);
+void kvm_synchronize_all_tsc(void);
void kvm_arch_reset_vcpu(X86CPU *cs);
void kvm_arch_do_init_vcpu(X86CPU *cs);
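
The kvm_get_tsc() helper added above builds a single-entry KVM_GET_MSRS request by wrapping struct kvm_msrs (which ends in a flexible array member) together with one struct kvm_msr_entry. The sketch below shows that same ioctl pattern in a standalone form; read_guest_tsc, the vcpu_fd parameter, and the local MSR_IA32_TSC define are illustrative assumptions, not code from this patch. In QEMU itself the call is dispatched through run_on_cpu() so that the vCPU ioctl is issued from the vCPU's own thread.

/* Minimal sketch (not QEMU code) of the KVM_GET_MSRS pattern used by
 * kvm_get_tsc(): struct kvm_msrs ends in a flexible array member, so a
 * one-entry buffer is formed by pairing it with a kvm_msr_entry in an
 * anonymous wrapper struct.  vcpu_fd is assumed to be a file descriptor
 * obtained earlier via KVM_CREATE_VCPU. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

#define MSR_IA32_TSC 0x10   /* IA32_TIME_STAMP_COUNTER */

int read_guest_tsc(int vcpu_fd, uint64_t *tsc)
{
    struct {
        struct kvm_msrs info;
        struct kvm_msr_entry entries[1];
    } msr_data = {
        .info.nmsrs       = 1,
        .entries[0].index = MSR_IA32_TSC,
    };

    /* KVM_GET_MSRS returns the number of MSRs it actually read and fills
     * entries[0].data with the guest's current TSC value. */
    if (ioctl(vcpu_fd, KVM_GET_MSRS, &msr_data) != 1) {
        return -1;
    }

    *tsc = msr_data.entries[0].data;
    return 0;
}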