author     David Gibson <david@gibson.dropbear.id.au>    2017-05-26 14:46:28 +1000
committer  David Gibson <david@gibson.dropbear.id.au>    2017-06-06 08:53:24 +1000
commit     75e972dab51ba3bc02760e8142fc0c22eb611e3a (patch)
tree       8677457b56cbbb8388b4100fb49ba1811833525d
parent     1b6e74824659ef10bd2a2924a98df388b78e175e (diff)
migration: Mark CPU states dirty before incoming migration/loadvm
As a rule, CPU internal state should never be updated when !cpu->kvm_vcpu_dirty (or the HAX equivalent). If that is done, then subsequent calls to cpu_synchronize_state() - usually safe and idempotent - will clobber the state.

However, we routinely do this during a loadvm or incoming migration. Usually this is called shortly after a reset, which will clear all the cpu dirty flags with cpu_synchronize_all_post_reset(). Nothing is expected to set the dirty flags again before the cpu state is loaded from the incoming stream.

This means that it isn't safe to call cpu_synchronize_state() from a post_load handler, which is non-obvious and potentially inconvenient.

We could cpu_synchronize_all_state() before the loadvm, but that would be overkill since a) we expect the state to already be synchronized from the reset and b) we expect to completely rewrite the state with a call to cpu_synchronize_all_post_init() at the end of qemu_loadvm_state().

To clear this up, this patch introduces cpu_synchronize_pre_loadvm() and associated helpers, which simply mark the cpu state as dirty without actually changing anything, i.e. they say we want to discard any existing KVM (or HAX) state and replace it with what we're going to load.

Cc: Juan Quintela <quintela@redhat.com>
Cc: Dave Gilbert <dgilbert@redhat.com>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Juan Quintela <quintela@redhat.com>
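The dirty-flag rule described above can be illustrated with a small standalone model. This is not QEMU code: ModelCPU, accel_reg, model_synchronize_state() and model_synchronize_pre_loadvm() are invented stand-ins for cpu->kvm_vcpu_dirty, the accelerator-side register copy, cpu_synchronize_state() and cpu_synchronize_pre_loadvm(); the sketch only shows why marking the state dirty before loading keeps a later synchronize from pulling stale accelerator state over freshly loaded values.

/*
 * Standalone model of the dirty-flag protocol; the names below are
 * invented for illustration and are not QEMU APIs.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    int reg;        /* stand-in for QEMU's copy of a CPU register */
    bool dirty;     /* stand-in for cpu->kvm_vcpu_dirty */
} ModelCPU;

static int accel_reg = 0;   /* stand-in for the state held by KVM/HAX */

/* Pull state from the accelerator unless QEMU's copy is already dirty. */
static void model_synchronize_state(ModelCPU *cpu)
{
    if (!cpu->dirty) {
        cpu->reg = accel_reg;
        cpu->dirty = true;
    }
}

/* The new hook: declare QEMU's copy authoritative without touching it. */
static void model_synchronize_pre_loadvm(ModelCPU *cpu)
{
    cpu->dirty = true;
}

int main(void)
{
    ModelCPU cpu = { .reg = 0, .dirty = false };  /* post-reset: not dirty */

    model_synchronize_pre_loadvm(&cpu);  /* the step this patch adds */
    cpu.reg = 42;                        /* incoming stream loads a register */
    model_synchronize_state(&cpu);       /* e.g. from a post_load handler */

    printf("reg = %d (42 means the loaded value survived)\n", cpu.reg);
    return 0;
}

Dropping the model_synchronize_pre_loadvm() call makes the program print 0 instead of 42, which is the clobbering scenario the patch closes off.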
-rw-r--r--   cpus.c                       9
-rw-r--r--   include/sysemu/cpus.h        1
-rw-r--r--   include/sysemu/hax.h         1
-rw-r--r--   include/sysemu/hw_accel.h   10
-rw-r--r--   include/sysemu/kvm.h         1
-rw-r--r--   kvm-all.c                   10
-rw-r--r--   migration/savevm.c           2
-rw-r--r--   target/i386/hax-all.c       10
8 files changed, 44 insertions, 0 deletions
diff --git a/cpus.c b/cpus.c
index 516e5cbac1..6398439946 100644
--- a/cpus.c
+++ b/cpus.c
@@ -921,6 +921,15 @@ void cpu_synchronize_all_post_init(void)
    }
}
+void cpu_synchronize_all_pre_loadvm(void)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        cpu_synchronize_pre_loadvm(cpu);
+    }
+}
+
static int do_vm_stop(RunState state)
{
    int ret = 0;
diff --git a/include/sysemu/cpus.h b/include/sysemu/cpus.h
index a8053f1715..731756d948 100644
--- a/include/sysemu/cpus.h
+++ b/include/sysemu/cpus.h
@@ -27,6 +27,7 @@ void qemu_timer_notify_cb(void *opaque, QEMUClockType type);
void cpu_synchronize_all_states(void);
void cpu_synchronize_all_post_reset(void);
void cpu_synchronize_all_post_init(void);
+void cpu_synchronize_all_pre_loadvm(void);
void qtest_clock_warp(int64_t dest);
diff --git a/include/sysemu/hax.h b/include/sysemu/hax.h
index d9f023918e..232a68ab1b 100644
--- a/include/sysemu/hax.h
+++ b/include/sysemu/hax.h
@@ -33,6 +33,7 @@ int hax_populate_ram(uint64_t va, uint32_t size);
void hax_cpu_synchronize_state(CPUState *cpu);
void hax_cpu_synchronize_post_reset(CPUState *cpu);
void hax_cpu_synchronize_post_init(CPUState *cpu);
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu);
#ifdef CONFIG_HAX
diff --git a/include/sysemu/hw_accel.h b/include/sysemu/hw_accel.h
index c9b3105bc7..469ffda460 100644
--- a/include/sysemu/hw_accel.h
+++ b/include/sysemu/hw_accel.h
@@ -45,4 +45,14 @@ static inline void cpu_synchronize_post_init(CPUState *cpu)
    }
}
+static inline void cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+    if (kvm_enabled()) {
+        kvm_cpu_synchronize_pre_loadvm(cpu);
+    }
+    if (hax_enabled()) {
+        hax_cpu_synchronize_pre_loadvm(cpu);
+    }
+}
+
#endif /* QEMU_HW_ACCEL_H */
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index 5cc83f2003..a45c145560 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -459,6 +459,7 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram_addr,
void kvm_cpu_synchronize_state(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
+void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
void kvm_init_cpu_signals(CPUState *cpu);
diff --git a/kvm-all.c b/kvm-all.c
index 7df27c8522..494b9256aa 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1896,6 +1896,16 @@ void kvm_cpu_synchronize_post_init(CPUState *cpu)
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
+static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+    cpu->kvm_vcpu_dirty = true;
+}
+
+void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
diff --git a/migration/savevm.c b/migration/savevm.c
index 035c127fdd..1993ca23fe 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1999,6 +1999,8 @@ int qemu_loadvm_state(QEMUFile *f)
        }
    }
+    cpu_synchronize_all_pre_loadvm();
+
    ret = qemu_loadvm_state_main(f, mis);
    qemu_event_set(&mis->main_thread_load_event);
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 73469311d6..097db5cae1 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -635,6 +635,16 @@ void hax_cpu_synchronize_post_init(CPUState *cpu)
    run_on_cpu(cpu, do_hax_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}
+static void do_hax_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
+{
+    cpu->hax_vcpu_dirty = true;
+}
+
+void hax_cpu_synchronize_pre_loadvm(CPUState *cpu)
+{
+    run_on_cpu(cpu, do_hax_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
+}
+
int hax_smp_cpu_exec(CPUState *cpu)
{
    CPUArchState *env = (CPUArchState *) (cpu->env_ptr);