Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  31
1 file changed, 31 insertions, 0 deletions
diff --git a/exec.c b/exec.c
index 16f5870caa..1df966d17a 100644
--- a/exec.c
+++ b/exec.c
@@ -197,6 +197,7 @@ typedef struct subpage_t {
static void io_mem_init(void);
static void memory_map_init(void);
+static void tcg_log_global_after_sync(MemoryListener *listener);
static void tcg_commit(MemoryListener *listener);
static MemoryRegion io_mem_watch;
@@ -905,6 +906,7 @@ void cpu_address_space_init(CPUState *cpu, int asidx,
    newas->cpu = cpu;
    newas->as = as;
    if (tcg_enabled()) {
+        newas->tcg_as_listener.log_global_after_sync = tcg_log_global_after_sync;
        newas->tcg_as_listener.commit = tcg_commit;
        memory_listener_register(&newas->tcg_as_listener, as);
    }
@@ -3142,6 +3144,35 @@ void address_space_dispatch_free(AddressSpaceDispatch *d)
    g_free(d);
}
+static void do_nothing(CPUState *cpu, run_on_cpu_data d)
+{
+}
+
+static void tcg_log_global_after_sync(MemoryListener *listener)
+{
+    CPUAddressSpace *cpuas;
+
+    /* Wait for the CPU to end the current TB. This avoids the following
+     * incorrect race:
+     *
+     *      vCPU                         migration
+     *      ----------------------       -------------------------
+     *      TLB check -> slow path
+     *        notdirty_mem_write
+     *          write to RAM
+     *          mark dirty
+     *                                   clear dirty flag
+     *      TLB check -> fast path
+     *                                   read memory
+     *        write to RAM
+     *
+     * by pushing the migration thread's memory read after the vCPU thread has
+     * written the memory.
+     */
+    cpuas = container_of(listener, CPUAddressSpace, tcg_as_listener);
+    run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL);
+}
+
static void tcg_commit(MemoryListener *listener)
{
    CPUAddressSpace *cpuas;
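
For readers unfamiliar with the mechanism, the sketch below is a self-contained toy model of the ordering trick this patch relies on. It is not QEMU code; the names (vcpu_thread, drain_vcpu, guest_ram) are purely illustrative. A no-op work item is queued on the "vCPU" thread and the caller waits for it, so any write made by the block that was executing when the drain started is visible before the "migration" side reads memory. This mirrors what run_on_cpu(cpuas->cpu, do_nothing, RUN_ON_CPU_NULL) is used for in the hunk above: the comment states it waits for the CPU to end the current TB.

/*
 * Toy model (not QEMU code) of the ordering trick above, using only
 * POSIX threads.  Build: cc -pthread toy_drain.c -o toy_drain
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;  /* a queued "do nothing" request           */
static bool work_done;     /* the vCPU finished servicing the request */
static bool stop;          /* tells the vCPU thread to exit           */
static int guest_ram;      /* stand-in for a page of guest memory     */

/* vCPU loop: run "blocks" and service queued work between them. */
static void *vcpu_thread(void *arg)
{
    (void)arg;
    for (;;) {
        pthread_mutex_lock(&lock);
        guest_ram++;                   /* the "TB": a write to guest RAM */
        if (work_pending) {            /* service the do_nothing item    */
            work_pending = false;
            work_done = true;
            pthread_cond_broadcast(&cond);
        }
        if (stop) {
            pthread_mutex_unlock(&lock);
            return NULL;
        }
        pthread_mutex_unlock(&lock);
    }
}

/* Migration-side drain: queue a no-op on the vCPU and wait for it. */
static void drain_vcpu(void)
{
    pthread_mutex_lock(&lock);
    work_pending = true;
    work_done = false;
    while (!work_done) {
        pthread_cond_wait(&cond, &lock);
    }
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t vcpu;

    pthread_create(&vcpu, NULL, vcpu_thread, NULL);

    /* "Migration" side: after the drain, the write made by the block that
     * was in flight when the drain began is ordered before this read. */
    drain_vcpu();

    pthread_mutex_lock(&lock);
    printf("guest_ram after drain: %d\n", guest_ram);
    stop = true;
    pthread_mutex_unlock(&lock);

    pthread_join(vcpu, NULL);
    return 0;
}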