diff options
-rw-r--r-- | cpu-exec.c              | 14
-rw-r--r-- | cpus.c                  |  5
-rw-r--r-- | include/exec/exec-all.h |  1
3 files changed, 9 insertions, 11 deletions
diff --git a/cpu-exec.c b/cpu-exec.c index 713540fc8f..5153f1b632 100644 --- a/cpu-exec.c +++ b/cpu-exec.c @@ -342,6 +342,7 @@ static void cpu_handle_debug_exception(CPUState *cpu) /* main execution loop */ volatile sig_atomic_t exit_request; +CPUState *tcg_current_cpu; int cpu_exec(CPUState *cpu) { @@ -368,15 +369,7 @@ int cpu_exec(CPUState *cpu) } current_cpu = cpu; - - /* As long as current_cpu is null, up to the assignment just above, - * requests by other threads to exit the execution loop are expected to - * be issued using the exit_request global. We must make sure that our - * evaluation of the global value is performed past the current_cpu - * value transition point, which requires a memory barrier as well as - * an instruction scheduling constraint on modern architectures. */ - smp_mb(); - + atomic_mb_set(&tcg_current_cpu, cpu); rcu_read_lock(); if (unlikely(exit_request)) { @@ -579,5 +572,8 @@ int cpu_exec(CPUState *cpu) /* fail safe : never use current_cpu outside cpu_exec() */ current_cpu = NULL; + + /* Does not need atomic_mb_set because a spurious wakeup is okay. */ + atomic_set(&tcg_current_cpu, NULL); return ret; } @@ -663,8 +663,9 @@ static void cpu_handle_guest_debug(CPUState *cpu) static void cpu_signal(int sig) { - if (current_cpu) { - cpu_exit(current_cpu); + CPUState *cpu = atomic_mb_read(&tcg_current_cpu); + if (cpu) { + cpu_exit(cpu); } exit_request = 1; } diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 83b925172f..d5dd48f759 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -387,6 +387,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); extern int singlestep; /* cpu-exec.c */ +extern CPUState *tcg_current_cpu; extern volatile sig_atomic_t exit_request; #if !defined(CONFIG_USER_ONLY) |