author     Paolo Bonzini <pbonzini@redhat.com>      2016-08-31 16:56:04 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>      2016-09-27 11:57:30 +0200
commit     ab129972c8b41e15b0521895a46fd9c752b68a5e
tree       2333219f1b6366ad23aa0b1e3ffa9c9ee2d5c0fe
parent     0e55539c076a61b0b10a1aea1158fc20fb159d99
cpus-common: move exclusive work infrastructure from linux-user
This will serve as the base for async_safe_run_on_cpu. Because
start_exclusive uses CPU_FOREACH, merge exclusive_lock with
qemu_cpu_list_lock: together with a call to exclusive_idle (via
cpu_exec_start/end) in cpu_list_add, this protects exclusive work
against concurrent CPU addition and removal.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
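[Editorial note: the protocol this patch centralizes fits in one sentence: pending_cpus is 0 when no exclusive work is pending, 1 once the exclusive section may run, and 1 + N while the requester is still waiting for N CPUs to leave cpu_exec. Below is a minimal standalone model of that protocol, not QEMU code: plain pthreads stand in for QemuMutex/QemuCond, a fixed flag array stands in for CPU_FOREACH over the CPU list, and qemu_cpu_kick is omitted, so these model vCPUs only notice an exclusive request at their next cpu_exec_start/cpu_exec_end.]

```c
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t cpu_list_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;   /* requester sleeps here */
static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER; /* vCPUs sleep here */
static int pending_cpus;  /* 0: idle, 1: exclusive section may run, 1+N: waiting on N CPUs */
static bool running[NCPUS];

/* Wait for a pending exclusive section to finish; lock must be held.  */
static void exclusive_idle(void)
{
    while (pending_cpus) {
        pthread_cond_wait(&exclusive_resume, &cpu_list_lock);
    }
}

static void cpu_exec_start(int cpu)
{
    pthread_mutex_lock(&cpu_list_lock);
    exclusive_idle();               /* never enter guest code mid-exclusive-section */
    running[cpu] = true;
    pthread_mutex_unlock(&cpu_list_lock);
}

static void cpu_exec_end(int cpu)
{
    pthread_mutex_lock(&cpu_list_lock);
    running[cpu] = false;
    if (pending_cpus > 1 && --pending_cpus == 1) {
        pthread_cond_signal(&exclusive_cond);   /* we were the last straggler */
    }
    exclusive_idle();               /* park until the exclusive section ends */
    pthread_mutex_unlock(&cpu_list_lock);
}

/* Returns with cpu_list_lock held and every other "vCPU" stopped.  */
static void start_exclusive(void)
{
    pthread_mutex_lock(&cpu_list_lock);
    exclusive_idle();               /* one exclusive section at a time */
    pending_cpus = 1;
    for (int i = 0; i < NCPUS; i++) {
        if (running[i]) {
            pending_cpus++;         /* one more CPU to wait for */
        }
    }
    while (pending_cpus > 1) {
        pthread_cond_wait(&exclusive_cond, &cpu_list_lock);
    }
}

static void end_exclusive(void)
{
    pending_cpus = 0;
    pthread_cond_broadcast(&exclusive_resume);  /* wake all sleeping vCPUs */
    pthread_mutex_unlock(&cpu_list_lock);
}

static void *vcpu_thread(void *arg)
{
    int cpu = (int)(intptr_t)arg;
    for (int i = 0; i < 100000; i++) {
        cpu_exec_start(cpu);
        /* guest code would execute here */
        cpu_exec_end(cpu);
    }
    return NULL;
}

int main(void)
{
    pthread_t th[NCPUS];
    for (int i = 0; i < NCPUS; i++) {
        pthread_create(&th[i], NULL, vcpu_thread, (void *)(intptr_t)i);
    }
    for (int i = 0; i < 10; i++) {
        start_exclusive();
        printf("exclusive section %d: no vCPU is in its run loop\n", i);
        end_exclusive();
    }
    for (int i = 0; i < NCPUS; i++) {
        pthread_join(th[i], NULL);
    }
    return 0;
}
```

Note how cpu_exec_end plays both roles, exactly as in the patch below: it signals exclusive_cond when it is the last CPU the requester is waiting for, then parks in exclusive_idle until end_exclusive broadcasts exclusive_resume.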
-rw-r--r--  bsd-user/main.c    |  17
-rw-r--r--  cpus-common.c      |  82
-rw-r--r--  cpus.c             |   2
-rw-r--r--  include/qom/cpu.h  |  44
-rw-r--r--  linux-user/main.c  |  87

5 files changed, 127 insertions(+), 105 deletions(-)
diff --git a/bsd-user/main.c b/bsd-user/main.c
index 6dfa91230f..35125b720f 100644
--- a/bsd-user/main.c
+++ b/bsd-user/main.c
@@ -67,23 +67,6 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 }
 #endif

-/* These are no-ops because we are not threadsafe.  */
-static inline void cpu_exec_start(CPUState *cpu)
-{
-}
-
-static inline void cpu_exec_end(CPUState *cpu)
-{
-}
-
-static inline void start_exclusive(void)
-{
-}
-
-static inline void end_exclusive(void)
-{
-}
-
 void fork_start(void)
 {
 }
diff --git a/cpus-common.c b/cpus-common.c
index d6cd426235..7d935fd3dd 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -23,11 +23,21 @@
 #include "sysemu/cpus.h"

 static QemuMutex qemu_cpu_list_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
 static QemuCond qemu_work_cond;

+static int pending_cpus;
+
 void qemu_init_cpu_list(void)
 {
+    /* This is needed because qemu_init_cpu_list is also called by the
+     * child process in a fork.  */
+    pending_cpus = 0;
+
     qemu_mutex_init(&qemu_cpu_list_lock);
+    qemu_cond_init(&exclusive_cond);
+    qemu_cond_init(&exclusive_resume);
     qemu_cond_init(&qemu_work_cond);
 }

@@ -55,6 +65,12 @@ static int cpu_get_free_index(void)
     return cpu_index;
 }

+static void finish_safe_work(CPUState *cpu)
+{
+    cpu_exec_start(cpu);
+    cpu_exec_end(cpu);
+}
+
 void cpu_list_add(CPUState *cpu)
 {
     qemu_mutex_lock(&qemu_cpu_list_lock);
@@ -66,6 +82,8 @@ void cpu_list_add(CPUState *cpu)
     }
     QTAILQ_INSERT_TAIL(&cpus, cpu, node);
     qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+    finish_safe_work(cpu);
 }

 void cpu_list_remove(CPUState *cpu)
@@ -148,6 +166,70 @@ void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
     queue_work_on_cpu(cpu, wi);
 }

+/* Wait for pending exclusive operations to complete.  The CPU list lock
+   must be held.  */
+static inline void exclusive_idle(void)
+{
+    while (pending_cpus) {
+        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
+    }
+}
+
+/* Start an exclusive operation.
+   Must only be called from outside cpu_exec, takes
+   qemu_cpu_list_lock.  */
+void start_exclusive(void)
+{
+    CPUState *other_cpu;
+
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+    exclusive_idle();
+
+    /* Make all other cpus stop executing.  */
+    pending_cpus = 1;
+    CPU_FOREACH(other_cpu) {
+        if (other_cpu->running) {
+            pending_cpus++;
+            qemu_cpu_kick(other_cpu);
+        }
+    }
+    while (pending_cpus > 1) {
+        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
+    }
+}
+
+/* Finish an exclusive operation.  Releases qemu_cpu_list_lock.  */
+void end_exclusive(void)
+{
+    pending_cpus = 0;
+    qemu_cond_broadcast(&exclusive_resume);
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
+/* Wait for exclusive ops to finish, and begin cpu execution.  */
+void cpu_exec_start(CPUState *cpu)
+{
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+    exclusive_idle();
+    cpu->running = true;
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
+/* Mark cpu as not executing, and release pending exclusive ops.  */
+void cpu_exec_end(CPUState *cpu)
+{
+    qemu_mutex_lock(&qemu_cpu_list_lock);
+    cpu->running = false;
+    if (pending_cpus > 1) {
+        pending_cpus--;
+        if (pending_cpus == 1) {
+            qemu_cond_signal(&exclusive_cond);
+        }
+    }
+    exclusive_idle();
+    qemu_mutex_unlock(&qemu_cpu_list_lock);
+}
+
 void process_queued_cpu_work(CPUState *cpu)
 {
     struct qemu_work_item *wi;
diff --git a/cpus.c b/cpus.c
--- a/cpus.c
+++ b/cpus.c
@@ -1457,7 +1457,9 @@ static int tcg_cpu_exec(CPUState *cpu)
         cpu->icount_decr.u16.low = decr;
         cpu->icount_extra = count;
     }
+    cpu_exec_start(cpu);
     ret = cpu_exec(cpu);
+    cpu_exec_end(cpu);
 #ifdef CONFIG_PROFILER
     tcg_time += profile_getclock() - ti;
 #endif
diff --git a/include/qom/cpu.h b/include/qom/cpu.h
index c04e510ef1..f8726140f6 100644
--- a/include/qom/cpu.h
+++ b/include/qom/cpu.h
@@ -242,7 +242,8 @@ struct qemu_work_item;
  * @nr_threads: Number of threads within this CPU.
  * @numa_node: NUMA node this CPU is belonging to.
  * @host_tid: Host thread ID.
- * @running: #true if CPU is currently running (usermode).
+ * @running: #true if CPU is currently running;
+ *           valid under cpu_list_lock.
  * @created: Indicates whether the CPU thread has been successfully created.
  * @interrupt_request: Indicates a pending interrupt request.
  * @halted: Nonzero if the CPU is in suspended state.
@@ -819,6 +820,47 @@ void cpu_remove_sync(CPUState *cpu);
 void process_queued_cpu_work(CPUState *cpu);

 /**
+ * cpu_exec_start:
+ * @cpu: The CPU for the current thread.
+ *
+ * Record that a CPU has started execution and can be interrupted with
+ * cpu_exit.
+ */
+void cpu_exec_start(CPUState *cpu);
+
+/**
+ * cpu_exec_end:
+ * @cpu: The CPU for the current thread.
+ *
+ * Record that a CPU has stopped execution and exclusive sections
+ * can be executed without interrupting it.
+ */
+void cpu_exec_end(CPUState *cpu);
+
+/**
+ * start_exclusive:
+ *
+ * Wait for a concurrent exclusive section to end, and then start
+ * a section of work that is run while other CPUs are not running
+ * between cpu_exec_start and cpu_exec_end.  CPUs that are running
+ * cpu_exec are exited immediately.  CPUs that call cpu_exec_start
+ * during the exclusive section go to sleep until this CPU calls
+ * end_exclusive.
+ *
+ * Returns with the CPU list lock taken (which nests outside all
+ * other locks except the BQL).
+ */
+void start_exclusive(void);
+
+/**
+ * end_exclusive:
+ *
+ * Concludes an exclusive execution section started by start_exclusive.
+ * Releases the CPU list lock.
+ */
+void end_exclusive(void);
+
+/**
  * qemu_init_vcpu:
  * @cpu: The vCPU to initialize.
  *
diff --git a/linux-user/main.c b/linux-user/main.c
index e3eca40a75..c8f8573614 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -107,28 +107,11 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 /***********************************************************/
 /* Helper routines for implementing atomic operations.  */

-/* To implement exclusive operations we force all cpus to syncronise.
-   We don't require a full sync, only that no cpus are executing guest code.
-   The alternative is to map target atomic ops onto host equivalents,
-   which requires quite a lot of per host/target work.  */
-static QemuMutex exclusive_lock;
-static QemuCond exclusive_cond;
-static QemuCond exclusive_resume;
-static int pending_cpus;
-
-void qemu_init_cpu_loop(void)
-{
-    qemu_mutex_init(&exclusive_lock);
-    qemu_cond_init(&exclusive_cond);
-    qemu_cond_init(&exclusive_resume);
-}
-
 /* Make sure everything is in a consistent state for calling fork().  */
 void fork_start(void)
 {
     cpu_list_lock();
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
-    qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }

@@ -144,84 +127,15 @@ void fork_end(int child)
                 QTAILQ_REMOVE(&cpus, cpu, node);
             }
         }
-        pending_cpus = 0;
-        qemu_mutex_init(&exclusive_lock);
-        qemu_cond_init(&exclusive_cond);
-        qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         qemu_init_cpu_list();
         gdbserver_fork(thread_cpu);
     } else {
-        qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
         cpu_list_unlock();
     }
 }

-/* Wait for pending exclusive operations to complete.  The exclusive lock
-   must be held.  */
-static inline void exclusive_idle(void)
-{
-    while (pending_cpus) {
-        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
-    }
-}
-
-/* Start an exclusive operation.
-   Must only be called from outside cpu_exec.  */
-static inline void start_exclusive(void)
-{
-    CPUState *other_cpu;
-
-    qemu_mutex_lock(&exclusive_lock);
-    exclusive_idle();
-
-    pending_cpus = 1;
-    /* Make all other cpus stop executing.  */
-    CPU_FOREACH(other_cpu) {
-        if (other_cpu->running) {
-            pending_cpus++;
-            cpu_exit(other_cpu);
-        }
-    }
-    while (pending_cpus > 1) {
-        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
-    }
-}
-
-/* Finish an exclusive operation.  */
-static inline void __attribute__((unused)) end_exclusive(void)
-{
-    pending_cpus = 0;
-    qemu_cond_broadcast(&exclusive_resume);
-    qemu_mutex_unlock(&exclusive_lock);
-}
-
-/* Wait for exclusive ops to finish, and begin cpu execution.  */
-static inline void cpu_exec_start(CPUState *cpu)
-{
-    qemu_mutex_lock(&exclusive_lock);
-    exclusive_idle();
-    cpu->running = true;
-    qemu_mutex_unlock(&exclusive_lock);
-}
-
-/* Mark cpu as not executing, and release pending exclusive ops.  */
-static inline void cpu_exec_end(CPUState *cpu)
-{
-    qemu_mutex_lock(&exclusive_lock);
-    cpu->running = false;
-    if (pending_cpus > 1) {
-        pending_cpus--;
-        if (pending_cpus == 1) {
-            qemu_cond_signal(&exclusive_cond);
-        }
-    }
-    exclusive_idle();
-    qemu_mutex_unlock(&exclusive_lock);
-}
-
-
 #ifdef TARGET_I386
 /***********************************************************/
 /* CPUX86 core interface */
@@ -4245,7 +4159,6 @@ int main(int argc, char **argv, char **envp)
     int execfd;

     qemu_init_cpu_list();
-    qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);

     if ((envlist = envlist_create()) == NULL) {
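[Editorial note: for a sense of how the new entry points are meant to be used, here is a hypothetical caller modeled on the linux-user atomic-op helpers this infrastructure was extracted from: a guest compare-and-swap emulated by stopping every other vCPU instead of mapping it onto a host atomic operation. emulated_cas, its signature, and its arguments are invented for illustration; only start_exclusive/end_exclusive come from this patch.]

```c
#include <stdint.h>

/* Hypothetical helper; not part of this patch or of QEMU.  It relies only
 * on the start_exclusive()/end_exclusive() declarations that this patch
 * adds to include/qom/cpu.h.  */
static uint32_t emulated_cas(uint32_t *addr, uint32_t expected, uint32_t desired)
{
    uint32_t old;

    start_exclusive();      /* returns with every other vCPU outside cpu_exec */
    old = *addr;
    if (old == expected) {
        *addr = desired;    /* no vCPU can race with this store */
    }
    end_exclusive();        /* drop the CPU list lock and resume vCPUs */
    return old;
}
```

The other half of the contract is visible in the cpus.c hunk above: every run of guest code is bracketed by cpu_exec_start and cpu_exec_end, which is exactly what start_exclusive waits on.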