author     Peter Maydell <peter.maydell@linaro.org>    2015-09-14 16:13:16 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2015-09-14 16:13:16 +0100
commit     a2aa09e18186801931763fbd40a751fa39971b18 (patch)
tree       550e6dc13cae6ef93986d69f7fb1d62620f07709 /cpus.c
parent     7e4804dafd4689312ef1172b549927a973bb5414 (diff)
parent     47d4be12c3997343e436c6cca89aefbbbeb70863 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* Support for jemalloc
* qemu_mutex_lock_iothread "No such process" fix
* cutils: qemu_strto* wrappers
* iohandler.c simplification
* Many other fixes and misc patches.

And some MTTCG work (with Emilio's fixes squashed):
* Signal-free TCG kick
* Removing spinlock in favor of QemuMutex
* User-mode emulation multi-threading fixes/docs

# gpg: Signature made Thu 10 Sep 2015 09:03:07 BST using RSA key ID 78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>"
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>"

* remotes/bonzini/tags/for-upstream: (44 commits)
  cutils: work around platform differences in strto{l,ul,ll,ull}
  cpu-exec: fix lock hierarchy for user-mode emulation
  exec: make mmap_lock/mmap_unlock globally available
  tcg: comment on which functions have to be called with mmap_lock held
  tcg: add memory barriers in page_find_alloc accesses
  remove unused spinlock.
  replace spinlock by QemuMutex.
  cpus: remove tcg_halt_cond and tcg_cpu_thread globals
  cpus: protect work list with work_mutex
  scripts/dump-guest-memory.py: fix after RAMBlock change
  configure: Add support for jemalloc
  add macro file for coccinelle
  configure: factor out adding disas configure
  vhost-scsi: fix wrong vhost-scsi firmware path
  checkpatch: remove tests that are not relevant outside the kernel
  checkpatch: adapt some tests to QEMU CODING_STYLE
  CODING_STYLE: update mixed declaration rules
  qmp: Add example usage of strto*l() qemu wrapper
  cutils: Add qemu_strtoull() wrapper
  cutils: Add qemu_strtoll() wrapper
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'cpus.c')
 -rw-r--r--  cpus.c | 130
 1 file changed, 51 insertions(+), 79 deletions(-)
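Before the diff itself, here is a rough standalone sketch of the work-list locking pattern that the run_on_cpu()/flush_queued_work() hunks below introduce ("cpus: protect work list with work_mutex"): the queue is only touched while a per-CPU mutex is held, the lock is dropped while each callback runs, and the "done" flag is published with a full barrier so the waiting thread never sees a stale value. The names (work_item, toy_cpu, queue_work, drain_work) are illustrative only, and plain pthreads/C11 atomics stand in for QEMU's qemu_mutex_* and atomic_mb_* wrappers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct work_item {
    void (*func)(void *data);
    void *data;
    atomic_bool done;
    struct work_item *next;
};

struct toy_cpu {
    pthread_mutex_t work_mutex;
    struct work_item *first, *last;
};

static void queue_work(struct toy_cpu *cpu, struct work_item *wi)
{
    pthread_mutex_lock(&cpu->work_mutex);
    wi->next = NULL;
    atomic_store(&wi->done, false);
    if (cpu->first == NULL) {
        cpu->first = wi;
    } else {
        cpu->last->next = wi;
    }
    cpu->last = wi;
    pthread_mutex_unlock(&cpu->work_mutex);
    /* the caller would now kick the vCPU thread and wait for wi->done */
}

static void drain_work(struct toy_cpu *cpu)
{
    pthread_mutex_lock(&cpu->work_mutex);
    while (cpu->first != NULL) {
        struct work_item *wi = cpu->first;
        cpu->first = wi->next;
        if (cpu->first == NULL) {
            cpu->last = NULL;
        }
        /* drop the lock while running the callback, as the patch does */
        pthread_mutex_unlock(&cpu->work_mutex);
        wi->func(wi->data);
        pthread_mutex_lock(&cpu->work_mutex);
        /* publish completion; the real patch instead frees items marked free */
        atomic_store(&wi->done, true);
    }
    pthread_mutex_unlock(&cpu->work_mutex);
}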
diff --git a/cpus.c b/cpus.c
index c1e74d9824..dddd056454 100644
--- a/cpus.c
+++ b/cpus.c
@@ -661,14 +661,6 @@ static void cpu_handle_guest_debug(CPUState *cpu)
cpu->stopped = true;
}
-static void cpu_signal(int sig)
-{
- if (current_cpu) {
- cpu_exit(current_cpu);
- }
- exit_request = 1;
-}
-
#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
@@ -781,29 +773,11 @@ static void qemu_kvm_init_cpu_signals(CPUState *cpu)
}
}
-static void qemu_tcg_init_cpu_signals(void)
-{
- sigset_t set;
- struct sigaction sigact;
-
- memset(&sigact, 0, sizeof(sigact));
- sigact.sa_handler = cpu_signal;
- sigaction(SIG_IPI, &sigact, NULL);
-
- sigemptyset(&set);
- sigaddset(&set, SIG_IPI);
- pthread_sigmask(SIG_UNBLOCK, &set, NULL);
-}
-
#else /* _WIN32 */
static void qemu_kvm_init_cpu_signals(CPUState *cpu)
{
abort();
}
-
-static void qemu_tcg_init_cpu_signals(void)
-{
-}
#endif /* _WIN32 */
static QemuMutex qemu_global_mutex;
@@ -812,9 +786,6 @@ static unsigned iothread_requesting_mutex;
static QemuThread io_thread;
-static QemuThread *tcg_cpu_thread;
-static QemuCond *tcg_halt_cond;
-
/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
@@ -845,6 +816,8 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
wi.func = func;
wi.data = data;
wi.free = false;
+
+ qemu_mutex_lock(&cpu->work_mutex);
if (cpu->queued_work_first == NULL) {
cpu->queued_work_first = &wi;
} else {
@@ -853,9 +826,10 @@ void run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
cpu->queued_work_last = &wi;
wi.next = NULL;
wi.done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cpu_kick(cpu);
- while (!wi.done) {
+ while (!atomic_mb_read(&wi.done)) {
CPUState *self_cpu = current_cpu;
qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
@@ -876,6 +850,8 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
wi->func = func;
wi->data = data;
wi->free = true;
+
+ qemu_mutex_lock(&cpu->work_mutex);
if (cpu->queued_work_first == NULL) {
cpu->queued_work_first = wi;
} else {
@@ -884,6 +860,7 @@ void async_run_on_cpu(CPUState *cpu, void (*func)(void *data), void *data)
cpu->queued_work_last = wi;
wi->next = NULL;
wi->done = false;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cpu_kick(cpu);
}
@@ -896,15 +873,23 @@ static void flush_queued_work(CPUState *cpu)
return;
}
- while ((wi = cpu->queued_work_first)) {
+ qemu_mutex_lock(&cpu->work_mutex);
+ while (cpu->queued_work_first != NULL) {
+ wi = cpu->queued_work_first;
cpu->queued_work_first = wi->next;
+ if (!cpu->queued_work_first) {
+ cpu->queued_work_last = NULL;
+ }
+ qemu_mutex_unlock(&cpu->work_mutex);
wi->func(wi->data);
- wi->done = true;
+ qemu_mutex_lock(&cpu->work_mutex);
if (wi->free) {
g_free(wi);
+ } else {
+ atomic_mb_set(&wi->done, true);
}
}
- cpu->queued_work_last = NULL;
+ qemu_mutex_unlock(&cpu->work_mutex);
qemu_cond_broadcast(&qemu_work_cond);
}
@@ -919,15 +904,13 @@ static void qemu_wait_io_event_common(CPUState *cpu)
cpu->thread_kicked = false;
}
-static void qemu_tcg_wait_io_event(void)
+static void qemu_tcg_wait_io_event(CPUState *cpu)
{
- CPUState *cpu;
-
while (all_cpu_threads_idle()) {
/* Start accounting real time to the virtual clock if the CPUs
are idle. */
qemu_clock_warp(QEMU_CLOCK_VIRTUAL);
- qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
}
while (iothread_requesting_mutex) {
@@ -1041,7 +1024,6 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
rcu_register_thread();
qemu_mutex_lock_iothread();
- qemu_tcg_init_cpu_signals();
qemu_thread_get_self(cpu->thread);
CPU_FOREACH(cpu) {
@@ -1053,7 +1035,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
/* wait for initial kick-off after machine start */
while (first_cpu->stopped) {
- qemu_cond_wait(tcg_halt_cond, &qemu_global_mutex);
+ qemu_cond_wait(first_cpu->halt_cond, &qemu_global_mutex);
/* process any pending work */
CPU_FOREACH(cpu) {
@@ -1062,7 +1044,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
}
/* process any pending work */
- exit_request = 1;
+ atomic_mb_set(&exit_request, 1);
while (1) {
tcg_exec_all();
@@ -1074,7 +1056,7 @@ static void *qemu_tcg_cpu_thread_fn(void *arg)
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
}
}
- qemu_tcg_wait_io_event();
+ qemu_tcg_wait_io_event(QTAILQ_FIRST(&cpus));
}
return NULL;
@@ -1085,61 +1067,47 @@ static void qemu_cpu_kick_thread(CPUState *cpu)
#ifndef _WIN32
int err;
+ if (cpu->thread_kicked) {
+ return;
+ }
+ cpu->thread_kicked = true;
err = pthread_kill(cpu->thread->thread, SIG_IPI);
if (err) {
fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
exit(1);
}
#else /* _WIN32 */
- if (!qemu_cpu_is_self(cpu)) {
- CONTEXT tcgContext;
-
- if (SuspendThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
-
- /* On multi-core systems, we are not sure that the thread is actually
- * suspended until we can get the context.
- */
- tcgContext.ContextFlags = CONTEXT_CONTROL;
- while (GetThreadContext(cpu->hThread, &tcgContext) != 0) {
- continue;
- }
-
- cpu_signal(0);
+ abort();
+#endif
+}
- if (ResumeThread(cpu->hThread) == (DWORD)-1) {
- fprintf(stderr, "qemu:%s: GetLastError:%lu\n", __func__,
- GetLastError());
- exit(1);
- }
+static void qemu_cpu_kick_no_halt(void)
+{
+ CPUState *cpu;
+ /* Ensure whatever caused the exit has reached the CPU threads before
+ * writing exit_request.
+ */
+ atomic_mb_set(&exit_request, 1);
+ cpu = atomic_mb_read(&tcg_current_cpu);
+ if (cpu) {
+ cpu_exit(cpu);
}
-#endif
}
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
- if (!tcg_enabled() && !cpu->thread_kicked) {
+ if (tcg_enabled()) {
+ qemu_cpu_kick_no_halt();
+ } else {
qemu_cpu_kick_thread(cpu);
- cpu->thread_kicked = true;
}
}
void qemu_cpu_kick_self(void)
{
-#ifndef _WIN32
assert(current_cpu);
-
- if (!current_cpu->thread_kicked) {
- qemu_cpu_kick_thread(current_cpu);
- current_cpu->thread_kicked = true;
- }
-#else
- abort();
-#endif
+ qemu_cpu_kick_thread(current_cpu);
}
bool qemu_cpu_is_self(CPUState *cpu)
@@ -1166,12 +1134,12 @@ void qemu_mutex_lock_iothread(void)
* TCG code execution.
*/
if (!tcg_enabled() || qemu_in_vcpu_thread() ||
- !first_cpu || !first_cpu->thread) {
+ !first_cpu || !first_cpu->created) {
qemu_mutex_lock(&qemu_global_mutex);
atomic_dec(&iothread_requesting_mutex);
} else {
if (qemu_mutex_trylock(&qemu_global_mutex)) {
- qemu_cpu_kick_thread(first_cpu);
+ qemu_cpu_kick_no_halt();
qemu_mutex_lock(&qemu_global_mutex);
}
atomic_dec(&iothread_requesting_mutex);
@@ -1251,6 +1219,8 @@ void resume_all_vcpus(void)
static void qemu_tcg_init_vcpu(CPUState *cpu)
{
char thread_name[VCPU_THREAD_NAME_SIZE];
+ static QemuCond *tcg_halt_cond;
+ static QemuThread *tcg_cpu_thread;
tcg_cpu_address_space_init(cpu, cpu->as);
@@ -1440,7 +1410,9 @@ static void tcg_exec_all(void)
break;
}
}
- exit_request = 0;
+
+ /* Pairs with smp_wmb in qemu_cpu_kick. */
+ atomic_mb_set(&exit_request, 0);
}
void list_cpus(FILE *f, fprintf_function cpu_fprintf, const char *optarg)
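For reference, here is a rough standalone sketch of the signal-free kick pattern the diff above introduces (qemu_cpu_kick_no_halt() plus the atomic exit_request handling in tcg_exec_all()): instead of delivering SIG_IPI to the TCG thread, the I/O thread sets an atomic exit flag and pokes the currently executing CPU, which polls the flags between translation blocks. The names request_exit, exec_loop and current_vcpu are illustrative only, and plain C11 atomics stand in for QEMU's atomic_mb_read()/atomic_mb_set().

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct vcpu {
    atomic_bool exit_flag;               /* per-CPU "please stop" flag */
};

static atomic_int exit_request;          /* global "leave the exec loop" flag */
static _Atomic(struct vcpu *) current_vcpu;  /* set by the execution loop */

/* I/O thread side: no signals, just flags plus a barriered pointer read. */
static void request_exit(void)
{
    /* make prior writes visible before the flag, then read the running CPU */
    atomic_store(&exit_request, 1);
    struct vcpu *cpu = atomic_load(&current_vcpu);
    if (cpu) {
        atomic_store(&cpu->exit_flag, true);   /* stands in for cpu_exit() */
    }
}

/* vCPU thread side: check the flags at translation-block boundaries. */
static void exec_loop(struct vcpu *cpu)
{
    atomic_store(&current_vcpu, cpu);
    while (!atomic_load(&cpu->exit_flag) && !atomic_load(&exit_request)) {
        /* ... execute one translation block here ... */
        break;  /* placeholder so the sketch terminates */
    }
    atomic_store(&current_vcpu, (struct vcpu *)NULL);
    atomic_store(&exit_request, 0);      /* pairs with request_exit() */
}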