author     Sergey Fedorov <serge.fdrv@gmail.com>    2016-08-02 18:27:37 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>      2016-09-27 11:57:29 +0200
commit     959f593c0e010cc0ee2e47e7f45e66c0695683b7
tree       aed616d784deae8415b8ab971d6c86613afe64ca  /linux-user/main.c
parent     a5403c69fcf2c946d166faa27e5db8436a00d183
linux-user: Use QemuMutex and QemuCond
Convert pthread_mutex_t and pthread_cond_t to QemuMutex and QemuCond. This will allow making some locks and condition variables common between user and system mode emulation.

Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-7-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
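The change is a one-for-one mapping onto the wrappers in QEMU's qemu/thread.h; the only structural difference is that QemuMutex and QemuCond have no static initializers, which is why the patch introduces qemu_init_cpu_loop(). Below is a minimal sketch of the mapping, not part of the patch: the demo_* names are hypothetical, and it assumes it is built inside the QEMU tree where "qemu/thread.h" is available.

/* Sketch of the pthread -> QEMU thread API mapping (hypothetical names). */
#include "qemu/osdep.h"
#include "qemu/thread.h"

static QemuMutex demo_lock;  /* was: pthread_mutex_t + PTHREAD_MUTEX_INITIALIZER */
static QemuCond demo_cond;   /* was: pthread_cond_t + PTHREAD_COND_INITIALIZER */
static int demo_pending;     /* shared state guarded by demo_lock */

/* No static initializers exist, so initialize explicitly at startup
 * (this is the role qemu_init_cpu_loop() plays in the patch). */
static void demo_init(void)
{
    qemu_mutex_init(&demo_lock);   /* was: pthread_mutex_init(&m, NULL) */
    qemu_cond_init(&demo_cond);    /* was: pthread_cond_init(&c, NULL) */
}

static void demo_wait_idle(void)
{
    qemu_mutex_lock(&demo_lock);               /* was: pthread_mutex_lock() */
    while (demo_pending) {                     /* re-check: wakeups may be spurious */
        qemu_cond_wait(&demo_cond, &demo_lock);  /* was: pthread_cond_wait() */
    }
    qemu_mutex_unlock(&demo_lock);             /* was: pthread_mutex_unlock() */
}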
Diffstat (limited to 'linux-user/main.c')
-rw-r--r--  linux-user/main.c | 55 ++++++++++++++++++++++++++++++++-----------------------
1 file changed, 32 insertions(+), 23 deletions(-)
diff --git a/linux-user/main.c b/linux-user/main.c
index 8daebe0767..7a056fcfd6 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -111,17 +111,25 @@ int cpu_get_pic_interrupt(CPUX86State *env)
    We don't require a full sync, only that no cpus are executing guest code.
    The alternative is to map target atomic ops onto host equivalents,
    which requires quite a lot of per host/target work. */
-static pthread_mutex_t cpu_list_mutex = PTHREAD_MUTEX_INITIALIZER;
-static pthread_mutex_t exclusive_lock = PTHREAD_MUTEX_INITIALIZER;
-static pthread_cond_t exclusive_cond = PTHREAD_COND_INITIALIZER;
-static pthread_cond_t exclusive_resume = PTHREAD_COND_INITIALIZER;
+static QemuMutex cpu_list_lock;
+static QemuMutex exclusive_lock;
+static QemuCond exclusive_cond;
+static QemuCond exclusive_resume;
 static int pending_cpus;
 
+void qemu_init_cpu_loop(void)
+{
+    qemu_mutex_init(&cpu_list_lock);
+    qemu_mutex_init(&exclusive_lock);
+    qemu_cond_init(&exclusive_cond);
+    qemu_cond_init(&exclusive_resume);
+}
+
 /* Make sure everything is in a consistent state for calling fork(). */
 void fork_start(void)
 {
     qemu_mutex_lock(&tcg_ctx.tb_ctx.tb_lock);
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     mmap_fork_start();
 }
 
@@ -138,14 +146,14 @@ void fork_end(int child)
             }
         }
         pending_cpus = 0;
-        pthread_mutex_init(&exclusive_lock, NULL);
-        pthread_mutex_init(&cpu_list_mutex, NULL);
-        pthread_cond_init(&exclusive_cond, NULL);
-        pthread_cond_init(&exclusive_resume, NULL);
+        qemu_mutex_init(&exclusive_lock);
+        qemu_mutex_init(&cpu_list_lock);
+        qemu_cond_init(&exclusive_cond);
+        qemu_cond_init(&exclusive_resume);
         qemu_mutex_init(&tcg_ctx.tb_ctx.tb_lock);
         gdbserver_fork(thread_cpu);
     } else {
-        pthread_mutex_unlock(&exclusive_lock);
+        qemu_mutex_unlock(&exclusive_lock);
         qemu_mutex_unlock(&tcg_ctx.tb_ctx.tb_lock);
     }
 }
@@ -155,7 +163,7 @@ void fork_end(int child)
 static inline void exclusive_idle(void)
 {
     while (pending_cpus) {
-        pthread_cond_wait(&exclusive_resume, &exclusive_lock);
+        qemu_cond_wait(&exclusive_resume, &exclusive_lock);
     }
 }
 
@@ -165,7 +173,7 @@ static inline void start_exclusive(void)
 {
     CPUState *other_cpu;
 
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
 
     pending_cpus = 1;
@@ -176,8 +184,8 @@ static inline void start_exclusive(void)
             cpu_exit(other_cpu);
         }
     }
-    if (pending_cpus > 1) {
-        pthread_cond_wait(&exclusive_cond, &exclusive_lock);
+    while (pending_cpus > 1) {
+        qemu_cond_wait(&exclusive_cond, &exclusive_lock);
     }
 }
 
@@ -185,42 +193,42 @@ static inline void start_exclusive(void)
 static inline void __attribute__((unused)) end_exclusive(void)
 {
     pending_cpus = 0;
-    pthread_cond_broadcast(&exclusive_resume);
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_cond_broadcast(&exclusive_resume);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Wait for exclusive ops to finish, and begin cpu execution. */
 static inline void cpu_exec_start(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     exclusive_idle();
     cpu->running = true;
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 /* Mark cpu as not executing, and release pending exclusive ops. */
 static inline void cpu_exec_end(CPUState *cpu)
 {
-    pthread_mutex_lock(&exclusive_lock);
+    qemu_mutex_lock(&exclusive_lock);
     cpu->running = false;
     if (pending_cpus > 1) {
         pending_cpus--;
         if (pending_cpus == 1) {
-            pthread_cond_signal(&exclusive_cond);
+            qemu_cond_signal(&exclusive_cond);
         }
     }
     exclusive_idle();
-    pthread_mutex_unlock(&exclusive_lock);
+    qemu_mutex_unlock(&exclusive_lock);
 }
 
 void cpu_list_lock(void)
 {
-    pthread_mutex_lock(&cpu_list_mutex);
+    qemu_mutex_lock(&cpu_list_lock);
 }
 
 void cpu_list_unlock(void)
 {
-    pthread_mutex_unlock(&cpu_list_mutex);
+    qemu_mutex_unlock(&cpu_list_lock);
 }
@@ -4211,6 +4219,7 @@ int main(int argc, char **argv, char **envp)
     int ret;
     int execfd;
 
+    qemu_init_cpu_loop();
     module_call_init(MODULE_INIT_QOM);
 
     if ((envlist = envlist_create()) == NULL) {
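Note on the start_exclusive() hunk: it is the one change that is not a pure rename. The if around the condition-variable wait becomes a while, because a wait (qemu_cond_wait, like pthread_cond_wait) may return spuriously or after the state has changed again, so the predicate must be re-checked. The snippet below restates that generic pattern using the patch's names; it is a sketch, not the patch's exact control flow (in the patch, start_exclusive() keeps holding exclusive_lock and end_exclusive() releases it later).

/* Canonical condition-variable wait loop: re-check the predicate after
 * every wakeup.  qemu_cond_wait() atomically unlocks, waits, and relocks. */
qemu_mutex_lock(&exclusive_lock);
while (pending_cpus > 1) {
    qemu_cond_wait(&exclusive_cond, &exclusive_lock);
}
/* here: pending_cpus <= 1 is guaranteed while exclusive_lock is held */
qemu_mutex_unlock(&exclusive_lock);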