author     Juan Quintela <quintela@redhat.com>       2009-07-27 16:13:24 +0200
committer  Anthony Liguori <aliguori@us.ibm.com>     2009-07-27 14:10:55 -0500
commit     2f7bb8780af4a007e90045b4cc97f558e956adf9 (patch)
tree       464d805fa93ac6fdad7b41978c2a67183e2a1f15
parent     75b5a697ed5dbfd9bef2eeddf82032ecfeb169f8 (diff)
rename USE_NPTL to CONFIG_USE_NPTL
Signed-off-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Anthony Liguori <aliguori@us.ibm.com>
-rw-r--r--  bsd-user/mmap.c       |  2
-rw-r--r--  bsd-user/qemu.h       |  6
-rwxr-xr-x  configure             |  2
-rwxr-xr-x  create_config         |  4
-rw-r--r--  exec.c                |  2
-rw-r--r--  gdbstub.c             |  2
-rw-r--r--  linux-user/main.c     |  6
-rw-r--r--  linux-user/mmap.c     |  2
-rw-r--r--  linux-user/qemu.h     |  8
-rw-r--r--  linux-user/syscall.c  | 20
-rw-r--r--  qemu-lock.h           |  2
11 files changed, 26 insertions, 30 deletions
diff --git a/bsd-user/mmap.c b/bsd-user/mmap.c
index 26f981ab2c..ff207cd55d 100644
--- a/bsd-user/mmap.c
+++ b/bsd-user/mmap.c
@@ -30,7 +30,7 @@
//#define DEBUG_MMAP
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
pthread_mutex_t mmap_mutex;
static int __thread mmap_lock_count;
diff --git a/bsd-user/qemu.h b/bsd-user/qemu.h
index d411bbbf4c..822a214451 100644
--- a/bsd-user/qemu.h
+++ b/bsd-user/qemu.h
@@ -24,7 +24,7 @@ enum BSDType {
#include "target_signal.h"
#include "gdbstub.h"
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
#else
#define THREAD
@@ -188,7 +188,7 @@ void mmap_lock(void);
void mmap_unlock(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
void mmap_fork_start(void);
void mmap_fork_end(int child);
#endif
@@ -382,7 +382,7 @@ static inline void *lock_user_string(abi_ulong guest_addr)
#define unlock_user_struct(host_ptr, guest_addr, copy) \
unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#include <pthread.h>
#endif
diff --git a/configure b/configure
index 44f544a3f9..977acae46a 100755
--- a/configure
+++ b/configure
@@ -2056,7 +2056,7 @@ if test "$target_user_only" = "yes" -a "$bflt" = "yes"; then
fi
if test "$target_user_only" = "yes" \
-a "$nptl" = "yes" -a "$target_nptl" = "yes"; then
- echo "USE_NPTL=y" >> $config_mak
+ echo "CONFIG_USE_NPTL=y" >> $config_mak
fi
# 32 bit ELF loader in addition to native 64 bit loader?
if test "$target_user_only" = "yes" -a "$elfload32" = "yes"; then
diff --git a/create_config b/create_config
index cac0edbbb2..8b7b365d7f 100755
--- a/create_config
+++ b/create_config
@@ -46,10 +46,6 @@ case $line in
value=${line#*=}
echo "#define $name $value"
;;
- USE_NPTL=y) # configuration
- name=${line%=*}
- echo "#define $name 1"
- ;;
esac
done # read
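The USE_NPTL=y special case can be dropped because, once the option carries the CONFIG_ prefix, it is presumably handled by the script's generic case for boolean CONFIG_*=y entries. That generic case label is not visible in this hunk, so the following is a minimal sketch of the assumed pattern, not a quote from create_config:

    # Illustrative sketch only: the CONFIG_*=y) case label is an assumption;
    # it mirrors the removed USE_NPTL handling above.
    while read line; do
      case $line in
        CONFIG_*=y)              # e.g. CONFIG_USE_NPTL=y written by configure
          name=${line%=*}
          echo "#define $name 1"
          ;;
      esac
    done                         # read config entries from stdin

With a generic handler like this, guards such as #if defined(CONFIG_USE_NPTL) in the C files below resolve against the generated config header without per-option shell code.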
diff --git a/exec.c b/exec.c
index ef79d6d29e..a4a7de7570 100644
--- a/exec.c
+++ b/exec.c
@@ -1515,7 +1515,7 @@ void cpu_set_log_filename(const char *filename)
static void cpu_unlink_tb(CPUState *env)
{
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
/* FIXME: TB unchaining isn't SMP safe. For now just ignore the
problem and hope the cpu will stop of its own accord. For userspace
emulation this often isn't actually as bad as it sounds. Often
diff --git a/gdbstub.c b/gdbstub.c
index bb389710da..ff4c86c0f8 100644
--- a/gdbstub.c
+++ b/gdbstub.c
@@ -1592,7 +1592,7 @@ static void gdb_set_cpu_pc(GDBState *s, target_ulong pc)
static inline int gdb_id(CPUState *env)
{
-#if defined(CONFIG_USER_ONLY) && defined(USE_NPTL)
+#if defined(CONFIG_USER_ONLY) && defined(CONFIG_USE_NPTL)
return env->host_tid;
#else
return env->cpu_index + 1;
diff --git a/linux-user/main.c b/linux-user/main.c
index 4388c04c9a..20a25769f9 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -108,7 +108,7 @@ int64_t cpu_get_real_ticks(void)
#endif
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
/***********************************************************/
/* Helper routines for implementing atomic operations. */
@@ -222,7 +222,7 @@ void cpu_list_unlock(void)
{
pthread_mutex_unlock(&cpu_list_mutex);
}
-#else /* if !USE_NPTL */
+#else /* if !CONFIG_USE_NPTL */
/* These are no-ops because we are not threadsafe. */
static inline void cpu_exec_start(CPUState *env)
{
@@ -2357,7 +2357,7 @@ THREAD CPUState *thread_env;
void task_settid(TaskState *ts)
{
if (ts->ts_tid == 0) {
-#ifdef USE_NPTL
+#ifdef CONFIG_USE_NPTL
ts->ts_tid = (pid_t)syscall(SYS_gettid);
#else
/* when no threads are used, tid becomes pid */
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index e05caa0a11..3d2c8b3073 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -33,7 +33,7 @@
//#define DEBUG_MMAP
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
static int __thread mmap_lock_count;
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 8e728a3d28..f0e410b261 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -20,7 +20,7 @@
#include "gdbstub.h"
#include "sys-queue.h"
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#define THREAD __thread
#else
#define THREAD
@@ -104,7 +104,7 @@ typedef struct TaskState {
uint32_t v86flags;
uint32_t v86mask;
#endif
-#ifdef USE_NPTL
+#ifdef CONFIG_USE_NPTL
abi_ulong child_tidptr;
#endif
#ifdef TARGET_M68K
@@ -244,7 +244,7 @@ void mmap_unlock(void);
abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
void cpu_list_lock(void);
void cpu_list_unlock(void);
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
void mmap_fork_start(void);
void mmap_fork_end(int child);
#endif
@@ -441,7 +441,7 @@ static inline void *lock_user_string(abi_ulong guest_addr)
#define unlock_user_struct(host_ptr, guest_addr, copy) \
unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#include <pthread.h>
#endif
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index 4b230ddfc9..b5f669e282 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -82,7 +82,7 @@
#include "qemu.h"
#include "qemu-common.h"
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
#else
@@ -219,7 +219,7 @@ _syscall1(int,exit_group,int,error_code)
#if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
_syscall1(int,set_tid_address,int *,tidptr)
#endif
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#if defined(TARGET_NR_futex) && defined(__NR_futex)
_syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
const struct timespec *,timeout,int *,uaddr2,int,val3)
@@ -3458,7 +3458,7 @@ static abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
#endif /* defined(TARGET_I386) */
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#define NEW_STACK_SIZE PTHREAD_STACK_MIN
@@ -3527,7 +3527,7 @@ static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
TaskState *ts;
uint8_t *new_stack;
CPUState *new_env;
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
unsigned int nptl_flags;
sigset_t sigmask;
#endif
@@ -3538,7 +3538,7 @@ static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
if (flags & CLONE_VM) {
TaskState *parent_ts = (TaskState *)env->opaque;
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
new_thread_info info;
pthread_attr_t attr;
#endif
@@ -3552,7 +3552,7 @@ static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
new_env->opaque = ts;
ts->bprm = parent_ts->bprm;
ts->info = parent_ts->info;
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
nptl_flags = flags;
flags &= ~CLONE_NPTL_FLAGS2;
@@ -3621,7 +3621,7 @@ static int do_fork(CPUState *env, unsigned int flags, abi_ulong newsp,
/* Child Process. */
cpu_clone_regs(env, newsp);
fork_end(1);
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
/* There is a race condition here. The parent process could
theoretically read the TID in the child process before the child
tid is set. This would require using either ptrace
@@ -4016,7 +4016,7 @@ static inline abi_long host_to_target_stat64(void *cpu_env,
}
#endif
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
/* ??? Using host futex calls even when target atomic operations
are not really atomic probably breaks things. However implementing
futexes locally would make futexes shared between multiple processes
@@ -4126,7 +4126,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
switch(num) {
case TARGET_NR_exit:
-#ifdef USE_NPTL
+#ifdef CONFIG_USE_NPTL
/* In old applications this may be used to implement _exit(2).
However in threaded applictions it is used for thread termination,
and _exit_group is used for application termination.
@@ -6828,7 +6828,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
}
break;
#endif
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
case TARGET_NR_futex:
ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
break;
diff --git a/qemu-lock.h b/qemu-lock.h
index 49e2203751..9a3e6acce8 100644
--- a/qemu-lock.h
+++ b/qemu-lock.h
@@ -23,7 +23,7 @@
likely to release it soon. In environments where you have more threads
than physical CPUs (the extreme case being a single CPU host) a spinlock
simply wastes CPU until the OS decides to preempt it. */
-#if defined(USE_NPTL)
+#if defined(CONFIG_USE_NPTL)
#include <pthread.h>
#define spin_lock pthread_mutex_lock