author     Peter Maydell <peter.maydell@linaro.org>    2013-07-16 18:45:00 +0100
committer  Riku Voipio <riku.voipio@linaro.org>        2013-07-22 21:54:52 +0300
commit     24cb36a61c663d98a53338620e88e4cd3403459a (patch)
tree       f50ee27aca89b80b87bb4f63d3a40cadd830b782 /linux-user
parent     2667e71c3d9262d756bea1473e2ea28eb2c9c070 (diff)
configure: Make NPTL non-optional
Now that all linux-user targets support building with NPTL, we can make it
mandatory. This is a good idea because:
* NPTL is no longer new and experimental; it is completely standard
* in practice, linux-user without NPTL is nearly useless for
binaries built against non-ancient glibc (see the example after this list)
* it allows us to delete the rather untested code for handling
the non-NPTL configuration
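To make the second bullet concrete: an NPTL-based glibc starts every thread through a clone() call that carries NPTL-specific flags (CLONE_SETTLS, CLONE_PARENT_SETTID, CLONE_CHILD_CLEARTID), and the non-NPTL do_fork() path deleted by this patch rejected any of the CLONE_NPTL_FLAGS2 flags with -EINVAL (see the removed hunk in syscall.c below). A minimal sketch of such a binary — an illustration added for this write-up, not part of the patch — is:

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

/* Thread body: print a message and return. */
static void *worker(void *arg)
{
    printf("hello from %s\n", (const char *)arg);
    return NULL;
}

int main(void)
{
    pthread_t tid;
    /* glibc/NPTL implements this with a clone() call carrying the flags
     * grouped as CLONE_NPTL_FLAGS2 in syscall.c; the old non-NPTL code
     * path returned -EINVAL for them, so thread creation always failed. */
    int err = pthread_create(&tid, NULL, worker, (void *)"worker-0");
    if (err != 0) {
        fprintf(stderr, "pthread_create: %s\n", strerror(err));
        return 1;
    }
    return pthread_join(tid, NULL);
}
```

Under a non-NPTL linux-user build even this trivial program could not start its thread, which is what makes such builds nearly useless for modern glibc binaries.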
Note that this patch leaves the CONFIG_USE_NPTL ifdefs in the
bsd-user codebase alone. This makes no change for bsd-user, since
our configure test for NPTL had a "#include <linux/futex.h>"
which means bsd-user would never have been compiled with
CONFIG_USE_NPTL defined, and it still is not.
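The configure probe referred to above is not quoted in the commit message; roughly, it is a small compile test along the following lines (a sketch under that assumption, not the literal code from configure). The key detail is the Linux-only <linux/futex.h> header: on the hosts bsd-user is built for it does not exist, so the probe fails and CONFIG_USE_NPTL is never defined, exactly as the paragraph above says.

```c
/* Hypothetical reconstruction of an NPTL compile probe: it can only
 * build where the Linux futex header and the NPTL clone flags exist. */
#include <sched.h>
#include <linux/futex.h>

int main(void)
{
#if !defined(CLONE_SETTLS) || !defined(FUTEX_WAIT)
#error "no NPTL support"
#endif
    return 0;
}
```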
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Riku Voipio <riku.voipio@linaro.org>
Diffstat (limited to 'linux-user')
-rw-r--r--   linux-user/main.c    | 43
-rw-r--r--   linux-user/mmap.c    | 11
-rw-r--r--   linux-user/qemu.h    | 10
-rw-r--r--   linux-user/syscall.c | 47
4 files changed, 1 insertion, 110 deletions
diff --git a/linux-user/main.c b/linux-user/main.c
index 99c3b3f5ef..5537967254 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -92,7 +92,6 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 }
 #endif
 
-#if defined(CONFIG_USE_NPTL)
 /***********************************************************/
 /* Helper routines for implementing atomic operations. */
 
@@ -207,43 +206,6 @@ void cpu_list_unlock(void)
 {
     pthread_mutex_unlock(&cpu_list_mutex);
 }
-#else /* if !CONFIG_USE_NPTL */
-/* These are no-ops because we are not threadsafe. */
-static inline void cpu_exec_start(CPUState *cpu)
-{
-}
-
-static inline void cpu_exec_end(CPUState *cpu)
-{
-}
-
-static inline void start_exclusive(void)
-{
-}
-
-static inline void end_exclusive(void)
-{
-}
-
-void fork_start(void)
-{
-}
-
-void fork_end(int child)
-{
-    if (child) {
-        gdbserver_fork((CPUArchState *)thread_cpu->env_ptr);
-    }
-}
-
-void cpu_list_lock(void)
-{
-}
-
-void cpu_list_unlock(void)
-{
-}
-#endif
 
 
 #ifdef TARGET_I386
@@ -3156,12 +3118,7 @@ THREAD CPUState *thread_cpu;
 
 void task_settid(TaskState *ts)
 {
     if (ts->ts_tid == 0) {
-#ifdef CONFIG_USE_NPTL
         ts->ts_tid = (pid_t)syscall(SYS_gettid);
-#else
-        /* when no threads are used, tid becomes pid */
-        ts->ts_tid = getpid();
-#endif
     }
 }
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index de2219768d..a249f0ceb6 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -33,7 +33,6 @@
 
 //#define DEBUG_MMAP
 
-#if defined(CONFIG_USE_NPTL)
 static pthread_mutex_t mmap_mutex = PTHREAD_MUTEX_INITIALIZER;
 static __thread int mmap_lock_count;
 
@@ -66,16 +65,6 @@ void mmap_fork_end(int child)
     else
         pthread_mutex_unlock(&mmap_mutex);
 }
-#else
-/* We aren't threadsafe to start with, so no need to worry about locking. */
-void mmap_lock(void)
-{
-}
-
-void mmap_unlock(void)
-{
-}
-#endif
 
 /* NOTE: all the constants are the HOST ones, but addresses are target. */
 int target_mprotect(abi_ulong start, abi_ulong len, int prot)
diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index 1ff0fa8b12..4a16e8fe1d 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -19,11 +19,7 @@
 #include "exec/gdbstub.h"
 #include "qemu/queue.h"
 
-#if defined(CONFIG_USE_NPTL)
 #define THREAD __thread
-#else
-#define THREAD
-#endif
 
 /* This struct is used to hold certain information about the image.
  * Basically, it replicates in user space what would be certain
@@ -116,9 +112,7 @@ typedef struct TaskState {
     uint32_t v86flags;
     uint32_t v86mask;
 #endif
-#ifdef CONFIG_USE_NPTL
     abi_ulong child_tidptr;
-#endif
 #ifdef TARGET_M68K
     int sim_syscalls;
     abi_ulong tp_value;
@@ -268,10 +262,8 @@ void mmap_unlock(void);
 abi_ulong mmap_find_vma(abi_ulong, abi_ulong);
 void cpu_list_lock(void);
 void cpu_list_unlock(void);
-#if defined(CONFIG_USE_NPTL)
 void mmap_fork_start(void);
 void mmap_fork_end(int child);
-#endif
 
 /* main.c */
 extern unsigned long guest_stack_size;
@@ -449,9 +441,7 @@ static inline void *lock_user_string(abi_ulong guest_addr)
 #define unlock_user_struct(host_ptr, guest_addr, copy) \
     unlock_user(host_ptr, guest_addr, (copy) ? sizeof(*host_ptr) : 0)
 
-#if defined(CONFIG_USE_NPTL)
 #include <pthread.h>
-#endif
 
 /* Include target-specific struct and function definitions;
  * they may need access to the target-independent structures
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index d3b3590e8d..5bd57d0a72 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -111,13 +111,8 @@ int __clone2(int (*fn)(void *), void *child_stack_base,
 
 #include "qemu.h"
 
-#if defined(CONFIG_USE_NPTL)
 #define CLONE_NPTL_FLAGS2 (CLONE_SETTLS | \
     CLONE_PARENT_SETTID | CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID)
-#else
-/* XXX: Hardcode the above values. */
-#define CLONE_NPTL_FLAGS2 0
-#endif
 
 //#define DEBUG
 
@@ -234,12 +229,10 @@ _syscall1(int,exit_group,int,error_code)
 #if defined(TARGET_NR_set_tid_address) && defined(__NR_set_tid_address)
 _syscall1(int,set_tid_address,int *,tidptr)
 #endif
-#if defined(CONFIG_USE_NPTL)
 #if defined(TARGET_NR_futex) && defined(__NR_futex)
 _syscall6(int,sys_futex,int *,uaddr,int,op,int,val,
           const struct timespec *,timeout,int *,uaddr2,int,val3)
 #endif
-#endif
 #define __NR_sys_sched_getaffinity __NR_sched_getaffinity
 _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len,
           unsigned long *, user_mask_ptr);
@@ -4227,7 +4220,6 @@ abi_long do_arch_prctl(CPUX86State *env, int code, abi_ulong addr)
 
 #define NEW_STACK_SIZE 0x40000
 
-#if defined(CONFIG_USE_NPTL)
 static pthread_mutex_t clone_lock = PTHREAD_MUTEX_INITIALIZER;
 
 typedef struct {
@@ -4272,16 +4264,6 @@ static void *clone_func(void *arg)
     /* never exits */
     return NULL;
 }
-#else
-
-static int clone_func(void *arg)
-{
-    CPUArchState *env = arg;
-    cpu_loop(env);
-    /* never exits */
-    return 0;
-}
-#endif
 
 /* do_fork() Must return host values and target errnos (unlike most
    do_*() functions). */
@@ -4292,12 +4274,8 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
     int ret;
     TaskState *ts;
     CPUArchState *new_env;
-#if defined(CONFIG_USE_NPTL)
     unsigned int nptl_flags;
     sigset_t sigmask;
-#else
-    uint8_t *new_stack;
-#endif
 
     /* Emulate vfork() with fork() */
     if (flags & CLONE_VFORK)
@@ -4305,10 +4283,9 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
 
     if (flags & CLONE_VM) {
         TaskState *parent_ts = (TaskState *)env->opaque;
-#if defined(CONFIG_USE_NPTL)
         new_thread_info info;
         pthread_attr_t attr;
-#endif
+
         ts = g_malloc0(sizeof(TaskState));
         init_task_state(ts);
         /* we create a new CPU instance. */
@@ -4321,7 +4298,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         new_env->opaque = ts;
         ts->bprm = parent_ts->bprm;
         ts->info = parent_ts->info;
-#if defined(CONFIG_USE_NPTL)
         nptl_flags = flags;
         flags &= ~CLONE_NPTL_FLAGS2;
 
@@ -4371,17 +4347,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
         pthread_cond_destroy(&info.cond);
         pthread_mutex_destroy(&info.mutex);
         pthread_mutex_unlock(&clone_lock);
-#else
-        if (flags & CLONE_NPTL_FLAGS2)
-            return -EINVAL;
-        /* This is probably going to die very quickly, but do it anyway. */
-        new_stack = g_malloc0 (NEW_STACK_SIZE);
-#ifdef __ia64__
-        ret = __clone2(clone_func, new_stack, NEW_STACK_SIZE, flags, new_env);
-#else
-        ret = clone(clone_func, new_stack + NEW_STACK_SIZE, flags, new_env);
-#endif
-#endif
     } else {
         /* if no CLONE_VM, we consider it is a fork */
         if ((flags & ~(CSIGNAL | CLONE_NPTL_FLAGS2)) != 0)
@@ -4392,7 +4357,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
             /* Child Process. */
             cpu_clone_regs(env, newsp);
             fork_end(1);
-#if defined(CONFIG_USE_NPTL)
             /* There is a race condition here.  The parent process could
                theoretically read the TID in the child process before the child
                tid is set.  This would require using either ptrace
@@ -4408,7 +4372,6 @@ static int do_fork(CPUArchState *env, unsigned int flags, abi_ulong newsp,
                 cpu_set_tls (env, newtls);
             if (flags & CLONE_CHILD_CLEARTID)
                 ts->child_tidptr = child_tidptr;
-#endif
         } else {
             fork_end(0);
         }
@@ -4834,7 +4797,6 @@ static inline abi_long host_to_target_stat64(void *cpu_env,
 }
 #endif
 
-#if defined(CONFIG_USE_NPTL)
 /* ??? Using host futex calls even when target atomic operations
    are not really atomic probably breaks things.  However implementing
    futexes locally would make futexes shared between multiple processes
@@ -4886,7 +4848,6 @@ static int do_futex(target_ulong uaddr, int op, int val, target_ulong timeout,
         return -TARGET_ENOSYS;
     }
 }
-#endif
 
 /* Map host to target signal numbers for the wait family of syscalls.
    Assume all other status bits are the same. */
@@ -5132,9 +5093,7 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
                     abi_long arg5, abi_long arg6, abi_long arg7,
                     abi_long arg8)
 {
-#ifdef CONFIG_USE_NPTL
     CPUState *cpu = ENV_GET_CPU(cpu_env);
-#endif
     abi_long ret;
     struct stat st;
     struct statfs stfs;
@@ -5148,7 +5107,6 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
 
     switch(num) {
     case TARGET_NR_exit:
-#ifdef CONFIG_USE_NPTL
         /* In old applications this may be used to implement _exit(2).
            However in threaded applictions it is used for thread termination,
            and _exit_group is used for application termination.
@@ -5186,7 +5144,6 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
             g_free(ts);
             pthread_exit(NULL);
         }
-#endif
 #ifdef TARGET_GPROF
         _mcleanup();
 #endif
@@ -8687,11 +8644,9 @@ abi_long do_syscall(void *cpu_env, int num, abi_long arg1,
         }
         break;
 #endif
-#if defined(CONFIG_USE_NPTL)
     case TARGET_NR_futex:
         ret = do_futex(arg1, arg2, arg3, arg4, arg5, arg6);
         break;
-#endif
 #if defined(TARGET_NR_inotify_init) && defined(__NR_inotify_init)
     case TARGET_NR_inotify_init:
         ret = get_errno(sys_inotify_init());
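One piece of code this change leaves unconditional is the mmap lock in linux-user/mmap.c; the diff shows its ingredients as context (a process-wide mmap_mutex plus a per-thread mmap_lock_count). The underlying pattern — only the first acquisition by a thread takes the mutex, only the outermost release drops it, so nested locking within one thread is safe — can be sketched as below; the demo_* names are illustrative, not QEMU's own:

```c
#include <pthread.h>

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;
static __thread int demo_lock_count;   /* per-thread recursion depth */

static void demo_lock(void)
{
    /* Take the mutex only on the first acquisition by this thread. */
    if (demo_lock_count++ == 0) {
        pthread_mutex_lock(&demo_mutex);
    }
}

static void demo_unlock(void)
{
    /* Drop the mutex only when the outermost caller unlocks. */
    if (--demo_lock_count == 0) {
        pthread_mutex_unlock(&demo_mutex);
    }
}
```

The mmap_fork_start()/mmap_fork_end() pair kept just above the deleted stubs takes and releases the same mutex around fork(), as the pthread_mutex_unlock(&mmap_mutex) context line in the mmap.c hunk shows.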