author    | Richard Henderson <richard.henderson@linaro.org> | 2022-03-22 17:58:38 -0700
committer | Laurent Vivier <laurent@vivier.eu>               | 2022-03-23 16:53:17 +0100
commit    | 7f4f0d9ea87097a31b36bcc52b7368efed35593c (patch)
tree      | 1aad85550daa9c21d6a163ecaca2f4c6de74a778 /linux-user/arm/cpu_loop.c
parent    | 6e05e7047c7271aa27e0cdd243a20dfd6c8cada2 (diff)
linux-user/arm: Implement __kernel_cmpxchg with host atomics
The existing implementation using start/end_exclusive
does not provide atomicity across processes.
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20220323005839.94327-3-richard.henderson@linaro.org>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
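
For illustration only (not part of the patch): the point of moving to host atomics is that a compare-and-swap executed as a real host atomic instruction stays atomic even when the target word lives in memory shared with another process, whereas start/end_exclusive only serializes the emulator's own vCPU threads. The sketch below demonstrates that property with the GCC/Clang __atomic builtins that QEMU's qatomic_cmpxchg__nocheck() is built on; names and constants here are illustrative, not taken from the patch.

```c
/*
 * Illustrative sketch only; not part of the QEMU patch.
 * Two processes increment one shared counter via compare-and-swap.
 * Because the CAS is a real host atomic operation, no increments are
 * lost even though the other writer is a separate process -- exactly
 * the guarantee the old start/end_exclusive scheme could not give.
 */
#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

enum { ITERATIONS = 100000 };

int main(void)
{
    /* One 32-bit word shared between parent and child. */
    uint32_t *val = mmap(NULL, sizeof(*val), PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (val == MAP_FAILED) {
        perror("mmap");
        return EXIT_FAILURE;
    }
    *val = 0;

    pid_t child = fork();
    for (int i = 0; i < ITERATIONS; i++) {
        uint32_t old = __atomic_load_n(val, __ATOMIC_RELAXED);
        /* Retry until our CAS wins; 'old' is refreshed on each failure. */
        while (!__atomic_compare_exchange_n(val, &old, old + 1, false,
                                            __ATOMIC_SEQ_CST,
                                            __ATOMIC_RELAXED)) {
        }
    }
    if (child != 0) {
        wait(NULL);
        printf("%u\n", *val);   /* prints 200000: no lost updates */
    }
    return 0;
}
```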
Diffstat (limited to 'linux-user/arm/cpu_loop.c')
-rw-r--r-- | linux-user/arm/cpu_loop.c | 87
1 file changed, 62 insertions, 25 deletions
diff --git a/linux-user/arm/cpu_loop.c b/linux-user/arm/cpu_loop.c
index a0e43b261c..aa2d777bf4 100644
--- a/linux-user/arm/cpu_loop.c
+++ b/linux-user/arm/cpu_loop.c
@@ -75,7 +75,67 @@
     put_user_u16(__x, (gaddr));                     \
 })
 
-/* Commpage handling -- there is no commpage for AArch64 */
+/*
+ * Similar to code in accel/tcg/user-exec.c, but outside the execution loop.
+ * Must be called with mmap_lock.
+ * We get the PC of the entry address - which is as good as anything,
+ * on a real kernel what you get depends on which mode it uses.
+ */
+static void *atomic_mmu_lookup(CPUArchState *env, uint32_t addr, int size)
+{
+    int need_flags = PAGE_READ | PAGE_WRITE_ORG | PAGE_VALID;
+    int page_flags;
+
+    /* Enforce guest required alignment.  */
+    if (unlikely(addr & (size - 1))) {
+        force_sig_fault(TARGET_SIGBUS, TARGET_BUS_ADRALN, addr);
+        return NULL;
+    }
+
+    page_flags = page_get_flags(addr);
+    if (unlikely((page_flags & need_flags) != need_flags)) {
+        force_sig_fault(TARGET_SIGSEGV,
+                        page_flags & PAGE_VALID ?
+                        TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR, addr);
+        return NULL;
+    }
+
+    return g2h(env_cpu(env), addr);
+}
+
+/*
+ * See the Linux kernel's Documentation/arm/kernel_user_helpers.rst
+ * Input:
+ * r0 = oldval
+ * r1 = newval
+ * r2 = pointer to target value
+ *
+ * Output:
+ * r0 = 0 if *ptr was changed, non-0 if no exchange happened
+ * C set if *ptr was changed, clear if no exchange happened
+ */
+static void arm_kernel_cmpxchg32_helper(CPUARMState *env)
+{
+    uint32_t oldval, newval, val, addr, cpsr, *host_addr;
+
+    oldval = env->regs[0];
+    newval = env->regs[1];
+    addr = env->regs[2];
+
+    mmap_lock();
+    host_addr = atomic_mmu_lookup(env, addr, 4);
+    if (!host_addr) {
+        mmap_unlock();
+        return;
+    }
+
+    val = qatomic_cmpxchg__nocheck(host_addr, oldval, newval);
+    mmap_unlock();
+
+    cpsr = (val == oldval) * CPSR_C;
+    cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
+    env->regs[0] = cpsr ? 0 : -1;
+}
 
 /*
  * See the Linux kernel's Documentation/arm/kernel_user_helpers.txt
@@ -153,36 +213,13 @@ static int
 do_kernel_trap(CPUARMState *env)
 {
     uint32_t addr;
-    uint32_t cpsr;
-    uint32_t val;
 
     switch (env->regs[15]) {
     case 0xffff0fa0: /* __kernel_memory_barrier */
         smp_mb();
         break;
     case 0xffff0fc0: /* __kernel_cmpxchg */
-        /* XXX: This only works between threads, not between processes.
-           It's probably possible to implement this with native host
-           operations. However things like ldrex/strex are much harder so
-           there's not much point trying.  */
-        start_exclusive();
-        cpsr = cpsr_read(env);
-        addr = env->regs[2];
-        /* FIXME: This should SEGV if the access fails.  */
-        if (get_user_u32(val, addr))
-            val = ~env->regs[0];
-        if (val == env->regs[0]) {
-            val = env->regs[1];
-            /* FIXME: Check for segfaults.  */
-            put_user_u32(val, addr);
-            env->regs[0] = 0;
-            cpsr |= CPSR_C;
-        } else {
-            env->regs[0] = -1;
-            cpsr &= ~CPSR_C;
-        }
-        cpsr_write(env, cpsr, CPSR_C, CPSRWriteByInstr);
-        end_exclusive();
+        arm_kernel_cmpxchg32_helper(env);
         break;
     case 0xffff0fe0: /* __kernel_get_tls */
         env->regs[0] = cpu_get_tls(env);
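
For context on what this trap path services: an ARM guest reaches __kernel_cmpxchg by calling the fixed commpage address 0xffff0fc0, which is why do_kernel_trap() dispatches on env->regs[15] above. In the new helper, `cpsr = (val == oldval) * CPSR_C` sets the C flag only when the exchange happened, and `env->regs[0] = cpsr ? 0 : -1` returns 0 on success and -1 on failure, matching the contract in the comment. Below is a hedged sketch of typical guest-side usage, modelled on the calling convention described in the kernel's Documentation/arm/kernel_user_helpers.rst; the type and macro names are illustrative, not taken from the patch or the kernel headers.

```c
/*
 * Guest-side usage sketch, modelled on the calling convention in
 * Documentation/arm/kernel_user_helpers.rst; illustrative only.
 * Under qemu-arm the call to 0xffff0fc0 lands in do_kernel_trap(),
 * which now forwards it to arm_kernel_cmpxchg32_helper().
 */
typedef int (kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define kernel_cmpxchg (*(kernel_cmpxchg_t *)0xffff0fc0)

/* Atomically add 'val' to '*ptr' by retrying until the cmpxchg succeeds. */
int atomic_add(volatile int *ptr, int val)
{
    int old, new;

    do {
        old = *ptr;
        new = old + val;
    } while (kernel_cmpxchg(old, new, ptr));   /* non-zero: lost the race, retry */

    return new;
}
```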