author     Peter Maydell <peter.maydell@linaro.org>   2017-11-16 12:45:14 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2017-11-16 12:45:14 +0000
commit     6a7cb8c3d674815cab08d884740d203fded12249 (patch)
tree       fd510506d08d6d7cfaed7533f1133843aff99a6d /accel
parent     8048082f7a11040a366942a2de8abb4c3d0020c9 (diff)
parent     3c5f9c3f35dd3b6d1d1cd68c9d4d86fc3c59c397 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20171115' into staging
User-mode memory helper fixes

# gpg: Signature made Wed 15 Nov 2017 12:32:33 GMT
# gpg:                using RSA key 0x64DF38E8AF7E215F
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>"
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20171115:
  target/arm: Fix GETPC usage in do_paired_cmpxchg64_l/be
  target/arm: Use helper_retaddr in stxp helpers
  tcg: Record code_gen_buffer address for user-only memory helpers

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
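A note on the mechanism. The key change in this series is the thread-local
helper_retaddr: a user-mode memory helper records its own return address
(which points into code_gen_buffer) before touching guest memory directly
through the host, and clears it afterwards. The SIGSEGV handler can then
distinguish a fault raised inside a helper (use helper_retaddr as-is) from a
fault raised by translated code (use the signal-frame PC, compensating for the
-GETPC_ADJ applied later by cpu_restore_state_from_tb). The standalone sketch
below illustrates only the pattern; fault_retaddr, guarded_read and the
recovery plumbing are invented for the example and are not QEMU APIs
(GCC/Clang builtins and POSIX signals assumed):

    /* Thread-local "return address window" around a faultable access. */
    #include <setjmp.h>
    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>

    static __thread uintptr_t fault_retaddr; /* nonzero while inside a helper */
    static sigjmp_buf recover;

    static void segv_handler(int sig)
    {
        (void)sig;
        if (fault_retaddr) {
            /* Fault inside a helper: the recorded call site is exact,
             * no GETPC_ADJ-style adjustment is needed. */
            fault_retaddr = 0;
            siglongjmp(recover, 1);
        }
        /* Fault from "translated code": would use the signal-frame PC. */
        siglongjmp(recover, 2);
    }

    static int guarded_read(const volatile int *p)
    {
        /* Open the window before the risky host access... */
        fault_retaddr = (uintptr_t)__builtin_return_address(0);
        int v = *p;               /* may fault */
        fault_retaddr = 0;        /* ...and close it on success. */
        return v;
    }

    int main(void)
    {
        signal(SIGSEGV, segv_handler);
        switch (sigsetjmp(recover, 1)) {
        case 0:
            printf("%d\n", guarded_read((const volatile int *)16));
            break;
        case 1:
            puts("fault with the retaddr window open (helper case)");
            break;
        case 2:
            puts("fault with the window closed (translated-code case)");
            break;
        }
        return 0;
    }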
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/atomic_template.h | 32
-rw-r--r--  accel/tcg/cputlb.c          |  1
-rw-r--r--  accel/tcg/user-exec.c       | 58
3 files changed, 73 insertions(+), 18 deletions(-)
diff --git a/accel/tcg/atomic_template.h b/accel/tcg/atomic_template.h
index b400b2a3d3..1c7c17526c 100644
--- a/accel/tcg/atomic_template.h
+++ b/accel/tcg/atomic_template.h
@@ -62,7 +62,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
- return atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+ DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, cmpv, newv);
+ ATOMIC_MMU_CLEANUP;
+ return ret;
}
#if DATA_SIZE >= 16
@@ -70,6 +72,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
+ ATOMIC_MMU_CLEANUP;
return val;
}
@@ -78,13 +81,16 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
{
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
+ ATOMIC_MMU_CLEANUP;
}
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
- return atomic_xchg__nocheck(haddr, val);
+ DATA_TYPE ret = atomic_xchg__nocheck(haddr, val);
+ ATOMIC_MMU_CLEANUP;
+ return ret;
}
#define GEN_ATOMIC_HELPER(X) \
@@ -92,8 +98,10 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \
{ \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
- return atomic_##X(haddr, val); \
-} \
+ DATA_TYPE ret = atomic_##X(haddr, val); \
+ ATOMIC_MMU_CLEANUP; \
+ return ret; \
+}
GEN_ATOMIC_HELPER(fetch_add)
GEN_ATOMIC_HELPER(fetch_and)
@@ -123,7 +131,9 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE cmpv, ABI_TYPE newv EXTRA_ARGS)
{
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
- return BSWAP(atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv)));
+ DATA_TYPE ret = atomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
+ ATOMIC_MMU_CLEANUP;
+ return BSWAP(ret);
}
#if DATA_SIZE >= 16
@@ -131,6 +141,7 @@ ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr EXTRA_ARGS)
{
DATA_TYPE val, *haddr = ATOMIC_MMU_LOOKUP;
__atomic_load(haddr, &val, __ATOMIC_RELAXED);
+ ATOMIC_MMU_CLEANUP;
return BSWAP(val);
}
@@ -140,13 +151,16 @@ void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr,
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
val = BSWAP(val);
__atomic_store(haddr, &val, __ATOMIC_RELAXED);
+ ATOMIC_MMU_CLEANUP;
}
#else
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr,
ABI_TYPE val EXTRA_ARGS)
{
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP;
- return BSWAP(atomic_xchg__nocheck(haddr, BSWAP(val)));
+ ABI_TYPE ret = atomic_xchg__nocheck(haddr, BSWAP(val));
+ ATOMIC_MMU_CLEANUP;
+ return BSWAP(ret);
}
#define GEN_ATOMIC_HELPER(X) \
@@ -154,7 +168,9 @@ ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr, \
ABI_TYPE val EXTRA_ARGS) \
{ \
DATA_TYPE *haddr = ATOMIC_MMU_LOOKUP; \
- return BSWAP(atomic_##X(haddr, BSWAP(val))); \
+ DATA_TYPE ret = atomic_##X(haddr, BSWAP(val)); \
+ ATOMIC_MMU_CLEANUP; \
+ return BSWAP(ret); \
}
GEN_ATOMIC_HELPER(fetch_and)
@@ -180,6 +196,7 @@ ABI_TYPE ATOMIC_NAME(fetch_add)(CPUArchState *env, target_ulong addr,
sto = BSWAP(ret + val);
ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
if (ldn == ldo) {
+ ATOMIC_MMU_CLEANUP;
return ret;
}
ldo = ldn;
@@ -198,6 +215,7 @@ ABI_TYPE ATOMIC_NAME(add_fetch)(CPUArchState *env, target_ulong addr,
sto = BSWAP(ret);
ldn = atomic_cmpxchg__nocheck(haddr, ldo, sto);
if (ldn == ldo) {
+ ATOMIC_MMU_CLEANUP;
return ret;
}
ldo = ldn;
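With these changes every helper in the template follows the same shape:
lookup, operation, ATOMIC_MMU_CLEANUP, return — including the early-return
paths of the cmpxchg loops just above. The template is included once per
DATA_SIZE and endianness, each inclusion stamping out one family of helpers.
Schematically, GEN_ATOMIC_HELPER(fetch_add) now expands to roughly the
following for a 4-byte, host-endian, user-mode configuration (type and helper
names simplified for illustration, not the literal preprocessor output):

    uint32_t helper_atomic_fetch_addl(CPUArchState *env, target_ulong addr,
                                      uint32_t val)
    {
        /* ATOMIC_MMU_LOOKUP: checks alignment, sets helper_retaddr. */
        uint32_t *haddr = atomic_mmu_lookup(env, addr, 4, GETPC());
        uint32_t ret = atomic_fetch_add(haddr, val); /* host access; may fault */
        helper_retaddr = 0;                          /* ATOMIC_MMU_CLEANUP */
        return ret;
    }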
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a23919c3a8..d071ca4d14 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1041,6 +1041,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
#define ATOMIC_NAME(X) \
HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr)
+#define ATOMIC_MMU_CLEANUP do { } while (0)
#define DATA_SIZE 1
#include "atomic_template.h"
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 492ea0826c..0324ba8ad1 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -39,6 +39,8 @@
#include <sys/ucontext.h>
#endif
+__thread uintptr_t helper_retaddr;
+
//#define DEBUG_SIGNAL
/* exit the current TB from a signal handler. The host registers are
@@ -62,6 +64,27 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
CPUClass *cc;
int ret;
+ /* We must handle PC addresses from two different sources:
+ * a call return address and a signal frame address.
+ *
+ * Within cpu_restore_state_from_tb we assume the former and adjust
+ * the address by -GETPC_ADJ so that the address is within the call
+ * insn so that addr does not accidentally match the beginning of the
+ * next guest insn.
+ *
+ * However, when the PC comes from the signal frame, it points to
+ * the actual faulting host insn and not a call insn. Subtracting
+ * GETPC_ADJ in that case may accidentally match the previous guest insn.
+ *
+ * So for the latter case, adjust forward to compensate for what
+ * will be done later by cpu_restore_state_from_tb.
+ */
+ if (helper_retaddr) {
+ pc = helper_retaddr;
+ } else {
+ pc += GETPC_ADJ;
+ }
+
/* For synchronous signals we expect to be coming from the vCPU
* thread (so current_cpu should be valid) and either from running
* code or during translation which can fault as we cross pages.
@@ -84,21 +107,24 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
switch (page_unprotect(h2g(address), pc)) {
case 0:
/* Fault not caused by a page marked unwritable to protect
- * cached translations, must be the guest binary's problem
+ * cached translations, must be the guest binary's problem.
*/
break;
case 1:
/* Fault caused by protection of cached translation; TBs
- * invalidated, so resume execution
+ * invalidated, so resume execution. Retain helper_retaddr
+ * for a possible second fault.
*/
return 1;
case 2:
/* Fault caused by protection of cached translation, and the
* currently executing TB was modified and must be exited
- * immediately.
+ * immediately. Clear helper_retaddr for next execution.
*/
+ helper_retaddr = 0;
cpu_exit_tb_from_sighandler(cpu, old_set);
- g_assert_not_reached();
+ /* NORETURN */
+
default:
g_assert_not_reached();
}
@@ -112,17 +138,25 @@ static inline int handle_cpu_signal(uintptr_t pc, unsigned long address,
/* see if it is an MMU fault */
g_assert(cc->handle_mmu_fault);
ret = cc->handle_mmu_fault(cpu, address, is_write, MMU_USER_IDX);
+
+ if (ret == 0) {
+ /* The MMU fault was handled without causing a real CPU fault.
+ * Retain helper_retaddr for a possible second fault.
+ */
+ return 1;
+ }
+
+ /* All other paths lead to cpu_exit; clear helper_retaddr
+ * for next execution.
+ */
+ helper_retaddr = 0;
+
if (ret < 0) {
return 0; /* not an MMU fault */
}
- if (ret == 0) {
- return 1; /* the MMU fault was handled without causing real CPU fault */
- }
- /* Now we have a real cpu fault. Since this is the exact location of
- * the exception, we must undo the adjustment done by cpu_restore_state
- * for handling call return addresses. */
- cpu_restore_state(cpu, pc + GETPC_ADJ);
+ /* Now we have a real cpu fault. */
+ cpu_restore_state(cpu, pc);
sigprocmask(SIG_SETMASK, old_set, NULL);
cpu_loop_exit(cpu);
@@ -585,11 +619,13 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
if (unlikely(addr & (size - 1))) {
cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}
+ helper_retaddr = retaddr;
return g2h(addr);
}
/* Macro to call the above, with local variables from the use context. */
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, DATA_SIZE, GETPC())
+#define ATOMIC_MMU_CLEANUP do { helper_retaddr = 0; } while (0)
#define ATOMIC_NAME(X) HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define EXTRA_ARGS
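Taken together, the two macros bracket exactly the host accesses that can
fault, and the handler's case-1 path above deliberately keeps helper_retaddr
set: when page_unprotect() repairs the mapping, returning from the signal
handler re-executes the faulting instruction, and that retried access can
fault again, so the window must stay open across the retry. Below is a
standalone Linux demo of that retry shape; fault_retaddr and all the mapping
details are invented for the example and are not QEMU code:

    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    static char *page;
    static __thread uintptr_t fault_retaddr;  /* stands in for helper_retaddr */

    static void handler(int sig, siginfo_t *si, void *ctx)
    {
        (void)sig; (void)ctx;
        if (si->si_addr == (void *)page) {
            /* Like page_unprotect() returning 1: fix the protection and
             * return; the faulting load retries, window still open. */
            mprotect(page, (size_t)sysconf(_SC_PAGESIZE),
                     PROT_READ | PROT_WRITE);
            return;
        }
        _exit(1);  /* unrelated fault: give up */
    }

    int main(void)
    {
        size_t psz = (size_t)sysconf(_SC_PAGESIZE);
        struct sigaction sa = { .sa_sigaction = handler,
                                .sa_flags = SA_SIGINFO };
        sigaction(SIGSEGV, &sa, NULL);

        page = mmap(NULL, psz, PROT_NONE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED) {
            return 1;
        }

        fault_retaddr = (uintptr_t)__builtin_return_address(0); /* open */
        char c = *(volatile char *)page; /* faults once, then retries */
        fault_retaddr = 0;               /* close after the access lands */

        printf("read %d after retry; window closed\n", c);
        return 0;
    }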