author     Peter Maydell <peter.maydell@linaro.org>  2019-07-15 09:46:12 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2019-07-15 09:46:15 +0100
commit     298ad7b5a4519d2ae547df46103b2f8d49ca6f95 (patch)
tree       ef4040d79adfcdbd214c378892dc333d8e4add62 /include
parent     46cd24e7ed38191b5ab5c40a836d6c5b6b604f8a (diff)
parent     52ba13f042714c4086416973fb88e2465e0888a1 (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190714' into staging
Fixes for 3 tcg bugs

# gpg: Signature made Sun 14 Jul 2019 12:11:01 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190714:
  tcg: Release mmap_lock on translation fault
  tcg: Remove duplicate #if !defined(CODE_ACCESS)
  tcg: Remove cpu_ld*_code_ra
  tcg: Introduce set/clear_helper_retaddr
  include/qemu/atomic.h: Add signal_barrier
  tcg/aarch64: Fix output of extract2 opcodes
  tcg: Fix constant folding of INDEX_op_extract2_i32

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r--  include/exec/cpu_ldst.h                     20
-rw-r--r--  include/exec/cpu_ldst_useronly_template.h   40
-rw-r--r--  include/qemu/atomic.h                       11
3 files changed, 58 insertions(+), 13 deletions(-)
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index a08b11bd2c..9de8c93303 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -89,6 +89,26 @@ typedef target_ulong abi_ptr;
 
 extern __thread uintptr_t helper_retaddr;
 
+static inline void set_helper_retaddr(uintptr_t ra)
+{
+    helper_retaddr = ra;
+    /*
+     * Ensure that this write is visible to the SIGSEGV handler that
+     * may be invoked due to a subsequent invalid memory operation.
+     */
+    signal_barrier();
+}
+
+static inline void clear_helper_retaddr(void)
+{
+    /*
+     * Ensure that previous memory operations have succeeded before
+     * removing the data visible to the signal handler.
+     */
+    signal_barrier();
+    helper_retaddr = 0;
+}
+
 /* In user-only mode we provide only the _code and _data accessors. */
 
 #define MEMSUFFIX _data
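
For context, this pair is meant to bracket any guest memory access a helper performs on behalf of translated code, so that the per-thread SIGSEGV handler can find the host return address and unwind to the faulting guest instruction. A minimal sketch of the call pattern follows; the helper name is hypothetical and the use of cpu_ldl_data is illustrative, not part of this patch:

    /* Hypothetical helper: publish the return address, perform the
     * possibly-faulting access, then retire the address again. */
    uint32_t example_helper_load(CPUArchState *env, abi_ptr addr, uintptr_t ra)
    {
        uint32_t val;

        set_helper_retaddr(ra);         /* visible before the risky load */
        val = cpu_ldl_data(env, addr);  /* may SIGSEGV on an unmapped page */
        clear_helper_retaddr();         /* cleared only after success */
        return val;
    }

This is the same pattern the _ra accessors in the next file now implement generically.
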
diff --git a/include/exec/cpu_ldst_useronly_template.h b/include/exec/cpu_ldst_useronly_template.h
index bc45e2b8d4..2378f2958c 100644
--- a/include/exec/cpu_ldst_useronly_template.h
+++ b/include/exec/cpu_ldst_useronly_template.h
@@ -64,61 +64,75 @@
 static inline RES_TYPE
 glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    RES_TYPE ret;
+    set_helper_retaddr(1);
+    ret = glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+    clear_helper_retaddr();
+    return ret;
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, false));
-#endif
     return glue(glue(ld, USUFFIX), _p)(g2h(ptr));
+#endif
 }
 
+#ifndef CODE_ACCESS
 static inline RES_TYPE
 glue(glue(glue(cpu_ld, USUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
                                                   uintptr_t retaddr)
 {
     RES_TYPE ret;
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     ret = glue(glue(cpu_ld, USUFFIX), MEMSUFFIX)(env, ptr);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
     return ret;
 }
+#endif
 
 #if DATA_SIZE <= 2
 static inline int
 glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr)
 {
-#if !defined(CODE_ACCESS)
+#ifdef CODE_ACCESS
+    int ret;
+    set_helper_retaddr(1);
+    ret = glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+    clear_helper_retaddr();
+    return ret;
+#else
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, true, MO_TE, false));
-#endif
     return glue(glue(lds, SUFFIX), _p)(g2h(ptr));
+#endif
 }
 
+#ifndef CODE_ACCESS
 static inline int
 glue(glue(glue(cpu_lds, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                   abi_ptr ptr,
                                                   uintptr_t retaddr)
 {
     int ret;
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     ret = glue(glue(cpu_lds, SUFFIX), MEMSUFFIX)(env, ptr);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
     return ret;
 }
-#endif
+#endif /* CODE_ACCESS */
+#endif /* DATA_SIZE <= 2 */
 
 #ifndef CODE_ACCESS
 static inline void
 glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(CPUArchState *env, abi_ptr ptr,
                                       RES_TYPE v)
 {
-#if !defined(CODE_ACCESS)
     trace_guest_mem_before_exec(
         env_cpu(env), ptr,
         trace_mem_build_info(SHIFT, false, MO_TE, true));
-#endif
     glue(glue(st, SUFFIX), _p)(g2h(ptr), v);
 }
 
@@ -128,9 +142,9 @@ glue(glue(glue(cpu_st, SUFFIX), MEMSUFFIX), _ra)(CPUArchState *env,
                                                  RES_TYPE v,
                                                  uintptr_t retaddr)
 {
-    helper_retaddr = retaddr;
+    set_helper_retaddr(retaddr);
     glue(glue(cpu_st, SUFFIX), MEMSUFFIX)(env, ptr, v);
-    helper_retaddr = 0;
+    clear_helper_retaddr();
 }
 
 #endif
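
To see what one instantiation of this template produces, consider DATA_SIZE 1 with MEMSUFFIX _code: after the glue() macros are expanded, the CODE_ACCESS branch becomes roughly the following. This is a sketch for illustration, not literal preprocessor output:

    /* Approximate expansion of cpu_ldub_code() under CODE_ACCESS. */
    static inline uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
    {
        uint32_t ret;

        set_helper_retaddr(1);   /* sentinel, not a real return address */
        ret = ldub_p(g2h(ptr));  /* direct host load of the guest byte */
        clear_helper_retaddr();
        return ret;
    }

The value 1 appears to serve as a marker letting the SIGSEGV handler distinguish a fault during a code fetch from a fault in a runtime helper or in generated code.
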
diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h
index a6ac188188..f9cd24c899 100644
--- a/include/qemu/atomic.h
+++ b/include/qemu/atomic.h
@@ -88,6 +88,13 @@
 #define smp_read_barrier_depends()   barrier()
 #endif
 
+/*
+ * A signal barrier forces all pending local memory ops to be observed before
+ * a SIGSEGV is delivered to the *same* thread.  In practice this is exactly
+ * the same as barrier(), but since we have the correct builtin, use it.
+ */
+#define signal_barrier()    __atomic_signal_fence(__ATOMIC_SEQ_CST)
+
 /* Sanity check that the size of an atomic operation isn't "overly large".
  * Despite the fact that e.g. i686 has 64-bit atomic operations, we do not
  * want to use them because we ought not need them, and this lets us do a
@@ -308,6 +315,10 @@
 #define smp_read_barrier_depends()   barrier()
 #endif
 
+#ifndef signal_barrier
+#define signal_barrier()   barrier()
+#endif
+
 /* These will only be atomic if the processor does the fetch or store
  * in a single issue memory operation
  */
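
Unlike smp_mb(), signal_barrier() constrains only the compiler and emits no instruction, which suffices here because a signal handler runs on the same CPU as the code it interrupts. A small stand-alone illustration of the idea, using hypothetical variables rather than QEMU code:

    #include <signal.h>
    #include <stdatomic.h>

    static int data;                      /* consumed by a signal handler */
    static volatile sig_atomic_t ready;

    void publish_for_handler(int v)
    {
        data = v;
        /* Compiler-only fence: keeps the two stores ordered as seen by a
         * signal handler in this thread; compiles to no machine code. */
        atomic_signal_fence(memory_order_seq_cst);
        ready = 1;                        /* handler now sees data == v */
    }

The C11 atomic_signal_fence() is the portable spelling of the __atomic_signal_fence() builtin used in the hunk above.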