author    Richard Henderson <richard.henderson@linaro.org>    2021-07-28 11:32:24 -1000
committer Richard Henderson <richard.henderson@linaro.org>    2022-02-09 09:00:01 +1100
commit    5c1a101ef6b85537a4ade93c39ea81cadd5c246e (patch)
tree      5f02a22db541d7d99ad449d9afae88e9c1709286 /tests
parent    321dbde33a6aa8e7780a3b6b4746628d215a1fec (diff)
tests/tcg/multiarch: Add sigbus.c
A mostly generic test for unaligned access raising SIGBUS.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
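For orientation before the diff: the test installs a SIGBUS handler with SA_SIGINFO, so the kernel passes a siginfo_t identifying the faulting address, then performs a deliberately misaligned access. Below is a minimal condensed sketch of that pattern, not part of the commit; the names handler and misaligned are mine, and the full test that follows adds per-architecture atomic instructions and an endianness check.

#define _GNU_SOURCE 1

#include <assert.h>
#include <signal.h>
#include <stdlib.h>

static void *misaligned;                  /* the address we expect in si_addr */

static void handler(int sig, siginfo_t *info, void *uc)
{
    assert(sig == SIGBUS);
    assert(info->si_addr == misaligned);  /* kernel reports the bad address */
    exit(EXIT_SUCCESS);                   /* fault observed: test passes */
}

int main(void)
{
    static unsigned long long x;
    struct sigaction sa = {
        .sa_sigaction = handler,
        .sa_flags = SA_SIGINFO,           /* deliver siginfo_t, not just sig */
    };
    volatile int tmp;
    int rc;

    misaligned = (char *)&x + 1;          /* one byte off natural alignment */
    rc = sigaction(SIGBUS, &sa, NULL);
    assert(rc == 0);

    /*
     * A plain load like this may simply succeed on targets that allow
     * unaligned access, which is why the real test prefers atomic
     * instructions that enforce alignment.
     */
    tmp = *(volatile int *)misaligned;
    (void)tmp;
    return EXIT_SUCCESS;
}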
Diffstat (limited to 'tests')
-rw-r--r--    tests/tcg/multiarch/sigbus.c    68
1 file changed, 68 insertions(+), 0 deletions(-)
diff --git a/tests/tcg/multiarch/sigbus.c b/tests/tcg/multiarch/sigbus.c
new file mode 100644
index 0000000000..8134c5fd56
--- /dev/null
+++ b/tests/tcg/multiarch/sigbus.c
@@ -0,0 +1,68 @@
+#define _GNU_SOURCE 1
+
+#include <assert.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <endian.h>
+
+
+unsigned long long x = 0x8877665544332211ull;
+void * volatile p = (void *)&x + 1;
+
+void sigbus(int sig, siginfo_t *info, void *uc)
+{
+ assert(sig == SIGBUS);
+ assert(info->si_signo == SIGBUS);
+#ifdef BUS_ADRALN
+ assert(info->si_code == BUS_ADRALN);
+#endif
+ assert(info->si_addr == p);
+ exit(EXIT_SUCCESS);
+}
+
+int main()
+{
+ struct sigaction sa = {
+ .sa_sigaction = sigbus,
+ .sa_flags = SA_SIGINFO
+ };
+ int allow_fail = 0;
+ int tmp;
+
+ tmp = sigaction(SIGBUS, &sa, NULL);
+ assert(tmp == 0);
+
+ /*
+ * Select an operation that's likely to enforce alignment.
+ * On many guests that support unaligned accesses by default,
+ * this is often an atomic operation.
+ */
+#if defined(__aarch64__)
+ asm volatile("ldxr %w0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__alpha__)
+ asm volatile("ldl_l %0,0(%1)" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__arm__)
+ asm volatile("ldrex %0,[%1]" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__powerpc__)
+ asm volatile("lwarx %0,0,%1" : "=r"(tmp) : "r"(p) : "memory");
+#elif defined(__riscv_atomic)
+ asm volatile("lr.w %0,(%1)" : "=r"(tmp) : "r"(p) : "memory");
+#else
+ /* No insn known to fault unaligned -- try for a straight load. */
+ allow_fail = 1;
+ tmp = *(volatile int *)p;
+#endif
+
+ assert(allow_fail);
+
+ /*
+ * We didn't see a signal.
+ * We might as well validate the unaligned load worked.
+ */
+ if (BYTE_ORDER == LITTLE_ENDIAN) {
+ assert(tmp == 0x55443322);
+ } else {
+ assert(tmp == 0x77665544);
+ }
+ return EXIT_SUCCESS;
+}
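Why the final asserts expect 0x55443322 and 0x77665544: p points one byte into the eight-byte constant 0x8877665544332211, so a four-byte load reads bytes 2 through 5 of the object, and host byte order decides what value those bytes assemble into. The following standalone sketch is my illustration, not part of the commit; it derives the same values with a memcpy so the read is alignment-safe on any host.

#include <assert.h>
#include <endian.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
    unsigned long long x = 0x8877665544332211ull;
    unsigned int tmp;

    /*
     * Little endian stores x as bytes 11 22 33 44 55 66 77 88; the four
     * bytes at offset 1 are 22 33 44 55, which read back as 0x55443322.
     * Big endian stores 88 77 66 55 44 33 22 11; the four bytes at
     * offset 1 are 77 66 55 44, which read back as 0x77665544.
     */
    memcpy(&tmp, (char *)&x + 1, sizeof(tmp));  /* alignment-safe copy */

    if (BYTE_ORDER == LITTLE_ENDIAN) {
        assert(tmp == 0x55443322);
    } else {
        assert(tmp == 0x77665544);
    }
    printf("unaligned word at &x + 1: 0x%08x\n", tmp);
    return 0;
}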