path: root/linux-user/main.c
author    Michael Matz <matz@suse.de>                2014-01-04 22:15:47 +0000
committer Peter Maydell <peter.maydell@linaro.org>   2014-01-08 19:07:20 +0000
commit    fa2ef212dff8556ea92c4aef54b57efec69ca6fe
tree      45931520fb538132a2279285931369a9cbf7594c /linux-user/main.c
parent    03d05e2d0765512fb960192b6e9f9a41c47282bd
target-arm: A64: support for ld/st/cl exclusive
This implements exclusive loads/stores for AArch64 along the lines of the arm32 and ppc implementations. The exclusive load remembers the address and the loaded value. The exclusive store throws an exception, which uses those values to check for equality within a proper exclusive region.

This is not actually the architecturally mandated semantics (for either AArch32 or AArch64), but it is close enough for typical guest code sequences to work correctly, and it saves us from having to monitor all guest stores. It is fairly easy to come up with test cases where we don't behave like hardware - we don't, for example, model cache line behaviour. However, in the common patterns this works, and the existing 32-bit ARM exclusive access implementation has the same limitations.

AArch64 also adds acquire/release loads/stores (which may be either exclusive or non-exclusive). These impose extra ordering constraints on memory operations (i.e. they act as if they have an implicit barrier built into them). As TCG is single-threaded, all our barriers are no-ops, so these just behave like normal loads and stores.

Signed-off-by: Michael Matz <matz@suse.de>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
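As a concrete illustration of the "typical guest code sequences" mentioned above (and not part of the patch itself): compilers targeting AArch64 without the later LSE atomics generally lower an atomic compare-and-swap to an LDAXR/STLXR retry loop, which is exactly the load-exclusive/store-exclusive pattern this emulation is meant to satisfy. The helper name below is made up for illustration.

#include <stdint.h>
#include <stdbool.h>

/* Hypothetical guest-side helper (illustrative, not from the patch):
 * atomically replace *addr with newval if it still holds expected.
 * On AArch64 this builtin is typically compiled to an LDAXR/STLXR
 * retry loop, i.e. an exclusive load followed by an exclusive store
 * whose status result is tested and branched on.
 */
static bool cas_u64(uint64_t *addr, uint64_t expected, uint64_t newval)
{
    return __atomic_compare_exchange_n(addr, &expected, newval,
                                       false /* strong CAS */,
                                       __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
}

Under the scheme described in the commit message, the LDAXR records the address and loaded value in the CPU state, the STLXR raises EXCP_STREX, and the handler re-reads guest memory to decide whether the store may complete, so a loop like this behaves correctly even though no real exclusive monitor is modelled.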
Diffstat (limited to 'linux-user/main.c')
-rw-r--r--   linux-user/main.c   127
1 file changed, 124 insertions, 3 deletions
diff --git a/linux-user/main.c b/linux-user/main.c
index 20f9832d38..cabc9e1a0e 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -585,8 +585,8 @@ do_kernel_trap(CPUARMState *env)
return 0;
}
-#endif
+/* Store exclusive handling for AArch32 */
static int do_strex(CPUARMState *env)
{
uint64_t val;
@@ -670,7 +670,6 @@ done:
return segv;
}
-#ifdef TARGET_ABI32
void cpu_loop(CPUARMState *env)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
@@ -885,6 +884,122 @@ void cpu_loop(CPUARMState *env)
#else
+/*
+ * Handle an AArch64 store exclusive (including the store-release forms)
+ *
+ * rs = register receiving the status result of the store exclusive
+ * rt = register that is stored
+ * rt2 = second register stored (for the pair forms, STXP/STLXP)
+ *
+ */
+static int do_strex_a64(CPUARMState *env)
+{
+ uint64_t val;
+ int size;
+ bool is_pair;
+ int rc = 1;
+ int segv = 0;
+ uint64_t addr;
+ int rs, rt, rt2;
+
+ start_exclusive();
+    /* exclusive_info encoding: size | (is_pair << 2) | (rs << 4) | (rt << 9) | (rt2 << 14) */
+ size = extract32(env->exclusive_info, 0, 2);
+ is_pair = extract32(env->exclusive_info, 2, 1);
+ rs = extract32(env->exclusive_info, 4, 5);
+ rt = extract32(env->exclusive_info, 9, 5);
+ rt2 = extract32(env->exclusive_info, 14, 5);
+
+ addr = env->exclusive_addr;
+
+ if (addr != env->exclusive_test) {
+ goto finish;
+ }
+
+ switch (size) {
+ case 0:
+ segv = get_user_u8(val, addr);
+ break;
+ case 1:
+ segv = get_user_u16(val, addr);
+ break;
+ case 2:
+ segv = get_user_u32(val, addr);
+ break;
+ case 3:
+ segv = get_user_u64(val, addr);
+ break;
+ default:
+ abort();
+ }
+ if (segv) {
+ env->cp15.c6_data = addr;
+ goto error;
+ }
+ if (val != env->exclusive_val) {
+ goto finish;
+ }
+ if (is_pair) {
+ if (size == 2) {
+ segv = get_user_u32(val, addr + 4);
+ } else {
+ segv = get_user_u64(val, addr + 8);
+ }
+ if (segv) {
+ env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
+ goto error;
+ }
+ if (val != env->exclusive_high) {
+ goto finish;
+ }
+ }
+ val = env->xregs[rt];
+ switch (size) {
+ case 0:
+ segv = put_user_u8(val, addr);
+ break;
+ case 1:
+ segv = put_user_u16(val, addr);
+ break;
+ case 2:
+ segv = put_user_u32(val, addr);
+ break;
+ case 3:
+ segv = put_user_u64(val, addr);
+ break;
+ }
+ if (segv) {
+ goto error;
+ }
+ if (is_pair) {
+ val = env->xregs[rt2];
+ if (size == 2) {
+ segv = put_user_u32(val, addr + 4);
+ } else {
+ segv = put_user_u64(val, addr + 8);
+ }
+ if (segv) {
+ env->cp15.c6_data = addr + (size == 2 ? 4 : 8);
+ goto error;
+ }
+ }
+ rc = 0;
+finish:
+ env->pc += 4;
+ /* rs == 31 encodes a write to the ZR, thus throwing away
+ * the status return. This is rather silly but valid.
+ */
+ if (rs < 31) {
+ env->xregs[rs] = rc;
+ }
+error:
+ /* instruction faulted, PC does not advance */
+ /* either way a strex releases any exclusive lock we have */
+ env->exclusive_addr = -1;
+ end_exclusive();
+ return segv;
+}
+
/* AArch64 main loop */
void cpu_loop(CPUARMState *env)
{
@@ -944,7 +1059,7 @@ void cpu_loop(CPUARMState *env)
}
break;
case EXCP_STREX:
- if (do_strex(env)) {
+ if (do_strex_a64(env)) {
addr = env->cp15.c6_data;
goto do_segv;
}
@@ -956,6 +1071,12 @@ void cpu_loop(CPUARMState *env)
abort();
}
process_pending_signals(env);
+ /* Exception return on AArch64 always clears the exclusive monitor,
+ * so any return to running guest code implies this.
+ * A strex (successful or otherwise) also clears the monitor, so
+ * we don't need to specialcase EXCP_STREX.
+ */
+ env->exclusive_addr = -1;
}
}
#endif /* ndef TARGET_ABI32 */
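For reference, the extract32() calls in do_strex_a64() imply the following layout for env->exclusive_info, which the translated store-exclusive code must have packed before raising EXCP_STREX. The packing helper below is only a sketch derived from that decode; the function name is invented, and the translator side is not part of this diff (which is limited to linux-user/main.c).

#include <stdint.h>
#include <stdbool.h>

/* Illustrative only: pack the operands of a store exclusive into the
 * bitfield layout that do_strex_a64() decodes with extract32():
 *   bits [1:0]   size     (0 = byte, 1 = halfword, 2 = word, 3 = doubleword)
 *   bit  2       is_pair  (set for the pair forms)
 *   bits [8:4]   rs       (status destination register)
 *   bits [13:9]  rt       (first data register)
 *   bits [18:14] rt2      (second data register, pair forms only)
 */
static uint32_t pack_exclusive_info(unsigned size, bool is_pair,
                                    unsigned rs, unsigned rt, unsigned rt2)
{
    return size | ((unsigned)is_pair << 2) | (rs << 4) | (rt << 9) | (rt2 << 14);
}

do_strex_a64() unpacks this value, re-checks the exclusive address and value recorded by the exclusive load, performs the store (or pair of stores), and writes the 0/1 status into xregs[rs] unless rs is 31, the zero register.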