author     Paul Brook <paul@codesourcery.com>    2009-11-22 21:35:13 +0000
committer  Paul Brook <paul@codesourcery.com>    2009-11-22 21:35:13 +0000
commit     426f5abcaa15d0e59475fdb33b2a4a5fdf492d05 (patch)
tree       ef9eb63ac19b7ba19fe533d514d3571f21de2b3d
parent     eee4850422d1d18276c1f4dbb6153b568efcb8b9 (diff)
ARM atomic ops rewrite

Implement ARMv6 atomic ops (ldrex/strex) using the same trick as PPC.

Signed-off-by: Paul Brook <paul@codesourcery.com>
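For context, ldrex/strex are ARMv6 load-linked/store-conditional instructions: ldrex loads a value and marks the address for exclusive access; strex stores only if exclusivity still holds and writes 0 (success) or 1 (failure) to a status register. A minimal sketch of the guest-side pattern the patch has to emulate correctly (the atomic_add helper below is illustrative, not part of this patch):

    /* Illustrative guest code: an atomic read-modify-write built from
       ldrex/strex. The loop retries until strex reports success (0). */
    static inline int atomic_add(int *ptr, int val)
    {
        int result, status;
        __asm__ __volatile__(
            "1: ldrex   %0, [%2]\n"     /* load and mark [ptr] exclusive */
            "   add     %0, %0, %3\n"
            "   strex   %1, %0, [%2]\n" /* store only if still exclusive */
            "   teq     %1, #0\n"       /* status == 0 means success */
            "   bne     1b"
            : "=&r" (result), "=&r" (status)
            : "r" (ptr), "Ir" (val)
            : "cc", "memory");
        return result;
    }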
-rw-r--r--   linux-user/main.c        80
-rw-r--r--   target-arm/cpu.h          9
-rw-r--r--   target-arm/helper.c      84
-rw-r--r--   target-arm/helpers.h      4
-rw-r--r--   target-arm/translate.c  250
5 files changed, 251 insertions, 176 deletions
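The strategy (the same trick the PPC target uses) is to record the address and value at ldrex time in exclusive_addr/exclusive_val and, at strex time, re-read memory and compare: if both still match, perform the store and return 0, otherwise return 1. In system emulation only one CPU runs at a time, so the generated compare-and-store sequence is effectively atomic; in user emulation the translator instead packs the operands into exclusive_info (size | rd << 4 | rt << 8 | rt2 << 12), raises EXCP_STREX, and cpu_loop() resolves the store under start_exclusive()/end_exclusive(). A simplified, self-contained sketch of the record-and-compare idea, with illustrative names (the real logic is in gen_load_exclusive()/gen_store_exclusive() and do_strex() below):

    #include <stdint.h>

    static uint32_t exclusive_addr = -1;  /* -1: no outstanding exclusive access */
    static uint32_t exclusive_val;

    /* ldrex: load, and remember where from and what value was seen.
       For brevity, addr is treated as a word index into mem. */
    static uint32_t emulated_ldrex(uint32_t *mem, uint32_t addr)
    {
        exclusive_addr = addr;
        exclusive_val = mem[addr];
        return exclusive_val;
    }

    /* strex: store only if the monitor still matches. Returns 0 on
       success and 1 on failure, mirroring strex's status result. */
    static int emulated_strex(uint32_t *mem, uint32_t addr, uint32_t val)
    {
        if (addr != exclusive_addr || mem[addr] != exclusive_val) {
            return 1;             /* monitor lost or value changed */
        }
        mem[addr] = val;
        exclusive_addr = -1;      /* clear the monitor after the store */
        return 0;
    }

Note that this scheme can spuriously succeed if the value is changed and then restored (the ABA case), which is why the comment in translate.c calls it sufficient for the architecturally mandated semantics rather than an exact monitor.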
diff --git a/linux-user/main.c b/linux-user/main.c
index 67336d71ff..2b8bab1b63 100644
--- a/linux-user/main.c
+++ b/linux-user/main.c
@@ -524,6 +524,81 @@ do_kernel_trap(CPUARMState *env)
return 0;
}
+static int do_strex(CPUARMState *env)
+{
+ uint32_t val;
+ int size;
+ int rc = 1;
+ int segv = 0;
+ uint32_t addr;
+ start_exclusive();
+ addr = env->exclusive_addr;
+ if (addr != env->exclusive_test) {
+ goto fail;
+ }
+ size = env->exclusive_info & 0xf;
+ switch (size) {
+ case 0:
+ segv = get_user_u8(val, addr);
+ break;
+ case 1:
+ segv = get_user_u16(val, addr);
+ break;
+ case 2:
+ case 3:
+ segv = get_user_u32(val, addr);
+ break;
+ }
+ if (segv) {
+ env->cp15.c6_data = addr;
+ goto done;
+ }
+ if (val != env->exclusive_val) {
+ goto fail;
+ }
+ if (size == 3) {
+ segv = get_user_u32(val, addr + 4);
+ if (segv) {
+ env->cp15.c6_data = addr + 4;
+ goto done;
+ }
+ if (val != env->exclusive_high) {
+ goto fail;
+ }
+ }
+ val = env->regs[(env->exclusive_info >> 8) & 0xf];
+ switch (size) {
+ case 0:
+ segv = put_user_u8(val, addr);
+ break;
+ case 1:
+ segv = put_user_u16(val, addr);
+ break;
+ case 2:
+ case 3:
+ segv = put_user_u32(val, addr);
+ break;
+ }
+ if (segv) {
+ env->cp15.c6_data = addr;
+ goto done;
+ }
+ if (size == 3) {
+ val = env->regs[(env->exclusive_info >> 12) & 0xf];
+ segv = put_user_u32(val, addr + 4);
+ if (segv) {
+ env->cp15.c6_data = addr + 4;
+ goto done;
+ }
+ }
+ rc = 0;
+fail:
+ env->regs[(env->exclusive_info >> 4) & 0xf] = rc;
+done:
+ end_exclusive();
+ return segv;
+}
+
void cpu_loop(CPUARMState *env)
{
int trapnr;
@@ -717,6 +792,12 @@ void cpu_loop(CPUARMState *env)
if (do_kernel_trap(env))
goto error;
break;
+ case EXCP_STREX:
+ if (do_strex(env)) {
+ addr = env->cp15.c6_data;
+ goto do_segv;
+ }
+ break;
default:
error:
fprintf(stderr, "qemu: unhandled CPU exception 0x%x - aborting\n",
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index a83ce48c4d..4a1c53f24f 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -40,6 +40,7 @@
#define EXCP_BKPT 7
#define EXCP_EXCEPTION_EXIT 8 /* Return from v7M exception. */
#define EXCP_KERNEL_TRAP 9 /* Jumped to kernel code page. */
+#define EXCP_STREX 10
#define ARMV7M_EXCP_RESET 1
#define ARMV7M_EXCP_NMI 2
@@ -180,10 +181,12 @@ typedef struct CPUARMState {
float_status fp_status;
} vfp;
+ uint32_t exclusive_addr;
+ uint32_t exclusive_val;
+ uint32_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
- struct mmon_state *mmon_entry;
-#else
- uint32_t mmon_addr;
+ uint32_t exclusive_test;
+ uint32_t exclusive_info;
#endif
/* iwMMXt coprocessor state. */
diff --git a/target-arm/helper.c b/target-arm/helper.c
index ffc14f032b..b3aec99442 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -470,16 +470,6 @@ void do_interrupt (CPUState *env)
env->exception_index = -1;
}
-/* Structure used to record exclusive memory locations. */
-typedef struct mmon_state {
- struct mmon_state *next;
- CPUARMState *cpu_env;
- uint32_t addr;
-} mmon_state;
-
-/* Chain of current locks. */
-static mmon_state* mmon_head = NULL;
-
int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int mmu_idx, int is_softmmu)
{
@@ -493,62 +483,6 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
return 1;
}
-static void allocate_mmon_state(CPUState *env)
-{
- env->mmon_entry = malloc(sizeof (mmon_state));
- memset (env->mmon_entry, 0, sizeof (mmon_state));
- env->mmon_entry->cpu_env = env;
- mmon_head = env->mmon_entry;
-}
-
-/* Flush any monitor locks for the specified address. */
-static void flush_mmon(uint32_t addr)
-{
- mmon_state *mon;
-
- for (mon = mmon_head; mon; mon = mon->next)
- {
- if (mon->addr != addr)
- continue;
-
- mon->addr = 0;
- break;
- }
-}
-
-/* Mark an address for exclusive access. */
-void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
-{
- if (!env->mmon_entry)
- allocate_mmon_state(env);
- /* Clear any previous locks. */
- flush_mmon(addr);
- env->mmon_entry->addr = addr;
-}
-
-/* Test if an exclusive address is still exclusive. Returns zero
- if the address is still exclusive. */
-uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
-{
- int res;
-
- if (!env->mmon_entry)
- return 1;
- if (env->mmon_entry->addr == addr)
- res = 0;
- else
- res = 1;
- flush_mmon(addr);
- return res;
-}
-
-void HELPER(clrex)(CPUState *env)
-{
- if (!(env->mmon_entry && env->mmon_entry->addr))
- return;
- flush_mmon(env->mmon_entry->addr);
-}
-
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
return addr;
@@ -1273,24 +1207,6 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
return phys_addr;
}
-/* Not really implemented. Need to figure out a sane way of doing this.
- Maybe add generic watchpoint support and use that. */
-
-void HELPER(mark_exclusive)(CPUState *env, uint32_t addr)
-{
- env->mmon_addr = addr;
-}
-
-uint32_t HELPER(test_exclusive)(CPUState *env, uint32_t addr)
-{
- return (env->mmon_addr != addr);
-}
-
-void HELPER(clrex)(CPUState *env)
-{
- env->mmon_addr = -1;
-}
-
void HELPER(set_cp)(CPUState *env, uint32_t insn, uint32_t val)
{
int cp_num = (insn >> 8) & 0xf;
diff --git a/target-arm/helpers.h b/target-arm/helpers.h
index dc25f185d5..0d1bc47d00 100644
--- a/target-arm/helpers.h
+++ b/target-arm/helpers.h
@@ -68,10 +68,6 @@ DEF_HELPER_2(get_cp, i32, env, i32)
DEF_HELPER_2(get_r13_banked, i32, env, i32)
DEF_HELPER_3(set_r13_banked, void, env, i32, i32)
-DEF_HELPER_2(mark_exclusive, void, env, i32)
-DEF_HELPER_2(test_exclusive, i32, env, i32)
-DEF_HELPER_1(clrex, void, env)
-
DEF_HELPER_1(get_user_reg, i32, i32)
DEF_HELPER_2(set_user_reg, void, i32, i32)
diff --git a/target-arm/translate.c b/target-arm/translate.c
index a002f7e029..45bf77256f 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -76,6 +76,13 @@ static TCGv_ptr cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
+static TCGv_i32 cpu_exclusive_addr;
+static TCGv_i32 cpu_exclusive_val;
+static TCGv_i32 cpu_exclusive_high;
+#ifdef CONFIG_USER_ONLY
+static TCGv_i32 cpu_exclusive_test;
+static TCGv_i32 cpu_exclusive_info;
+#endif
/* FIXME: These should be removed. */
static TCGv cpu_F0s, cpu_F1s;
@@ -99,6 +106,18 @@ void arm_translate_init(void)
offsetof(CPUState, regs[i]),
regnames[i]);
}
+ cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, exclusive_addr), "exclusive_addr");
+ cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, exclusive_val), "exclusive_val");
+ cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, exclusive_high), "exclusive_high");
+#ifdef CONFIG_USER_ONLY
+ cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, exclusive_test), "exclusive_test");
+ cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
+ offsetof(CPUState, exclusive_info), "exclusive_info");
+#endif
#define GEN_HELPER 2
#include "helpers.h"
@@ -5819,6 +5838,132 @@ static void gen_logicq_cc(TCGv_i64 val)
dead_tmp(tmp);
}
+/* Load/Store exclusive instructions are implemented by remembering
+ the value/address loaded, and seeing if these are the same
+ when the store is performed. This should be sufficient to implement
+ the architecturally mandated semantics, and avoids having to monitor
+ regular stores.
+
+ In system emulation mode only one CPU will be running at once, so
+ this sequence is effectively atomic. In user emulation mode we
+ throw an exception and handle the atomic operation elsewhere. */
+static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
+ TCGv addr, int size)
+{
+ TCGv tmp;
+
+ switch (size) {
+ case 0:
+ tmp = gen_ld8u(addr, IS_USER(s));
+ break;
+ case 1:
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ case 2:
+ case 3:
+ tmp = gen_ld32(addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
+ tcg_gen_mov_i32(cpu_exclusive_val, tmp);
+ store_reg(s, rt, tmp);
+ if (size == 3) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = gen_ld32(addr, IS_USER(s));
+ tcg_gen_mov_i32(cpu_exclusive_high, tmp);
+ store_reg(s, rt2, tmp);
+ }
+ tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+}
+
+static void gen_clrex(DisasContext *s)
+{
+ tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+}
+
+#ifdef CONFIG_USER_ONLY
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv addr, int size)
+{
+ tcg_gen_mov_i32(cpu_exclusive_test, addr);
+ tcg_gen_movi_i32(cpu_exclusive_info,
+ size | (rd << 4) | (rt << 8) | (rt2 << 12));
+ gen_set_condexec(s);
+ gen_set_pc_im(s->pc - 4);
+ gen_exception(EXCP_STREX);
+ s->is_jmp = DISAS_JUMP;
+}
+#else
+static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
+ TCGv addr, int size)
+{
+ TCGv tmp;
+ int done_label;
+ int fail_label;
+
+ /* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
+ [addr] = {Rt};
+ {Rd} = 0;
+ } else {
+ {Rd} = 1;
+ } */
+ fail_label = gen_new_label();
+ done_label = gen_new_label();
+ tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+ switch (size) {
+ case 0:
+ tmp = gen_ld8u(addr, IS_USER(s));
+ break;
+ case 1:
+ tmp = gen_ld16u(addr, IS_USER(s));
+ break;
+ case 2:
+ case 3:
+ tmp = gen_ld32(addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
+ dead_tmp(tmp);
+ if (size == 3) {
+ TCGv tmp2 = new_tmp();
+ tcg_gen_addi_i32(tmp2, addr, 4);
+ tmp = gen_ld32(tmp2, IS_USER(s));
+ dead_tmp(tmp2);
+ tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
+ dead_tmp(tmp);
+ }
+ tmp = load_reg(s, rt);
+ switch (size) {
+ case 0:
+ gen_st8(tmp, addr, IS_USER(s));
+ break;
+ case 1:
+ gen_st16(tmp, addr, IS_USER(s));
+ break;
+ case 2:
+ case 3:
+ gen_st32(tmp, addr, IS_USER(s));
+ break;
+ default:
+ abort();
+ }
+ if (size == 3) {
+ tcg_gen_addi_i32(addr, addr, 4);
+ tmp = load_reg(s, rt2);
+ gen_st32(tmp, addr, IS_USER(s));
+ }
+ tcg_gen_movi_i32(cpu_R[rd], 0);
+ tcg_gen_br(done_label);
+ gen_set_label(fail_label);
+ tcg_gen_movi_i32(cpu_R[rd], 1);
+ gen_set_label(done_label);
+ tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+}
+#endif
+
static void disas_arm_insn(CPUState * env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
@@ -5869,7 +6014,7 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
switch ((insn >> 4) & 0xf) {
case 1: /* clrex */
ARCH(6K);
- gen_helper_clrex(cpu_env);
+ gen_clrex(s);
return;
case 4: /* dsb */
case 5: /* dmb */
@@ -6454,57 +6599,40 @@ static void disas_arm_insn(CPUState * env, DisasContext *s)
addr = tcg_temp_local_new_i32();
load_reg_var(s, addr, rn);
if (insn & (1 << 20)) {
- gen_helper_mark_exclusive(cpu_env, addr);
switch (op1) {
case 0: /* ldrex */
- tmp = gen_ld32(addr, IS_USER(s));
+ gen_load_exclusive(s, rd, 15, addr, 2);
break;
case 1: /* ldrexd */
- tmp = gen_ld32(addr, IS_USER(s));
- store_reg(s, rd, tmp);
- tcg_gen_addi_i32(addr, addr, 4);
- tmp = gen_ld32(addr, IS_USER(s));
- rd++;
+ gen_load_exclusive(s, rd, rd + 1, addr, 3);
break;
case 2: /* ldrexb */
- tmp = gen_ld8u(addr, IS_USER(s));
+ gen_load_exclusive(s, rd, 15, addr, 0);
break;
case 3: /* ldrexh */
- tmp = gen_ld16u(addr, IS_USER(s));
+ gen_load_exclusive(s, rd, 15, addr, 1);
break;
default:
abort();
}
- store_reg(s, rd, tmp);
} else {
- int label = gen_new_label();
rm = insn & 0xf;
- tmp2 = tcg_temp_local_new_i32();
- gen_helper_test_exclusive(tmp2, cpu_env, addr);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
- tmp = load_reg(s,rm);
switch (op1) {
case 0: /* strex */
- gen_st32(tmp, addr, IS_USER(s));
+ gen_store_exclusive(s, rd, rm, 15, addr, 2);
break;
case 1: /* strexd */
- gen_st32(tmp, addr, IS_USER(s));
- tcg_gen_addi_i32(addr, addr, 4);
- tmp = load_reg(s, rm + 1);
- gen_st32(tmp, addr, IS_USER(s));
+ gen_store_exclusive(s, rd, rm, rm + 1, addr, 3);
break;
case 2: /* strexb */
- gen_st8(tmp, addr, IS_USER(s));
+ gen_store_exclusive(s, rd, rm, 15, addr, 0);
break;
case 3: /* strexh */
- gen_st16(tmp, addr, IS_USER(s));
+ gen_store_exclusive(s, rd, rm, 15, addr, 1);
break;
default:
abort();
}
- gen_set_label(label);
- tcg_gen_mov_i32(cpu_R[rd], tmp2);
- tcg_temp_free(tmp2);
}
tcg_temp_free(addr);
} else {
@@ -7259,20 +7387,11 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
/* Load/store exclusive word. */
addr = tcg_temp_local_new();
load_reg_var(s, addr, rn);
+ tcg_gen_addi_i32(addr, addr, (insn & 0xff) << 2);
if (insn & (1 << 20)) {
- gen_helper_mark_exclusive(cpu_env, addr);
- tmp = gen_ld32(addr, IS_USER(s));
- store_reg(s, rd, tmp);
+ gen_load_exclusive(s, rs, 15, addr, 2);
} else {
- int label = gen_new_label();
- tmp2 = tcg_temp_local_new();
- gen_helper_test_exclusive(tmp2, cpu_env, addr);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
- tmp = load_reg(s, rs);
- gen_st32(tmp, addr, IS_USER(s));
- gen_set_label(label);
- tcg_gen_mov_i32(cpu_R[rd], tmp2);
- tcg_temp_free(tmp2);
+ gen_store_exclusive(s, rd, rs, 15, addr, 2);
}
tcg_temp_free(addr);
} else if ((insn & (1 << 6)) == 0) {
@@ -7300,56 +7419,17 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
store_reg(s, 15, tmp);
} else {
/* Load/store exclusive byte/halfword/doubleword. */
- /* ??? These are not really atomic. However we know
- we never have multiple CPUs running in parallel,
- so it is good enough. */
+ ARCH(7);
op = (insn >> 4) & 0x3;
+ if (op == 2) {
+ goto illegal_op;
+ }
addr = tcg_temp_local_new();
load_reg_var(s, addr, rn);
if (insn & (1 << 20)) {
- gen_helper_mark_exclusive(cpu_env, addr);
- switch (op) {
- case 0:
- tmp = gen_ld8u(addr, IS_USER(s));
- break;
- case 1:
- tmp = gen_ld16u(addr, IS_USER(s));
- break;
- case 3:
- tmp = gen_ld32(addr, IS_USER(s));
- tcg_gen_addi_i32(addr, addr, 4);
- tmp2 = gen_ld32(addr, IS_USER(s));
- store_reg(s, rd, tmp2);
- break;
- default:
- goto illegal_op;
- }
- store_reg(s, rs, tmp);
+ gen_load_exclusive(s, rs, rd, addr, op);
} else {
- int label = gen_new_label();
- tmp2 = tcg_temp_local_new();
- gen_helper_test_exclusive(tmp2, cpu_env, addr);
- tcg_gen_brcondi_i32(TCG_COND_NE, tmp2, 0, label);
- tmp = load_reg(s, rs);
- switch (op) {
- case 0:
- gen_st8(tmp, addr, IS_USER(s));
- break;
- case 1:
- gen_st16(tmp, addr, IS_USER(s));
- break;
- case 3:
- gen_st32(tmp, addr, IS_USER(s));
- tcg_gen_addi_i32(addr, addr, 4);
- tmp = load_reg(s, rd);
- gen_st32(tmp, addr, IS_USER(s));
- break;
- default:
- goto illegal_op;
- }
- gen_set_label(label);
- tcg_gen_mov_i32(cpu_R[rm], tmp2);
- tcg_temp_free(tmp2);
+ gen_store_exclusive(s, rm, rs, rd, addr, op);
}
tcg_temp_free(addr);
}
@@ -7845,16 +7925,16 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
}
break;
case 3: /* Special control operations. */
+ ARCH(7);
op = (insn >> 4) & 0xf;
switch (op) {
case 2: /* clrex */
- gen_helper_clrex(cpu_env);
+ gen_clrex(s);
break;
case 4: /* dsb */
case 5: /* dmb */
case 6: /* isb */
/* These execute as NOPs. */
- ARCH(7);
break;
default:
goto illegal_op;
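For reference when reading do_strex() above: the fields that gen_store_exclusive() packs into env->exclusive_info on the user-mode path decode as follows (layout taken directly from the patch):

    uint32_t info = env->exclusive_info;
    int size = info & 0xf;          /* 0 = byte, 1 = halfword, 2 = word, 3 = doubleword */
    int rd   = (info >> 4) & 0xf;   /* status register: 0 on success, 1 on failure */
    int rt   = (info >> 8) & 0xf;   /* register holding the value to store */
    int rt2  = (info >> 12) & 0xf;  /* second source register, used by strexd */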