author     Peter Maydell <peter.maydell@linaro.org>    2014-01-04 22:15:47 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2014-01-08 19:07:20 +0000
commit     03d05e2d0765512fb960192b6e9f9a41c47282bd (patch)
tree       27dd7454641f6720883f750c2fc4fc03ad36c5fc /target-arm
parent     32b64e860d6e0887b3d2ad36a940c362646146f4 (diff)
target-arm: Widen exclusive-access support struct fields to 64 bits
In preparation for adding support for A64 load/store exclusive instructions,
widen the fields in the CPU state struct that deal with address and data
values for exclusives from 32 to 64 bits. Although in practice AArch64 and
AArch32 exclusive accesses will be generally separate there are some odd
theoretical corner cases (eg you should be able to do the exclusive load in
AArch32, take an exception to AArch64 and successfully do the store exclusive
there), and it's also easier to reason about.

The changes in semantics for the variables are:

  exclusive_addr -> extended to 64 bits; -1ULL for "monitor lost",
    otherwise always < 2^32 for AArch32
  exclusive_val  -> extended to 64 bits. 64 bit exclusives in AArch32 now
    use the high half of exclusive_val instead of a separate exclusive_high
  exclusive_high -> is no longer used in AArch32; extended to 64 bits as
    it will be needed for AArch64's pair-of-64-bit-values exclusives
  exclusive_test -> extended to 64 bits, as it is an address. Since this
    is a linux-user-only field, in arm-linux-user it will always have the
    top 32 bits zero
  exclusive_info -> stays 32 bits, as it is neither data nor address, but
    simply holds register indexes etc. AArch64 will be able to fit all its
    information into 32 bits as well

Note that the refactoring of gen_store_exclusive() coincidentally fixes a
minor bug where ldrexd would incorrectly update the first CPU register even
if the load for the second register faulted.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
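As a rough illustration of the new exclusive_val layout for AArch32 doubleword
exclusives, the sketch below shows how the two 32-bit words of an LDREXD can be
packed into one 64-bit value, mirroring what the tcg_gen_concat_i32_i64() call
added in gen_load_exclusive() does at translation time. The helper name and the
standalone form are illustrative only, not part of the patch.

#include <stdint.h>

/* Illustrative only: pack the two words of an AArch32 LDREXD the same way
 * tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3) does: the first
 * register (rt) ends up in bits [31:0], the second (rt2) in bits [63:32]. */
static uint64_t pack_exclusive_pair(uint32_t lo_word, uint32_t hi_word)
{
    return (uint64_t)lo_word | ((uint64_t)hi_word << 32);
}

/* A lost/cleared monitor is represented as exclusive_addr == -1ULL; any
 * valid AArch32 address stays below 2^32, so it can never collide. */
#define EXCLUSIVE_MONITOR_LOST ((uint64_t)-1)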
Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/cpu.h        8
-rw-r--r--  target-arm/machine.c   12
-rw-r--r--  target-arm/translate.c 65
3 files changed, 49 insertions, 36 deletions
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 983aa31bef..6fbbab2c7f 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -278,11 +278,11 @@ typedef struct CPUARMState {
float_status fp_status;
float_status standard_fp_status;
} vfp;
- uint32_t exclusive_addr;
- uint32_t exclusive_val;
- uint32_t exclusive_high;
+ uint64_t exclusive_addr;
+ uint64_t exclusive_val;
+ uint64_t exclusive_high;
#if defined(CONFIG_USER_ONLY)
- uint32_t exclusive_test;
+ uint64_t exclusive_test;
uint32_t exclusive_info;
#endif
diff --git a/target-arm/machine.c b/target-arm/machine.c
index 74f010f637..8f9e7d4d28 100644
--- a/target-arm/machine.c
+++ b/target-arm/machine.c
@@ -222,9 +222,9 @@ static int cpu_post_load(void *opaque, int version_id)
const VMStateDescription vmstate_arm_cpu = {
.name = "cpu",
- .version_id = 13,
- .minimum_version_id = 13,
- .minimum_version_id_old = 13,
+ .version_id = 14,
+ .minimum_version_id = 14,
+ .minimum_version_id_old = 14,
.pre_save = cpu_pre_save,
.post_load = cpu_post_load,
.fields = (VMStateField[]) {
@@ -253,9 +253,9 @@ const VMStateDescription vmstate_arm_cpu = {
VMSTATE_VARRAY_INT32(cpreg_vmstate_values, ARMCPU,
cpreg_vmstate_array_len,
0, vmstate_info_uint64, uint64_t),
- VMSTATE_UINT32(env.exclusive_addr, ARMCPU),
- VMSTATE_UINT32(env.exclusive_val, ARMCPU),
- VMSTATE_UINT32(env.exclusive_high, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_addr, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_val, ARMCPU),
+ VMSTATE_UINT64(env.exclusive_high, ARMCPU),
VMSTATE_UINT64(env.features, ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_PHYS], ARMCPU),
VMSTATE_TIMER(gt_timer[GTIMER_VIRT], ARMCPU),
diff --git a/target-arm/translate.c b/target-arm/translate.c
index 8bfe95007e..4387547a9c 100644
--- a/target-arm/translate.c
+++ b/target-arm/translate.c
@@ -61,11 +61,10 @@ TCGv_ptr cpu_env;
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
static TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
-static TCGv_i32 cpu_exclusive_addr;
-static TCGv_i32 cpu_exclusive_val;
-static TCGv_i32 cpu_exclusive_high;
+static TCGv_i64 cpu_exclusive_addr;
+static TCGv_i64 cpu_exclusive_val;
#ifdef CONFIG_USER_ONLY
-static TCGv_i32 cpu_exclusive_test;
+static TCGv_i64 cpu_exclusive_test;
static TCGv_i32 cpu_exclusive_info;
#endif
@@ -96,14 +95,12 @@ void arm_translate_init(void)
cpu_VF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, VF), "VF");
cpu_ZF = tcg_global_mem_new_i32(TCG_AREG0, offsetof(CPUARMState, ZF), "ZF");
- cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_addr = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
- cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_val = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_val), "exclusive_val");
- cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
- cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
+ cpu_exclusive_test = tcg_global_mem_new_i64(TCG_AREG0,
offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
offsetof(CPUARMState, exclusive_info), "exclusive_info");
@@ -6758,30 +6755,34 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
default:
abort();
}
- tcg_gen_mov_i32(cpu_exclusive_val, tmp);
- store_reg(s, rt, tmp);
+
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 tmp3 = tcg_temp_new_i32();
+
tcg_gen_addi_i32(tmp2, addr, 4);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
- tcg_gen_mov_i32(cpu_exclusive_high, tmp);
- store_reg(s, rt2, tmp);
+ tcg_gen_concat_i32_i64(cpu_exclusive_val, tmp, tmp3);
+ store_reg(s, rt2, tmp3);
+ } else {
+ tcg_gen_extu_i32_i64(cpu_exclusive_val, tmp);
}
- tcg_gen_mov_i32(cpu_exclusive_addr, addr);
+
+ store_reg(s, rt, tmp);
+ tcg_gen_extu_i32_i64(cpu_exclusive_addr, addr);
}
static void gen_clrex(DisasContext *s)
{
- tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#ifdef CONFIG_USER_ONLY
static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
- tcg_gen_mov_i32(cpu_exclusive_test, addr);
+ tcg_gen_extu_i32_i64(cpu_exclusive_test, addr);
tcg_gen_movi_i32(cpu_exclusive_info,
size | (rd << 4) | (rt << 8) | (rt2 << 12));
gen_exception_insn(s, 4, EXCP_STREX);
@@ -6791,6 +6792,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
TCGv_i32 addr, int size)
{
TCGv_i32 tmp;
+ TCGv_i64 val64, extaddr;
int done_label;
int fail_label;
@@ -6802,7 +6804,11 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
} */
fail_label = gen_new_label();
done_label = gen_new_label();
- tcg_gen_brcond_i32(TCG_COND_NE, addr, cpu_exclusive_addr, fail_label);
+ extaddr = tcg_temp_new_i64();
+ tcg_gen_extu_i32_i64(extaddr, addr);
+ tcg_gen_brcond_i64(TCG_COND_NE, extaddr, cpu_exclusive_addr, fail_label);
+ tcg_temp_free_i64(extaddr);
+
tmp = tcg_temp_new_i32();
switch (size) {
case 0:
@@ -6818,17 +6824,24 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
default:
abort();
}
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_val, fail_label);
- tcg_temp_free_i32(tmp);
+
+ val64 = tcg_temp_new_i64();
if (size == 3) {
TCGv_i32 tmp2 = tcg_temp_new_i32();
+ TCGv_i32 tmp3 = tcg_temp_new_i32();
tcg_gen_addi_i32(tmp2, addr, 4);
- tmp = tcg_temp_new_i32();
- gen_aa32_ld32u(tmp, tmp2, IS_USER(s));
+ gen_aa32_ld32u(tmp3, tmp2, IS_USER(s));
tcg_temp_free_i32(tmp2);
- tcg_gen_brcond_i32(TCG_COND_NE, tmp, cpu_exclusive_high, fail_label);
- tcg_temp_free_i32(tmp);
+ tcg_gen_concat_i32_i64(val64, tmp, tmp3);
+ tcg_temp_free_i32(tmp3);
+ } else {
+ tcg_gen_extu_i32_i64(val64, tmp);
}
+ tcg_temp_free_i32(tmp);
+
+ tcg_gen_brcond_i64(TCG_COND_NE, val64, cpu_exclusive_val, fail_label);
+ tcg_temp_free_i64(val64);
+
tmp = load_reg(s, rt);
switch (size) {
case 0:
@@ -6856,7 +6869,7 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
gen_set_label(fail_label);
tcg_gen_movi_i32(cpu_R[rd], 1);
gen_set_label(done_label);
- tcg_gen_movi_i32(cpu_exclusive_addr, -1);
+ tcg_gen_movi_i64(cpu_exclusive_addr, -1);
}
#endif
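For reference, the overall semantics that the reworked gen_store_exclusive()
emits (and that its in-source pseudocode comment describes) can be written as
plain C over a minimal stand-in for the CPU state. Everything below is an
illustrative sketch under those assumptions, not code from QEMU.

#include <stdint.h>

/* Minimal stand-in for the fields this patch widens to 64 bits. */
struct excl_state {
    uint64_t exclusive_addr;   /* -1ULL means the monitor is lost */
    uint64_t exclusive_val;
};

/* Succeed (return 0, as Rd = 0) only if the monitored address matches and
 * memory still holds the value observed by the load-exclusive; otherwise
 * fail (return 1, as Rd = 1).  The monitor is cleared either way, matching
 * the final tcg_gen_movi_i64(cpu_exclusive_addr, -1) after done_label.
 * Because the comparison is one 64-bit compare, a doubleword STREXD checks
 * both words at once. */
static int store_exclusive_sketch(struct excl_state *s, uint64_t addr,
                                  uint64_t mem_now, uint64_t newval,
                                  uint64_t *mem)
{
    int rd = 1;

    if (s->exclusive_addr == addr && mem_now == s->exclusive_val) {
        *mem = newval;
        rd = 0;
    }
    s->exclusive_addr = -1ULL;
    return rd;
}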