author     Peter Maydell <peter.maydell@linaro.org>  2020-02-24 17:28:45 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2020-02-28 16:14:57 +0000
commit     2677cf9f92a5319bb995927f9225940414ce879d (patch)
tree       33052fe3e31a742b3510f32e25693d2d645b48c2 /target/arm
parent     54117b90ffd8a3977917971c3bd99bb5242710d9 (diff)
target/arm: Implement v8.3-RCPC
The v8.3-RCPC extension implements three new load instructions which
provide slightly weaker consistency guarantees than the existing
load-acquire operations. For QEMU we choose to simply implement them
with a full LDAQ barrier.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20200224172846.13053-3-peter.maydell@linaro.org
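As an orientation aid (not part of the commit), here is a minimal sketch of the
translation strategy described above. It uses only helpers that appear in the
patch below; the function name sketch_ldapr is hypothetical, and the real change
is folded into disas_ldst_atomic rather than being a separate function (the
rs/a/r encoding checks and the SP alignment check are omitted here):

/*
 * Simplified sketch, not the literal patch: when FEAT_LRCPC is present,
 * LDAPR/LDAPRH/LDAPRB become an ordinary GPR load followed by a full
 * load-acquire barrier, which is stronger than the architecturally
 * required "load-acquire processor consistent" ordering.
 */
static void sketch_ldapr(DisasContext *s, int rt, int rn, int size)
{
    TCGv_i64 clean_addr;

    if (!dc_isar_feature(aa64_rcpc_8_3, s)) {
        unallocated_encoding(s);            /* FEAT_LRCPC not implemented */
        return;
    }
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
    do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
              true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ); /* over-approximate as full LDAQ */
}

With the cpu64.c hunk below, the feature is advertised by the 'max' CPU model
via ID_AA64ISAR1.LRCPC.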
Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/cpu.h            5
-rw-r--r--  target/arm/cpu64.c          1
-rw-r--r--  target/arm/translate-a64.c  24
3 files changed, 30 insertions, 0 deletions
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 6013287f62..ff30985ead 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3774,6 +3774,11 @@ static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id)
        FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
}

+static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
+}
+
/*
 * Feature tests for "does this exist in either 32-bit or 64-bit?"
 */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 8f2a43c938..18c7b40f98 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -654,6 +654,7 @@ static void aarch64_max_initfn(Object *obj)
        t = FIELD_DP64(t, ID_AA64ISAR1, SB, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, SPECRES, 1);
        t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1);
+        t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 1); /* ARMv8.3-RCPC */
        cpu->isar.id_aa64isar1 = t;

        t = cpu->isar.id_aa64pfr0;
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index 596bf4cf73..7a066fb7cb 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -3142,6 +3142,8 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
    int rs = extract32(insn, 16, 5);
    int rn = extract32(insn, 5, 5);
    int o3_opc = extract32(insn, 12, 4);
+    bool r = extract32(insn, 22, 1);
+    bool a = extract32(insn, 23, 1);
    TCGv_i64 tcg_rs, clean_addr;
    AtomicThreeOpFn *fn;
@@ -3177,6 +3179,13 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
    case 010: /* SWP */
        fn = tcg_gen_atomic_xchg_i64;
        break;
+    case 014: /* LDAPR, LDAPRH, LDAPRB */
+        if (!dc_isar_feature(aa64_rcpc_8_3, s) ||
+            rs != 31 || a != 1 || r != 0) {
+            unallocated_encoding(s);
+            return;
+        }
+        break;
    default:
        unallocated_encoding(s);
        return;
@@ -3186,6 +3195,21 @@ static void disas_ldst_atomic(DisasContext *s, uint32_t insn,
        gen_check_sp_alignment(s);
    }
    clean_addr = clean_data_tbi(s, cpu_reg_sp(s, rn));
+
+    if (o3_opc == 014) {
+        /*
+         * LDAPR* are a special case because they are a simple load, not a
+         * fetch-and-do-something op.
+         * The architectural consistency requirements here are weaker than
+         * full load-acquire (we only need "load-acquire processor consistent"),
+         * but we choose to implement them as full LDAQ.
+         */
+        do_gpr_ld(s, cpu_reg(s, rt), clean_addr, size, false, false,
+                  true, rt, disas_ldst_compute_iss_sf(size, false, 0), true);
+        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
+        return;
+    }
+
    tcg_rs = read_cpu_reg(s, rs, true);

    if (o3_opc == 1) { /* LDCLR */