author     Richard Henderson <richard.henderson@linaro.org>  2018-12-13 13:48:08 +0000
committer  Peter Maydell <peter.maydell@linaro.org>          2018-12-13 14:41:24 +0000
commit     2d7137c10fafefe40a0a049ff8a7bd78b66e661f (patch)
tree       1216da5c2346d877845d09b934831eebab22ff38 /target
parent     7c208e0f4171c9e2cc35efc12e1bf264a45c229f (diff)
target/arm: Implement the ARMv8.1-LOR extension
Provide a trivial implementation with zero limited ordering regions, which
causes the LDLAR and STLLR instructions to devolve into the LDAR and STLR
instructions from the base ARMv8.0 instruction set.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20181210150501.7990-4-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
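As a hedged aside (not part of the patch): the guest-visible effect can be checked from EL1, e.g. in a bare-metal test. ID_AA64MMFR1_EL1.LO (bits [19:16]) advertises ARMv8.1-LOR, and LORID_EL1 (encoding s3_0_c10_c4_7, matching the LORID_EL1 reginfo added below) then reports how many limited ordering regions exist; with this patch QEMU reports LO = 1 and LORID_EL1 = 0. The helper names are invented for the sketch.

    #include <stdint.h>

    static inline uint64_t read_id_aa64mmfr1(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, id_aa64mmfr1_el1" : "=r"(v));
        return v;
    }

    static inline uint64_t read_lorid_el1(void)
    {
        uint64_t v;
        __asm__ volatile("mrs %0, s3_0_c10_c4_7" : "=r"(v)); /* LORID_EL1 */
        return v;
    }

    /* Returns -1 if ARMv8.1-LOR is absent, otherwise LORID_EL1.LR (the
     * number of limited ordering regions; 0 with this trivial
     * implementation, so LDLAR/STLLR act as LDAR/STLR). */
    int lor_region_count(void)
    {
        if (((read_id_aa64mmfr1() >> 16) & 0xf) == 0) {
            return -1;
        }
        return read_lorid_el1() & 0xff;
    }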
Diffstat (limited to 'target')
-rw-r--r--  target/arm/cpu.h            |  5
-rw-r--r--  target/arm/cpu64.c          |  1
-rw-r--r--  target/arm/helper.c         | 75
-rw-r--r--  target/arm/translate-a64.c  | 12

4 files changed, 93 insertions(+), 0 deletions(-)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 05ac883b6b..c943f35dd9 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -3340,6 +3340,11 @@ static inline bool isar_feature_aa64_sve(const ARMISARegisters *id)
return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0;
}
+static inline bool isar_feature_aa64_lor(const ARMISARegisters *id)
+{
+ return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;
+}
+
/*
* Forward to the above feature tests given an ARMCPU pointer.
*/
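The new predicate is reached from an ARMCPU pointer through the cpu_isar_feature() wrapper (and from a DisasContext through dc_isar_feature()), which is how the rest of this patch uses it. A minimal sketch of that pattern, assuming the wrapper shape in target/arm/cpu.h; the real macro may differ in detail:

    /* Illustrative only: the real wrapper lives in target/arm/cpu.h. */
    #define cpu_isar_feature(name, cpu) \
        ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); })

    /* Typical call site, as used later in this patch: */
    if (cpu_isar_feature(aa64_lor, cpu)) {
        /* this vCPU advertises ARMv8.1-LOR */
    }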
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index 1a4289c9dd..1d57be0c91 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -326,6 +326,7 @@ static void aarch64_max_initfn(Object *obj)
t = cpu->isar.id_aa64mmfr1;
t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */
+ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);
cpu->isar.id_aa64mmfr1 = t;
/* Replicate the same data to the 32-bit id registers. */
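For context, a sketch of the registerfields.h deposit/extract pairing the hunk above relies on, assuming ID_AA64MMFR1.LO is declared as bits [19:16]; the FIELD() declaration shown is illustrative, the real one lives in target/arm/cpu.h:

    FIELD(ID_AA64MMFR1, LO, 16, 4)              /* illustrative declaration */

    uint64_t t = cpu->isar.id_aa64mmfr1;
    t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1);     /* deposit: advertise LOR */
    cpu->isar.id_aa64mmfr1 = t;

    /* ...which is exactly what the new feature test extracts: */
    bool lor = FIELD_EX64(cpu->isar.id_aa64mmfr1, ID_AA64MMFR1, LO) != 0;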
diff --git a/target/arm/helper.c b/target/arm/helper.c
index d6f8be9f4e..644599b29d 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1281,6 +1281,7 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
/* Begin with base v8.0 state. */
uint32_t valid_mask = 0x3fff;
+ ARMCPU *cpu = arm_env_get_cpu(env);
if (arm_el_is_aa64(env, 3)) {
value |= SCR_FW | SCR_AW; /* these two bits are RES1. */
@@ -1303,6 +1304,9 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
valid_mask &= ~SCR_SMD;
}
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= SCR_TLOR;
+ }
/* Clear all-context RES0 bits. */
value &= valid_mask;
@@ -3963,6 +3967,9 @@ static void hcr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
*/
valid_mask &= ~HCR_TSC;
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ valid_mask |= HCR_TLOR;
+ }
/* Clear RES0 bits. */
value &= valid_mask;
@@ -5018,6 +5025,42 @@ static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
return pfr0;
}
+/* Shared logic between LORID and the rest of the LOR* registers.
+ * Secure state has already been dealt with.
+ */
+static CPAccessResult access_lor_ns(CPUARMState *env)
+{
+ int el = arm_current_el(env);
+
+ if (el < 2 && (arm_hcr_el2_eff(env) & HCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL2;
+ }
+ if (el < 3 && (env->cp15.scr_el3 & SCR_TLOR)) {
+ return CP_ACCESS_TRAP_EL3;
+ }
+ return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_lorid(CPUARMState *env, const ARMCPRegInfo *ri,
+ bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access ok in secure mode. */
+ return CP_ACCESS_OK;
+ }
+ return access_lor_ns(env);
+}
+
+static CPAccessResult access_lor_other(CPUARMState *env,
+ const ARMCPRegInfo *ri, bool isread)
+{
+ if (arm_is_secure_below_el3(env)) {
+ /* Access denied in secure mode. */
+ return CP_ACCESS_TRAP;
+ }
+ return access_lor_ns(env);
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
@@ -5759,6 +5802,38 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &sctlr);
}
+ if (cpu_isar_feature(aa64_lor, cpu)) {
+ /*
+ * A trivial implementation of ARMv8.1-LOR leaves all of these
+ * registers fixed at 0, which indicates that there are zero
+ * supported Limited Ordering regions.
+ */
+ static const ARMCPRegInfo lor_reginfo[] = {
+ { .name = "LORSA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 0,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LOREA_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 1,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORN_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 2,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORC_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 3,
+ .access = PL1_RW, .accessfn = access_lor_other,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ { .name = "LORID_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 4, .opc2 = 7,
+ .access = PL1_R, .accessfn = access_lorid,
+ .type = ARM_CP_CONST, .resetvalue = 0 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, lor_reginfo);
+ }
+
if (cpu_isar_feature(aa64_sve, cpu)) {
define_one_arm_cp_reg(cpu, &zcr_el1_reginfo);
if (arm_feature(env, ARM_FEATURE_EL2)) {
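A hedged restatement of the access rules the new accessfns implement, written as a free-standing helper purely for illustration (EL2/EL3 presence checks are omitted, as in the patch itself):

    /* Illustration only: mirrors access_lorid()/access_lor_other() above. */
    typedef enum { OK, TRAP, TRAP_EL2, TRAP_EL3 } LorAccess;

    static LorAccess lor_access(bool secure_below_el3, bool is_lorid,
                                int current_el, bool hcr_tlor, bool scr_tlor)
    {
        if (secure_below_el3) {
            /* LORID_EL1 stays readable in Secure state (and reads as 0);
             * the other LOR registers trap. */
            return is_lorid ? OK : TRAP;
        }
        if (current_el < 2 && hcr_tlor) {
            return TRAP_EL2;          /* HCR_EL2.TLOR is checked first */
        }
        if (current_el < 3 && scr_tlor) {
            return TRAP_EL3;          /* then SCR_EL3.TLOR */
        }
        return OK;
    }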
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index fd36425f1a..e1da1e4d6f 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -2290,6 +2290,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
}
return;
+ case 0x8: /* STLLR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* StoreLORelease is the same as Store-Release for QEMU. */
+ /* fall through */
case 0x9: /* STLR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
@@ -2301,6 +2307,12 @@ static void disas_ldst_excl(DisasContext *s, uint32_t insn)
disas_ldst_compute_iss_sf(size, false, 0), is_lasr);
return;
+ case 0xc: /* LDLAR */
+ if (!dc_isar_feature(aa64_lor, s)) {
+ break;
+ }
+ /* LoadLOAcquire is the same as Load-Acquire for QEMU. */
+ /* fall through */
case 0xd: /* LDAR */
/* Generate ISS for non-exclusive accesses including LASR. */
if (rn == 31) {
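For reference, a hedged note on where the new case values come from: the switch in disas_ldst_excl() packs the o2:L:o1:o0 bits of the load/store-exclusive encoding group into its index (the construction below mirrors the existing decode; the variable name is illustrative), which is why the LOR forms land right next to their ARMv8.0 counterparts and can simply fall through.

    /* o2 = insn[23], L = insn[22], o1 = insn[21], o0 = insn[15] */
    int o2_L_o1_o0 = extract32(insn, 21, 3) * 2 | extract32(insn, 15, 1);

    /* 0x8 = STLLR -> falls through to 0x9 = STLR
     * 0xc = LDLAR -> falls through to 0xd = LDAR */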