| author | Peter Maydell <peter.maydell@linaro.org> | 2023-07-24 18:43:33 +0100 |
|---|---|---|
| committer | Peter Maydell <peter.maydell@linaro.org> | 2023-08-31 11:05:04 +0100 |
| commit | b8f7959f28c4f36496bc0a694fa28bf5078152c5 (patch) | |
| tree | 2c6eed0075e902c991841ad3c8a5bcb248c58857 | |
| parent | 5ec008a2bd7e928d41d2a158120223311742d860 (diff) | |
target/arm: Do all "ARM_FEATURE_X implies Y" checks in post_init
Where architecturally one ARM_FEATURE_X flag implies another
ARM_FEATURE_Y, we allow the CPU init function to only set X, and then
set Y for it. Currently we do this in two places -- we set a few
flags in arm_cpu_post_init() because we need them to decide which
properties to create on the CPU object, and then we do the rest in
arm_cpu_realizefn(). However, this is fragile, because it's easy to
add a new property and not notice that this means that an X-implies-Y
check now has to move from realize to post-init.
As a specific example, the pmsav7-dregion property is conditional
on ARM_FEATURE_PMSA && ARM_FEATURE_V7, which means it won't appear
on the Cortex-M33 and -M55, because they set ARM_FEATURE_V8 and
rely on V8-implies-V7, which doesn't happen until the realizefn.
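(For reference, the check in arm_cpu_post_init() that creates this property looks roughly like the sketch below; the feature bits and the pmsav7-dregion property are as described above, but the Property variable name and exact surrounding code are illustrative rather than copied from cpu.c:)

    /* Sketch: property creation in arm_cpu_post_init(). The pmsav7-dregion
     * property only appears if both feature bits are already set at this
     * point, which is why V8-implies-V7 must have been applied before we
     * get here. (Property variable name is illustrative.)
     */
    if (arm_feature(&cpu->env, ARM_FEATURE_PMSA) &&
        arm_feature(&cpu->env, ARM_FEATURE_V7)) {
        qdev_property_add_static(DEVICE(obj), &pmsav7_dregion_property);
    }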
Move all of these X-implies-Y checks into a new function, which
we call at the top of arm_cpu_post_init(), so the feature bits
are available at that point.
This does now give us the reverse issue, that if there's a feature
bit which is enabled or disabled by the setting of a property, then the
X-implies-Y checks that depend on that property need to stay in
realize, not in this new function. But the only one of those is the
"EL3 implies VBAR" check, which is already in the right place, so
putting things this way round seems better to me.
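(For comparison, that property-dependent case looks roughly like the sketch below; has_el3 is the field backing the "has_el3" CPU property, and the exact code in arm_cpu_realizefn() is approximated:)

    /* Sketch: this implication depends on the has_el3 property, which is
     * only known once properties have been set, so it must stay in
     * arm_cpu_realizefn() rather than move into the new post-init helper.
     */
    if (!cpu->has_el3) {
        unset_feature(env, ARM_FEATURE_EL3);
    }
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        set_feature(env, ARM_FEATURE_VBAR);
    }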
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20230724174335.2150499-2-peter.maydell@linaro.org
-rw-r--r-- | target/arm/cpu.c | 179 |
1 file changed, 97 insertions, 82 deletions
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 17540300fe..0bb0585441 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1356,17 +1356,108 @@ unsigned int gt_cntfrq_period_ns(ARMCPU *cpu)
       NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1;
 }
 
+static void arm_cpu_propagate_feature_implications(ARMCPU *cpu)
+{
+    CPUARMState *env = &cpu->env;
+    bool no_aa32 = false;
+
+    /*
+     * Some features automatically imply others: set the feature
+     * bits explicitly for these cases.
+     */
+
+    if (arm_feature(env, ARM_FEATURE_M)) {
+        set_feature(env, ARM_FEATURE_PMSA);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        if (arm_feature(env, ARM_FEATURE_M)) {
+            set_feature(env, ARM_FEATURE_V7);
+        } else {
+            set_feature(env, ARM_FEATURE_V7VE);
+        }
+    }
+
+    /*
+     * There exist AArch64 cpus without AArch32 support. When KVM
+     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
+     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
+     * As a general principle, we also do not make ID register
+     * consistency checks anywhere unless using TCG, because only
+     * for TCG would a consistency-check failure be a QEMU bug.
+     */
+    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
+    }
+
+    if (arm_feature(env, ARM_FEATURE_V7VE)) {
+        /*
+         * v7 Virtualization Extensions. In real hardware this implies
+         * EL2 and also the presence of the Security Extensions.
+         * For QEMU, for backwards-compatibility we implement some
+         * CPUs or CPU configs which have no actual EL2 or EL3 but do
+         * include the various other features that V7VE implies.
+         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
+         * Security Extensions is ARM_FEATURE_EL3.
+         */
+        assert(!tcg_enabled() || no_aa32 ||
+               cpu_isar_feature(aa32_arm_div, cpu));
+        set_feature(env, ARM_FEATURE_LPAE);
+        set_feature(env, ARM_FEATURE_V7);
+    }
+    if (arm_feature(env, ARM_FEATURE_V7)) {
+        set_feature(env, ARM_FEATURE_VAPA);
+        set_feature(env, ARM_FEATURE_THUMB2);
+        set_feature(env, ARM_FEATURE_MPIDR);
+        if (!arm_feature(env, ARM_FEATURE_M)) {
+            set_feature(env, ARM_FEATURE_V6K);
+        } else {
+            set_feature(env, ARM_FEATURE_V6);
+        }
+
+        /*
+         * Always define VBAR for V7 CPUs even if it doesn't exist in
+         * non-EL3 configs. This is needed by some legacy boards.
+         */
+        set_feature(env, ARM_FEATURE_VBAR);
+    }
+    if (arm_feature(env, ARM_FEATURE_V6K)) {
+        set_feature(env, ARM_FEATURE_V6);
+        set_feature(env, ARM_FEATURE_MVFR);
+    }
+    if (arm_feature(env, ARM_FEATURE_V6)) {
+        set_feature(env, ARM_FEATURE_V5);
+        if (!arm_feature(env, ARM_FEATURE_M)) {
+            assert(!tcg_enabled() || no_aa32 ||
+                   cpu_isar_feature(aa32_jazelle, cpu));
+            set_feature(env, ARM_FEATURE_AUXCR);
+        }
+    }
+    if (arm_feature(env, ARM_FEATURE_V5)) {
+        set_feature(env, ARM_FEATURE_V4T);
+    }
+    if (arm_feature(env, ARM_FEATURE_LPAE)) {
+        set_feature(env, ARM_FEATURE_V7MP);
+    }
+    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
+        set_feature(env, ARM_FEATURE_CBAR);
+    }
+    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
+        !arm_feature(env, ARM_FEATURE_M)) {
+        set_feature(env, ARM_FEATURE_THUMB_DSP);
+    }
+}
+
 void arm_cpu_post_init(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
 
-    /* M profile implies PMSA. We have to do this here rather than
-     * in realize with the other feature-implication checks because
-     * we look at the PMSA bit to see if we should add some properties.
+    /*
+     * Some features imply others. Figure this out now, because we
+     * are going to look at the feature bits in deciding which
+     * properties to add.
      */
-    if (arm_feature(&cpu->env, ARM_FEATURE_M)) {
-        set_feature(&cpu->env, ARM_FEATURE_PMSA);
-    }
+    arm_cpu_propagate_feature_implications(cpu);
 
     if (arm_feature(&cpu->env, ARM_FEATURE_CBAR) ||
         arm_feature(&cpu->env, ARM_FEATURE_CBAR_RO)) {
@@ -1588,7 +1679,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
     CPUARMState *env = &cpu->env;
     int pagebits;
     Error *local_err = NULL;
-    bool no_aa32 = false;
 
     /* Use pc-relative instructions in system-mode */
 #ifndef CONFIG_USER_ONLY
@@ -1869,81 +1959,6 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         cpu->isar.id_isar3 = u;
     }
 
-    /* Some features automatically imply others: */
-    if (arm_feature(env, ARM_FEATURE_V8)) {
-        if (arm_feature(env, ARM_FEATURE_M)) {
-            set_feature(env, ARM_FEATURE_V7);
-        } else {
-            set_feature(env, ARM_FEATURE_V7VE);
-        }
-    }
-
-    /*
-     * There exist AArch64 cpus without AArch32 support. When KVM
-     * queries ID_ISAR0_EL1 on such a host, the value is UNKNOWN.
-     * Similarly, we cannot check ID_AA64PFR0 without AArch64 support.
-     * As a general principle, we also do not make ID register
-     * consistency checks anywhere unless using TCG, because only
-     * for TCG would a consistency-check failure be a QEMU bug.
-     */
-    if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
-        no_aa32 = !cpu_isar_feature(aa64_aa32, cpu);
-    }
-
-    if (arm_feature(env, ARM_FEATURE_V7VE)) {
-        /* v7 Virtualization Extensions. In real hardware this implies
-         * EL2 and also the presence of the Security Extensions.
-         * For QEMU, for backwards-compatibility we implement some
-         * CPUs or CPU configs which have no actual EL2 or EL3 but do
-         * include the various other features that V7VE implies.
-         * Presence of EL2 itself is ARM_FEATURE_EL2, and of the
-         * Security Extensions is ARM_FEATURE_EL3.
-         */
-        assert(!tcg_enabled() || no_aa32 ||
-               cpu_isar_feature(aa32_arm_div, cpu));
-        set_feature(env, ARM_FEATURE_LPAE);
-        set_feature(env, ARM_FEATURE_V7);
-    }
-    if (arm_feature(env, ARM_FEATURE_V7)) {
-        set_feature(env, ARM_FEATURE_VAPA);
-        set_feature(env, ARM_FEATURE_THUMB2);
-        set_feature(env, ARM_FEATURE_MPIDR);
-        if (!arm_feature(env, ARM_FEATURE_M)) {
-            set_feature(env, ARM_FEATURE_V6K);
-        } else {
-            set_feature(env, ARM_FEATURE_V6);
-        }
-
-        /* Always define VBAR for V7 CPUs even if it doesn't exist in
-         * non-EL3 configs. This is needed by some legacy boards.
-         */
-        set_feature(env, ARM_FEATURE_VBAR);
-    }
-    if (arm_feature(env, ARM_FEATURE_V6K)) {
-        set_feature(env, ARM_FEATURE_V6);
-        set_feature(env, ARM_FEATURE_MVFR);
-    }
-    if (arm_feature(env, ARM_FEATURE_V6)) {
-        set_feature(env, ARM_FEATURE_V5);
-        if (!arm_feature(env, ARM_FEATURE_M)) {
-            assert(!tcg_enabled() || no_aa32 ||
-                   cpu_isar_feature(aa32_jazelle, cpu));
-            set_feature(env, ARM_FEATURE_AUXCR);
-        }
-    }
-    if (arm_feature(env, ARM_FEATURE_V5)) {
-        set_feature(env, ARM_FEATURE_V4T);
-    }
-    if (arm_feature(env, ARM_FEATURE_LPAE)) {
-        set_feature(env, ARM_FEATURE_V7MP);
-    }
-    if (arm_feature(env, ARM_FEATURE_CBAR_RO)) {
-        set_feature(env, ARM_FEATURE_CBAR);
-    }
-    if (arm_feature(env, ARM_FEATURE_THUMB2) &&
-        !arm_feature(env, ARM_FEATURE_M)) {
-        set_feature(env, ARM_FEATURE_THUMB_DSP);
-    }
 
     /*
      * We rely on no XScale CPU having VFP so we can use the same bits in the