-rw-r--r--   hw/arm/boot.c             | 95
-rw-r--r--   target/arm/arm-powerctl.c | 53
-rw-r--r--   target/arm/cpu.c          | 95
-rw-r--r--   target/arm/cpu.h          | 22
4 files changed, 141 insertions, 124 deletions
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 24fa169060..84ea6a807a 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -722,84 +722,35 @@ static void do_cpu_reset(void *opaque)
             cpu_set_pc(cs, entry);
         } else {
-            /* If we are booting Linux then we need to check whether we are
-             * booting into secure or non-secure state and adjust the state
-             * accordingly. Out of reset, ARM is defined to be in secure state
-             * (SCR.NS = 0), we change that here if non-secure boot has been
-             * requested.
+            /*
+             * If we are booting Linux then we might need to do so at:
+             *  - AArch64 NS EL2 or NS EL1
+             *  - AArch32 Secure SVC (EL3)
+             *  - AArch32 NS Hyp (EL2)
+             *  - AArch32 NS SVC (EL1)
+             * Configure the CPU in the way boot firmware would do to
+             * drop us down to the appropriate level.
              */
-            if (arm_feature(env, ARM_FEATURE_EL3)) {
-                /* AArch64 is defined to come out of reset into EL3 if enabled.
-                 * If we are booting Linux then we need to adjust our EL as
-                 * Linux expects us to be in EL2 or EL1. AArch32 resets into
-                 * SVC, which Linux expects, so no privilege/exception level to
-                 * adjust.
-                 */
-                if (env->aarch64) {
-                    env->cp15.scr_el3 |= SCR_RW;
-                    if (arm_feature(env, ARM_FEATURE_EL2)) {
-                        env->cp15.hcr_el2 |= HCR_RW;
-                        env->pstate = PSTATE_MODE_EL2h;
-                    } else {
-                        env->pstate = PSTATE_MODE_EL1h;
-                    }
-                    if (cpu_isar_feature(aa64_pauth, cpu)) {
-                        env->cp15.scr_el3 |= SCR_API | SCR_APK;
-                    }
-                    if (cpu_isar_feature(aa64_mte, cpu)) {
-                        env->cp15.scr_el3 |= SCR_ATA;
-                    }
-                    if (cpu_isar_feature(aa64_sve, cpu)) {
-                        env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
-                        env->vfp.zcr_el[3] = 0xf;
-                    }
-                    if (cpu_isar_feature(aa64_sme, cpu)) {
-                        env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
-                        env->cp15.scr_el3 |= SCR_ENTP2;
-                        env->vfp.smcr_el[3] = 0xf;
-                    }
-                    if (cpu_isar_feature(aa64_hcx, cpu)) {
-                        env->cp15.scr_el3 |= SCR_HXEN;
-                    }
-                    if (cpu_isar_feature(aa64_fgt, cpu)) {
-                        env->cp15.scr_el3 |= SCR_FGTEN;
-                    }
+            int target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;
 
-                    /* AArch64 kernels never boot in secure mode */
-                    assert(!info->secure_boot);
-                    /* This hook is only supported for AArch32 currently:
-                     * bootloader_aarch64[] will not call the hook, and
-                     * the code above has already dropped us into EL2 or EL1.
-                     */
-                    assert(!info->secure_board_setup);
-                }
-
-                if (arm_feature(env, ARM_FEATURE_EL2)) {
-                    /* If we have EL2 then Linux expects the HVC insn to work */
-                    env->cp15.scr_el3 |= SCR_HCE;
-                }
-
-                /* Set to non-secure if not a secure boot */
-                if (!info->secure_boot &&
-                    (cs != first_cpu || !info->secure_board_setup)) {
-                    /* Linux expects non-secure state */
-                    env->cp15.scr_el3 |= SCR_NS;
-                    /* Set NSACR.{CP11,CP10} so NS can access the FPU */
-                    env->cp15.nsacr |= 3 << 10;
-                }
-            }
-
-            if (!env->aarch64 && !info->secure_boot &&
-                arm_feature(env, ARM_FEATURE_EL2)) {
+            if (env->aarch64) {
                 /*
-                 * This is an AArch32 boot not to Secure state, and
-                 * we have Hyp mode available, so boot the kernel into
-                 * Hyp mode. This is not how the CPU comes out of reset,
-                 * so we need to manually put it there.
+                 * AArch64 kernels never boot in secure mode, and we don't
+                 * support the secure_board_setup hook for AArch64.
                  */
-                cpsr_write(env, ARM_CPU_MODE_HYP, CPSR_M, CPSRWriteRaw);
+                assert(!info->secure_boot);
+                assert(!info->secure_board_setup);
+            } else {
+                if (arm_feature(env, ARM_FEATURE_EL3) &&
+                    (info->secure_boot ||
+                     (info->secure_board_setup && cs == first_cpu))) {
+                    /* Start this CPU in Secure SVC */
+                    target_el = 3;
+                }
             }
 
+            arm_emulate_firmware_reset(cs, target_el);
+
             if (cs == first_cpu) {
                 AddressSpace *as = arm_boot_address_space(cpu, info);
 
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index 326a03153d..c078849403 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -65,60 +65,9 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
 
     /* Initialize the cpu we are turning on */
     cpu_reset(target_cpu_state);
+    arm_emulate_firmware_reset(target_cpu_state, info->target_el);
     target_cpu_state->halted = 0;
 
-    if (info->target_aa64) {
-        if ((info->target_el < 3) && arm_feature(&target_cpu->env,
-                                                 ARM_FEATURE_EL3)) {
-            /*
-             * As target mode is AArch64, we need to set lower
-             * exception level (the requested level 2) to AArch64
-             */
-            target_cpu->env.cp15.scr_el3 |= SCR_RW;
-        }
-
-        if ((info->target_el < 2) && arm_feature(&target_cpu->env,
-                                                 ARM_FEATURE_EL2)) {
-            /*
-             * As target mode is AArch64, we need to set lower
-             * exception level (the requested level 1) to AArch64
-             */
-            target_cpu->env.cp15.hcr_el2 |= HCR_RW;
-        }
-
-        target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true);
-    } else {
-        /* We are requested to boot in AArch32 mode */
-        static const uint32_t mode_for_el[] = { 0,
-                                                ARM_CPU_MODE_SVC,
-                                                ARM_CPU_MODE_HYP,
-                                                ARM_CPU_MODE_SVC };
-
-        cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M,
-                   CPSRWriteRaw);
-    }
-
-    if (info->target_el == 3) {
-        /* Processor is in secure mode */
-        target_cpu->env.cp15.scr_el3 &= ~SCR_NS;
-    } else {
-        /* Processor is not in secure mode */
-        target_cpu->env.cp15.scr_el3 |= SCR_NS;
-
-        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
-        target_cpu->env.cp15.nsacr |= 3 << 10;
-
-        /*
-         * If QEMU is providing the equivalent of EL3 firmware, then we need
-         * to make sure a CPU targeting EL2 comes out of reset with a
-         * functional HVC insn.
-         */
-        if (arm_feature(&target_cpu->env, ARM_FEATURE_EL3)
-            && info->target_el == 2) {
-            target_cpu->env.cp15.scr_el3 |= SCR_HCE;
-        }
-    }
-
     /* We check if the started CPU is now at the correct level */
     assert(info->target_el == arm_current_el(&target_cpu->env));
 
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 6c6c551573..aa4e006f21 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -553,6 +553,101 @@ static void arm_cpu_reset_hold(Object *obj)
     }
 }
 
+void arm_emulate_firmware_reset(CPUState *cpustate, int target_el)
+{
+    ARMCPU *cpu = ARM_CPU(cpustate);
+    CPUARMState *env = &cpu->env;
+    bool have_el3 = arm_feature(env, ARM_FEATURE_EL3);
+    bool have_el2 = arm_feature(env, ARM_FEATURE_EL2);
+
+    /*
+     * Check we have the EL we're aiming for. If that is the
+     * highest implemented EL, then cpu_reset has already done
+     * all the work.
+     */
+    switch (target_el) {
+    case 3:
+        assert(have_el3);
+        return;
+    case 2:
+        assert(have_el2);
+        if (!have_el3) {
+            return;
+        }
+        break;
+    case 1:
+        if (!have_el3 && !have_el2) {
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    if (have_el3) {
+        /*
+         * Set the EL3 state so code can run at EL2. This should match
+         * the requirements set by Linux in its booting spec.
+         */
+        if (env->aarch64) {
+            env->cp15.scr_el3 |= SCR_RW;
+            if (cpu_isar_feature(aa64_pauth, cpu)) {
+                env->cp15.scr_el3 |= SCR_API | SCR_APK;
+            }
+            if (cpu_isar_feature(aa64_mte, cpu)) {
+                env->cp15.scr_el3 |= SCR_ATA;
+            }
+            if (cpu_isar_feature(aa64_sve, cpu)) {
+                env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
+                env->vfp.zcr_el[3] = 0xf;
+            }
+            if (cpu_isar_feature(aa64_sme, cpu)) {
+                env->cp15.cptr_el[3] |= R_CPTR_EL3_ESM_MASK;
+                env->cp15.scr_el3 |= SCR_ENTP2;
+                env->vfp.smcr_el[3] = 0xf;
+            }
+            if (cpu_isar_feature(aa64_hcx, cpu)) {
+                env->cp15.scr_el3 |= SCR_HXEN;
+            }
+            if (cpu_isar_feature(aa64_fgt, cpu)) {
+                env->cp15.scr_el3 |= SCR_FGTEN;
+            }
+        }
+
+        if (target_el == 2) {
+            /* If the guest is at EL2 then Linux expects the HVC insn to work */
+            env->cp15.scr_el3 |= SCR_HCE;
+        }
+
+        /* Put CPU into non-secure state */
+        env->cp15.scr_el3 |= SCR_NS;
+        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
+        env->cp15.nsacr |= 3 << 10;
+    }
+
+    if (have_el2 && target_el < 2) {
+        /* Set EL2 state so code can run at EL1. */
+        if (env->aarch64) {
+            env->cp15.hcr_el2 |= HCR_RW;
+        }
+    }
+
+    /* Set the CPU to the desired state */
+    if (env->aarch64) {
+        env->pstate = aarch64_pstate_mode(target_el, true);
+    } else {
+        static const uint32_t mode_for_el[] = {
+            0,
+            ARM_CPU_MODE_SVC,
+            ARM_CPU_MODE_HYP,
+            ARM_CPU_MODE_SVC,
+        };
+
+        cpsr_write(env, mode_for_el[target_el], CPSR_M, CPSRWriteRaw);
+    }
+}
+
+
 #if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
 
 static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
 
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index a9edfb8353..76d4cef9e3 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -1149,6 +1149,28 @@ int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
 int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                              int cpuid, DumpState *s);
 
+/**
+ * arm_emulate_firmware_reset: Emulate firmware CPU reset handling
+ * @cpu: CPU (which must have been freshly reset)
+ * @target_el: exception level to put the CPU into
+ * @secure: whether to put the CPU in secure state
+ *
+ * When QEMU is directly running a guest kernel at a lower level than
+ * EL3 it implicitly emulates some aspects of the guest firmware.
+ * This includes that on reset we need to configure the parts of the
+ * CPU corresponding to EL3 so that the real guest code can run at its
+ * lower exception level. This function does that post-reset CPU setup,
+ * for when we do direct boot of a guest kernel, and for when we
+ * emulate PSCI and similar firmware interfaces starting a CPU at a
+ * lower exception level.
+ *
+ * @target_el must be an EL implemented by the CPU between 1 and 3.
+ * We do not support dropping into a Secure EL other than 3.
+ *
+ * It is the responsibility of the caller to call arm_rebuild_hflags().
+ */
+void arm_emulate_firmware_reset(CPUState *cpustate, int target_el);
+
 #ifdef TARGET_AARCH64
 int aarch64_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg);
 int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
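
For reference, a minimal sketch of how a caller might use the new helper. This example is illustrative and not part of the patch: the wrapper function start_cpu_like_firmware() and its EL-selection policy are assumptions, while cpu_reset(), arm_feature(), arm_emulate_firmware_reset() and arm_rebuild_hflags() are the QEMU interfaces the patch itself adds or references.

/*
 * Illustrative sketch only (not part of this patch): bring a freshly
 * reset vCPU to the exception level that boot firmware would normally
 * hand over at, in the same way hw/arm/boot.c and arm-powerctl.c now do.
 * Assumes QEMU's usual target/arm context and includes.
 */
static void start_cpu_like_firmware(CPUState *cs)   /* hypothetical helper */
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;

    /* The CPU must be freshly reset before the helper is called */
    cpu_reset(cs);

    /* Assumed policy: prefer NS EL2 when implemented, otherwise NS EL1 */
    target_el = arm_feature(env, ARM_FEATURE_EL2) ? 2 : 1;

    /* Do the EL3/EL2 setup that real boot firmware would have done */
    arm_emulate_firmware_reset(cs, target_el);

    /* The new doc comment makes this the caller's responsibility */
    arm_rebuild_hflags(env);
}

As the cpu.h comment states, rebuilding the cached hflags is deliberately left to the caller, which is why the sketch ends with arm_rebuild_hflags().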