Diffstat (limited to 'target/arm')
-rw-r--r-- | target/arm/arm-powerctl.c  | 202
-rw-r--r-- | target/arm/arm-powerctl.h  |   2
-rw-r--r-- | target/arm/cpu.c           |   4
-rw-r--r-- | target/arm/cpu.h           |  18
-rw-r--r-- | target/arm/helper.c        | 219
-rw-r--r-- | target/arm/kvm.c           |   7
-rw-r--r-- | target/arm/machine.c       |  41
-rw-r--r-- | target/arm/op_helper.c     |  50
-rw-r--r-- | target/arm/psci.c          |   4
-rw-r--r-- | target/arm/translate-a64.c |   8
-rw-r--r-- | target/arm/translate.c     |  20
11 files changed, 385 insertions, 190 deletions
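Before the patch body: the recurring change in helper.c is that TLB maintenance calls move from a -1-terminated vararg list of MMU indexes to a single bitmap argument, and the hand-rolled CPU_FOREACH loops become *_all_cpus_synced variants, so inner-shareable flushes can be queued as work items on the other vCPUs under MTTCG. The following is a minimal standalone sketch of the bitmap convention only; the enum values and flush_by_idxmap() are illustrative stand-ins, not the real QEMU declarations.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for a few of target/arm's MMU index enumerators; the real
 * definitions live in target/arm/cpu.h. */
typedef enum {
    ARMMMUIdx_S12NSE0 = 0,
    ARMMMUIdx_S12NSE1 = 1,
    ARMMMUIdx_S1E2 = 2,
    ARMMMUIdx_S2NS = 4,
} ARMMMUIdx;

/* Stub playing the role of tlb_flush_by_mmuidx(cs, idxmap): the reworked
 * API takes one bitmap covering every index to flush. */
static void flush_by_idxmap(uint16_t idxmap)
{
    for (int i = 0; i < 16; i++) {
        if (idxmap & (1u << i)) {
            printf("flush TLB entries for MMU index %d\n", i);
        }
    }
}

int main(void)
{
    /* Before this patch the call was a vararg list terminated by -1:
     *   tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0,
     *                       ARMMMUIdx_S2NS, -1);
     * Now the same request is a single integer, cheap to copy into an
     * asynchronous work item bound for another vCPU. */
    uint16_t idxmap = (1 << ARMMMUIdx_S12NSE1) |
                      (1 << ARMMMUIdx_S12NSE0) |
                      (1 << ARMMMUIdx_S2NS);
    flush_by_idxmap(idxmap);
    return 0;
}

The bitmap form is what makes the cross-vCPU variants practical: one integer can be captured in a queued work item, where a vararg list cannot.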
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c index fbb7a15daa..25207cb850 100644 --- a/target/arm/arm-powerctl.c +++ b/target/arm/arm-powerctl.c @@ -14,6 +14,7 @@ #include "internals.h" #include "arm-powerctl.h" #include "qemu/log.h" +#include "qemu/main-loop.h" #include "exec/exec-all.h" #ifndef DEBUG_ARM_POWERCTL @@ -48,11 +49,93 @@ CPUState *arm_get_cpu_by_id(uint64_t id) return NULL; } +struct CpuOnInfo { + uint64_t entry; + uint64_t context_id; + uint32_t target_el; + bool target_aa64; +}; + + +static void arm_set_cpu_on_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + struct CpuOnInfo *info = (struct CpuOnInfo *) data.host_ptr; + + /* Initialize the cpu we are turning on */ + cpu_reset(target_cpu_state); + target_cpu_state->halted = 0; + + if (info->target_aa64) { + if ((info->target_el < 3) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL3)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 2) to AArch64 + */ + target_cpu->env.cp15.scr_el3 |= SCR_RW; + } + + if ((info->target_el < 2) && arm_feature(&target_cpu->env, + ARM_FEATURE_EL2)) { + /* + * As target mode is AArch64, we need to set lower + * exception level (the requested level 1) to AArch64 + */ + target_cpu->env.cp15.hcr_el2 |= HCR_RW; + } + + target_cpu->env.pstate = aarch64_pstate_mode(info->target_el, true); + } else { + /* We are requested to boot in AArch32 mode */ + static const uint32_t mode_for_el[] = { 0, + ARM_CPU_MODE_SVC, + ARM_CPU_MODE_HYP, + ARM_CPU_MODE_SVC }; + + cpsr_write(&target_cpu->env, mode_for_el[info->target_el], CPSR_M, + CPSRWriteRaw); + } + + if (info->target_el == 3) { + /* Processor is in secure mode */ + target_cpu->env.cp15.scr_el3 &= ~SCR_NS; + } else { + /* Processor is not in secure mode */ + target_cpu->env.cp15.scr_el3 |= SCR_NS; + } + + /* We check if the started CPU is now at the correct level */ + assert(info->target_el == arm_current_el(&target_cpu->env)); + + if (info->target_aa64) { + target_cpu->env.xregs[0] = info->context_id; + target_cpu->env.thumb = false; + } else { + target_cpu->env.regs[0] = info->context_id; + target_cpu->env.thumb = info->entry & 1; + info->entry &= 0xfffffffe; + } + + /* Start the new CPU at the requested address */ + cpu_set_pc(target_cpu_state, info->entry); + + g_free(info); + + /* Finally set the power status */ + assert(qemu_mutex_iothread_locked()); + target_cpu->power_state = PSCI_ON; +} + int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, uint32_t target_el, bool target_aa64) { CPUState *target_cpu_state; ARMCPU *target_cpu; + struct CpuOnInfo *info; + + assert(qemu_mutex_iothread_locked()); DPRINTF("cpu %" PRId64 " (EL %d, %s) @ 0x%" PRIx64 " with R0 = 0x%" PRIx64 "\n", cpuid, target_el, target_aa64 ? 
"aarch64" : "aarch32", entry, @@ -77,7 +160,7 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, } target_cpu = ARM_CPU(target_cpu_state); - if (!target_cpu->powered_off) { + if (target_cpu->power_state == PSCI_ON) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is already on\n", __func__, cpuid); @@ -109,74 +192,54 @@ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, return QEMU_ARM_POWERCTL_INVALID_PARAM; } - /* Initialize the cpu we are turning on */ - cpu_reset(target_cpu_state); - target_cpu->powered_off = false; - target_cpu_state->halted = 0; - - if (target_aa64) { - if ((target_el < 3) && arm_feature(&target_cpu->env, ARM_FEATURE_EL3)) { - /* - * As target mode is AArch64, we need to set lower - * exception level (the requested level 2) to AArch64 - */ - target_cpu->env.cp15.scr_el3 |= SCR_RW; - } - - if ((target_el < 2) && arm_feature(&target_cpu->env, ARM_FEATURE_EL2)) { - /* - * As target mode is AArch64, we need to set lower - * exception level (the requested level 1) to AArch64 - */ - target_cpu->env.cp15.hcr_el2 |= HCR_RW; - } - - target_cpu->env.pstate = aarch64_pstate_mode(target_el, true); - } else { - /* We are requested to boot in AArch32 mode */ - static uint32_t mode_for_el[] = { 0, - ARM_CPU_MODE_SVC, - ARM_CPU_MODE_HYP, - ARM_CPU_MODE_SVC }; - - cpsr_write(&target_cpu->env, mode_for_el[target_el], CPSR_M, - CPSRWriteRaw); - } - - if (target_el == 3) { - /* Processor is in secure mode */ - target_cpu->env.cp15.scr_el3 &= ~SCR_NS; - } else { - /* Processor is not in secure mode */ - target_cpu->env.cp15.scr_el3 |= SCR_NS; - } - - /* We check if the started CPU is now at the correct level */ - assert(target_el == arm_current_el(&target_cpu->env)); - - if (target_aa64) { - target_cpu->env.xregs[0] = context_id; - target_cpu->env.thumb = false; - } else { - target_cpu->env.regs[0] = context_id; - target_cpu->env.thumb = entry & 1; - entry &= 0xfffffffe; + /* + * If another CPU has powered the target on we are in the state + * ON_PENDING and additional attempts to power on the CPU should + * fail (see 6.6 Implementation CPU_ON/CPU_OFF races in the PSCI + * spec) + */ + if (target_cpu->power_state == PSCI_ON_PENDING) { + qemu_log_mask(LOG_GUEST_ERROR, + "[ARM]%s: CPU %" PRId64 " is already powering on\n", + __func__, cpuid); + return QEMU_ARM_POWERCTL_ON_PENDING; } - /* Start the new CPU at the requested address */ - cpu_set_pc(target_cpu_state, entry); + /* To avoid racing with a CPU we are just kicking off we do the + * final bit of preparation for the work in the target CPUs + * context. 
+ */ + info = g_new(struct CpuOnInfo, 1); + info->entry = entry; + info->context_id = context_id; + info->target_el = target_el; + info->target_aa64 = target_aa64; - qemu_cpu_kick(target_cpu_state); + async_run_on_cpu(target_cpu_state, arm_set_cpu_on_async_work, + RUN_ON_CPU_HOST_PTR(info)); /* We are good to go */ return QEMU_ARM_POWERCTL_RET_SUCCESS; } +static void arm_set_cpu_off_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + ARMCPU *target_cpu = ARM_CPU(target_cpu_state); + + assert(qemu_mutex_iothread_locked()); + target_cpu->power_state = PSCI_OFF; + target_cpu_state->halted = 1; + target_cpu_state->exception_index = EXCP_HLT; +} + int arm_set_cpu_off(uint64_t cpuid) { CPUState *target_cpu_state; ARMCPU *target_cpu; + assert(qemu_mutex_iothread_locked()); + DPRINTF("cpu %" PRId64 "\n", cpuid); /* change to the cpu we are powering up */ @@ -185,27 +248,34 @@ int arm_set_cpu_off(uint64_t cpuid) return QEMU_ARM_POWERCTL_INVALID_PARAM; } target_cpu = ARM_CPU(target_cpu_state); - if (target_cpu->powered_off) { + if (target_cpu->power_state == PSCI_OFF) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is already off\n", __func__, cpuid); return QEMU_ARM_POWERCTL_IS_OFF; } - target_cpu->powered_off = true; - target_cpu_state->halted = 1; - target_cpu_state->exception_index = EXCP_HLT; - cpu_loop_exit(target_cpu_state); - /* notreached */ + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, arm_set_cpu_off_async_work, + RUN_ON_CPU_NULL); return QEMU_ARM_POWERCTL_RET_SUCCESS; } +static void arm_reset_cpu_async_work(CPUState *target_cpu_state, + run_on_cpu_data data) +{ + /* Reset the cpu */ + cpu_reset(target_cpu_state); +} + int arm_reset_cpu(uint64_t cpuid) { CPUState *target_cpu_state; ARMCPU *target_cpu; + assert(qemu_mutex_iothread_locked()); + DPRINTF("cpu %" PRId64 "\n", cpuid); /* change to the cpu we are resetting */ @@ -214,15 +284,17 @@ int arm_reset_cpu(uint64_t cpuid) return QEMU_ARM_POWERCTL_INVALID_PARAM; } target_cpu = ARM_CPU(target_cpu_state); - if (target_cpu->powered_off) { + + if (target_cpu->power_state == PSCI_OFF) { qemu_log_mask(LOG_GUEST_ERROR, "[ARM]%s: CPU %" PRId64 " is off\n", __func__, cpuid); return QEMU_ARM_POWERCTL_IS_OFF; } - /* Reset the cpu */ - cpu_reset(target_cpu_state); + /* Queue work to run under the target vCPUs context */ + async_run_on_cpu(target_cpu_state, arm_reset_cpu_async_work, + RUN_ON_CPU_NULL); return QEMU_ARM_POWERCTL_RET_SUCCESS; } diff --git a/target/arm/arm-powerctl.h b/target/arm/arm-powerctl.h index 98ee04989b..04353923c0 100644 --- a/target/arm/arm-powerctl.h +++ b/target/arm/arm-powerctl.h @@ -17,6 +17,7 @@ #define QEMU_ARM_POWERCTL_INVALID_PARAM QEMU_PSCI_RET_INVALID_PARAMS #define QEMU_ARM_POWERCTL_ALREADY_ON QEMU_PSCI_RET_ALREADY_ON #define QEMU_ARM_POWERCTL_IS_OFF QEMU_PSCI_RET_DENIED +#define QEMU_ARM_POWERCTL_ON_PENDING QEMU_PSCI_RET_ON_PENDING /* * arm_get_cpu_by_id: @@ -43,6 +44,7 @@ CPUState *arm_get_cpu_by_id(uint64_t cpuid); * Returns: QEMU_ARM_POWERCTL_RET_SUCCESS on success. * QEMU_ARM_POWERCTL_INVALID_PARAM if bad parameters are provided. * QEMU_ARM_POWERCTL_ALREADY_ON if the CPU was already started. 
+ * QEMU_ARM_POWERCTL_ON_PENDING if the CPU is still powering up */ int arm_set_cpu_on(uint64_t cpuid, uint64_t entry, uint64_t context_id, uint32_t target_el, bool target_aa64); diff --git a/target/arm/cpu.c b/target/arm/cpu.c index 4a069f6985..f7157dc0e5 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -45,7 +45,7 @@ static bool arm_cpu_has_work(CPUState *cs) { ARMCPU *cpu = ARM_CPU(cs); - return !cpu->powered_off + return (cpu->power_state != PSCI_OFF) && cs->interrupt_request & (CPU_INTERRUPT_FIQ | CPU_INTERRUPT_HARD | CPU_INTERRUPT_VFIQ | CPU_INTERRUPT_VIRQ @@ -132,7 +132,7 @@ static void arm_cpu_reset(CPUState *s) env->vfp.xregs[ARM_VFP_MVFR1] = cpu->mvfr1; env->vfp.xregs[ARM_VFP_MVFR2] = cpu->mvfr2; - cpu->powered_off = cpu->start_powered_off; + cpu->power_state = cpu->start_powered_off ? PSCI_OFF : PSCI_ON; s->halted = cpu->start_powered_off; if (arm_feature(env, ARM_FEATURE_IWMMXT)) { diff --git a/target/arm/cpu.h b/target/arm/cpu.h index 0956a54e89..38a8e00908 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -30,6 +30,9 @@ # define TARGET_LONG_BITS 32 #endif +/* ARM processors have a weak memory model */ +#define TCG_GUEST_DEFAULT_MO (0) + #define CPUArchState struct CPUARMState #include "qemu-common.h" @@ -526,6 +529,15 @@ typedef struct CPUARMState { */ typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque); + +/* These values map onto the return values for + * QEMU_PSCI_0_2_FN_AFFINITY_INFO */ +typedef enum ARMPSCIState { + PSCI_OFF = 0, + PSCI_ON = 1, + PSCI_ON_PENDING = 2 +} ARMPSCIState; + /** * ARMCPU: * @env: #CPUARMState @@ -582,8 +594,10 @@ struct ARMCPU { /* Should CPU start in PSCI powered-off state? */ bool start_powered_off; - /* CPU currently in PSCI powered-off state */ - bool powered_off; + + /* Current power state, access guarded by BQL */ + ARMPSCIState power_state; + /* CPU has virtualization extension */ bool has_el2; /* CPU has security extension */ diff --git a/target/arm/helper.c b/target/arm/helper.c index 47250bcf16..bcedb4a808 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -536,41 +536,33 @@ static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri, static void tlbiall_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush(other_cs); - } + tlb_flush_all_cpus_synced(cs); } static void tlbiasid_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush(other_cs); - } + tlb_flush_all_cpus_synced(cs); } static void tlbimva_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_page(other_cs, value & TARGET_PAGE_MASK); - } + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } static void tlbimvaa_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_page(other_cs, value & TARGET_PAGE_MASK); - } + tlb_flush_page_all_cpus_synced(cs, value & TARGET_PAGE_MASK); } static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -578,19 +570,21 @@ static void tlbiall_nsnh_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = ENV_GET_CPU(env); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << 
ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } static void tlbiall_nsnh_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -611,13 +605,13 @@ static void tlbiipas2_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 40); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS)); } static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { @@ -626,9 +620,8 @@ static void tlbiipas2_is_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 40); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S2NS)); } static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -636,17 +629,15 @@ static void tlbiall_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, { CPUState *cs = ENV_GET_CPU(env); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbiall_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -655,18 +646,17 @@ static void tlbimva_hyp_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2)); } static void tlbimva_hyp_is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = value & ~MAKE_64BIT_MASK(0, 12); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E2)); } static const ARMCPRegInfo cp_reginfo[] = { @@ -2542,8 +2532,10 @@ static void vttbr_write(CPUARMState *env, const ARMCPRegInfo *ri, /* Accesses to VTTBR may change the VMID so we must flush the TLB. 
*/ if (raw_read(env, ri) != value) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); raw_write(env, ri, value); } } @@ -2898,29 +2890,33 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env, static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - ARMCPU *cpu = arm_env_get_cpu(env); - CPUState *cs = CPU(cpu); + CPUState *cs = ENV_GET_CPU(env); if (arm_is_secure_below_el3(env)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + CPUState *cs = ENV_GET_CPU(env); bool sec = arm_is_secure_below_el3(env); - CPUState *other_cs; - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); - } else { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + if (sec) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } @@ -2935,13 +2931,19 @@ static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); if (arm_is_secure_below_el3(env)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { if (arm_feature(env, ARM_FEATURE_EL2)) { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, - ARMMMUIdx_S2NS, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); } else { - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S12NSE1, ARMMMUIdx_S12NSE0, -1); + tlb_flush_by_mmuidx(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } } @@ -2952,7 +2954,7 @@ static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E2, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2961,7 +2963,7 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri, ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); - tlb_flush_by_mmuidx(cs, ARMMMUIdx_S1E3, -1); + tlb_flush_by_mmuidx(cs, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -2971,41 +2973,40 @@ static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri, * stage 2 translations, whereas most other scopes only invalidate * stage 1 translations. 
*/ + CPUState *cs = ENV_GET_CPU(env); bool sec = arm_is_secure_below_el3(env); bool has_el2 = arm_feature(env, ARM_FEATURE_EL2); - CPUState *other_cs; - - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1SE1, ARMMMUIdx_S1SE0, -1); - } else if (has_el2) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, ARMMMUIdx_S2NS, -1); - } else { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + + if (sec) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else if (has_el2) { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0) | + (1 << ARMMMUIdx_S2NS)); + } else { + tlb_flush_by_mmuidx_all_cpus_synced(cs, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E2, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_alle3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); - CPU_FOREACH(other_cs) { - tlb_flush_by_mmuidx(other_cs, ARMMMUIdx_S1E3, -1); - } + tlb_flush_by_mmuidx_all_cpus_synced(cs, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3021,11 +3022,13 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t pageaddr = sextract64(value << 12, 0, 56); if (arm_is_secure_below_el3(env)) { - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1SE1, - ARMMMUIdx_S1SE0, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); } else { - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } @@ -3040,7 +3043,7 @@ static void tlbi_aa64_vae2_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E2, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3054,47 +3057,46 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri, CPUState *cs = CPU(cpu); uint64_t pageaddr = sextract64(value << 12, 0, 56); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S1E3, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { + ARMCPU *cpu = arm_env_get_cpu(env); + CPUState *cs = CPU(cpu); bool sec = arm_is_secure_below_el3(env); - CPUState *other_cs; uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - if (sec) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1SE1, - ARMMMUIdx_S1SE0, -1); - } else { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S12NSE1, - ARMMMUIdx_S12NSE0, -1); - } + if (sec) { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1SE1) | + (1 << ARMMMUIdx_S1SE0)); + } else { + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S12NSE1) | + (1 << ARMMMUIdx_S12NSE0)); } } static void 
tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E2, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E2)); } static void tlbi_aa64_vae3is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr = sextract64(value << 12, 0, 56); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S1E3, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S1E3)); } static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, @@ -3116,13 +3118,13 @@ static void tlbi_aa64_ipas2e1_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 48); - tlb_flush_page_by_mmuidx(cs, pageaddr, ARMMMUIdx_S2NS, -1); + tlb_flush_page_by_mmuidx(cs, pageaddr, (1 << ARMMMUIdx_S2NS)); } static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value) { - CPUState *other_cs; + CPUState *cs = ENV_GET_CPU(env); uint64_t pageaddr; if (!arm_feature(env, ARM_FEATURE_EL2) || !(env->cp15.scr_el3 & SCR_NS)) { @@ -3131,9 +3133,8 @@ static void tlbi_aa64_ipas2e1is_write(CPUARMState *env, const ARMCPRegInfo *ri, pageaddr = sextract64(value << 12, 0, 48); - CPU_FOREACH(other_cs) { - tlb_flush_page_by_mmuidx(other_cs, pageaddr, ARMMMUIdx_S2NS, -1); - } + tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, + (1 << ARMMMUIdx_S2NS)); } static CPAccessResult aa64_zva_access(CPUARMState *env, const ARMCPRegInfo *ri, @@ -6769,6 +6770,12 @@ void arm_cpu_do_interrupt(CPUState *cs) arm_cpu_do_interrupt_aarch32(cs); } + /* Hooks may change global state so BQL should be held, also the + * BQL needs to be held for any modification of + * cs->interrupt_request. + */ + g_assert(qemu_mutex_iothread_locked()); + arm_call_el_change_hook(cpu); if (!kvm_enabled()) { diff --git a/target/arm/kvm.c b/target/arm/kvm.c index c00b94e42a..395e986973 100644 --- a/target/arm/kvm.c +++ b/target/arm/kvm.c @@ -488,8 +488,8 @@ int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu) { if (cap_has_mp_state) { struct kvm_mp_state mp_state = { - .mp_state = - cpu->powered_off ? KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE + .mp_state = (cpu->power_state == PSCI_OFF) ? + KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE }; int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); if (ret) { @@ -515,7 +515,8 @@ int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu) __func__, ret, strerror(-ret)); abort(); } - cpu->powered_off = (mp_state.mp_state == KVM_MP_STATE_STOPPED); + cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ? + PSCI_OFF : PSCI_ON; } return 0; diff --git a/target/arm/machine.c b/target/arm/machine.c index fa5ec76090..d8094a840b 100644 --- a/target/arm/machine.c +++ b/target/arm/machine.c @@ -211,6 +211,38 @@ static const VMStateInfo vmstate_cpsr = { .put = put_cpsr, }; +static int get_power(QEMUFile *f, void *opaque, size_t size, + VMStateField *field) +{ + ARMCPU *cpu = opaque; + bool powered_off = qemu_get_byte(f); + cpu->power_state = powered_off ? 
PSCI_OFF : PSCI_ON; + return 0; +} + +static int put_power(QEMUFile *f, void *opaque, size_t size, + VMStateField *field, QJSON *vmdesc) +{ + ARMCPU *cpu = opaque; + + /* Migration should never happen while we transition power states */ + + if (cpu->power_state == PSCI_ON || + cpu->power_state == PSCI_OFF) { + bool powered_off = (cpu->power_state == PSCI_OFF) ? true : false; + qemu_put_byte(f, powered_off); + return 0; + } else { + return 1; + } +} + +static const VMStateInfo vmstate_powered_off = { + .name = "powered_off", + .get = get_power, + .put = put_power, +}; + static void cpu_pre_save(void *opaque) { ARMCPU *cpu = opaque; @@ -329,7 +361,14 @@ const VMStateDescription vmstate_arm_cpu = { VMSTATE_UINT64(env.exception.vaddress, ARMCPU), VMSTATE_TIMER_PTR(gt_timer[GTIMER_PHYS], ARMCPU), VMSTATE_TIMER_PTR(gt_timer[GTIMER_VIRT], ARMCPU), - VMSTATE_BOOL(powered_off, ARMCPU), + { + .name = "power_state", + .version_id = 0, + .size = sizeof(bool), + .info = &vmstate_powered_off, + .flags = VMS_SINGLE, + .offset = 0, + }, VMSTATE_END_OF_LIST() }, .subsections = (const VMStateDescription*[]) { diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c index fb366fdc35..d64c8670fa 100644 --- a/target/arm/op_helper.c +++ b/target/arm/op_helper.c @@ -18,6 +18,7 @@ */ #include "qemu/osdep.h" #include "qemu/log.h" +#include "qemu/main-loop.h" #include "cpu.h" #include "exec/helper-proto.h" #include "internals.h" @@ -435,6 +436,13 @@ void HELPER(yield)(CPUARMState *env) ARMCPU *cpu = arm_env_get_cpu(env); CPUState *cs = CPU(cpu); + /* When running in MTTCG we don't generate jumps to the yield and + * WFE helpers as it won't affect the scheduling of other vCPUs. + * If we wanted to more completely model WFE/SEV so we don't busy + * spin unnecessarily we would need to do something more involved. + */ + g_assert(!parallel_cpus); + /* This is a non-trappable hint instruction that generally indicates * that the guest is currently busy-looping. Yield control back to the * top level loop so that a more deserving VCPU has a chance to run. @@ -487,7 +495,9 @@ void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val) */ env->regs[15] &= (env->thumb ? ~1 : ~3); + qemu_mutex_lock_iothread(); arm_call_el_change_hook(arm_env_get_cpu(env)); + qemu_mutex_unlock_iothread(); } /* Access to user mode registers from privileged modes. 
*/ @@ -735,28 +745,58 @@ void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value) { const ARMCPRegInfo *ri = rip; - ri->writefn(env, ri, value); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + ri->writefn(env, ri, value); + qemu_mutex_unlock_iothread(); + } else { + ri->writefn(env, ri, value); + } } uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; + uint32_t res; + + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + res = ri->readfn(env, ri); + qemu_mutex_unlock_iothread(); + } else { + res = ri->readfn(env, ri); + } - return ri->readfn(env, ri); + return res; } void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value) { const ARMCPRegInfo *ri = rip; - ri->writefn(env, ri, value); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + ri->writefn(env, ri, value); + qemu_mutex_unlock_iothread(); + } else { + ri->writefn(env, ri, value); + } } uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip) { const ARMCPRegInfo *ri = rip; + uint64_t res; - return ri->readfn(env, ri); + if (ri->type & ARM_CP_IO) { + qemu_mutex_lock_iothread(); + res = ri->readfn(env, ri); + qemu_mutex_unlock_iothread(); + } else { + res = ri->readfn(env, ri); + } + + return res; } void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm) @@ -989,7 +1029,9 @@ void HELPER(exception_return)(CPUARMState *env) cur_el, new_el, env->pc); } + qemu_mutex_lock_iothread(); arm_call_el_change_hook(arm_env_get_cpu(env)); + qemu_mutex_unlock_iothread(); return; diff --git a/target/arm/psci.c b/target/arm/psci.c index 64bf82eea1..ade9fe2ede 100644 --- a/target/arm/psci.c +++ b/target/arm/psci.c @@ -127,7 +127,9 @@ void arm_handle_psci_call(ARMCPU *cpu) break; } target_cpu = ARM_CPU(target_cpu_state); - ret = target_cpu->powered_off ? 1 : 0; + + g_assert(qemu_mutex_iothread_locked()); + ret = target_cpu->power_state; break; default: /* Everything above affinity level 0 is always on. */ diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index e61bbd6b3b..e15eae6d41 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -1328,10 +1328,14 @@ static void handle_hint(DisasContext *s, uint32_t insn, s->is_jmp = DISAS_WFI; return; case 1: /* YIELD */ - s->is_jmp = DISAS_YIELD; + if (!parallel_cpus) { + s->is_jmp = DISAS_YIELD; + } return; case 2: /* WFE */ - s->is_jmp = DISAS_WFE; + if (!parallel_cpus) { + s->is_jmp = DISAS_WFE; + } return; case 4: /* SEV */ case 5: /* SEVL */ diff --git a/target/arm/translate.c b/target/arm/translate.c index 4436d8f3a2..abc1f77ee4 100644 --- a/target/arm/translate.c +++ b/target/arm/translate.c @@ -4404,20 +4404,32 @@ static void gen_exception_return(DisasContext *s, TCGv_i32 pc) gen_rfe(s, pc, load_cpu_field(spsr)); } +/* + * For WFI we will halt the vCPU until an IRQ. For WFE and YIELD we + * only call the helper when running single threaded TCG code to ensure + * the next round-robin scheduled vCPU gets a crack. In MTTCG mode we + * just skip this instruction. Currently the SEV/SEVL instructions + * which are *one* of many ways to wake the CPU from WFE are not + * implemented so we can't sleep like WFI does. 
+ */ static void gen_nop_hint(DisasContext *s, int val) { switch (val) { case 1: /* yield */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_YIELD; + if (!parallel_cpus) { + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_YIELD; + } break; case 3: /* wfi */ gen_set_pc_im(s, s->pc); s->is_jmp = DISAS_WFI; break; case 2: /* wfe */ - gen_set_pc_im(s, s->pc); - s->is_jmp = DISAS_WFE; + if (!parallel_cpus) { + gen_set_pc_im(s, s->pc); + s->is_jmp = DISAS_WFE; + } break; case 4: /* sev */ case 5: /* sevl */
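Stepping back from the hunks: the arm-powerctl.c rework follows one pattern throughout. The requesting vCPU no longer mutates the target CPU's state directly (the old code even called cpu_loop_exit() on another CPU's behalf); it packages the request and queues it with async_run_on_cpu(), so the mutation happens in the target vCPU's own context with the BQL held (the work functions assert qemu_mutex_iothread_locked()), and the new PSCI_ON_PENDING state rejects racing CPU_ON calls in the meantime. Below is a minimal standalone sketch of the hand-off and ownership convention; queue_on_target_vcpu() is a stub standing in for QEMU's async_run_on_cpu(), and the struct is trimmed to two fields.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors the patch's CpuOnInfo, trimmed for illustration. */
struct CpuOnInfo {
    uint64_t entry;
    uint64_t context_id;
};

typedef void (*WorkFn)(void *opaque);

/* Stub playing the role of async_run_on_cpu(): the real function queues
 * the item and the target vCPU thread runs it later under the BQL.
 * Here we simply run it inline. */
static void queue_on_target_vcpu(WorkFn fn, void *opaque)
{
    fn(opaque);
}

/* Runs in the target vCPU's context: reset, set PC/r0, flip power state. */
static void power_on_work(void *opaque)
{
    struct CpuOnInfo *info = opaque;

    printf("reset vCPU, PC = 0x%" PRIx64 ", r0 = 0x%" PRIx64 "\n",
           info->entry, info->context_id);
    /* The worker owns the allocation and frees it, just as the patched
     * arm_set_cpu_on_async_work() does with g_free(). */
    free(info);
}

int main(void)
{
    /* The requester packages the parameters instead of touching the
     * target CPU's registers from its own thread. */
    struct CpuOnInfo *info = malloc(sizeof(*info));
    if (!info) {
        return 1;
    }
    info->entry = 0x40000000;
    info->context_id = 0;
    queue_on_target_vcpu(power_on_work, info);
    return 0;
}

Freeing the info struct in the worker rather than in the requester is what makes the fire-and-forget queueing safe: once the work is queued, the requester must not touch the allocation again.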