Diffstat (limited to 'target-arm')
-rw-r--r--  target-arm/cpu-qom.h     |   8
-rw-r--r--  target-arm/cpu.c         |  35
-rw-r--r--  target-arm/cpu.h         |  56
-rw-r--r--  target-arm/cpu64.c       |   3
-rw-r--r--  target-arm/helper-a64.c  | 104
-rw-r--r--  target-arm/helper.c      | 301
-rw-r--r--  target-arm/op_helper.c   |  94
7 files changed, 407 insertions, 194 deletions
diff --git a/target-arm/cpu-qom.h b/target-arm/cpu-qom.h
index 5bd9b7bb9f..07c0a71942 100644
--- a/target-arm/cpu-qom.h
+++ b/target-arm/cpu-qom.h
@@ -87,6 +87,9 @@ typedef struct ARMCPU {
     /* GPIO outputs for generic timer */
     qemu_irq gt_timer_outputs[NUM_GTIMERS];

+    /* MemoryRegion to use for secure physical accesses */
+    MemoryRegion *secure_memory;
+
     /* 'compatible' string for this CPU for Linux device trees */
     const char *dtb_compatible;

@@ -216,7 +219,8 @@ bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);
 void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                         int flags);

-hwaddr arm_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
+                                         MemTxAttrs *attrs);

 int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
@@ -248,8 +252,6 @@ void arm_gt_stimer_cb(void *opaque);
 #ifdef TARGET_AARCH64
 int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
 int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
-
-void aarch64_cpu_do_interrupt(CPUState *cs);
 #endif

 #endif
diff --git a/target-arm/cpu.c b/target-arm/cpu.c
index 3f5f8e8cb5..6c34476a3d 100644
--- a/target-arm/cpu.c
+++ b/target-arm/cpu.c
@@ -543,6 +543,15 @@ static void arm_cpu_post_init(Object *obj)
          */
         qdev_property_add_static(DEVICE(obj), &arm_cpu_has_el3_property,
                                  &error_abort);
+
+#ifndef CONFIG_USER_ONLY
+        object_property_add_link(obj, "secure-memory",
+                                 TYPE_MEMORY_REGION,
+                                 (Object **)&cpu->secure_memory,
+                                 qdev_prop_allow_set_link_before_realize,
+                                 OBJ_PROP_LINK_UNREF_ON_RELEASE,
+                                 &error_abort);
+#endif
     }

     if (arm_feature(&cpu->env, ARM_FEATURE_MPU)) {
@@ -666,6 +675,29 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

     init_cpreg_list(cpu);

+#ifndef CONFIG_USER_ONLY
+    if (cpu->has_el3) {
+        cs->num_ases = 2;
+    } else {
+        cs->num_ases = 1;
+    }
+
+    if (cpu->has_el3) {
+        AddressSpace *as;
+
+        if (!cpu->secure_memory) {
+            cpu->secure_memory = cs->memory;
+        }
+        as = address_space_init_shareable(cpu->secure_memory,
+                                          "cpu-secure-memory");
+        cpu_address_space_init(cs, as, ARMASIdx_S);
+    }
+    cpu_address_space_init(cs,
+                           address_space_init_shareable(cs->memory,
+                                                        "cpu-memory"),
+                           ARMASIdx_NS);
+#endif
+
     qemu_init_vcpu(cs);
     cpu_reset(cs);

@@ -1419,7 +1451,8 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
 #else
     cc->do_interrupt = arm_cpu_do_interrupt;
     cc->do_unaligned_access = arm_cpu_do_unaligned_access;
-    cc->get_phys_page_debug = arm_cpu_get_phys_page_debug;
+    cc->get_phys_page_attrs_debug = arm_cpu_get_phys_page_attrs_debug;
+    cc->asidx_from_attrs = arm_asidx_from_attrs;
     cc->vmsd = &vmstate_arm_cpu;
     cc->virtio_is_big_endian = arm_cpu_is_big_endian;
     cc->write_elf64_note = arm_cpu_write_elf64_note;
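The cpu.c changes above are the hook a board uses to give the CPU a distinct Secure-world view: if the board sets the "secure-memory" link before realize, the CPU builds a second AddressSpace from it; otherwise Secure accesses fall back to the normal memory view. A board-side sketch of how that wiring might look, using the QOM and memory-region APIs of this era (the CPU model, region names, sizes and addresses below are invented for illustration):

/* Sketch only: assumes QEMU-internal headers and a board where the
 * Secure view is the Non-secure view plus some Secure-only RAM.
 */
static void hypothetical_board_init(MachineState *machine)
{
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *secure_sysmem = g_new(MemoryRegion, 1);
    MemoryRegion *secure_ram = g_new(MemoryRegion, 1);
    Object *cpuobj = object_new("cortex-a53-" TYPE_ARM_CPU);

    /* Secure view: everything in the NS view (mapped underneath at
     * lower priority), plus a block of Secure-only RAM on top.
     */
    memory_region_init(secure_sysmem, NULL, "secure-sysmem", UINT64_MAX);
    memory_region_add_subregion_overlap(secure_sysmem, 0, sysmem, -1);
    memory_region_init_ram(secure_ram, NULL, "secure-ram", 16 * 1024 * 1024,
                           &error_fatal);
    memory_region_add_subregion(secure_sysmem, 0xe0000000, secure_ram);

    /* Must be set before realize; if the link is left NULL,
     * arm_cpu_realizefn() above reuses cs->memory for Secure accesses.
     */
    object_property_set_link(cpuobj, OBJECT(secure_sysmem),
                             "secure-memory", &error_fatal);
    object_property_set_bool(cpuobj, true, "realized", &error_fatal);
}

Note that the separate address space only exists at all when the CPU is realized with has_el3 set, since that is what makes cs->num_ases 2.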
diff --git a/target-arm/cpu.h b/target-arm/cpu.h
index 815fef8a30..b8b3364615 100644
--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -969,18 +969,33 @@ static inline bool arm_is_secure(CPUARMState *env)
 /* Return true if the specified exception level is running in AArch64 state. */
 static inline bool arm_el_is_aa64(CPUARMState *env, int el)
 {
-    /* We don't currently support EL2, and this isn't valid for EL0
-     * (if we're in EL0, is_a64() is what you want, and if we're not in EL0
-     * then the state of EL0 isn't well defined.)
+    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
+     * and if we're not in EL0 then the state of EL0 isn't well defined.)
      */
-    assert(el == 1 || el == 3);
+    assert(el >= 1 && el <= 3);
+    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

-    /* AArch64-capable CPUs always run with EL1 in AArch64 mode. This
-     * is a QEMU-imposed simplification which we may wish to change later.
-     * If we in future support EL2 and/or EL3, then the state of lower
-     * exception levels is controlled by the HCR.RW and SCR.RW bits.
+    /* The highest exception level is always at the maximum supported
+     * register width, and then lower levels have a register width controlled
+     * by bits in the SCR or HCR registers.
      */
-    return arm_feature(env, ARM_FEATURE_AARCH64);
+    if (el == 3) {
+        return aa64;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL3)) {
+        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
+    }
+
+    if (el == 2) {
+        return aa64;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
+        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
+    }
+
+    return aa64;
 }

 /* Function for determing whether guest cp register reads and writes should
@@ -1720,6 +1735,12 @@ static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
     return el;
 }

+/* Indexes used when registering address spaces with cpu_address_space_init */
+typedef enum ARMASIdx {
+    ARMASIdx_NS = 0,
+    ARMASIdx_S = 1,
+} ARMASIdx;
+
 /* Return the Exception Level targeted by debug exceptions;
  * currently always EL1 since we don't implement EL2 or EL3.
  */
@@ -1991,4 +2012,21 @@ enum {
     QEMU_PSCI_CONDUIT_HVC = 2,
 };

+#ifndef CONFIG_USER_ONLY
+/* Return the address space index to use for a memory access */
+static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
+{
+    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
+}
+
+/* Return the AddressSpace to use for a memory access
+ * (which depends on whether the access is S or NS, and whether
+ * the board gave us a separate AddressSpace for S accesses).
+ */
+static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
+{
+    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
+}
+#endif
+
 #endif
diff --git a/target-arm/cpu64.c b/target-arm/cpu64.c
index 5f8a177475..cc177bb9f6 100644
--- a/target-arm/cpu64.c
+++ b/target-arm/cpu64.c
@@ -291,9 +291,6 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data)
 {
     CPUClass *cc = CPU_CLASS(oc);

-#if !defined(CONFIG_USER_ONLY)
-    cc->do_interrupt = aarch64_cpu_do_interrupt;
-#endif
     cc->cpu_exec_interrupt = arm_cpu_exec_interrupt;
     cc->set_pc = aarch64_cpu_set_pc;
     cc->gdb_read_register = aarch64_cpu_gdb_read_register;
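The reworked arm_el_is_aa64() above encodes the architectural cascade: the highest implemented EL always runs at the CPU's maximum register width, and SCR_EL3.RW / HCR_EL2.RW can only narrow the levels below it, never widen them. A standalone restatement of the cascade, with the CPU state passed in as plain booleans instead of read from CPUARMState (names here are this sketch's own):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the arm_el_is_aa64() decision chain: each RW bit gates
 * everything below the level that owns it.
 */
static bool el_is_aa64(int el, bool cpu_is_aa64, bool have_el3, bool scr_rw,
                       bool have_el2, bool secure, bool hcr_rw)
{
    bool aa64 = cpu_is_aa64;

    if (el == 3) {
        return aa64;
    }
    if (have_el3) {
        aa64 = aa64 && scr_rw;   /* SCR_EL3.RW gates EL2 and below */
    }
    if (el == 2) {
        return aa64;
    }
    if (have_el2 && !secure) {
        aa64 = aa64 && hcr_rw;   /* HCR_EL2.RW gates EL1/EL0 */
    }
    return aa64;
}

int main(void)
{
    /* AArch64 EL3 with SCR_EL3.RW clear: EL2 and EL1 must be AArch32 */
    printf("EL3: %d\n", el_is_aa64(3, true, true, false, true, false, true));
    printf("EL2: %d\n", el_is_aa64(2, true, true, false, true, false, true));
    printf("EL1: %d\n", el_is_aa64(1, true, true, false, true, false, true));
    return 0;
}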
diff --git a/target-arm/helper-a64.c b/target-arm/helper-a64.c
index 01f3958b90..c7bfb4d8f7 100644
--- a/target-arm/helper-a64.c
+++ b/target-arm/helper-a64.c
@@ -26,7 +26,6 @@
 #include "qemu/bitops.h"
 #include "internals.h"
 #include "qemu/crc32c.h"
-#include "sysemu/kvm.h"
 #include <zlib.h> /* For crc32 */

 /* C2.4.7 Multiply and divide */
@@ -444,106 +443,3 @@ uint64_t HELPER(crc32c_64)(uint64_t acc, uint64_t val, uint32_t bytes)
     /* Linux crc32c converts the output to one's complement.  */
     return crc32c(acc, buf, bytes) ^ 0xffffffff;
 }
-
-#if !defined(CONFIG_USER_ONLY)
-
-/* Handle a CPU exception.  */
-void aarch64_cpu_do_interrupt(CPUState *cs)
-{
-    ARMCPU *cpu = ARM_CPU(cs);
-    CPUARMState *env = &cpu->env;
-    unsigned int new_el = env->exception.target_el;
-    target_ulong addr = env->cp15.vbar_el[new_el];
-    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
-
-    if (arm_current_el(env) < new_el) {
-        if (env->aarch64) {
-            addr += 0x400;
-        } else {
-            addr += 0x600;
-        }
-    } else if (pstate_read(env) & PSTATE_SP) {
-        addr += 0x200;
-    }
-
-    arm_log_exception(cs->exception_index);
-    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
-                  new_el);
-    if (qemu_loglevel_mask(CPU_LOG_INT)
-        && !excp_is_internal(cs->exception_index)) {
-        qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
-                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
-                      env->exception.syndrome);
-    }
-
-    if (arm_is_psci_call(cpu, cs->exception_index)) {
-        arm_handle_psci_call(cpu);
-        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
-        return;
-    }
-
-    switch (cs->exception_index) {
-    case EXCP_PREFETCH_ABORT:
-    case EXCP_DATA_ABORT:
-        env->cp15.far_el[new_el] = env->exception.vaddress;
-        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
-                      env->cp15.far_el[new_el]);
-        /* fall through */
-    case EXCP_BKPT:
-    case EXCP_UDEF:
-    case EXCP_SWI:
-    case EXCP_HVC:
-    case EXCP_HYP_TRAP:
-    case EXCP_SMC:
-        env->cp15.esr_el[new_el] = env->exception.syndrome;
-        break;
-    case EXCP_IRQ:
-    case EXCP_VIRQ:
-        addr += 0x80;
-        break;
-    case EXCP_FIQ:
-    case EXCP_VFIQ:
-        addr += 0x100;
-        break;
-    case EXCP_SEMIHOST:
-        qemu_log_mask(CPU_LOG_INT,
-                      "...handling as semihosting call 0x%" PRIx64 "\n",
-                      env->xregs[0]);
-        env->xregs[0] = do_arm_semihosting(env);
-        return;
-    default:
-        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
-    }
-
-    if (is_a64(env)) {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
-        aarch64_save_sp(env, arm_current_el(env));
-        env->elr_el[new_el] = env->pc;
-    } else {
-        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
-        if (!env->thumb) {
-            env->cp15.esr_el[new_el] |= 1 << 25;
-        }
-        env->elr_el[new_el] = env->regs[15];
-
-        aarch64_sync_32_to_64(env);
-
-        env->condexec_bits = 0;
-    }
-    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
-                  env->elr_el[new_el]);
-
-    pstate_write(env, PSTATE_DAIF | new_mode);
-    env->aarch64 = 1;
-    aarch64_restore_sp(env, new_el);
-
-    env->pc = addr;
-
-    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
-                  new_el, env->pc, pstate_read(env));
-
-    if (!kvm_enabled()) {
-        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
-    }
-}
-#endif
diff --git a/target-arm/helper.c b/target-arm/helper.c
index f956b67ded..ae024869d0 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -12,6 +12,7 @@
 #include "arm_ldst.h"
 #include <zlib.h> /* For crc32 */
 #include "exec/semihost.h"
+#include "sysemu/kvm.h"

 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */

@@ -2890,6 +2891,17 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     tlb_flush(CPU(cpu), 1);
 }

+static CPAccessResult fpexc32_access(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+    if ((env->cp15.cptr_el[2] & CPTR_TFP) && arm_current_el(env) == 2) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+    if (env->cp15.cptr_el[3] & CPTR_TFP) {
+        return CP_ACCESS_TRAP_EL3;
+    }
+    return CP_ACCESS_OK;
+}
+
 static const ARMCPRegInfo v8_cp_reginfo[] = {
     /* Minimal set of EL0-visible registers. This will need to be expanded
      * significantly for system emulation of AArch64 CPUs.
@@ -3150,6 +3162,11 @@ static const ARMCPRegInfo v8_cp_reginfo[] = {
       .opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
       .type = ARM_CP_NO_RAW,
       .access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
+    { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
+      .type = ARM_CP_ALIAS,
+      .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]),
+      .access = PL2_RW, .accessfn = fpexc32_access },
     REGINFO_SENTINEL
 };

@@ -5707,8 +5724,7 @@ void aarch64_sync_64_to_32(CPUARMState *env)
     env->regs[15] = env->pc;
 }

-/* Handle a CPU exception. */
-void arm_cpu_do_interrupt(CPUState *cs)
+static void arm_cpu_do_interrupt_aarch32(CPUState *cs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
@@ -5718,16 +5734,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
     uint32_t offset;
     uint32_t moe;

-    assert(!IS_M(env));
-
-    arm_log_exception(cs->exception_index);
-
-    if (arm_is_psci_call(cpu, cs->exception_index)) {
-        arm_handle_psci_call(cpu);
-        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
-        return;
-    }
-
     /* If this is a debug exception we must update the DBGDSCR.MOE bits */
     switch (env->exception.syndrome >> ARM_EL_EC_SHIFT) {
     case EC_BREAKPOINT:
@@ -5765,27 +5771,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
         offset = 4;
         break;
     case EXCP_SWI:
-        if (semihosting_enabled()) {
-            /* Check for semihosting interrupt.  */
-            if (env->thumb) {
-                mask = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
-                    & 0xff;
-            } else {
-                mask = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
-                    & 0xffffff;
-            }
-            /* Only intercept calls from privileged modes, to provide some
-               semblance of security.  */
-            if (((mask == 0x123456 && !env->thumb)
-                    || (mask == 0xab && env->thumb))
-                && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
         new_mode = ARM_CPU_MODE_SVC;
         addr = 0x08;
         mask = CPSR_I;
@@ -5793,19 +5778,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
         offset = 0;
         break;
     case EXCP_BKPT:
-        /* See if this is a semihosting syscall.  */
-        if (env->thumb && semihosting_enabled()) {
-            mask = arm_lduw_code(env, env->regs[15], env->bswap_code) & 0xff;
-            if (mask == 0xab
-                  && (env->uncached_cpsr & CPSR_M) != ARM_CPU_MODE_USR) {
-                env->regs[15] += 2;
-                qemu_log_mask(CPU_LOG_INT,
-                              "...handling as semihosting call 0x%x\n",
-                              env->regs[0]);
-                env->regs[0] = do_arm_semihosting(env);
-                return;
-            }
-        }
         env->exception.fsr = 2;
         /* Fall through to prefetch abort.  */
     case EXCP_PREFETCH_ABORT:
@@ -5899,9 +5871,227 @@ void arm_cpu_do_interrupt(CPUState *cs)
     }
     env->regs[14] = env->regs[15] + offset;
     env->regs[15] = addr;
-    cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
 }

+/* Handle exception entry to a target EL which is using AArch64 */
+static void arm_cpu_do_interrupt_aarch64(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    unsigned int new_el = env->exception.target_el;
+    target_ulong addr = env->cp15.vbar_el[new_el];
+    unsigned int new_mode = aarch64_pstate_mode(new_el, true);
+
+    if (arm_current_el(env) < new_el) {
+        /* Entry vector offset depends on whether the implemented EL
+         * immediately lower than the target level is using AArch32 or AArch64
+         */
+        bool is_aa64;
+
+        switch (new_el) {
+        case 3:
+            is_aa64 = (env->cp15.scr_el3 & SCR_RW) != 0;
+            break;
+        case 2:
+            is_aa64 = (env->cp15.hcr_el2 & HCR_RW) != 0;
+            break;
+        case 1:
+            is_aa64 = is_a64(env);
+            break;
+        default:
+            g_assert_not_reached();
+        }
+
+        if (is_aa64) {
+            addr += 0x400;
+        } else {
+            addr += 0x600;
+        }
+    } else if (pstate_read(env) & PSTATE_SP) {
+        addr += 0x200;
+    }
+
+    switch (cs->exception_index) {
+    case EXCP_PREFETCH_ABORT:
+    case EXCP_DATA_ABORT:
+        env->cp15.far_el[new_el] = env->exception.vaddress;
+        qemu_log_mask(CPU_LOG_INT, "...with FAR 0x%" PRIx64 "\n",
+                      env->cp15.far_el[new_el]);
+        /* fall through */
+    case EXCP_BKPT:
+    case EXCP_UDEF:
+    case EXCP_SWI:
+    case EXCP_HVC:
+    case EXCP_HYP_TRAP:
+    case EXCP_SMC:
+        env->cp15.esr_el[new_el] = env->exception.syndrome;
+        break;
+    case EXCP_IRQ:
+    case EXCP_VIRQ:
+        addr += 0x80;
+        break;
+    case EXCP_FIQ:
+    case EXCP_VFIQ:
+        addr += 0x100;
+        break;
+    case EXCP_SEMIHOST:
+        qemu_log_mask(CPU_LOG_INT,
+                      "...handling as semihosting call 0x%" PRIx64 "\n",
+                      env->xregs[0]);
+        env->xregs[0] = do_arm_semihosting(env);
+        return;
+    default:
+        cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
+    }
+
+    if (is_a64(env)) {
+        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = pstate_read(env);
+        aarch64_save_sp(env, arm_current_el(env));
+        env->elr_el[new_el] = env->pc;
+    } else {
+        env->banked_spsr[aarch64_banked_spsr_index(new_el)] = cpsr_read(env);
+        if (!env->thumb) {
+            env->cp15.esr_el[new_el] |= 1 << 25;
+        }
+        env->elr_el[new_el] = env->regs[15];
+
+        aarch64_sync_32_to_64(env);
+
+        env->condexec_bits = 0;
+    }
+    qemu_log_mask(CPU_LOG_INT, "...with ELR 0x%" PRIx64 "\n",
+                  env->elr_el[new_el]);
+
+    pstate_write(env, PSTATE_DAIF | new_mode);
+    env->aarch64 = 1;
+    aarch64_restore_sp(env, new_el);
+
+    env->pc = addr;
+
+    qemu_log_mask(CPU_LOG_INT, "...to EL%d PC 0x%" PRIx64 " PSTATE 0x%x\n",
+                  new_el, env->pc, pstate_read(env));
+}
+
+static inline bool check_for_semihosting(CPUState *cs)
+{
+    /* Check whether this exception is a semihosting call; if so
+     * then handle it and return true; otherwise return false.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+
+    if (is_a64(env)) {
+        if (cs->exception_index == EXCP_SEMIHOST) {
+            /* This is always the 64-bit semihosting exception.
+             * The "is this usermode" and "is semihosting enabled"
+             * checks have been done at translate time.
+             */
+            qemu_log_mask(CPU_LOG_INT,
+                          "...handling as semihosting call 0x%" PRIx64 "\n",
+                          env->xregs[0]);
+            env->xregs[0] = do_arm_semihosting(env);
+            return true;
+        }
+        return false;
+    } else {
+        uint32_t imm;
+
+        /* Only intercept calls from privileged modes, to provide some
+         * semblance of security.
+         */
+        if (!semihosting_enabled() ||
+            ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR)) {
+            return false;
+        }
+
+        switch (cs->exception_index) {
+        case EXCP_SWI:
+            /* Check for semihosting interrupt.  */
+            if (env->thumb) {
+                imm = arm_lduw_code(env, env->regs[15] - 2, env->bswap_code)
+                    & 0xff;
+                if (imm == 0xab) {
+                    break;
+                }
+            } else {
+                imm = arm_ldl_code(env, env->regs[15] - 4, env->bswap_code)
+                    & 0xffffff;
+                if (imm == 0x123456) {
+                    break;
+                }
+            }
+            return false;
+        case EXCP_BKPT:
+            /* See if this is a semihosting syscall.  */
+            if (env->thumb) {
+                imm = arm_lduw_code(env, env->regs[15], env->bswap_code)
+                    & 0xff;
+                if (imm == 0xab) {
+                    env->regs[15] += 2;
+                    break;
+                }
+            }
+            return false;
+        default:
+            return false;
+        }
+
+        qemu_log_mask(CPU_LOG_INT,
+                      "...handling as semihosting call 0x%x\n",
+                      env->regs[0]);
+        env->regs[0] = do_arm_semihosting(env);
+        return true;
+    }
+}
+
+/* Handle a CPU exception for A and R profile CPUs.
+ * Do any appropriate logging, handle PSCI calls, and then hand off
+ * to the AArch64-entry or AArch32-entry function depending on the
+ * target exception level's register width.
+ */
+void arm_cpu_do_interrupt(CPUState *cs)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    unsigned int new_el = env->exception.target_el;
+
+    assert(!IS_M(env));
+
+    arm_log_exception(cs->exception_index);
+    qemu_log_mask(CPU_LOG_INT, "...from EL%d to EL%d\n", arm_current_el(env),
+                  new_el);
+    if (qemu_loglevel_mask(CPU_LOG_INT)
+        && !excp_is_internal(cs->exception_index)) {
+        qemu_log_mask(CPU_LOG_INT, "...with ESR %x/0x%" PRIx32 "\n",
+                      env->exception.syndrome >> ARM_EL_EC_SHIFT,
+                      env->exception.syndrome);
+    }
+
+    if (arm_is_psci_call(cpu, cs->exception_index)) {
+        arm_handle_psci_call(cpu);
+        qemu_log_mask(CPU_LOG_INT, "...handled as PSCI call\n");
+        return;
+    }
+
+    /* Semihosting semantics depend on the register width of the
+     * code that caused the exception, not the target exception level,
+     * so must be handled here.
+     */
+    if (check_for_semihosting(cs)) {
+        return;
+    }
+
+    assert(!excp_is_internal(cs->exception_index));
+    if (arm_el_is_aa64(env, new_el)) {
+        arm_cpu_do_interrupt_aarch64(cs);
+    } else {
+        arm_cpu_do_interrupt_aarch32(cs);
+    }
+
+    if (!kvm_enabled()) {
+        cs->interrupt_request |= CPU_INTERRUPT_EXITTB;
+    }
+}

 /* Return the exception level which controls this address translation regime */
 static inline uint32_t regime_el(CPUARMState *env, ARMMMUIdx mmu_idx)
@@ -6273,13 +6463,15 @@ static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
     MemTxAttrs attrs = {};
+    AddressSpace *as;

     attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
     if (fi->s1ptw) {
         return 0;
     }
-    return address_space_ldl(cs->as, addr, attrs, NULL);
+    return address_space_ldl(as, addr, attrs, NULL);
 }

 static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
@@ -6289,13 +6481,15 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure,
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
     MemTxAttrs attrs = {};
+    AddressSpace *as;

     attrs.secure = is_secure;
+    as = arm_addressspace(cs, attrs);
     addr = S1_ptw_translate(env, mmu_idx, addr, attrs, fsr, fi);
     if (fi->s1ptw) {
         return 0;
     }
-    return address_space_ldq(cs->as, addr, attrs, NULL);
+    return address_space_ldq(as, addr, attrs, NULL);
 }

 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
@@ -7346,7 +7540,8 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
     return ret;
 }

-hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
+hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
+                                         MemTxAttrs *attrs)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
@@ -7355,16 +7550,16 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     int prot;
     bool ret;
     uint32_t fsr;
-    MemTxAttrs attrs = {};
     ARMMMUFaultInfo fi = {};

+    *attrs = (MemTxAttrs) {};
+
     ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
-                        &attrs, &prot, &page_size, &fsr, &fi);
+                        attrs, &prot, &page_size, &fsr, &fi);

     if (ret) {
         return -1;
     }
-
     return phys_addr;
 }

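The new arm_cpu_do_interrupt_aarch64() above selects the vector slot from two inputs: a base offset determined by the source/target EL relationship (0x0 same-EL on SP_EL0, 0x200 same-EL on SP_ELx, 0x400 from a lower AArch64 EL, 0x600 from a lower AArch32 EL), plus a per-class offset (+0x80 for IRQ, +0x100 for FIQ). The same table restated as a pure function; the `kind` encoding is this sketch's own, and SError, which this code path never raises, would add 0x180:

#include <stdbool.h>
#include <stdint.h>

/* Pure-function restatement of the vector selection in
 * arm_cpu_do_interrupt_aarch64(). kind: 0 = synchronous, 1 = IRQ, 2 = FIQ.
 */
uint64_t vector_offset(int cur_el, int target_el, bool lower_is_aa64,
                       bool sp_elx, int kind)
{
    uint64_t off;

    if (cur_el < target_el) {
        /* Taken from a lower EL: slot depends on that EL's register width */
        off = lower_is_aa64 ? 0x400 : 0x600;
    } else if (sp_elx) {
        off = 0x200;    /* same EL, currently using SP_ELx */
    } else {
        off = 0x0;      /* same EL, currently using SP_EL0 */
    }

    if (kind == 1) {
        off += 0x80;    /* IRQ/vIRQ */
    } else if (kind == 2) {
        off += 0x100;   /* FIQ/vFIQ */
    }
    return off;
}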
diff --git a/target-arm/op_helper.c b/target-arm/op_helper.c
index 7b6b3fd97c..a5ee65fe2f 100644
--- a/target-arm/op_helper.c
+++ b/target-arm/op_helper.c
@@ -641,12 +641,51 @@ void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
     }
 }

+static int el_from_spsr(uint32_t spsr)
+{
+    /* Return the exception level that this SPSR is requesting a return to,
+     * or -1 if it is invalid (an illegal return)
+     */
+    if (spsr & PSTATE_nRW) {
+        switch (spsr & CPSR_M) {
+        case ARM_CPU_MODE_USR:
+            return 0;
+        case ARM_CPU_MODE_HYP:
+            return 2;
+        case ARM_CPU_MODE_FIQ:
+        case ARM_CPU_MODE_IRQ:
+        case ARM_CPU_MODE_SVC:
+        case ARM_CPU_MODE_ABT:
+        case ARM_CPU_MODE_UND:
+        case ARM_CPU_MODE_SYS:
+            return 1;
+        case ARM_CPU_MODE_MON:
+            /* Returning to Mon from AArch64 is never possible,
+             * so this is an illegal return.
+             */
+        default:
+            return -1;
+        }
+    } else {
+        if (extract32(spsr, 1, 1)) {
+            /* Return with reserved M[1] bit set */
+            return -1;
+        }
+        if (extract32(spsr, 0, 4) == 1) {
+            /* return to EL0 with M[0] bit set */
+            return -1;
+        }
+        return extract32(spsr, 2, 2);
+    }
+}
+
 void HELPER(exception_return)(CPUARMState *env)
 {
     int cur_el = arm_current_el(env);
     unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
     uint32_t spsr = env->banked_spsr[spsr_idx];
     int new_el;
+    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

     aarch64_save_sp(env, cur_el);

@@ -663,35 +702,48 @@ void HELPER(exception_return)(CPUARMState *env)
         spsr &= ~PSTATE_SS;
     }

-    if (spsr & PSTATE_nRW) {
-        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
+    new_el = el_from_spsr(spsr);
+    if (new_el == -1) {
+        goto illegal_return;
+    }
+    if (new_el > cur_el
+        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+        /* Disallow return to an EL which is unimplemented or higher
+         * than the current one.
+         */
+        goto illegal_return;
+    }
+
+    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+        /* Return to an EL which is configured for a different register width */
+        goto illegal_return;
+    }
+
+    if (new_el == 2 && arm_is_secure_below_el3(env)) {
+        /* Return to the non-existent secure-EL2 */
+        goto illegal_return;
+    }
+
+    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
+        && !arm_is_secure_below_el3(env)) {
+        goto illegal_return;
+    }
+
+    if (!return_to_aa64) {
         env->aarch64 = 0;
-        new_el = 0;
-        env->uncached_cpsr = 0x10;
+        env->uncached_cpsr = spsr & CPSR_M;
         cpsr_write(env, spsr, ~0);
         if (!arm_singlestep_active(env)) {
             env->uncached_cpsr &= ~PSTATE_SS;
         }
         aarch64_sync_64_to_32(env);

-        env->regs[15] = env->elr_el[1] & ~0x1;
-    } else {
-        new_el = extract32(spsr, 2, 2);
-        if (new_el > cur_el
-            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
-            /* Disallow return to an EL which is unimplemented or higher
-             * than the current one.
-             */
-            goto illegal_return;
-        }
-        if (extract32(spsr, 1, 1)) {
-            /* Return with reserved M[1] bit set */
-            goto illegal_return;
-        }
-        if (new_el == 0 && (spsr & PSTATE_SP)) {
-            /* Return to EL0 with M[0] bit set */
-            goto illegal_return;
+        if (spsr & CPSR_T) {
+            env->regs[15] = env->elr_el[cur_el] & ~0x1;
+        } else {
+            env->regs[15] = env->elr_el[cur_el] & ~0x3;
         }
+    } else {
         env->aarch64 = 1;
         pstate_write(env, spsr);
         if (!arm_singlestep_active(env)) {
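el_from_spsr() above gives ERET a single legality check covering both register widths: AArch32 mode numbers map onto exception levels, while AArch64 SPSRs are rejected for a reserved M[1] bit or for EL0 with M[0] set. A compilable restatement with the architectural mode encodings written out (constants as defined by the ARM ARM; the logic mirrors the patch):

#include <stdint.h>
#include <stdio.h>

/* AArch32 CPSR.M mode encodings (architectural values). All of them
 * have bit 4 set, which is also where PSTATE.nRW lives in an SPSR.
 */
#define MODE_USR 0x10
#define MODE_FIQ 0x11
#define MODE_IRQ 0x12
#define MODE_SVC 0x13
#define MODE_MON 0x16
#define MODE_ABT 0x17
#define MODE_HYP 0x1a
#define MODE_UND 0x1b
#define MODE_SYS 0x1f

#define PSTATE_nRW (1U << 4)   /* set: SPSR describes AArch32 state */
#define CPSR_M     0x1f

/* Mirror of el_from_spsr() from the patch: -1 means illegal return */
static int el_from_spsr(uint32_t spsr)
{
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case MODE_USR: return 0;
        case MODE_HYP: return 2;
        case MODE_FIQ: case MODE_IRQ: case MODE_SVC:
        case MODE_ABT: case MODE_UND: case MODE_SYS:
            return 1;
        case MODE_MON: /* not reachable from AArch64 */
        default:
            return -1;
        }
    }
    if ((spsr >> 1) & 1) {
        return -1;             /* reserved M[1] bit set */
    }
    if ((spsr & 0xf) == 1) {
        return -1;             /* EL0 with M[0] (SP select) set */
    }
    return (spsr >> 2) & 3;
}

int main(void)
{
    printf("%d\n", el_from_spsr(MODE_SVC)); /* AArch32 SVC -> EL1 */
    printf("%d\n", el_from_spsr(0x5));      /* AArch64 EL1h -> EL1 */
    printf("%d\n", el_from_spsr(0x1));      /* EL0 with M[0] -> -1 */
    return 0;
}

The caller then layers the state-dependent checks on top (target EL implemented and not above the current one, matching register width, no secure-EL2, no EL1 return while HCR_EL2.TGE routes everything to EL2), which is why the helper only classifies the SPSR and leaves policy to HELPER(exception_return).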