-rw-r--r-- | device_tree.c              |  2
-rw-r--r-- | hw/arm/aspeed.c            |  2
-rw-r--r-- | hw/arm/aspeed_soc.c        |  3
-rw-r--r-- | hw/arm/boot.c              |  2
-rw-r--r-- | hw/arm/highbank.c          |  2
-rw-r--r-- | hw/arm/raspi.c             |  2
-rw-r--r-- | hw/intc/arm_gicv3_cpuif.c  | 10
-rw-r--r-- | hw/ssi/xilinx_spips.c      |  3
-rw-r--r-- | hw/timer/aspeed_timer.c    |  2
-rw-r--r-- | target/arm/cpu.c           | 37
-rw-r--r-- | target/arm/cpu.h           | 48
-rw-r--r-- | target/arm/helper.c        | 73
-rw-r--r-- | target/arm/internals.h     | 14
-rw-r--r-- | target/arm/op_helper.c     |  8
-rw-r--r-- | target/arm/translate-a64.c |  6
-rw-r--r-- | target/arm/translate.c     | 12
16 files changed, 148 insertions, 78 deletions
diff --git a/device_tree.c b/device_tree.c
index 19458b32bf..52c3358a55 100644
--- a/device_tree.c
+++ b/device_tree.c
@@ -29,7 +29,7 @@
 #include <libfdt.h>
 
-#define FDT_MAX_SIZE 0x10000
+#define FDT_MAX_SIZE 0x100000
 
 void *create_device_tree(int *sizep)
 {
diff --git a/hw/arm/aspeed.c b/hw/arm/aspeed.c
index 7088c907bd..aecb3c1e75 100644
--- a/hw/arm/aspeed.c
+++ b/hw/arm/aspeed.c
@@ -225,7 +225,7 @@ static void aspeed_board_init(MachineState *machine,
          * SoC and 128MB for the AST2500 SoC, which is twice as big as
          * needed by the flash modules of the Aspeed machines.
          */
-        memory_region_init_rom_nomigrate(boot_rom, OBJECT(bmc), "aspeed.boot_rom",
+        memory_region_init_rom(boot_rom, OBJECT(bmc), "aspeed.boot_rom",
                                          fl->size, &error_abort);
         memory_region_add_subregion(get_system_memory(), FIRMWARE_ADDR,
                                     boot_rom);
diff --git a/hw/arm/aspeed_soc.c b/hw/arm/aspeed_soc.c
index 30d25f8b06..407f10d0d4 100644
--- a/hw/arm/aspeed_soc.c
+++ b/hw/arm/aspeed_soc.c
@@ -186,13 +186,12 @@ static void aspeed_soc_realize(DeviceState *dev, Error **errp)
     }
 
     /* SRAM */
-    memory_region_init_ram_nomigrate(&s->sram, OBJECT(dev), "aspeed.sram",
+    memory_region_init_ram(&s->sram, OBJECT(dev), "aspeed.sram",
                                      sc->info->sram_size, &err);
     if (err) {
         error_propagate(errp, err);
         return;
     }
-    vmstate_register_ram_global(&s->sram);
     memory_region_add_subregion(get_system_memory(), ASPEED_SOC_SRAM_BASE,
                                 &s->sram);
diff --git a/hw/arm/boot.c b/hw/arm/boot.c
index 26184bcd7c..9ae6ab2689 100644
--- a/hw/arm/boot.c
+++ b/hw/arm/boot.c
@@ -1188,7 +1188,7 @@ void arm_load_kernel(ARMCPU *cpu, struct arm_boot_info *info)
      * actually loading a kernel, the handler is also responsible for
      * arranging that we start it correctly.
      */
-    for (cs = CPU(cpu); cs; cs = CPU_NEXT(cs)) {
+    for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) {
         qemu_register_reset(do_cpu_reset, ARM_CPU(cs));
     }
 }
diff --git a/hw/arm/highbank.c b/hw/arm/highbank.c
index 1742cf6f6c..88326d1bfd 100644
--- a/hw/arm/highbank.c
+++ b/hw/arm/highbank.c
@@ -291,7 +291,7 @@ static void calxeda_init(MachineState *machine, enum cxmachines machine_id)
     memory_region_add_subregion(sysmem, 0, dram);
 
     sysram = g_new(MemoryRegion, 1);
-    memory_region_init_ram_nomigrate(sysram, NULL, "highbank.sysram", 0x8000,
+    memory_region_init_ram(sysram, NULL, "highbank.sysram", 0x8000,
                                      &error_fatal);
     memory_region_add_subregion(sysmem, 0xfff88000, sysram);
     if (bios_name != NULL) {
diff --git a/hw/arm/raspi.c b/hw/arm/raspi.c
index 955a7c4e80..66899c28dc 100644
--- a/hw/arm/raspi.c
+++ b/hw/arm/raspi.c
@@ -226,7 +226,6 @@ static void raspi2_machine_init(MachineClass *mc)
     mc->no_parallel = 1;
     mc->no_floppy = 1;
     mc->no_cdrom = 1;
-    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a7");
     mc->max_cpus = BCM283X_NCPUS;
     mc->min_cpus = BCM283X_NCPUS;
     mc->default_cpus = BCM283X_NCPUS;
@@ -249,7 +248,6 @@ static void raspi3_machine_init(MachineClass *mc)
     mc->no_parallel = 1;
     mc->no_floppy = 1;
     mc->no_cdrom = 1;
-    mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a53");
     mc->max_cpus = BCM283X_NCPUS;
     mc->min_cpus = BCM283X_NCPUS;
     mc->default_cpus = BCM283X_NCPUS;
diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c
index 26f5eeda94..cb9a3a542d 100644
--- a/hw/intc/arm_gicv3_cpuif.c
+++ b/hw/intc/arm_gicv3_cpuif.c
@@ -29,11 +29,7 @@ void gicv3_set_gicv3state(CPUState *cpu, GICv3CPUState *s)
 
 static GICv3CPUState *icc_cs_from_env(CPUARMState *env)
 {
-    /* Given the CPU, find the right GICv3CPUState struct.
-     * Since we registered the CPU interface with the EL change hook as
-     * the opaque pointer, we can just directly get from the CPU to it.
-     */
-    return arm_get_el_change_hook_opaque(arm_env_get_cpu(env));
+    return env->gicv3state;
 }
 
 static bool gicv3_use_ns_bank(CPUARMState *env)
@@ -2615,9 +2611,7 @@ void gicv3_init_cpuif(GICv3State *s)
      * it might be with code translated by CPU 0 but run by CPU 1, in
      * which case we'd get the wrong value.
      * So instead we define the regs with no ri->opaque info, and
-     * get back to the GICv3CPUState from the ARMCPU by reading back
-     * the opaque pointer from the el_change_hook, which we're going
-     * to need to register anyway.
+     * get back to the GICv3CPUState from the CPUARMState.
      */
     define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
     if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
diff --git a/hw/ssi/xilinx_spips.c b/hw/ssi/xilinx_spips.c
index 426f971311..03f5faee4b 100644
--- a/hw/ssi/xilinx_spips.c
+++ b/hw/ssi/xilinx_spips.c
@@ -616,7 +616,8 @@ static void xilinx_spips_flush_txfifo(XilinxSPIPS *s)
         if (fifo8_is_empty(&s->tx_fifo)) {
             xilinx_spips_update_ixr(s);
             return;
-        } else if (s->snoop_state == SNOOP_STRIPING) {
+        } else if (s->snoop_state == SNOOP_STRIPING ||
+                   s->snoop_state == SNOOP_NONE) {
             for (i = 0; i < num_effective_busses(s); ++i) {
                 tx_rx[i] = fifo8_pop(&s->tx_fifo);
             }
diff --git a/hw/timer/aspeed_timer.c b/hw/timer/aspeed_timer.c
index 50acbf530a..1e31e22b6f 100644
--- a/hw/timer/aspeed_timer.c
+++ b/hw/timer/aspeed_timer.c
@@ -504,7 +504,7 @@ static const VMStateDescription vmstate_aspeed_timer_state = {
         VMSTATE_UINT32(ctrl, AspeedTimerCtrlState),
         VMSTATE_UINT32(ctrl2, AspeedTimerCtrlState),
         VMSTATE_STRUCT_ARRAY(timers, AspeedTimerCtrlState,
-                             ASPEED_TIMER_NR_TIMERS, 2, vmstate_aspeed_timer,
+                             ASPEED_TIMER_NR_TIMERS, 1, vmstate_aspeed_timer,
                              AspeedTimer),
         VMSTATE_END_OF_LIST()
     }
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 022d8c5787..d175c5e94f 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -55,13 +55,26 @@ static bool arm_cpu_has_work(CPUState *cs)
         | CPU_INTERRUPT_EXITTB);
 }
 
-void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
+void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                  void *opaque)
 {
-    /* We currently only support registering a single hook function */
-    assert(!cpu->el_change_hook);
-    cpu->el_change_hook = hook;
-    cpu->el_change_hook_opaque = opaque;
+    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
+
+    entry->hook = hook;
+    entry->opaque = opaque;
+
+    QLIST_INSERT_HEAD(&cpu->pre_el_change_hooks, entry, node);
+}
+
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
+                                 void *opaque)
+{
+    ARMELChangeHook *entry = g_new0(ARMELChangeHook, 1);
+
+    entry->hook = hook;
+    entry->opaque = opaque;
+
+    QLIST_INSERT_HEAD(&cpu->el_change_hooks, entry, node);
 }
 
 static void cp_reg_reset(gpointer key, gpointer value, gpointer opaque)
@@ -552,6 +565,9 @@ static void arm_cpu_initfn(Object *obj)
     cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                          g_free, g_free);
 
+    QLIST_INIT(&cpu->pre_el_change_hooks);
+    QLIST_INIT(&cpu->el_change_hooks);
+
 #ifndef CONFIG_USER_ONLY
     /* Our inbound IRQ and FIQ lines */
     if (kvm_enabled()) {
@@ -713,7 +729,18 @@ static void arm_cpu_post_init(Object *obj)
 static void arm_cpu_finalizefn(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
+    ARMELChangeHook *hook, *next;
+
     g_hash_table_destroy(cpu->cp_regs);
+
+    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
+        QLIST_REMOVE(hook, node);
+        g_free(hook);
+    }
+    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
+        QLIST_REMOVE(hook, node);
+        g_free(hook);
+    }
 }
 
 static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 19a0c03f9b..44e6b77151 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -367,8 +367,8 @@ typedef struct CPUARMState {
         uint32_t c9_data;
         uint64_t c9_pmcr; /* performance monitor control register */
         uint64_t c9_pmcnten; /* perf monitor counter enables */
-        uint32_t c9_pmovsr; /* perf monitor overflow status */
-        uint32_t c9_pmuserenr; /* perf monitor user enable */
+        uint64_t c9_pmovsr; /* perf monitor overflow status */
+        uint64_t c9_pmuserenr; /* perf monitor user enable */
         uint64_t c9_pmselr; /* perf monitor counter selection register */
         uint64_t c9_pminten; /* perf monitor interrupt enables */
         union { /* Memory attribute redirection */
@@ -632,12 +632,17 @@ typedef struct CPUARMState {
 } CPUARMState;
 
 /**
- * ARMELChangeHook:
+ * ARMELChangeHookFn:
  * type of a function which can be registered via arm_register_el_change_hook()
  * to get callbacks when the CPU changes its exception level or mode.
  */
-typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque);
-
+typedef void ARMELChangeHookFn(ARMCPU *cpu, void *opaque);
+typedef struct ARMELChangeHook ARMELChangeHook;
+struct ARMELChangeHook {
+    ARMELChangeHookFn *hook;
+    void *opaque;
+    QLIST_ENTRY(ARMELChangeHook) node;
+};
 
 /* These values map onto the return values for
  * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
@@ -826,8 +831,8 @@ struct ARMCPU {
      */
     bool cfgend;
 
-    ARMELChangeHook *el_change_hook;
-    void *el_change_hook_opaque;
+    QLIST_HEAD(, ARMELChangeHook) pre_el_change_hooks;
+    QLIST_HEAD(, ARMELChangeHook) el_change_hooks;
 
     int32_t node_id; /* NUMA node this CPU belongs to */
 
@@ -2889,28 +2894,29 @@ static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
 #endif
 
 /**
- * arm_register_el_change_hook:
- * Register a hook function which will be called back whenever this
+ * arm_register_pre_el_change_hook:
+ * Register a hook function which will be called immediately before this
  * CPU changes exception level or mode. The hook function will be
  * passed a pointer to the ARMCPU and the opaque data pointer passed
  * to this function when the hook was registered.
  *
- * Note that we currently only support registering a single hook function,
- * and will assert if this function is called twice.
- * This facility is intended for the use of the GICv3 emulation.
+ * Note that if a pre-change hook is called, any registered post-change hooks
+ * are guaranteed to subsequently be called.
  */
-void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
+void arm_register_pre_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook,
                                  void *opaque);
-
 /**
- * arm_get_el_change_hook_opaque:
- * Return the opaque data that will be used by the el_change_hook
- * for this CPU.
+ * arm_register_el_change_hook:
+ * Register a hook function which will be called immediately after this
+ * CPU changes exception level or mode. The hook function will be
+ * passed a pointer to the ARMCPU and the opaque data pointer passed
+ * to this function when the hook was registered.
+ *
+ * Note that any registered hooks registered here are guaranteed to be called
+ * if pre-change hooks have been.
  */
-static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
-{
-    return cpu->el_change_hook_opaque;
-}
+void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHookFn *hook, void
+        *opaque);
 
 /**
  * aa32_vfp_dreg:
diff --git a/target/arm/helper.c b/target/arm/helper.c
index b14fdab140..52a88e0297 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -52,11 +52,6 @@ typedef struct V8M_SAttributes {
 static void v8m_security_lookup(CPUARMState *env, uint32_t address,
                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                 V8M_SAttributes *sattrs);
-
-/* Definitions for the PMCCNTR and PMCR registers */
-#define PMCRD   0x8
-#define PMCRC   0x4
-#define PMCRE   0x1
 #endif
 
 static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
@@ -906,6 +901,24 @@ static const ARMCPRegInfo v6_cp_reginfo[] = {
     REGINFO_SENTINEL
 };
 
+/* Definitions for the PMU registers */
+#define PMCRN_MASK  0xf800
+#define PMCRN_SHIFT 11
+#define PMCRD   0x8
+#define PMCRC   0x4
+#define PMCRE   0x1
+
+static inline uint32_t pmu_num_counters(CPUARMState *env)
+{
+    return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
+}
+
+/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
+static inline uint64_t pmu_counter_mask(CPUARMState *env)
+{
+    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+}
+
 static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri,
                                    bool isread)
 {
@@ -994,7 +1007,7 @@ static inline bool arm_ccnt_enabled(CPUARMState *env)
 {
     /* This does not support checking PMCCFILTR_EL0 register */
 
-    if (!(env->cp15.c9_pmcr & PMCRE)) {
+    if (!(env->cp15.c9_pmcr & PMCRE) || !(env->cp15.c9_pmcnten & (1 << 31))) {
         return false;
     }
 
@@ -1106,21 +1119,21 @@ static void pmccfiltr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                             uint64_t value)
 {
     pmccntr_sync(env);
-    env->cp15.pmccfiltr_el0 = value & 0x7E000000;
+    env->cp15.pmccfiltr_el0 = value & 0xfc000000;
     pmccntr_sync(env);
 }
 
 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    value &= (1 << 31);
+    value &= pmu_counter_mask(env);
     env->cp15.c9_pmcnten |= value;
 }
 
 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    value &= (1 << 31);
+    value &= pmu_counter_mask(env);
     env->cp15.c9_pmcnten &= ~value;
 }
 
@@ -1168,14 +1181,14 @@ static void pmintenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
     /* We have no event counters so only the C bit can be changed */
-    value &= (1 << 31);
+    value &= pmu_counter_mask(env);
     env->cp15.c9_pminten |= value;
 }
 
 static void pmintenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
-    value &= (1 << 31);
+    value &= pmu_counter_mask(env);
     env->cp15.c9_pminten &= ~value;
 }
 
@@ -1292,7 +1305,8 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmcnten),
       .writefn = pmcntenclr_write },
     { .name = "PMOVSR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 3,
-      .access = PL0_RW, .fieldoffset = offsetof(CPUARMState, cp15.c9_pmovsr),
+      .access = PL0_RW,
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmovsr),
       .accessfn = pmreg_access,
       .writefn = pmovsr_write,
       .raw_writefn = raw_write },
@@ -1318,7 +1332,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .fieldoffset = offsetof(CPUARMState, cp15.c9_pmselr),
       .writefn = pmselr_write, .raw_writefn = raw_write, },
     { .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
-      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
+      .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_ALIAS | ARM_CP_IO,
       .readfn = pmccntr_read, .writefn = pmccntr_write32,
       .accessfn = pmreg_access_ccntr },
     { .name = "PMCCNTR_EL0", .state = ARM_CP_STATE_AA64,
@@ -1347,7 +1361,7 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .accessfn = pmreg_access_xevcntr },
     { .name = "PMUSERENR", .cp = 15, .crn = 9, .crm = 14, .opc1 = 0, .opc2 = 0,
       .access = PL0_R | PL1_RW, .accessfn = access_tpm,
-      .fieldoffset = offsetof(CPUARMState, cp15.c9_pmuserenr),
+      .fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmuserenr),
       .resetvalue = 0,
       .writefn = pmuserenr_write, .raw_writefn = raw_write },
     { .name = "PMUSERENR_EL0", .state = ARM_CP_STATE_AA64,
@@ -6913,7 +6927,6 @@ static bool v7m_push_stack(ARMCPU *cpu)
 static void do_v7m_exception_exit(ARMCPU *cpu)
 {
     CPUARMState *env = &cpu->env;
-    CPUState *cs = CPU(cpu);
     uint32_t excret;
     uint32_t xpsr;
     bool ufault = false;
@@ -7112,9 +7125,11 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
             ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
              (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
             uint32_t expected_sig = 0xfefa125b;
-            uint32_t actual_sig = ldl_phys(cs->as, frameptr);
+            uint32_t actual_sig;
+
+            pop_ok = v7m_stack_read(cpu, &actual_sig, frameptr, mmu_idx);
 
-            if (expected_sig != actual_sig) {
+            if (pop_ok && expected_sig != actual_sig) {
                 /* Take a SecureFault on the current stack */
                 env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
                 armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
@@ -7125,7 +7140,7 @@ static void do_v7m_exception_exit(ARMCPU *cpu)
                 return;
             }
 
-            pop_ok =
+            pop_ok = pop_ok &&
                 v7m_stack_read(cpu, &env->regs[4], frameptr + 0x8, mmu_idx) &&
                 v7m_stack_read(cpu, &env->regs[5], frameptr + 0xc, mmu_idx) &&
@@ -8235,6 +8250,14 @@ void arm_cpu_do_interrupt(CPUState *cs)
         return;
     }
 
+    /* Hooks may change global state so BQL should be held, also the
+     * BQL needs to be held for any modification of
+     * cs->interrupt_request.
+     */
+    g_assert(qemu_mutex_iothread_locked());
+
+    arm_call_pre_el_change_hook(cpu);
+
     assert(!excp_is_internal(cs->exception_index));
     if (arm_el_is_aa64(env, new_el)) {
         arm_cpu_do_interrupt_aarch64(cs);
@@ -8242,12 +8265,6 @@ void arm_cpu_do_interrupt(CPUState *cs)
         arm_cpu_do_interrupt_aarch32(cs);
     }
 
-    /* Hooks may change global state so BQL should be held, also the
-     * BQL needs to be held for any modification of
-     * cs->interrupt_request.
-     */
-    g_assert(qemu_mutex_iothread_locked());
-
     arm_call_el_change_hook(cpu);
 
     if (!kvm_enabled()) {
@@ -8680,13 +8697,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
     return addr;
 }
 
-/* All loads done in the course of a page table walk go through here.
- * TODO: rather than ignoring errors from physical memory reads (which
- * are external aborts in ARM terminology) we should propagate this
- * error out so that we can turn it into a Data Abort if this walk
- * was being done for a CPU load/store or an address translation instruction
- * (but not if it was for a debug access).
- */
+/* All loads done in the course of a page table walk go through here. */
 static uint32_t arm_ldl_ptw(CPUState *cs, hwaddr addr, bool is_secure,
                             ARMMMUIdx mmu_idx, ARMMMUFaultInfo *fi)
 {
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 8ce944b7a0..dc9357766c 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -727,11 +727,19 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr);
 
-/* Call the EL change hook if one has been registered */
+/* Call any registered EL change hooks */
+static inline void arm_call_pre_el_change_hook(ARMCPU *cpu)
+{
+    ARMELChangeHook *hook, *next;
+    QLIST_FOREACH_SAFE(hook, &cpu->pre_el_change_hooks, node, next) {
+        hook->hook(cpu, hook->opaque);
+    }
+}
 static inline void arm_call_el_change_hook(ARMCPU *cpu)
 {
-    if (cpu->el_change_hook) {
-        cpu->el_change_hook(cpu, cpu->el_change_hook_opaque);
+    ARMELChangeHook *hook, *next;
+    QLIST_FOREACH_SAFE(hook, &cpu->el_change_hooks, node, next) {
+        hook->hook(cpu, hook->opaque);
     }
 }
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 84f08bf815..f728f25e4b 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -511,6 +511,10 @@ void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
 /* Write the CPSR for a 32-bit exception return */
 void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
 {
+    qemu_mutex_lock_iothread();
+    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
+    qemu_mutex_unlock_iothread();
+
     cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
 
     /* Generated code has already stored the new PC value, but
@@ -1028,6 +1032,10 @@ void HELPER(exception_return)(CPUARMState *env)
         goto illegal_return;
     }
 
+    qemu_mutex_lock_iothread();
+    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
+    qemu_mutex_unlock_iothread();
+
     if (!return_to_aa64) {
         env->aarch64 = 0;
         /* We do a raw CPSR write because aarch64_sync_64_to_32()
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index c91329249d..bff4e13bf6 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -1930,7 +1930,13 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
             unallocated_encoding(s);
             return;
         }
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+            gen_io_start();
+        }
         gen_helper_exception_return(cpu_env);
+        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+            gen_io_end();
+        }
         /* Must exit loop to check un-masked IRQs */
         s->base.is_jmp = DISAS_EXIT;
         return;
diff --git a/target/arm/translate.c b/target/arm/translate.c
index db1ce6510a..9bc2ce1a0b 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -4548,7 +4548,13 @@ static void gen_rfe(DisasContext *s, TCGv_i32 pc, TCGv_i32 cpsr)
      * appropriately depending on the new Thumb bit, so it must
      * be called after storing the new PC.
      */
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+        gen_io_start();
+    }
     gen_helper_cpsr_write_eret(cpu_env, cpsr);
+    if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+        gen_io_end();
+    }
     tcg_temp_free_i32(cpsr);
     /* Must exit loop to check un-masked IRQs */
     s->base.is_jmp = DISAS_EXIT;
@@ -9843,7 +9849,13 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
             if (exc_return) {
                 /* Restore CPSR from SPSR. */
                 tmp = load_cpu_field(spsr);
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                    gen_io_start();
+                }
                 gen_helper_cpsr_write_eret(cpu_env, tmp);
+                if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                    gen_io_end();
+                }
                 tcg_temp_free_i32(tmp);
                 /* Must exit loop to check un-masked IRQs */
                 s->base.is_jmp = DISAS_EXIT;
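Usage note (not part of the commit): the target/arm/cpu.h hunks above replace the single el_change_hook with separate pre- and post-change hook lists. Below is a minimal sketch of how a client might register with the new API, assuming only the signatures visible in this diff; the device name "mydev", its callbacks, and the connect helper are hypothetical.

#include "qemu/osdep.h"
#include "cpu.h"

static void mydev_pre_el_change(ARMCPU *cpu, void *opaque)
{
    /* Called immediately before the CPU changes exception level or mode. */
}

static void mydev_post_el_change(ARMCPU *cpu, void *opaque)
{
    /* Called immediately after the change; per the cpu.h comments, the
     * post-change hooks are guaranteed to run whenever the pre-change
     * hooks have run. */
}

static void mydev_connect_cpu(ARMCPU *cpu, void *mydev_state)
{
    /* Several hooks may now be registered per CPU; the old API supported
     * exactly one and asserted on a second registration. */
    arm_register_pre_el_change_hook(cpu, mydev_pre_el_change, mydev_state);
    arm_register_el_change_hook(cpu, mydev_post_el_change, mydev_state);
}

The GICv3 CPU interface, which previously recovered its GICv3CPUState through the hook's opaque pointer, now reads it directly from env->gicv3state, as the arm_gicv3_cpuif.c hunk shows.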