author     Peter Maydell <peter.maydell@linaro.org>   2019-12-16 13:04:33 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2019-12-16 13:04:34 +0000
commit     856ffa6465ad38a31603223eb057a253114ceaea (patch)
tree       b845dfc0a096d300a79286612254dcf8c98b072e /target
parent     7697ac55fcc6178fd8fd8aa22baed13a0c8ca942 (diff)
parent     f80741d107673f162e3b097fc76a1590036cc9d1 (diff)
Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20191216-1' into staging
target-arm queue:
* Add support for Cortex-M7 CPU
* exynos4210_gic: Suppress gcc9 format-truncation warnings
* aspeed: Various minor bug fixes and improvements
* aspeed: Add support for the tacoma-bmc board
* Honour HCR_EL2.TID1 and .TID2 trapping requirements
* Handle trapping to EL2 of AArch32 VMRS instructions
* Handle AArch32 CP15 trapping via HSTR_EL2
* Add support for missing Jazelle system registers
* arm/arm-powerctl: set NSACR.{CP11, CP10} bits in arm_set_cpu_on
* Add support for DC CVAP & DC CVADP instructions
* Fix assertion when SCR.NS is changed in Secure-SVC &c
* enable SHPC native hot plug in arm ACPI
# gpg: Signature made Mon 16 Dec 2019 11:08:07 GMT
# gpg: using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg: issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@gmail.com>" [ultimate]
# gpg: aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [ultimate]
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE
* remotes/pmaydell/tags/pull-target-arm-20191216-1: (34 commits)
target/arm: ensure we use current exception state after SCR update
hw/arm/virt: Simplify by moving the gic in the machine state
hw/arm/acpi: enable SHPC native hot plug
hw/arm/acpi: simplify AML bit and/or statement
hw/arm/sbsa-ref: Simplify by moving the gic in the machine state
target/arm: Add support for DC CVAP & DC CVADP ins
migration: ram: Switch to ram block writeback
Memory: Enable writeback for given memory region
tcg: cputlb: Add probe_read
arm/arm-powerctl: set NSACR.{CP11, CP10} bits in arm_set_cpu_on()
target/arm: Add support for missing Jazelle system registers
target/arm: Handle AArch32 CP15 trapping via HSTR_EL2
target/arm: Handle trapping to EL2 of AArch32 VMRS instructions
target/arm: Honor HCR_EL2.TID1 trapping requirements
target/arm: Honor HCR_EL2.TID2 trapping requirements
aspeed: Change the "nic" property definition
aspeed: Change the "scu" property definition
gpio: fix memory leak in aspeed_gpio_init()
aspeed: Add support for the tacoma-bmc board
aspeed: Remove AspeedBoardConfig array and use AspeedMachineClass
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r-- | target/arm/arm-powerctl.c      |   3
-rw-r--r-- | target/arm/cpu.c               |  33
-rw-r--r-- | target/arm/cpu.h               |  20
-rw-r--r-- | target/arm/cpu64.c             |   1
-rw-r--r-- | target/arm/helper.c            | 170
-rw-r--r-- | target/arm/helper.h            |   3
-rw-r--r-- | target/arm/op_helper.c         |  22
-rw-r--r-- | target/arm/translate-vfp.inc.c |  18
-rw-r--r-- | target/arm/translate.c         |   9
-rw-r--r-- | target/arm/translate.h         |   2
-rw-r--r-- | target/arm/vfp_helper.c        |  29
11 files changed, 294 insertions, 16 deletions
diff --git a/target/arm/arm-powerctl.c b/target/arm/arm-powerctl.c
index f77a950db6..b064513d44 100644
--- a/target/arm/arm-powerctl.c
+++ b/target/arm/arm-powerctl.c
@@ -104,6 +104,9 @@ static void arm_set_cpu_on_async_work(CPUState *target_cpu_state,
         /* Processor is not in secure mode */
         target_cpu->env.cp15.scr_el3 |= SCR_NS;
 
+        /* Set NSACR.{CP11,CP10} so NS can access the FPU */
+        target_cpu->env.cp15.nsacr |= 3 << 10;
+
         /*
          * If QEMU is providing the equivalent of EL3 firmware, then we need
          * to make sure a CPU targeting EL2 comes out of reset with a
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index 7a4ac9339b..dd51adac05 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -1975,6 +1975,37 @@ static void cortex_m4_initfn(Object *obj)
     cpu->isar.id_isar6 = 0x00000000;
 }
 
+static void cortex_m7_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    set_feature(&cpu->env, ARM_FEATURE_V7);
+    set_feature(&cpu->env, ARM_FEATURE_M);
+    set_feature(&cpu->env, ARM_FEATURE_M_MAIN);
+    set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP);
+    set_feature(&cpu->env, ARM_FEATURE_VFP4);
+    cpu->midr = 0x411fc272; /* r1p2 */
+    cpu->pmsav7_dregion = 8;
+    cpu->isar.mvfr0 = 0x10110221;
+    cpu->isar.mvfr1 = 0x12000011;
+    cpu->isar.mvfr2 = 0x00000040;
+    cpu->id_pfr0 = 0x00000030;
+    cpu->id_pfr1 = 0x00000200;
+    cpu->id_dfr0 = 0x00100000;
+    cpu->id_afr0 = 0x00000000;
+    cpu->id_mmfr0 = 0x00100030;
+    cpu->id_mmfr1 = 0x00000000;
+    cpu->id_mmfr2 = 0x01000000;
+    cpu->id_mmfr3 = 0x00000000;
+    cpu->isar.id_isar0 = 0x01101110;
+    cpu->isar.id_isar1 = 0x02112000;
+    cpu->isar.id_isar2 = 0x20232231;
+    cpu->isar.id_isar3 = 0x01111131;
+    cpu->isar.id_isar4 = 0x01310132;
+    cpu->isar.id_isar5 = 0x00000000;
+    cpu->isar.id_isar6 = 0x00000000;
+}
+
 static void cortex_m33_initfn(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
@@ -2559,6 +2590,8 @@ static const ARMCPUInfo arm_cpus[] = {
                              .class_init = arm_v7m_class_init },
     { .name = "cortex-m4",   .initfn = cortex_m4_initfn,
                              .class_init = arm_v7m_class_init },
+    { .name = "cortex-m7",   .initfn = cortex_m7_initfn,
+                             .class_init = arm_v7m_class_init },
     { .name = "cortex-m33",  .initfn = cortex_m33_initfn,
                              .class_init = arm_v7m_class_init },
     { .name = "cortex-r5",   .initfn = cortex_r5_initfn },
diff --git a/target/arm/cpu.h b/target/arm/cpu.h
index 83a809d4ba..5f70e9e043 100644
--- a/target/arm/cpu.h
+++ b/target/arm/cpu.h
@@ -2238,6 +2238,9 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
  * RAISES_EXC is for when the read or write hook might raise an exception;
  * the generated code will synchronize the CPU state before calling the hook
  * so that it is safe for the hook to call raise_exception().
+ * NEWEL is for writes to registers that might change the exception
+ * level - typically on older ARM chips. For those cases we need to
+ * re-read the new el when recomputing the translation flags.
  */
 #define ARM_CP_SPECIAL           0x0001
 #define ARM_CP_CONST             0x0002
@@ -2257,10 +2260,11 @@ static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
 #define ARM_CP_SVE               0x2000
 #define ARM_CP_NO_GDB            0x4000
 #define ARM_CP_RAISES_EXC        0x8000
+#define ARM_CP_NEWEL             0x10000
 /* Used only as a terminator for ARMCPRegInfo lists */
-#define ARM_CP_SENTINEL          0xffff
+#define ARM_CP_SENTINEL          0xfffff
 /* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK         0xf0ff
+#define ARM_CP_FLAG_MASK         0x1f0ff
 /* Valid values for ARMCPRegInfo state field, indicating which of
  * the AArch32 and AArch64 execution states this register is visible in.
@@ -3215,6 +3219,8 @@ FIELD(TBFLAG_A32, NS, 6, 1)
 FIELD(TBFLAG_A32, VFPEN, 7, 1)         /* Partially cached, minus FPEXC. */
 FIELD(TBFLAG_A32, CONDEXEC, 8, 8)      /* Not cached. */
 FIELD(TBFLAG_A32, SCTLR_B, 16, 1)
+FIELD(TBFLAG_A32, HSTR_ACTIVE, 17, 1)
+
 /* For M profile only, set if FPCCR.LSPACT is set */
 FIELD(TBFLAG_A32, LSPACT, 18, 1)       /* Not cached. */
 /* For M profile only, set if we must create a new FP context */
@@ -3616,6 +3622,16 @@ static inline bool isar_feature_aa64_frint(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0;
 }
 
+static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0;
+}
+
+static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2;
+}
+
 static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id)
 {
     /* We always set the AdvSIMD and FP fields identically wrt FP16. */
diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c
index a39d6fcea3..61fd0ade29 100644
--- a/target/arm/cpu64.c
+++ b/target/arm/cpu64.c
@@ -646,6 +646,7 @@ static void aarch64_max_initfn(Object *obj)
         cpu->isar.id_aa64isar0 = t;
 
         t = cpu->isar.id_aa64isar1;
+        t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2);
         t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1);
         t = FIELD_DP64(t, ID_AA64ISAR1, APA, 1); /* PAuth, architected only */
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 0bf8f53d4b..5074b5f69c 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -1910,6 +1910,17 @@ static void scr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
     raw_write(env, ri, value);
 }
 
+static CPAccessResult access_aa64_tid2(CPUARMState *env,
+                                       const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID2)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
 static uint64_t ccsidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
     ARMCPU *cpu = env_archcpu(env);
@@ -1962,6 +1973,26 @@ static uint64_t isr_read(CPUARMState *env, const ARMCPRegInfo *ri)
     return ret;
 }
 
+static CPAccessResult access_aa64_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID1)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static CPAccessResult access_aa32_tid1(CPUARMState *env, const ARMCPRegInfo *ri,
+                                       bool isread)
+{
+    if (arm_feature(env, ARM_FEATURE_V8)) {
+        return access_aa64_tid1(env, ri, isread);
+    }
+
+    return CP_ACCESS_OK;
+}
+
 static const ARMCPRegInfo v7_cp_reginfo[] = {
     /* the old v6 WFI, UNPREDICTABLE in v7 but we choose to NOP */
     { .name = "NOP", .cp = 15, .crn = 7, .crm = 0, .opc1 = 0, .opc2 = 4,
@@ -2110,10 +2141,14 @@ static const ARMCPRegInfo v7_cp_reginfo[] = {
       .writefn = pmintenclr_write },
     { .name = "CCSIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 0,
-      .access = PL1_R, .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
+      .access = PL1_R,
+      .accessfn = access_aa64_tid2,
+      .readfn = ccsidr_read, .type = ARM_CP_NO_RAW },
     { .name = "CSSELR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 2, .opc2 = 0,
-      .access = PL1_RW, .writefn = csselr_write, .resetvalue = 0,
+      .access = PL1_RW,
+      .accessfn = access_aa64_tid2,
+      .writefn = csselr_write, .resetvalue = 0,
       .bank_fieldoffsets = { offsetof(CPUARMState, cp15.csselr_s),
                              offsetof(CPUARMState, cp15.csselr_ns) } },
     /* Auxiliary ID register: this actually has an IMPDEF value but for now
@@ -2121,7 +2156,9 @@
      */
     { .name = "AIDR", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 1, .crn = 0, .crm = 0, .opc2 = 7,
-      .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+      .access = PL1_R, .type = ARM_CP_CONST,
+      .accessfn = access_aa64_tid1,
+      .resetvalue = 0 },
     /* Auxiliary fault status registers: these also are IMPDEF, and we
      * choose to RAZ/WI for all cores.
      */
@@ -5096,7 +5133,7 @@ static const ARMCPRegInfo el3_cp_reginfo[] = {
      .opc0 = 3, .opc1 = 6, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL3_RW, .fieldoffset = offsetof(CPUARMState, cp15.scr_el3),
      .resetvalue = 0, .writefn = scr_write },
-    { .name = "SCR",  .type = ARM_CP_ALIAS,
+    { .name = "SCR",  .type = ARM_CP_ALIAS | ARM_CP_NEWEL,
      .cp = 15, .opc1 = 0, .crn = 1, .crm = 1, .opc2 = 0,
      .access = PL1_RW, .accessfn = access_trap_aa32s_el1,
      .fieldoffset = offsetoflow32(CPUARMState, cp15.scr_el3),
@@ -5204,6 +5241,11 @@ static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri,
     if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UCT)) {
         return CP_ACCESS_TRAP;
     }
+
+    if (arm_current_el(env) < 2 && arm_hcr_el2_eff(env) & HCR_TID2) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
     return CP_ACCESS_OK;
 }
 
@@ -5932,6 +5974,52 @@ static const ARMCPRegInfo rndr_reginfo[] = {
      .access = PL0_R, .readfn = rndr_readfn },
     REGINFO_SENTINEL
 };
+
+#ifndef CONFIG_USER_ONLY
+static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
+                          uint64_t value)
+{
+    ARMCPU *cpu = env_archcpu(env);
+    /* CTR_EL0 System register -> DminLine, bits [19:16] */
+    uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
+    uint64_t vaddr_in = (uint64_t) value;
+    uint64_t vaddr = vaddr_in & ~(dline_size - 1);
+    void *haddr;
+    int mem_idx = cpu_mmu_index(env, false);
+
+    /* This won't be crossing page boundaries */
+    haddr = probe_read(env, vaddr, dline_size, mem_idx, GETPC());
+    if (haddr) {
+
+        ram_addr_t offset;
+        MemoryRegion *mr;
+
+        /* RCU lock is already being held */
+        mr = memory_region_from_host(haddr, &offset);
+
+        if (mr) {
+            memory_region_do_writeback(mr, offset, dline_size);
+        }
+    }
+}
+
+static const ARMCPRegInfo dcpop_reg[] = {
+    { .name = "DC_CVAP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 12, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo dcpodp_reg[] = {
+    { .name = "DC_CVADP", .state = ARM_CP_STATE_AA64,
+      .opc0 = 1, .opc1 = 3, .crn = 7, .crm = 13, .opc2 = 1,
+      .access = PL0_W, .type = ARM_CP_NO_RAW | ARM_CP_SUPPRESS_TB_END,
+      .accessfn = aa64_cacheop_access, .writefn = dccvap_writefn },
+    REGINFO_SENTINEL
+};
+#endif /*CONFIG_USER_ONLY*/
+
 #endif
 
 static CPAccessResult access_predinv(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -5998,6 +6086,30 @@ static CPAccessResult access_aa32_tid3(CPUARMState *env, const ARMCPRegInfo *ri,
     return CP_ACCESS_OK;
 }
 
+static CPAccessResult access_jazelle(CPUARMState *env, const ARMCPRegInfo *ri,
+                                     bool isread)
+{
+    if (arm_current_el(env) == 1 && (arm_hcr_el2_eff(env) & HCR_TID0)) {
+        return CP_ACCESS_TRAP_EL2;
+    }
+
+    return CP_ACCESS_OK;
+}
+
+static const ARMCPRegInfo jazelle_regs[] = {
+    { .name = "JIDR",
+      .cp = 14, .crn = 0, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_R, .accessfn = access_jazelle,
+      .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "JOSCR",
+      .cp = 14, .crn = 1, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    { .name = "JMCR",
+      .cp = 14, .crn = 2, .crm = 0, .opc1 = 7, .opc2 = 0,
+      .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+    REGINFO_SENTINEL
+};
+
 void register_cp_regs_for_features(ARMCPU *cpu)
 {
     /* Register all the coprocessor registers based on feature bits */
@@ -6184,7 +6296,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         ARMCPRegInfo clidr = {
             .name = "CLIDR", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
-            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
+            .access = PL1_R, .type = ARM_CP_CONST,
+            .accessfn = access_aa64_tid2,
+            .resetvalue = cpu->clidr
         };
         define_one_arm_cp_reg(cpu, &clidr);
         define_arm_cp_regs(cpu, v7_cp_reginfo);
@@ -6655,6 +6769,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (arm_feature(env, ARM_FEATURE_LPAE)) {
         define_arm_cp_regs(cpu, lpae_cp_reginfo);
     }
+    if (cpu_isar_feature(jazelle, cpu)) {
+        define_arm_cp_regs(cpu, jazelle_regs);
+    }
     /* Slightly awkwardly, the OMAP and StrongARM cores need all of
      * cp15 crn=0 to be writes-ignored, whereas for other cores they should
      * be read-only (ie write causes UNDEF exception).
@@ -6710,14 +6827,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
              .access = PL1_R, .resetvalue = cpu->midr },
            { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
              .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
+              .access = PL1_R,
+              .accessfn = access_aa64_tid1,
+              .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
            REGINFO_SENTINEL
        };
        ARMCPRegInfo id_cp_reginfo[] = {
            /* These are common to v8 and pre-v8 */
            { .name = "CTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 1,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
+              .access = PL1_R, .accessfn = ctr_el0_access,
+              .type = ARM_CP_CONST, .resetvalue = cpu->ctr },
            { .name = "CTR_EL0", .state = ARM_CP_STATE_AA64,
              .opc0 = 3, .opc1 = 3, .opc2 = 1, .crn = 0, .crm = 0,
              .access = PL0_R, .accessfn = ctr_el0_access,
@@ -6725,14 +6845,18 @@ void register_cp_regs_for_features(ARMCPU *cpu)
            /* TCMTR and TLBTR exist in v8 but have no 64-bit versions */
            { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+              .access = PL1_R,
+              .accessfn = access_aa32_tid1,
+              .type = ARM_CP_CONST, .resetvalue = 0 },
            REGINFO_SENTINEL
        };
        /* TLBTR is specific to VMSA */
        ARMCPRegInfo id_tlbtr_reginfo = {
             .name = "TLBTR",
             .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
-             .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
+             .access = PL1_R,
+             .accessfn = access_aa32_tid1,
+             .type = ARM_CP_CONST, .resetvalue = 0,
        };
        /* MPUIR is specific to PMSA V6+ */
        ARMCPRegInfo id_mpuir_reginfo = {
@@ -6968,6 +7092,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (cpu_isar_feature(aa64_rndr, cpu)) {
         define_arm_cp_regs(cpu, rndr_reginfo);
     }
+#ifndef CONFIG_USER_ONLY
+    /* Data Cache clean instructions up to PoP */
+    if (cpu_isar_feature(aa64_dcpop, cpu)) {
+        define_one_arm_cp_reg(cpu, dcpop_reg);
+
+        if (cpu_isar_feature(aa64_dcpodp, cpu)) {
+            define_one_arm_cp_reg(cpu, dcpodp_reg);
+        }
+    }
+#endif /*CONFIG_USER_ONLY*/
 #endif
 
     /*
@@ -11232,6 +11366,12 @@ static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
     if (arm_el_is_aa64(env, 1)) {
         flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
     }
+
+    if (arm_current_el(env) < 2 && env->cp15.hstr_el2 &&
+        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+        flags = FIELD_DP32(flags, TBFLAG_A32, HSTR_ACTIVE, 1);
+    }
+
     return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
 }
 
@@ -11332,6 +11472,18 @@ void HELPER(rebuild_hflags_m32)(CPUARMState *env, int el)
     env->hflags = rebuild_hflags_m32(env, fp_el, mmu_idx);
 }
 
+/*
+ * If we have triggered a EL state change we can't rely on the
+ * translator having passed it too us, we need to recompute.
+ */
+void HELPER(rebuild_hflags_a32_newel)(CPUARMState *env)
+{
+    int el = arm_current_el(env);
+    int fp_el = fp_exception_el(env, el);
+    ARMMMUIdx mmu_idx = arm_mmu_idx_el(env, el);
+    env->hflags = rebuild_hflags_a32(env, fp_el, mmu_idx);
+}
+
 void HELPER(rebuild_hflags_a32)(CPUARMState *env, int el)
 {
     int fp_el = fp_exception_el(env, el);
diff --git a/target/arm/helper.h b/target/arm/helper.h
index 3d4ec267a2..aa3d8cd08f 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -91,6 +91,7 @@ DEF_HELPER_2(get_user_reg, i32, env, i32)
 DEF_HELPER_3(set_user_reg, void, env, i32, i32)
 
 DEF_HELPER_FLAGS_2(rebuild_hflags_m32, TCG_CALL_NO_RWG, void, env, int)
+DEF_HELPER_FLAGS_1(rebuild_hflags_a32_newel, TCG_CALL_NO_RWG, void, env)
 DEF_HELPER_FLAGS_2(rebuild_hflags_a32, TCG_CALL_NO_RWG, void, env, int)
 DEF_HELPER_FLAGS_2(rebuild_hflags_a64, TCG_CALL_NO_RWG, void, env, int)
 
@@ -226,6 +227,8 @@ DEF_HELPER_FLAGS_2(rintd, TCG_CALL_NO_RWG, f64, f64, ptr)
 DEF_HELPER_FLAGS_2(vjcvt, TCG_CALL_NO_RWG, i32, f64, env)
 DEF_HELPER_FLAGS_2(fjcvtzs, TCG_CALL_NO_RWG, i64, f64, ptr)
 
+DEF_HELPER_FLAGS_3(check_hcr_el2_trap, TCG_CALL_NO_WG, void, env, i32, i32)
+
 /* neon_helper.c */
 DEF_HELPER_FLAGS_3(neon_qadd_u8, TCG_CALL_NO_RWG, i32, env, i32, i32)
 DEF_HELPER_FLAGS_3(neon_qadd_s8, TCG_CALL_NO_RWG, i32, env, i32, i32)
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index b529d6c1bf..e5a346cb87 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -603,6 +603,27 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
         raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
     }
 
+    /*
+     * Check for an EL2 trap due to HSTR_EL2. We expect EL0 accesses
+     * to sysregs non accessible at EL0 to have UNDEF-ed already.
+     */
+    if (!is_a64(env) && arm_current_el(env) < 2 && ri->cp == 15 &&
+        (arm_hcr_el2_eff(env) & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
+        uint32_t mask = 1 << ri->crn;
+
+        if (ri->type & ARM_CP_64BIT) {
+            mask = 1 << ri->crm;
+        }
+
+        /* T4 and T14 are RES0 */
+        mask &= ~((1 << 4) | (1 << 14));
+
+        if (env->cp15.hstr_el2 & mask) {
+            target_el = 2;
+            goto exept;
+        }
+    }
+
     if (!ri->accessfn) {
         return;
     }
@@ -652,6 +673,7 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
         g_assert_not_reached();
     }
 
+exept:
     raise_exception(env, EXCP_UDEF, syndrome, target_el);
 }
 
diff --git a/target/arm/translate-vfp.inc.c b/target/arm/translate-vfp.inc.c
index 85c5ef897b..bf90ac0e5b 100644
--- a/target/arm/translate-vfp.inc.c
+++ b/target/arm/translate-vfp.inc.c
@@ -761,13 +761,25 @@ static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
     if (a->l) {
         /* VMRS, move VFP special register to gp register */
         switch (a->reg) {
+        case ARM_VFP_MVFR0:
+        case ARM_VFP_MVFR1:
+        case ARM_VFP_MVFR2:
         case ARM_VFP_FPSID:
+            if (s->current_el == 1) {
+                TCGv_i32 tcg_reg, tcg_rt;
+
+                gen_set_condexec(s);
+                gen_set_pc_im(s, s->pc_curr);
+                tcg_reg = tcg_const_i32(a->reg);
+                tcg_rt = tcg_const_i32(a->rt);
+                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
+                tcg_temp_free_i32(tcg_reg);
+                tcg_temp_free_i32(tcg_rt);
+            }
+            /* fall through */
         case ARM_VFP_FPEXC:
         case ARM_VFP_FPINST:
         case ARM_VFP_FPINST2:
-        case ARM_VFP_MVFR0:
-        case ARM_VFP_MVFR1:
-        case ARM_VFP_MVFR2:
             tmp = load_cpu_field(vfp.xregs[a->reg]);
             break;
         case ARM_VFP_FPSCR:
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 4d5d4bd888..2b6c1f91bf 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -6897,7 +6897,7 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
         return 1;
     }
 
-    if (ri->accessfn ||
+    if (s->hstr_active || ri->accessfn ||
         (arm_dc_feature(s, ARM_FEATURE_XSCALE) && cpnum < 14)) {
         /* Emit code to perform further access permissions checks at
          * runtime; this may result in an exception.
@@ -7083,7 +7083,11 @@ static int disas_coproc_insn(DisasContext *s, uint32_t insn)
             if (arm_dc_feature(s, ARM_FEATURE_M)) {
                 gen_helper_rebuild_hflags_m32(cpu_env, tcg_el);
             } else {
-                gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+                if (ri->type & ARM_CP_NEWEL) {
+                    gen_helper_rebuild_hflags_a32_newel(cpu_env);
+                } else {
+                    gen_helper_rebuild_hflags_a32(cpu_env, tcg_el);
+                }
             }
             tcg_temp_free_i32(tcg_el);
             /*
@@ -10843,6 +10847,7 @@ static void arm_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
             !arm_el_is_aa64(env, 3);
         dc->thumb = FIELD_EX32(tb_flags, TBFLAG_A32, THUMB);
         dc->sctlr_b = FIELD_EX32(tb_flags, TBFLAG_A32, SCTLR_B);
+        dc->hstr_active = FIELD_EX32(tb_flags, TBFLAG_A32, HSTR_ACTIVE);
         dc->be_data = FIELD_EX32(tb_flags, TBFLAG_ANY, BE_DATA) ? MO_BE : MO_LE;
         condexec = FIELD_EX32(tb_flags, TBFLAG_A32, CONDEXEC);
         dc->condexec_mask = (condexec & 0xf) << 1;
diff --git a/target/arm/translate.h b/target/arm/translate.h
index dd24f91f26..b837b7fcbf 100644
--- a/target/arm/translate.h
+++ b/target/arm/translate.h
@@ -77,6 +77,8 @@ typedef struct DisasContext {
     bool pauth_active;
     /* True with v8.5-BTI and SCTLR_ELx.BT* set.  */
     bool bt;
+    /* True if any CP15 access is trapped by HSTR_EL2 */
+    bool hstr_active;
     /*
      * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
      * < 0, set by the current instruction.
diff --git a/target/arm/vfp_helper.c b/target/arm/vfp_helper.c
index 9710ef1c3e..0ae7d4f34a 100644
--- a/target/arm/vfp_helper.c
+++ b/target/arm/vfp_helper.c
@@ -1322,4 +1322,33 @@ float64 HELPER(frint64_d)(float64 f, void *fpst)
     return frint_d(f, fpst, 64);
 }
 
+void HELPER(check_hcr_el2_trap)(CPUARMState *env, uint32_t rt, uint32_t reg)
+{
+    uint32_t syndrome;
+
+    switch (reg) {
+    case ARM_VFP_MVFR0:
+    case ARM_VFP_MVFR1:
+    case ARM_VFP_MVFR2:
+        if (!(arm_hcr_el2_eff(env) & HCR_TID3)) {
+            return;
+        }
+        break;
+    case ARM_VFP_FPSID:
+        if (!(arm_hcr_el2_eff(env) & HCR_TID0)) {
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+
+    syndrome = ((EC_FPIDTRAP << ARM_EL_EC_SHIFT)
+                | ARM_EL_IL
+                | (1 << 24) | (0xe << 20) | (7 << 14)
+                | (reg << 10) | (rt << 5) | 1);
+
+    raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
+}
+
 #endif