Diffstat (limited to 'target/riscv/cpu_helper.c')
-rw-r--r-- | target/riscv/cpu_helper.c | 55 |
1 file changed, 50 insertions, 5 deletions
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 8b6754b917..e32b6126af 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -132,6 +132,16 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
     }
     /* tlb_flush is unnecessary as mode is contained in mmu_idx */
     env->priv = newpriv;
+
+    /*
+     * Clear the load reservation - otherwise a reservation placed in one
+     * context/process can be used by another, resulting in an SC succeeding
+     * incorrectly. Version 2.2 of the ISA specification explicitly requires
+     * this behaviour, while later revisions say that the kernel "should" use
+     * an SC instruction to force the yielding of a load reservation on a
+     * preemptive context switch. As a result, do both.
+     */
+    env->load_res = -1;
 }
 
 /* get_physical_address - get the physical address for this virtual address
@@ -230,6 +240,12 @@ restart:
 
         /* check that physical address of PTE is legal */
         target_ulong pte_addr = base + idx * ptesize;
+
+        if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+            !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
+            1 << MMU_DATA_LOAD, PRV_S)) {
+            return TRANSLATE_PMP_FAIL;
+        }
 #if defined(TARGET_RISCV32)
         target_ulong pte = ldl_phys(cs->as, pte_addr);
 #elif defined(TARGET_RISCV64)
@@ -337,12 +353,13 @@ restart:
 }
 
 static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
-                                MMUAccessType access_type)
+                                MMUAccessType access_type, bool pmp_violation)
 {
     CPUState *cs = env_cpu(env);
     int page_fault_exceptions =
         (env->priv_ver >= PRIV_VERSION_1_10_0) &&
-        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
+        get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
+        !pmp_violation;
     switch (access_type) {
     case MMU_INST_FETCH:
         cs->exception_index = page_fault_exceptions ?
@@ -375,6 +392,22 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     return phys_addr;
 }
 
+void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write,
+                                 bool is_exec, int unused, unsigned size)
+{
+    RISCVCPU *cpu = RISCV_CPU(cs);
+    CPURISCVState *env = &cpu->env;
+
+    if (is_write) {
+        cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+    } else {
+        cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT;
+    }
+
+    env->badaddr = addr;
+    riscv_raise_exception(&cpu->env, cs->exception_index, GETPC());
+}
+
 void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                    MMUAccessType access_type, int mmu_idx,
                                    uintptr_t retaddr)
@@ -408,20 +441,32 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     CPURISCVState *env = &cpu->env;
     hwaddr pa = 0;
     int prot;
+    bool pmp_violation = false;
     int ret = TRANSLATE_FAIL;
+    int mode = mmu_idx;
 
     qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
                   __func__, address, access_type, mmu_idx);
 
     ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
 
+    if (mode == PRV_M && access_type != MMU_INST_FETCH) {
+        if (get_field(env->mstatus, MSTATUS_MPRV)) {
+            mode = get_field(env->mstatus, MSTATUS_MPP);
+        }
+    }
+
     qemu_log_mask(CPU_LOG_MMU,
                   "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
                   " prot %d\n", __func__, address, ret, pa, prot);
 
     if (riscv_feature(env, RISCV_FEATURE_PMP) &&
-        !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) {
-        ret = TRANSLATE_FAIL;
+        (ret == TRANSLATE_SUCCESS) &&
+        !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
+        ret = TRANSLATE_PMP_FAIL;
+    }
+    if (ret == TRANSLATE_PMP_FAIL) {
+        pmp_violation = true;
     }
     if (ret == TRANSLATE_SUCCESS) {
         tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
@@ -430,7 +475,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
     } else if (probe) {
         return false;
     } else {
-        raise_mmu_exception(env, address, access_type);
+        raise_mmu_exception(env, address, access_type, pmp_violation);
        riscv_raise_exception(env, cs->exception_index, retaddr);
     }
 #else