author | Peter Maydell <peter.maydell@linaro.org> | 2019-07-04 11:09:19 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2019-07-04 11:09:19 +0100
commit | aff8cee805e5b3c2b6e38ddb85448818a19a48db (patch)
tree | 49272e53612c6b2428c398c1a1a59bc666a8c66b /target
parent | b2e1bc59f0f1ce0fd3962757ec4a334363b37f47 (diff)
parent | 395fd69582a00b76a89c12d9c074055a9d207997 (diff)
Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-4.1-sf1-v3' into staging
RISC-V Patches for the 4.1 Soft Freeze, Part 2 v3
This pull request contains a handful of patches that I'd like to target
for the 4.1 soft freeze. The new features are:
* Support for the 1.11.0 privileged specification, the latest release.
* Support for reading and writing the PRCI registers.
* Better control over the ISA of the target machine, via per-extension
CPU properties (a sketch follows this list).
* Support for the cpu-topology device tree node.
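The "better control over the ISA" item corresponds to the new per-extension
CPU properties ("i", "e", "g", "m", "a", "f", "d", "c", "s", "u", ...) added in
the target/riscv/cpu.c hunk further down this page: the generic rv32/rv64
models now start with an empty misa and assemble it at realize time. Below is
a minimal, self-contained sketch of that idea, not QEMU code; the struct and
function names are invented for illustration, and only the G-implies-IMAFD
expansion and the one-bit-per-letter packing are taken from the patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustration only (not QEMU code): fold per-extension switches into a
 * misa-style bitmask, one bit per extension letter (bit 0 = 'A').
 * The real logic lives in riscv_cpu_realize(), which additionally rejects
 * enabling I and E together and requires at least one of them.
 */
#define EXT(letter) (1u << ((letter) - 'A'))

struct ext_cfg {
    bool i, e, g, m, a, f, d, c, s, u;
};

static uint32_t build_misa(struct ext_cfg cfg)
{
    uint32_t misa = 0;

    if (cfg.g) {
        /* G is shorthand for IMAFD, mirroring the patch's warn_report() path. */
        cfg.i = cfg.m = cfg.a = cfg.f = cfg.d = true;
    }

    if (cfg.i) misa |= EXT('I');
    if (cfg.e) misa |= EXT('E');
    if (cfg.m) misa |= EXT('M');
    if (cfg.a) misa |= EXT('A');
    if (cfg.f) misa |= EXT('F');
    if (cfg.d) misa |= EXT('D');
    if (cfg.c) misa |= EXT('C');
    if (cfg.s) misa |= EXT('S');
    if (cfg.u) misa |= EXT('U');
    return misa;
}

int main(void)
{
    /* Roughly what asking the new rv64 model for G+S+U without C looks like. */
    struct ext_cfg cfg = { .g = true, .s = true, .u = true };
    printf("misa extension bits: 0x%08x\n", build_misa(cfg));
    return 0;
}
```

On the command line this should surface as boolean properties on the rv32/rv64
models (something like "-cpu rv64,c=false"; the property names come from the
DEFINE_PROP_BOOL() lines in the diff, and the same hunk also accepts
priv_spec=v1.11.0 for the new privileged spec).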
Additionally, there are several bug fixes, including:
* Load reservations are now broken both by store-conditional instructions
and by scheduling, which fixes issues with parallel applications (a small
model of the rule follows below).
* Various fixes to the PMP implementation.
* Fixes to the 32-bit linux-user syscall ABI.
* Various fixes for instruction decoding.
* A fix to the PCI device tree "bus-range" property.
This boots 32-bit and 64-bit OpenEmbedded.
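As the diff below shows, the load-reservation fix amounts to invalidating the
reservation (env->load_res = -1) in two places: when the hart changes privilege
mode in riscv_cpu_set_mode() (the context-switch path) and at the end of every
store-conditional in gen_sc() (tcg_gen_movi_tl(load_res, -1)). The toy program
below is plain C with invented names, not QEMU code; it models the rule that a
reservation taken before a context switch can never let a later SC succeed.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy model of the reservation rule, not QEMU code.  QEMU stores -1 in
 * env->load_res to mean "no reservation held"; this model does the same. */
struct hart {
    int64_t load_res;
};

static void lr(struct hart *h, int64_t addr)
{
    h->load_res = addr;               /* LR places a reservation */
}

static bool sc(struct hart *h, int64_t addr)
{
    bool ok = (h->load_res == addr);  /* SC succeeds only if it still holds... */
    h->load_res = -1;                 /* ...and always clears it afterwards */
    return ok;
}

static void context_switch(struct hart *h)
{
    h->load_res = -1;                 /* scheduling breaks the reservation too */
}

int main(void)
{
    struct hart h = { .load_res = -1 };

    lr(&h, 0x1000);
    context_switch(&h);               /* another task runs in between */
    printf("SC after a context switch %s\n",
           sc(&h, 0x1000) ? "succeeds (old bug)" : "fails (fixed behaviour)");
    return 0;
}
```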
Changes since v2 [riscv-for-master-4.1-sf1-v2]:
* Dropped OpenSBI.
Changes since v1 [riscv-for-master-4.1-sf1]:
* Contains a fix to the sifive_u OpenSBI integration.
# gpg: Signature made Wed 03 Jul 2019 09:39:09 BST
# gpg: using RSA key 00CE76D1834960DFCE886DF8EF4CA1502CCBAB41
# gpg: issuer "palmer@dabbelt.com"
# gpg: Good signature from "Palmer Dabbelt <palmer@dabbelt.com>" [unknown]
# gpg: aka "Palmer Dabbelt <palmer@sifive.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 00CE 76D1 8349 60DF CE88 6DF8 EF4C A150 2CCB AB41
* remotes/palmer/tags/riscv-for-master-4.1-sf1-v3: (32 commits)
hw/riscv: Extend the kernel loading support
hw/riscv: Add support for loading a firmware
hw/riscv: Split out the boot functions
riscv: sifive_u: Update the plic hart config to support multicore
riscv: sifive_u: Do not create hard-coded phandles in DT
disas/riscv: Fix `rdinstreth` constraint
disas/riscv: Disassemble reserved compressed encodings as illegal
riscv: virt: Add cpu-topology DT node.
RISC-V: Update syscall list for 32-bit support.
RISC-V: Clear load reservations on context switch and SC
RISC-V: Add support for the Zicsr extension
RISC-V: Add support for the Zifencei extension
target/riscv: Add support for disabling/enabling Counters
target/riscv: Remove user version information
target/riscv: Require either I or E base extension
qemu-deprecated.texi: Deprecate the RISC-V privilege spec 1.09.1
target/riscv: Set privilege spec 1.11.0 as default
target/riscv: Add the mcountinhibit CSR
target/riscv: Add the privilege spec version 1.11.0
target/riscv: Restructure deprecated CPUs
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r-- | target/riscv/cpu.c | 137
-rw-r--r-- | target/riscv/cpu.h | 33
-rw-r--r-- | target/riscv/cpu_bits.h | 1
-rw-r--r-- | target/riscv/cpu_helper.c | 55
-rw-r--r-- | target/riscv/csr.c | 30
-rw-r--r-- | target/riscv/insn_trans/trans_privileged.inc.c | 2
-rw-r--r-- | target/riscv/insn_trans/trans_rva.inc.c | 8
-rw-r--r-- | target/riscv/insn_trans/trans_rvi.inc.c | 4
-rw-r--r-- | target/riscv/pmp.c | 17
-rw-r--r-- | target/riscv/pmp.h | 2
-rw-r--r-- | target/riscv/translate.c | 3
11 files changed, 231 insertions, 61 deletions
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 6f2b644220..f8d07bd20a 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -24,6 +24,7 @@ #include "cpu.h" #include "exec/exec-all.h" #include "qapi/error.h" +#include "qemu/error-report.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" @@ -88,9 +89,8 @@ static void set_misa(CPURISCVState *env, target_ulong misa) env->misa_mask = env->misa = misa; } -static void set_versions(CPURISCVState *env, int user_ver, int priv_ver) +static void set_priv_version(CPURISCVState *env, int priv_ver) { - env->user_ver = user_ver; env->priv_ver = priv_ver; } @@ -110,7 +110,7 @@ static void riscv_any_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0); + set_priv_version(env, PRIV_VERSION_1_11_0); set_resetvec(env, DEFAULT_RSTVEC); } @@ -119,14 +119,15 @@ static void riscv_any_cpu_init(Object *obj) static void riscv_base32_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + /* We set this in the realise function */ + set_misa(env, 0); } static void rv32gcsu_priv1_09_1_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1); + set_priv_version(env, PRIV_VERSION_1_09_1); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); @@ -136,7 +137,7 @@ static void rv32gcsu_priv1_10_0_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0); + set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); @@ -146,7 +147,7 @@ static void rv32imacu_nommu_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0); + set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_PMP); } @@ -156,14 +157,15 @@ static void rv32imacu_nommu_cpu_init(Object *obj) static void riscv_base64_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; - set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); + /* We set this in the realise function */ + set_misa(env, 0); } static void rv64gcsu_priv1_09_1_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1); + set_priv_version(env, PRIV_VERSION_1_09_1); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); @@ -173,7 +175,7 @@ static void rv64gcsu_priv1_10_0_cpu_init(Object *obj) { CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0); + set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_MMU); set_feature(env, RISCV_FEATURE_PMP); @@ -183,7 +185,7 @@ static void rv64imacu_nommu_cpu_init(Object *obj) { 
CPURISCVState *env = &RISCV_CPU(obj)->env; set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU); - set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0); + set_priv_version(env, PRIV_VERSION_1_10_0); set_resetvec(env, DEFAULT_RSTVEC); set_feature(env, RISCV_FEATURE_PMP); } @@ -295,6 +297,7 @@ static void riscv_cpu_reset(CPUState *cs) env->pc = env->resetvec; #endif cs->exception_index = EXCP_NONE; + env->load_res = -1; set_default_nan_mode(1, &env->fp_status); } @@ -313,8 +316,8 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) RISCVCPU *cpu = RISCV_CPU(dev); CPURISCVState *env = &cpu->env; RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev); - int priv_version = PRIV_VERSION_1_10_0; - int user_version = USER_VERSION_2_02_0; + int priv_version = PRIV_VERSION_1_11_0; + target_ulong target_misa = 0; Error *local_err = NULL; cpu_exec_realizefn(cs, &local_err); @@ -324,7 +327,9 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } if (cpu->cfg.priv_spec) { - if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { + if (!g_strcmp0(cpu->cfg.priv_spec, "v1.11.0")) { + priv_version = PRIV_VERSION_1_11_0; + } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.10.0")) { priv_version = PRIV_VERSION_1_10_0; } else if (!g_strcmp0(cpu->cfg.priv_spec, "v1.9.1")) { priv_version = PRIV_VERSION_1_09_1; @@ -336,18 +341,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } - if (cpu->cfg.user_spec) { - if (!g_strcmp0(cpu->cfg.user_spec, "v2.02.0")) { - user_version = USER_VERSION_2_02_0; - } else { - error_setg(errp, - "Unsupported user spec version '%s'", - cpu->cfg.user_spec); - return; - } - } - - set_versions(env, user_version, priv_version); + set_priv_version(env, priv_version); set_resetvec(env, DEFAULT_RSTVEC); if (cpu->cfg.mmu) { @@ -358,6 +352,64 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) set_feature(env, RISCV_FEATURE_PMP); } + /* If misa isn't set (rv32 and rv64 machines) set it here */ + if (!env->misa) { + /* Do some ISA extension error checking */ + if (cpu->cfg.ext_i && cpu->cfg.ext_e) { + error_setg(errp, + "I and E extensions are incompatible"); + return; + } + + if (!cpu->cfg.ext_i && !cpu->cfg.ext_e) { + error_setg(errp, + "Either I or E extension must be set"); + return; + } + + if (cpu->cfg.ext_g && !(cpu->cfg.ext_i & cpu->cfg.ext_m & + cpu->cfg.ext_a & cpu->cfg.ext_f & + cpu->cfg.ext_d)) { + warn_report("Setting G will also set IMAFD"); + cpu->cfg.ext_i = true; + cpu->cfg.ext_m = true; + cpu->cfg.ext_a = true; + cpu->cfg.ext_f = true; + cpu->cfg.ext_d = true; + } + + /* Set the ISA extensions, checks should have happened above */ + if (cpu->cfg.ext_i) { + target_misa |= RVI; + } + if (cpu->cfg.ext_e) { + target_misa |= RVE; + } + if (cpu->cfg.ext_m) { + target_misa |= RVM; + } + if (cpu->cfg.ext_a) { + target_misa |= RVA; + } + if (cpu->cfg.ext_f) { + target_misa |= RVF; + } + if (cpu->cfg.ext_d) { + target_misa |= RVD; + } + if (cpu->cfg.ext_c) { + target_misa |= RVC; + } + if (cpu->cfg.ext_s) { + target_misa |= RVS; + } + if (cpu->cfg.ext_u) { + target_misa |= RVU; + } + + set_misa(env, RVXLEN | target_misa); + } + riscv_cpu_register_gdb_regs_for_features(cs); qemu_init_vcpu(cs); @@ -379,8 +431,20 @@ static const VMStateDescription vmstate_riscv_cpu = { }; static Property riscv_cpu_properties[] = { + DEFINE_PROP_BOOL("i", RISCVCPU, cfg.ext_i, true), + DEFINE_PROP_BOOL("e", RISCVCPU, cfg.ext_e, false), + DEFINE_PROP_BOOL("g", RISCVCPU, cfg.ext_g, true), + DEFINE_PROP_BOOL("m", RISCVCPU, cfg.ext_m, true), + DEFINE_PROP_BOOL("a", 
RISCVCPU, cfg.ext_a, true), + DEFINE_PROP_BOOL("f", RISCVCPU, cfg.ext_f, true), + DEFINE_PROP_BOOL("d", RISCVCPU, cfg.ext_d, true), + DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true), + DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true), + DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true), + DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true), + DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true), + DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true), DEFINE_PROP_STRING("priv_spec", RISCVCPU, cfg.priv_spec), - DEFINE_PROP_STRING("user_spec", RISCVCPU, cfg.user_spec), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), DEFINE_PROP_END_OF_LIST(), @@ -416,6 +480,7 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data) cc->gdb_stop_before_watchpoint = true; cc->disas_set_info = riscv_cpu_disas_set_info; #ifndef CONFIG_USER_ONLY + cc->do_unassigned_access = riscv_cpu_unassigned_access; cc->do_unaligned_access = riscv_cpu_do_unaligned_access; cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug; #endif @@ -492,18 +557,20 @@ static const TypeInfo riscv_cpu_type_infos[] = { DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init), #if defined(TARGET_RISCV32) DEFINE_CPU(TYPE_RISCV_CPU_BASE32, riscv_base32_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_09_1, rv32gcsu_priv1_09_1_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_10_0, rv32gcsu_priv1_10_0_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV32IMACU_NOMMU, rv32imacu_nommu_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32imacu_nommu_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init) + DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init), + /* Depreacted */ + DEFINE_CPU(TYPE_RISCV_CPU_RV32IMACU_NOMMU, rv32imacu_nommu_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_09_1, rv32gcsu_priv1_09_1_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_10_0, rv32gcsu_priv1_10_0_cpu_init) #elif defined(TARGET_RISCV64) DEFINE_CPU(TYPE_RISCV_CPU_BASE64, riscv_base64_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_09_1, rv64gcsu_priv1_09_1_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_10_0, rv64gcsu_priv1_10_0_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_RV64IMACU_NOMMU, rv64imacu_nommu_cpu_init), DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64imacu_nommu_cpu_init), - DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64gcsu_priv1_10_0_cpu_init) + DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64gcsu_priv1_10_0_cpu_init), + /* Deprecated */ + DEFINE_CPU(TYPE_RISCV_CPU_RV64IMACU_NOMMU, rv64imacu_nommu_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_09_1, rv64gcsu_priv1_09_1_cpu_init), + DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_10_0, rv64gcsu_priv1_10_0_cpu_init) #endif }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 6c5de37b25..0adb307f32 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -35,16 +35,17 @@ #define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any") #define TYPE_RISCV_CPU_BASE32 RISCV_CPU_TYPE_NAME("rv32") #define TYPE_RISCV_CPU_BASE64 RISCV_CPU_TYPE_NAME("rv64") -#define TYPE_RISCV_CPU_RV32GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.9.1") -#define TYPE_RISCV_CPU_RV32GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.10.0") -#define TYPE_RISCV_CPU_RV32IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv32imacu-nommu") -#define TYPE_RISCV_CPU_RV64GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.9.1") -#define TYPE_RISCV_CPU_RV64GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.10.0") -#define TYPE_RISCV_CPU_RV64IMACU_NOMMU 
RISCV_CPU_TYPE_NAME("rv64imacu-nommu") #define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31") #define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51") #define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34") #define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54") +/* Deprecated */ +#define TYPE_RISCV_CPU_RV32IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv32imacu-nommu") +#define TYPE_RISCV_CPU_RV32GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.9.1") +#define TYPE_RISCV_CPU_RV32GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.10.0") +#define TYPE_RISCV_CPU_RV64IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv64imacu-nommu") +#define TYPE_RISCV_CPU_RV64GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.9.1") +#define TYPE_RISCV_CPU_RV64GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.10.0") #define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2)) #define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2)) @@ -77,10 +78,11 @@ enum { RISCV_FEATURE_MISA }; -#define USER_VERSION_2_02_0 0x00020200 #define PRIV_VERSION_1_09_1 0x00010901 #define PRIV_VERSION_1_10_0 0x00011000 +#define PRIV_VERSION_1_11_0 0x00011100 +#define TRANSLATE_PMP_FAIL 2 #define TRANSLATE_FAIL 1 #define TRANSLATE_SUCCESS 0 #define MMU_USER_IDX 3 @@ -102,7 +104,6 @@ struct CPURISCVState { target_ulong badaddr; - target_ulong user_ver; target_ulong priv_ver; target_ulong misa; target_ulong misa_mask; @@ -211,6 +212,20 @@ typedef struct RISCVCPU { /* Configuration Settings */ struct { + bool ext_i; + bool ext_e; + bool ext_g; + bool ext_m; + bool ext_a; + bool ext_f; + bool ext_d; + bool ext_c; + bool ext_s; + bool ext_u; + bool ext_counters; + bool ext_ifencei; + bool ext_icsr; + char *priv_spec; char *user_spec; bool mmu; @@ -248,6 +263,8 @@ void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, MMUAccessType access_type, int mmu_idx, bool probe, uintptr_t retaddr); +void riscv_cpu_unassigned_access(CPUState *cpu, hwaddr addr, bool is_write, + bool is_exec, int unused, unsigned size); char *riscv_isa_string(RISCVCPU *cpu); void riscv_cpu_list(void); diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 47450a3cdb..11f971ad5d 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -136,6 +136,7 @@ #define CSR_MCOUNTEREN 0x306 /* Legacy Counter Setup (priv v1.9.1) */ +/* Update to #define CSR_MCOUNTINHIBIT 0x320 for 1.11.0 */ #define CSR_MUCOUNTEREN 0x320 #define CSR_MSCOUNTEREN 0x321 #define CSR_MHCOUNTEREN 0x322 diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 8b6754b917..e32b6126af 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -132,6 +132,16 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) } /* tlb_flush is unnecessary as mode is contained in mmu_idx */ env->priv = newpriv; + + /* + * Clear the load reservation - otherwise a reservation placed in one + * context/process can be used by another, resulting in an SC succeeding + * incorrectly. Version 2.2 of the ISA specification explicitly requires + * this behaviour, while later revisions say that the kernel "should" use + * an SC instruction to force the yielding of a load reservation on a + * preemptive context switch. As a result, do both. 
+ */ + env->load_res = -1; } /* get_physical_address - get the physical address for this virtual address @@ -230,6 +240,12 @@ restart: /* check that physical address of PTE is legal */ target_ulong pte_addr = base + idx * ptesize; + + if (riscv_feature(env, RISCV_FEATURE_PMP) && + !pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong), + 1 << MMU_DATA_LOAD, PRV_S)) { + return TRANSLATE_PMP_FAIL; + } #if defined(TARGET_RISCV32) target_ulong pte = ldl_phys(cs->as, pte_addr); #elif defined(TARGET_RISCV64) @@ -337,12 +353,13 @@ restart: } static void raise_mmu_exception(CPURISCVState *env, target_ulong address, - MMUAccessType access_type) + MMUAccessType access_type, bool pmp_violation) { CPUState *cs = env_cpu(env); int page_fault_exceptions = (env->priv_ver >= PRIV_VERSION_1_10_0) && - get_field(env->satp, SATP_MODE) != VM_1_10_MBARE; + get_field(env->satp, SATP_MODE) != VM_1_10_MBARE && + !pmp_violation; switch (access_type) { case MMU_INST_FETCH: cs->exception_index = page_fault_exceptions ? @@ -375,6 +392,22 @@ hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) return phys_addr; } +void riscv_cpu_unassigned_access(CPUState *cs, hwaddr addr, bool is_write, + bool is_exec, int unused, unsigned size) +{ + RISCVCPU *cpu = RISCV_CPU(cs); + CPURISCVState *env = &cpu->env; + + if (is_write) { + cs->exception_index = RISCV_EXCP_STORE_AMO_ACCESS_FAULT; + } else { + cs->exception_index = RISCV_EXCP_LOAD_ACCESS_FAULT; + } + + env->badaddr = addr; + riscv_raise_exception(&cpu->env, cs->exception_index, GETPC()); +} + void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr, MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) @@ -408,20 +441,32 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, CPURISCVState *env = &cpu->env; hwaddr pa = 0; int prot; + bool pmp_violation = false; int ret = TRANSLATE_FAIL; + int mode = mmu_idx; qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n", __func__, address, access_type, mmu_idx); ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx); + if (mode == PRV_M && access_type != MMU_INST_FETCH) { + if (get_field(env->mstatus, MSTATUS_MPRV)) { + mode = get_field(env->mstatus, MSTATUS_MPP); + } + } + qemu_log_mask(CPU_LOG_MMU, "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx " prot %d\n", __func__, address, ret, pa, prot); if (riscv_feature(env, RISCV_FEATURE_PMP) && - !pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << access_type)) { - ret = TRANSLATE_FAIL; + (ret == TRANSLATE_SUCCESS) && + !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) { + ret = TRANSLATE_PMP_FAIL; + } + if (ret == TRANSLATE_PMP_FAIL) { + pmp_violation = true; } if (ret == TRANSLATE_SUCCESS) { tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK, @@ -430,7 +475,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size, } else if (probe) { return false; } else { - raise_mmu_exception(env, address, access_type); + raise_mmu_exception(env, address, access_type, pmp_violation); riscv_raise_exception(env, cs->exception_index, retaddr); } #else diff --git a/target/riscv/csr.c b/target/riscv/csr.c index c67d29e206..e0d4586760 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -56,8 +56,24 @@ static int fs(CPURISCVState *env, int csrno) static int ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) + CPUState *cs = env_cpu(env); + RISCVCPU *cpu = RISCV_CPU(cs); uint32_t ctr_en = ~0u; + if (!cpu->cfg.ext_counters) { + /* The Counters extensions is not enabled */ 
+ return -1; + } + + /* + * The counters are always enabled at run time on newer priv specs, as the + * CSR has changed from controlling that the counters can be read to + * controlling that the counters increment. + */ + if (env->priv_ver > PRIV_VERSION_1_09_1) { + return 0; + } + if (env->priv < PRV_M) { ctr_en &= env->mcounteren; } @@ -461,18 +477,22 @@ static int write_mcounteren(CPURISCVState *env, int csrno, target_ulong val) return 0; } +/* This regiser is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ static int read_mscounteren(CPURISCVState *env, int csrno, target_ulong *val) { - if (env->priv_ver > PRIV_VERSION_1_09_1) { + if (env->priv_ver > PRIV_VERSION_1_09_1 + && env->priv_ver < PRIV_VERSION_1_11_0) { return -1; } *val = env->mcounteren; return 0; } +/* This regiser is replaced with CSR_MCOUNTINHIBIT in 1.11.0 */ static int write_mscounteren(CPURISCVState *env, int csrno, target_ulong val) { - if (env->priv_ver > PRIV_VERSION_1_09_1) { + if (env->priv_ver > PRIV_VERSION_1_09_1 + && env->priv_ver < PRIV_VERSION_1_11_0) { return -1; } env->mcounteren = val; @@ -773,6 +793,7 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, { int ret; target_ulong old_value; + RISCVCPU *cpu = env_archcpu(env); /* check privileges and return -1 if check fails */ #if !defined(CONFIG_USER_ONLY) @@ -783,6 +804,11 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, } #endif + /* ensure the CSR extension is enabled. */ + if (!cpu->cfg.ext_icsr) { + return -1; + } + /* check predicate */ if (!csr_ops[csrno].predicate || csr_ops[csrno].predicate(env, csrno) < 0) { return -1; diff --git a/target/riscv/insn_trans/trans_privileged.inc.c b/target/riscv/insn_trans/trans_privileged.inc.c index 664d6ba3f2..c5e4b3e49a 100644 --- a/target/riscv/insn_trans/trans_privileged.inc.c +++ b/target/riscv/insn_trans/trans_privileged.inc.c @@ -90,7 +90,7 @@ static bool trans_wfi(DisasContext *ctx, arg_wfi *a) static bool trans_sfence_vma(DisasContext *ctx, arg_sfence_vma *a) { #ifndef CONFIG_USER_ONLY - if (ctx->priv_ver == PRIV_VERSION_1_10_0) { + if (ctx->priv_ver >= PRIV_VERSION_1_10_0) { gen_helper_tlb_flush(cpu_env); return true; } diff --git a/target/riscv/insn_trans/trans_rva.inc.c b/target/riscv/insn_trans/trans_rva.inc.c index f6dbbc065e..fadd88849e 100644 --- a/target/riscv/insn_trans/trans_rva.inc.c +++ b/target/riscv/insn_trans/trans_rva.inc.c @@ -61,7 +61,7 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) gen_set_label(l1); /* - * Address comparion failure. However, we still need to + * Address comparison failure. However, we still need to * provide the memory barrier implied by AQ/RL. */ tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL); @@ -69,6 +69,12 @@ static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop) gen_set_gpr(a->rd, dat); gen_set_label(l2); + /* + * Clear the load reservation, since an SC must fail if there is + * an SC to any address, in between an LR and SC pair. 
+ */ + tcg_gen_movi_tl(load_res, -1); + tcg_temp_free(dat); tcg_temp_free(src1); tcg_temp_free(src2); diff --git a/target/riscv/insn_trans/trans_rvi.inc.c b/target/riscv/insn_trans/trans_rvi.inc.c index 6cda078ed6..ea6473111c 100644 --- a/target/riscv/insn_trans/trans_rvi.inc.c +++ b/target/riscv/insn_trans/trans_rvi.inc.c @@ -484,6 +484,10 @@ static bool trans_fence(DisasContext *ctx, arg_fence *a) static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) { + if (!ctx->ext_ifencei) { + return false; + } + /* * FENCE_I is a no-op in QEMU, * however we need to end the translation block diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c index fed1c3c030..958c7502a0 100644 --- a/target/riscv/pmp.c +++ b/target/riscv/pmp.c @@ -228,7 +228,7 @@ static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr) * Check if the address has required RWX privs to complete desired operation */ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, - target_ulong size, pmp_priv_t privs) + target_ulong size, pmp_priv_t privs, target_ulong mode) { int i = 0; int ret = -1; @@ -245,7 +245,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, from low to high */ for (i = 0; i < MAX_RISCV_PMPS; i++) { s = pmp_is_in_range(env, i, addr); - e = pmp_is_in_range(env, i, addr + size); + e = pmp_is_in_range(env, i, addr + size - 1); /* partially inside */ if ((s + e) == 1) { @@ -258,13 +258,14 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, /* fully inside */ const uint8_t a_field = pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg); - if ((s + e) == 2) { - if (PMP_AMATCH_OFF == a_field) { - return 1; - } + /* + * If the PMP entry is not off and the address is in range, do the priv + * check + */ + if (((s + e) == 2) && (PMP_AMATCH_OFF != a_field)) { allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC; - if ((env->priv != PRV_M) || pmp_is_locked(env, i)) { + if ((mode != PRV_M) || pmp_is_locked(env, i)) { allowed_privs &= env->pmp_state.pmp[i].cfg_reg; } @@ -280,7 +281,7 @@ bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, /* No rule matched */ if (ret == -1) { - if (env->priv == PRV_M) { + if (mode == PRV_M) { ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an * M-Mode access, the access succeeds */ } else { diff --git a/target/riscv/pmp.h b/target/riscv/pmp.h index 66790950eb..8e19793132 100644 --- a/target/riscv/pmp.h +++ b/target/riscv/pmp.h @@ -59,6 +59,6 @@ void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index, target_ulong val); target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index); bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr, - target_ulong size, pmp_priv_t priv); + target_ulong size, pmp_priv_t priv, target_ulong mode); #endif diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 313c27b700..8d6ab73258 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -54,6 +54,7 @@ typedef struct DisasContext { to any system register, which includes CSR_FRM, so we do not have to reset this known value. 
*/ int frm; + bool ext_ifencei; } DisasContext; #ifdef TARGET_RISCV64 @@ -752,6 +753,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) { DisasContext *ctx = container_of(dcbase, DisasContext, base); CPURISCVState *env = cs->env_ptr; + RISCVCPU *cpu = RISCV_CPU(cs); ctx->pc_succ_insn = ctx->base.pc_first; ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK; @@ -759,6 +761,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) ctx->priv_ver = env->priv_ver; ctx->misa = env->misa; ctx->frm = -1; /* unknown rounding mode */ + ctx->ext_ifencei = cpu->cfg.ext_ifencei; } static void riscv_tr_tb_start(DisasContextBase *db, CPUState *cpu) |