author     Peter Maydell <peter.maydell@linaro.org>  2020-03-03 11:06:39 +0000
committer  Peter Maydell <peter.maydell@linaro.org>  2020-03-03 11:06:39 +0000
commit     2ac031d171ccd18c973014d9978b4a63f0ad5fb0 (patch)
tree       d0c01e70238eb9c69730dbbe06e8691a3aa75501 /target
parent     c81acb643a61db199b9198add7972d8a8496b27c (diff)
parent     5f3616ccceb5d5c49f99838c78498e581fb42fc5 (diff)
Merge remote-tracking branch 'remotes/palmer/tags/riscv-for-master-5.0-sf3' into staging
RISC-V Patches for the 5.0 Soft Freeze, Part 3

This pull request is almost entirely an implementation of the draft hypervisor
extension. This extension is still in draft and is expected to have
incompatible changes before being frozen, but we've had good luck managing
other RISC-V draft extensions in QEMU so far. Additionally, there's a fix to
PCI addressing and some improvements to the M-mode timer.

This boots linux and passes make check for me.

# gpg: Signature made Tue 03 Mar 2020 00:23:20 GMT
# gpg: using RSA key 2B3C3747446843B24A943A7A2E1319F35FBB1889
# gpg: issuer "palmer@dabbelt.com"
# gpg: Good signature from "Palmer Dabbelt <palmer@dabbelt.com>" [unknown]
# gpg: aka "Palmer Dabbelt <palmer@sifive.com>" [unknown]
# gpg: aka "Palmer Dabbelt <palmerdabbelt@google.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 00CE 76D1 8349 60DF CE88 6DF8 EF4C A150 2CCB AB41
# Subkey fingerprint: 2B3C 3747 4468 43B2 4A94 3A7A 2E13 19F3 5FBB 1889

* remotes/palmer/tags/riscv-for-master-5.0-sf3: (38 commits)
  hw/riscv: Provide rdtime callback for TCG in CLINT emulation
  target/riscv: Emulate TIME CSRs for privileged mode
  riscv: virt: Allow PCI address 0
  target/riscv: Allow enabling the Hypervisor extension
  target/riscv: Add the MSTATUS_MPV_ISSET helper macro
  target/riscv: Add support for the 32-bit MSTATUSH CSR
  target/riscv: Set htval and mtval2 on execptions
  target/riscv: Raise the new execptions when 2nd stage translation fails
  target/riscv: Implement second stage MMU
  target/riscv: Allow specifying MMU stage
  target/riscv: Respect MPRV and SPRV for floating point ops
  target/riscv: Mark both sstatus and msstatus_hs as dirty
  target/riscv: Disable guest FP support based on virtual status
  target/riscv: Only set TB flags with FP status if enabled
  target/riscv: Remove the hret instruction
  target/riscv: Add hfence instructions
  target/riscv: Add Hypervisor trap return support
  target/riscv: Add hypvervisor trap support
  target/riscv: Generate illegal instruction on WFI when V=1
  target/ricsv: Flush the TLB on virtulisation mode changes
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--target/riscv/cpu.c57
-rw-r--r--target/riscv/cpu.h63
-rw-r--r--target/riscv/cpu_bits.h111
-rw-r--r--target/riscv/cpu_helper.c486
-rw-r--r--target/riscv/csr.c455
-rw-r--r--target/riscv/gdbstub.c11
-rw-r--r--target/riscv/insn32.decode22
-rw-r--r--target/riscv/insn_trans/trans_privileged.inc.c45
-rw-r--r--target/riscv/op_helper.c71
-rw-r--r--target/riscv/translate.c35
10 files changed, 1223 insertions, 133 deletions
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 8c86ebc109..c47d10b739 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -67,19 +67,27 @@ const char * const riscv_excp_names[] = {
"load_page_fault",
"reserved",
"store_page_fault"
+ "reserved",
+ "reserved",
+ "reserved",
+ "reserved",
+ "guest_exec_page_fault",
+ "guest_load_page_fault",
+ "reserved",
+ "guest_store_page_fault"
};
const char * const riscv_intr_names[] = {
"u_software",
"s_software",
- "h_software",
+ "vs_software",
"m_software",
"u_timer",
"s_timer",
- "h_timer",
+ "vs_timer",
"m_timer",
"u_external",
- "s_external",
+ "vs_external",
"h_external",
"m_external",
"reserved",
@@ -220,17 +228,53 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
CPURISCVState *env = &cpu->env;
int i;
+#if !defined(CONFIG_USER_ONLY)
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s %d\n", "V = ", riscv_cpu_virt_enabled(env));
+ }
+#endif
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
- qemu_fprintf(f, " %s 0x%x\n", "mip ", env->mip);
+#ifdef TARGET_RISCV32
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatush ", env->mstatush);
+#endif
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hstatus ", env->hstatus);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsstatus ", env->vsstatus);
+ }
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ", env->mip);
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie);
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hideleg ", env->hideleg);
+ }
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "hedeleg ", env->hedeleg);
+ }
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtvec ", env->mtvec);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stvec ", env->stvec);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vstvec ", env->vstvec);
+ }
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mepc ", env->mepc);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "sepc ", env->sepc);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vsepc ", env->vsepc);
+ }
qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mcause ", env->mcause);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "scause ", env->scause);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "vscause ", env->vscause);
+ }
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval ", env->mtval);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "stval ", env->sbadaddr);
+ if (riscv_has_ext(env, RVH)) {
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "htval ", env->htval);
+ qemu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtval2 ", env->mtval2);
+ }
#endif
for (i = 0; i < 32; i++) {
@@ -409,6 +453,9 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
if (cpu->cfg.ext_u) {
target_misa |= RVU;
}
+ if (cpu->cfg.ext_h) {
+ target_misa |= RVH;
+ }
set_misa(env, RVXLEN | target_misa);
}
@@ -444,6 +491,8 @@ static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("c", RISCVCPU, cfg.ext_c, true),
DEFINE_PROP_BOOL("s", RISCVCPU, cfg.ext_s, true),
DEFINE_PROP_BOOL("u", RISCVCPU, cfg.ext_u, true),
+ /* This is experimental so mark with 'x-' */
+ DEFINE_PROP_BOOL("x-h", RISCVCPU, cfg.ext_h, false),
DEFINE_PROP_BOOL("Counters", RISCVCPU, cfg.ext_counters, true),
DEFINE_PROP_BOOL("Zifencei", RISCVCPU, cfg.ext_ifencei, true),
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index de0a8d893a..3dcdf92227 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -67,6 +67,7 @@
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
+#define RVH RV('H')
/* S extension denotes that Supervisor mode exists, however it is possible
to have a core that support S mode but does not have an MMU and there
@@ -103,6 +104,7 @@ struct CPURISCVState {
target_ulong frm;
target_ulong badaddr;
+ target_ulong guest_phys_fault_addr;
target_ulong priv_ver;
target_ulong misa;
@@ -116,12 +118,19 @@ struct CPURISCVState {
#ifndef CONFIG_USER_ONLY
target_ulong priv;
+ /* This contains QEMU specific information about the virt state. */
+ target_ulong virt;
target_ulong resetvec;
target_ulong mhartid;
target_ulong mstatus;
- uint32_t mip;
+ target_ulong mip;
+
+#ifdef TARGET_RISCV32
+ target_ulong mstatush;
+#endif
+
uint32_t miclaim;
target_ulong mie;
@@ -142,6 +151,43 @@ struct CPURISCVState {
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
+ /* Hypervisor CSRs */
+ target_ulong hstatus;
+ target_ulong hedeleg;
+ target_ulong hideleg;
+ target_ulong hcounteren;
+ target_ulong htval;
+ target_ulong htinst;
+ target_ulong hgatp;
+ uint64_t htimedelta;
+
+ /* Virtual CSRs */
+ target_ulong vsstatus;
+ target_ulong vstvec;
+ target_ulong vsscratch;
+ target_ulong vsepc;
+ target_ulong vscause;
+ target_ulong vstval;
+ target_ulong vsatp;
+#ifdef TARGET_RISCV32
+ target_ulong vsstatush;
+#endif
+
+ target_ulong mtval2;
+ target_ulong mtinst;
+
+ /* HS Backup CSRs */
+ target_ulong stvec_hs;
+ target_ulong sscratch_hs;
+ target_ulong sepc_hs;
+ target_ulong scause_hs;
+ target_ulong stval_hs;
+ target_ulong satp_hs;
+ target_ulong mstatus_hs;
+#ifdef TARGET_RISCV32
+ target_ulong mstatush_hs;
+#endif
+
target_ulong scounteren;
target_ulong mcounteren;
@@ -156,6 +202,9 @@ struct CPURISCVState {
/* physical memory protection */
pmp_table_t pmp_state;
+ /* machine specific rdtime callback */
+ uint64_t (*rdtime_fn)(void);
+
/* True if in debugger mode. */
bool debugger;
#endif
@@ -213,6 +262,7 @@ typedef struct RISCVCPU {
bool ext_c;
bool ext_s;
bool ext_u;
+ bool ext_h;
bool ext_counters;
bool ext_ifencei;
bool ext_icsr;
@@ -247,6 +297,10 @@ int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
bool riscv_cpu_fp_enabled(CPURISCVState *env);
+bool riscv_cpu_virt_enabled(CPURISCVState *env);
+void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable);
+bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env);
+void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
@@ -268,9 +322,11 @@ void riscv_cpu_list(void);
#define cpu_mmu_index riscv_cpu_mmu_index
#ifndef CONFIG_USER_ONLY
+void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env);
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts);
uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value);
#define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */
+void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void));
#endif
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv);
@@ -293,7 +349,10 @@ static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
#ifdef CONFIG_USER_ONLY
*flags = TB_FLAGS_MSTATUS_FS;
#else
- *flags = cpu_mmu_index(env, 0) | (env->mstatus & MSTATUS_FS);
+ *flags = cpu_mmu_index(env, 0);
+ if (riscv_cpu_fp_enabled(env)) {
+ *flags |= env->mstatus & MSTATUS_FS;
+ }
#endif
}
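
Editor's sketch (not from this series): the rdtime_fn hook and the riscv_cpu_set_rdtime_fn() prototype added above let board code back the TIME/TIMEH CSRs with a real timer. The actual registration lives in the CLINT model under hw/riscv, outside the target/ directory shown in this diff, so the function name and the 10 MHz timebase below are illustrative assumptions only:

    /* Hedged sketch: a machine-provided rdtime callback. */
    static uint64_t example_rdtime(void)
    {
        /* Convert the virtual clock (nanoseconds) into timer ticks,
         * assuming a 10 MHz timebase. */
        return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
                        10000000, NANOSECONDS_PER_SECOND);
    }

    /* ...and, while the machine is being created, for each CPU: */
    riscv_cpu_set_rdtime_fn(&cpu->env, example_rdtime);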
diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h
index e99834856c..7f64ee1174 100644
--- a/target/riscv/cpu_bits.h
+++ b/target/riscv/cpu_bits.h
@@ -135,6 +135,9 @@
#define CSR_MTVEC 0x305
#define CSR_MCOUNTEREN 0x306
+/* 32-bit only */
+#define CSR_MSTATUSH 0x310
+
/* Legacy Counter Setup (priv v1.9.1) */
/* Update to #define CSR_MCOUNTINHIBIT 0x320 for 1.11.0 */
#define CSR_MUCOUNTEREN 0x320
@@ -177,8 +180,14 @@
#define CSR_HSTATUS 0x600
#define CSR_HEDELEG 0x602
#define CSR_HIDELEG 0x603
-#define CSR_HCOUNTERNEN 0x606
+#define CSR_HIE 0x604
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HTINST 0x64A
#define CSR_HGATP 0x680
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HTIMEDELTAH 0x615
#if defined(TARGET_RISCV32)
#define HGATP_MODE SATP32_MODE
@@ -191,6 +200,20 @@
#define HGATP_PPN SATP64_PPN
#endif
+/* Virtual CSRs */
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+
+#define CSR_MTINST 0x34a
+#define CSR_MTVAL2 0x34b
+
/* Physical Memory Protection */
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPCFG1 0x3a1
@@ -313,17 +336,6 @@
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
-/* Legacy Hypervisor Trap Setup (priv v1.9.1) */
-#define CSR_HIE 0x204
-#define CSR_HTVEC 0x205
-
-/* Legacy Hypervisor Trap Handling (priv v1.9.1) */
-#define CSR_HSCRATCH 0x240
-#define CSR_HEPC 0x241
-#define CSR_HCAUSE 0x242
-#define CSR_HBADADDR 0x243
-#define CSR_HIP 0x244
-
/* Legacy Machine Protection and Translation (priv v1.9.1) */
#define CSR_MBASE 0x380
#define CSR_MBOUND 0x381
@@ -351,8 +363,19 @@
#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */
#define MSTATUS_TW 0x20000000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x40000000 /* since: priv-1.10 */
+#if defined(TARGET_RISCV64)
#define MSTATUS_MTL 0x4000000000ULL
#define MSTATUS_MPV 0x8000000000ULL
+#elif defined(TARGET_RISCV32)
+#define MSTATUS_MTL 0x00000040
+#define MSTATUS_MPV 0x00000080
+#endif
+
+#ifdef TARGET_RISCV32
+# define MSTATUS_MPV_ISSET(env) get_field(env->mstatush, MSTATUS_MPV)
+#else
+# define MSTATUS_MPV_ISSET(env) get_field(env->mstatus, MSTATUS_MPV)
+#endif
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
@@ -400,7 +423,6 @@
/* hstatus CSR bits */
#define HSTATUS_SPRV 0x00000001
-#define HSTATUS_STL 0x00000040
#define HSTATUS_SPV 0x00000080
#define HSTATUS_SP2P 0x00000100
#define HSTATUS_SP2V 0x00000200
@@ -422,6 +444,15 @@
#define PRV_H 2 /* Reserved */
#define PRV_M 3
+/* Virtulisation Register Fields */
+#define VIRT_ONOFF 1
+/* This is used to save state for when we take an exception. If this is set
+ * that means that we want to force a HS level exception (no matter what the
+ * delegation is set to). This will occur for things such as a second level
+ * page table fault.
+ */
+#define FORCE_HS_EXCEP 2
+
/* RV32 satp CSR field masks */
#define SATP32_MODE 0x80000000
#define SATP32_ASID 0x7fc00000
@@ -480,22 +511,25 @@
#define DEFAULT_RSTVEC 0x1000
/* Exception causes */
-#define EXCP_NONE -1 /* sentinel value */
-#define RISCV_EXCP_INST_ADDR_MIS 0x0
-#define RISCV_EXCP_INST_ACCESS_FAULT 0x1
-#define RISCV_EXCP_ILLEGAL_INST 0x2
-#define RISCV_EXCP_BREAKPOINT 0x3
-#define RISCV_EXCP_LOAD_ADDR_MIS 0x4
-#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5
-#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6
-#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7
-#define RISCV_EXCP_U_ECALL 0x8
-#define RISCV_EXCP_S_ECALL 0x9
-#define RISCV_EXCP_H_ECALL 0xa
-#define RISCV_EXCP_M_ECALL 0xb
-#define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */
-#define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */
-#define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */
+#define EXCP_NONE -1 /* sentinel value */
+#define RISCV_EXCP_INST_ADDR_MIS 0x0
+#define RISCV_EXCP_INST_ACCESS_FAULT 0x1
+#define RISCV_EXCP_ILLEGAL_INST 0x2
+#define RISCV_EXCP_BREAKPOINT 0x3
+#define RISCV_EXCP_LOAD_ADDR_MIS 0x4
+#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5
+#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6
+#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7
+#define RISCV_EXCP_U_ECALL 0x8
+#define RISCV_EXCP_S_ECALL 0x9
+#define RISCV_EXCP_VS_ECALL 0xa
+#define RISCV_EXCP_M_ECALL 0xb
+#define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */
+#define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */
+#define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */
+#define RISCV_EXCP_INST_GUEST_PAGE_FAULT 0x14
+#define RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT 0x15
+#define RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT 0x17
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
@@ -503,29 +537,29 @@
/* Interrupt causes */
#define IRQ_U_SOFT 0
#define IRQ_S_SOFT 1
-#define IRQ_H_SOFT 2 /* reserved */
+#define IRQ_VS_SOFT 2
#define IRQ_M_SOFT 3
#define IRQ_U_TIMER 4
#define IRQ_S_TIMER 5
-#define IRQ_H_TIMER 6 /* reserved */
+#define IRQ_VS_TIMER 6
#define IRQ_M_TIMER 7
#define IRQ_U_EXT 8
#define IRQ_S_EXT 9
-#define IRQ_H_EXT 10 /* reserved */
+#define IRQ_VS_EXT 10
#define IRQ_M_EXT 11
/* mip masks */
#define MIP_USIP (1 << IRQ_U_SOFT)
#define MIP_SSIP (1 << IRQ_S_SOFT)
-#define MIP_HSIP (1 << IRQ_H_SOFT)
+#define MIP_VSSIP (1 << IRQ_VS_SOFT)
#define MIP_MSIP (1 << IRQ_M_SOFT)
#define MIP_UTIP (1 << IRQ_U_TIMER)
#define MIP_STIP (1 << IRQ_S_TIMER)
-#define MIP_HTIP (1 << IRQ_H_TIMER)
+#define MIP_VSTIP (1 << IRQ_VS_TIMER)
#define MIP_MTIP (1 << IRQ_M_TIMER)
#define MIP_UEIP (1 << IRQ_U_EXT)
#define MIP_SEIP (1 << IRQ_S_EXT)
-#define MIP_HEIP (1 << IRQ_H_EXT)
+#define MIP_VSEIP (1 << IRQ_VS_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
/* sip masks */
@@ -533,4 +567,11 @@
#define SIP_STIP MIP_STIP
#define SIP_SEIP MIP_SEIP
+/* MIE masks */
+#define MIE_SEIE (1 << IRQ_S_EXT)
+#define MIE_UEIE (1 << IRQ_U_EXT)
+#define MIE_STIE (1 << IRQ_S_TIMER)
+#define MIE_UTIE (1 << IRQ_U_TIMER)
+#define MIE_SSIE (1 << IRQ_S_SOFT)
+#define MIE_USIE (1 << IRQ_U_SOFT)
#endif
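
Editor's sketch: the new VIRT_ONOFF and FORCE_HS_EXCEP masks are consumed through the existing get_field()/set_field() helpers, so env->virt packs QEMU's internal virtualisation state into its low bits. Nothing below is taken from the patch; it only restates what the masks imply:

    /* VIRT_ONOFF selects bit 0: are we currently executing with V=1?
     * FORCE_HS_EXCEP selects bit 1: latch a trap that must be taken at HS
     * level (e.g. a guest page fault), regardless of hedeleg/hideleg. */
    env->virt = set_field(env->virt, VIRT_ONOFF, 1);      /* enter V=1 */
    bool v    = get_field(env->virt, VIRT_ONOFF);         /* query V */
    env->virt = set_field(env->virt, FORCE_HS_EXCEP, 1);  /* force HS trap */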
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 85403da9c8..5ea5d133aa 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -37,13 +37,36 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
+ target_ulong irqs;
+
target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
- target_ulong pending = env->mip & env->mie;
- target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
- target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
- target_ulong irqs = (pending & ~env->mideleg & -mie) |
- (pending & env->mideleg & -sie);
+ target_ulong hs_mstatus_sie = get_field(env->mstatus_hs, MSTATUS_SIE);
+
+ target_ulong pending = env->mip & env->mie &
+ ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+ target_ulong vspending = (env->mip & env->mie &
+ (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) >> 1;
+
+ target_ulong mie = env->priv < PRV_M ||
+ (env->priv == PRV_M && mstatus_mie);
+ target_ulong sie = env->priv < PRV_S ||
+ (env->priv == PRV_S && mstatus_sie);
+ target_ulong hs_sie = env->priv < PRV_S ||
+ (env->priv == PRV_S && hs_mstatus_sie);
+
+ if (riscv_cpu_virt_enabled(env)) {
+ target_ulong pending_hs_irq = pending & -hs_sie;
+
+ if (pending_hs_irq) {
+ riscv_cpu_set_force_hs_excep(env, FORCE_HS_EXCEP);
+ return ctz64(pending_hs_irq);
+ }
+
+ pending = vspending;
+ }
+
+ irqs = (pending & ~env->mideleg & -mie) | (pending & env->mideleg & -sie);
if (irqs) {
return ctz64(irqs); /* since non-zero */
@@ -76,12 +99,127 @@ bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
bool riscv_cpu_fp_enabled(CPURISCVState *env)
{
if (env->mstatus & MSTATUS_FS) {
+ if (riscv_cpu_virt_enabled(env) && !(env->mstatus_hs & MSTATUS_FS)) {
+ return false;
+ }
return true;
}
return false;
}
+void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
+{
+ target_ulong mstatus_mask = MSTATUS_MXR | MSTATUS_SUM | MSTATUS_FS |
+ MSTATUS_SPP | MSTATUS_SPIE | MSTATUS_SIE;
+ bool current_virt = riscv_cpu_virt_enabled(env);
+
+ g_assert(riscv_has_ext(env, RVH));
+
+#if defined(TARGET_RISCV64)
+ mstatus_mask |= MSTATUS64_UXL;
+#endif
+
+ if (current_virt) {
+ /* Current V=1 and we are about to change to V=0 */
+ env->vsstatus = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->mstatus_hs;
+
+#if defined(TARGET_RISCV32)
+ env->vsstatush = env->mstatush;
+ env->mstatush |= env->mstatush_hs;
+#endif
+
+ env->vstvec = env->stvec;
+ env->stvec = env->stvec_hs;
+
+ env->vsscratch = env->sscratch;
+ env->sscratch = env->sscratch_hs;
+
+ env->vsepc = env->sepc;
+ env->sepc = env->sepc_hs;
+
+ env->vscause = env->scause;
+ env->scause = env->scause_hs;
+
+ env->vstval = env->sbadaddr;
+ env->sbadaddr = env->stval_hs;
+
+ env->vsatp = env->satp;
+ env->satp = env->satp_hs;
+ } else {
+ /* Current V=0 and we are about to change to V=1 */
+ env->mstatus_hs = env->mstatus & mstatus_mask;
+ env->mstatus &= ~mstatus_mask;
+ env->mstatus |= env->vsstatus;
+
+#if defined(TARGET_RISCV32)
+ env->mstatush_hs = env->mstatush;
+ env->mstatush |= env->vsstatush;
+#endif
+
+ env->stvec_hs = env->stvec;
+ env->stvec = env->vstvec;
+
+ env->sscratch_hs = env->sscratch;
+ env->sscratch = env->vsscratch;
+
+ env->sepc_hs = env->sepc;
+ env->sepc = env->vsepc;
+
+ env->scause_hs = env->scause;
+ env->scause = env->vscause;
+
+ env->stval_hs = env->sbadaddr;
+ env->sbadaddr = env->vstval;
+
+ env->satp_hs = env->satp;
+ env->satp = env->vsatp;
+ }
+}
+
+bool riscv_cpu_virt_enabled(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return false;
+ }
+
+ return get_field(env->virt, VIRT_ONOFF);
+}
+
+void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return;
+ }
+
+ /* Flush the TLB on all virt mode changes. */
+ if (get_field(env->virt, VIRT_ONOFF) != enable) {
+ tlb_flush(env_cpu(env));
+ }
+
+ env->virt = set_field(env->virt, VIRT_ONOFF, enable);
+}
+
+bool riscv_cpu_force_hs_excep_enabled(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return false;
+ }
+
+ return get_field(env->virt, FORCE_HS_EXCEP);
+}
+
+void riscv_cpu_set_force_hs_excep(CPURISCVState *env, bool enable)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return;
+ }
+
+ env->virt = set_field(env->virt, FORCE_HS_EXCEP, enable);
+}
+
int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
{
CPURISCVState *env = &cpu->env;
@@ -120,6 +258,11 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
return old;
}
+void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(void))
+{
+ env->rdtime_fn = fn;
+}
+
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
if (newpriv > PRV_M) {
@@ -149,10 +292,20 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
*
* Adapted from Spike's mmu_t::translate and mmu_t::walk
*
+ * @env: CPURISCVState
+ * @physical: This will be set to the calculated physical address
+ * @prot: The returned protection attributes
+ * @addr: The virtual address to be translated
+ * @access_type: The type of MMU access
+ * @mmu_idx: Indicates current privilege level
+ * @first_stage: Are we in first stage translation?
+ * Second stage is used for hypervisor guest translation
+ * @two_stage: Are we going to perform two stage translation
*/
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
int *prot, target_ulong addr,
- int access_type, int mmu_idx)
+ int access_type, int mmu_idx,
+ bool first_stage, bool two_stage)
{
/* NOTE: the env->pc value visible here will not be
* correct, but the value visible to the exception handler
@@ -160,13 +313,40 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxResult res;
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmu_idx;
+ bool use_background = false;
+ /*
+ * Check if we should use the background registers for the two
+ * stage translation. We don't need to check if we actually need
+ * two stage translation as that happened before this function
+ * was called. Background registers will be used if the guest has
+ * forced a two stage translation to be on (in HS or M mode).
+ */
if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
+
+ if (riscv_has_ext(env, RVH) &&
+ MSTATUS_MPV_ISSET(env)) {
+ use_background = true;
+ }
+ }
+ }
+
+ if (mode == PRV_S && access_type != MMU_INST_FETCH &&
+ riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
+ if (get_field(env->hstatus, HSTATUS_SPRV)) {
+ mode = get_field(env->mstatus, SSTATUS_SPP);
+ use_background = true;
}
}
+ if (first_stage == false) {
+ /* We are in stage 2 translation, this is similar to stage 1. */
+ /* Stage 2 is always taken as U-mode */
+ mode = PRV_U;
+ }
+
if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -176,13 +356,30 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
*prot = 0;
hwaddr base;
- int levels, ptidxbits, ptesize, vm, sum;
- int mxr = get_field(env->mstatus, MSTATUS_MXR);
+ int levels, ptidxbits, ptesize, vm, sum, mxr, widened;
+
+ if (first_stage == true) {
+ mxr = get_field(env->mstatus, MSTATUS_MXR);
+ } else {
+ mxr = get_field(env->vsstatus, MSTATUS_MXR);
+ }
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
- base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
+ if (first_stage == true) {
+ if (use_background) {
+ base = (hwaddr)get_field(env->vsatp, SATP_PPN) << PGSHIFT;
+ vm = get_field(env->vsatp, SATP_MODE);
+ } else {
+ base = (hwaddr)get_field(env->satp, SATP_PPN) << PGSHIFT;
+ vm = get_field(env->satp, SATP_MODE);
+ }
+ widened = 0;
+ } else {
+ base = (hwaddr)get_field(env->hgatp, HGATP_PPN) << PGSHIFT;
+ vm = get_field(env->hgatp, HGATP_MODE);
+ widened = 2;
+ }
sum = get_field(env->mstatus, MSTATUS_SUM);
- vm = get_field(env->satp, SATP_MODE);
switch (vm) {
case VM_1_10_SV32:
levels = 2; ptidxbits = 10; ptesize = 4; break;
@@ -200,6 +397,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
g_assert_not_reached();
}
} else {
+ widened = 0;
base = (hwaddr)(env->sptbr) << PGSHIFT;
sum = !get_field(env->mstatus, MSTATUS_PUM);
vm = get_field(env->mstatus, MSTATUS_VM);
@@ -220,9 +418,16 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
}
CPUState *cs = env_cpu(env);
- int va_bits = PGSHIFT + levels * ptidxbits;
- target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
- target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
+ int va_bits = PGSHIFT + levels * ptidxbits + widened;
+ target_ulong mask, masked_msbs;
+
+ if (TARGET_LONG_BITS > (va_bits - 1)) {
+ mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
+ } else {
+ mask = 0;
+ }
+ masked_msbs = (addr >> (va_bits - 1)) & mask;
+
if (masked_msbs != 0 && masked_msbs != mask) {
return TRANSLATE_FAIL;
}
@@ -234,11 +439,29 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
restart:
#endif
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
- target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
+ target_ulong idx;
+ if (i == 0) {
+ idx = (addr >> (PGSHIFT + ptshift)) &
+ ((1 << (ptidxbits + widened)) - 1);
+ } else {
+ idx = (addr >> (PGSHIFT + ptshift)) &
((1 << ptidxbits) - 1);
+ }
/* check that physical address of PTE is legal */
- hwaddr pte_addr = base + idx * ptesize;
+ hwaddr pte_addr;
+
+ if (two_stage && first_stage) {
+ hwaddr vbase;
+
+ /* Do the second stage translation on the base PTE address. */
+ get_physical_address(env, &vbase, prot, base, access_type,
+ mmu_idx, false, true);
+
+ pte_addr = vbase + idx * ptesize;
+ } else {
+ pte_addr = base + idx * ptesize;
+ }
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
!pmp_hart_has_privs(env, pte_addr, sizeof(target_ulong),
@@ -335,7 +558,12 @@ restart:
/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
target_ulong vpn = addr >> PGSHIFT;
- *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
+ if (i == 0) {
+ *physical = (ppn | (vpn & ((1L << (ptshift + widened)) - 1))) <<
+ PGSHIFT;
+ } else {
+ *physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
+ }
/* set permissions on the TLB entry */
if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
@@ -357,25 +585,45 @@ restart:
}
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
- MMUAccessType access_type, bool pmp_violation)
+ MMUAccessType access_type, bool pmp_violation,
+ bool first_stage)
{
CPUState *cs = env_cpu(env);
- int page_fault_exceptions =
- (env->priv_ver >= PRIV_VERSION_1_10_0) &&
- get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
- !pmp_violation;
+ int page_fault_exceptions;
+ if (first_stage) {
+ page_fault_exceptions =
+ (env->priv_ver >= PRIV_VERSION_1_10_0) &&
+ get_field(env->satp, SATP_MODE) != VM_1_10_MBARE &&
+ !pmp_violation;
+ } else {
+ page_fault_exceptions =
+ get_field(env->hgatp, HGATP_MODE) != VM_1_10_MBARE &&
+ !pmp_violation;
+ }
switch (access_type) {
case MMU_INST_FETCH:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_INST_GUEST_PAGE_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
+ }
break;
case MMU_DATA_LOAD:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
+ }
break;
case MMU_DATA_STORE:
- cs->exception_index = page_fault_exceptions ?
- RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ if (riscv_cpu_virt_enabled(env) && !first_stage) {
+ cs->exception_index = RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT;
+ } else {
+ cs->exception_index = page_fault_exceptions ?
+ RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
+ }
break;
default:
g_assert_not_reached();
@@ -386,13 +634,23 @@ static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
RISCVCPU *cpu = RISCV_CPU(cs);
+ CPURISCVState *env = &cpu->env;
hwaddr phys_addr;
int prot;
int mmu_idx = cpu_mmu_index(&cpu->env, false);
- if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
+ if (get_physical_address(env, &phys_addr, &prot, addr, 0, mmu_idx,
+ true, riscv_cpu_virt_enabled(env))) {
return -1;
}
+
+ if (riscv_cpu_virt_enabled(env)) {
+ if (get_physical_address(env, &phys_addr, &prot, phys_addr,
+ 0, mmu_idx, false, true)) {
+ return -1;
+ }
+ }
+
return phys_addr;
}
@@ -446,16 +704,37 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
#ifndef CONFIG_USER_ONLY
+ vaddr im_address;
hwaddr pa = 0;
int prot;
bool pmp_violation = false;
+ bool m_mode_two_stage = false;
+ bool hs_mode_two_stage = false;
+ bool first_stage_error = true;
int ret = TRANSLATE_FAIL;
int mode = mmu_idx;
+ env->guest_phys_fault_addr = 0;
+
qemu_log_mask(CPU_LOG_MMU, "%s ad %" VADDR_PRIx " rw %d mmu_idx %d\n",
__func__, address, access_type, mmu_idx);
- ret = get_physical_address(env, &pa, &prot, address, access_type, mmu_idx);
+ /*
+ * Determine if we are in M mode and MPRV is set or in HS mode and SPRV is
+ * set and we want to access a virtulisation address.
+ */
+ if (riscv_has_ext(env, RVH)) {
+ m_mode_two_stage = env->priv == PRV_M &&
+ access_type != MMU_INST_FETCH &&
+ get_field(env->mstatus, MSTATUS_MPRV) &&
+ MSTATUS_MPV_ISSET(env);
+
+ hs_mode_two_stage = env->priv == PRV_S &&
+ !riscv_cpu_virt_enabled(env) &&
+ access_type != MMU_INST_FETCH &&
+ get_field(env->hstatus, HSTATUS_SPRV) &&
+ get_field(env->hstatus, HSTATUS_SPV);
+ }
if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
@@ -463,9 +742,55 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
}
}
- qemu_log_mask(CPU_LOG_MMU,
- "%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
- " prot %d\n", __func__, address, ret, pa, prot);
+ if (riscv_cpu_virt_enabled(env) || m_mode_two_stage || hs_mode_two_stage) {
+ /* Two stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address, access_type,
+ mmu_idx, true, true);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 1st-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+
+ if (ret != TRANSLATE_FAIL) {
+ /* Second stage lookup */
+ im_address = pa;
+
+ ret = get_physical_address(env, &pa, &prot, im_address,
+ access_type, mmu_idx, false, true);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s 2nd-stage address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, im_address, ret, pa, prot);
+
+ if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+ (ret == TRANSLATE_SUCCESS) &&
+ !pmp_hart_has_privs(env, pa, size, 1 << access_type, mode)) {
+ ret = TRANSLATE_PMP_FAIL;
+ }
+
+ if (ret != TRANSLATE_SUCCESS) {
+ /*
+ * Guest physical address translation failed, this is a HS
+ * level exception
+ */
+ first_stage_error = false;
+ env->guest_phys_fault_addr = (im_address |
+ (address &
+ (TARGET_PAGE_SIZE - 1))) >> 2;
+ }
+ }
+ } else {
+ /* Single stage lookup */
+ ret = get_physical_address(env, &pa, &prot, address, access_type,
+ mmu_idx, true, false);
+
+ qemu_log_mask(CPU_LOG_MMU,
+ "%s address=%" VADDR_PRIx " ret %d physical "
+ TARGET_FMT_plx " prot %d\n",
+ __func__, address, ret, pa, prot);
+ }
if (riscv_feature(env, RISCV_FEATURE_PMP) &&
(ret == TRANSLATE_SUCCESS) &&
@@ -475,6 +800,7 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TRANSLATE_PMP_FAIL) {
pmp_violation = true;
}
+
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
@@ -482,9 +808,12 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
} else if (probe) {
return false;
} else {
- raise_mmu_exception(env, address, access_type, pmp_violation);
+ raise_mmu_exception(env, address, access_type, pmp_violation, first_stage_error);
riscv_raise_exception(env, cs->exception_index, retaddr);
}
+
+ return true;
+
#else
switch (access_type) {
case MMU_INST_FETCH:
@@ -516,6 +845,8 @@ void riscv_cpu_do_interrupt(CPUState *cs)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
+ bool force_hs_execp = riscv_cpu_force_hs_excep_enabled(env);
+ target_ulong s;
/* cs->exception is 32-bits wide unlike mcause which is XLEN-bits wide
* so we mask off the MSB and separate into trap type and cause.
@@ -524,17 +855,17 @@ void riscv_cpu_do_interrupt(CPUState *cs)
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
target_ulong deleg = async ? env->mideleg : env->medeleg;
target_ulong tval = 0;
-
- static const int ecall_cause_map[] = {
- [PRV_U] = RISCV_EXCP_U_ECALL,
- [PRV_S] = RISCV_EXCP_S_ECALL,
- [PRV_H] = RISCV_EXCP_H_ECALL,
- [PRV_M] = RISCV_EXCP_M_ECALL
- };
+ target_ulong htval = 0;
+ target_ulong mtval2 = 0;
if (!async) {
/* set tval to badaddr for traps with address information */
switch (cause) {
+ case RISCV_EXCP_INST_GUEST_PAGE_FAULT:
+ case RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT:
+ case RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT:
+ force_hs_execp = true;
+ /* fallthrough */
case RISCV_EXCP_INST_ADDR_MIS:
case RISCV_EXCP_INST_ACCESS_FAULT:
case RISCV_EXCP_LOAD_ADDR_MIS:
@@ -552,17 +883,59 @@ void riscv_cpu_do_interrupt(CPUState *cs)
/* ecall is dispatched as one cause so translate based on mode */
if (cause == RISCV_EXCP_U_ECALL) {
assert(env->priv <= 3);
- cause = ecall_cause_map[env->priv];
+
+ if (env->priv == PRV_M) {
+ cause = RISCV_EXCP_M_ECALL;
+ } else if (env->priv == PRV_S && riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_VS_ECALL;
+ } else if (env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) {
+ cause = RISCV_EXCP_S_ECALL;
+ } else if (env->priv == PRV_U) {
+ cause = RISCV_EXCP_U_ECALL;
+ }
}
}
- trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 16 ?
+ trace_riscv_trap(env->mhartid, async, cause, env->pc, tval, cause < 23 ?
(async ? riscv_intr_names : riscv_excp_names)[cause] : "(unknown)");
if (env->priv <= PRV_S &&
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
/* handle the trap in S-mode */
- target_ulong s = env->mstatus;
+ if (riscv_has_ext(env, RVH)) {
+ target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
+
+ if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1) &&
+ !force_hs_execp) {
+ /* Trap to VS mode */
+ } else if (riscv_cpu_virt_enabled(env)) {
+ /* Trap into HS mode, from virt */
+ riscv_cpu_swap_hypervisor_regs(env);
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
+ get_field(env->hstatus, HSTATUS_SPV));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
+ get_field(env->mstatus, SSTATUS_SPP));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
+ riscv_cpu_virt_enabled(env));
+
+ htval = env->guest_phys_fault_addr;
+
+ riscv_cpu_set_virt_enabled(env, 0);
+ riscv_cpu_set_force_hs_excep(env, 0);
+ } else {
+ /* Trap into HS mode */
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2V,
+ get_field(env->hstatus, HSTATUS_SPV));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SP2P,
+ get_field(env->mstatus, SSTATUS_SPP));
+ env->hstatus = set_field(env->hstatus, HSTATUS_SPV,
+ riscv_cpu_virt_enabled(env));
+
+ htval = env->guest_phys_fault_addr;
+ }
+ }
+
+ s = env->mstatus;
s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
s = set_field(s, MSTATUS_SPP, env->priv);
@@ -571,12 +944,36 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->scause = cause | ((target_ulong)async << (TARGET_LONG_BITS - 1));
env->sepc = env->pc;
env->sbadaddr = tval;
+ env->htval = htval;
env->pc = (env->stvec >> 2 << 2) +
((async && (env->stvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_S);
} else {
/* handle the trap in M-mode */
- target_ulong s = env->mstatus;
+ if (riscv_has_ext(env, RVH)) {
+ if (riscv_cpu_virt_enabled(env)) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+#ifdef TARGET_RISCV32
+ env->mstatush = set_field(env->mstatush, MSTATUS_MPV,
+ riscv_cpu_virt_enabled(env));
+ env->mstatush = set_field(env->mstatush, MSTATUS_MTL,
+ riscv_cpu_force_hs_excep_enabled(env));
+#else
+ env->mstatus = set_field(env->mstatus, MSTATUS_MPV,
+ riscv_cpu_virt_enabled(env));
+ env->mstatus = set_field(env->mstatus, MSTATUS_MTL,
+ riscv_cpu_force_hs_excep_enabled(env));
+#endif
+
+ mtval2 = env->guest_phys_fault_addr;
+
+ /* Trapping to M mode, virt is disabled */
+ riscv_cpu_set_virt_enabled(env, 0);
+ riscv_cpu_set_force_hs_excep(env, 0);
+ }
+
+ s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
s = set_field(s, MSTATUS_MPP, env->priv);
@@ -585,6 +982,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
env->mcause = cause | ~(((target_ulong)-1) >> async);
env->mepc = env->pc;
env->mbadaddr = tval;
+ env->mtval2 = mtval2;
env->pc = (env->mtvec >> 2 << 2) +
((async && (env->mtvec & 3) == 1) ? cause * 4 : 0);
riscv_cpu_set_mode(env, PRV_M);
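
Editor's summary of the translation flow above, condensed into the two get_physical_address() calls riscv_cpu_tlb_fill() now makes when a two-stage lookup is needed (PMP checks and error handling elided; this restates the patch, it adds nothing new):

    hwaddr gpa, hpa;
    int prot, ret;

    /* VS-stage walk: guest virtual address -> guest physical address */
    ret = get_physical_address(env, &gpa, &prot, address, access_type,
                               mmu_idx, true, true);
    if (ret == TRANSLATE_SUCCESS) {
        /* G-stage walk via hgatp: guest physical -> host physical */
        ret = get_physical_address(env, &hpa, &prot, gpa, access_type,
                                   mmu_idx, false, true);
        /* If this second walk fails, guest_phys_fault_addr records the
         * guest physical address and the trap is raised as one of the new
         * guest page fault exceptions, forced to HS level. */
    }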
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 0e34c292c5..11d184cd16 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -98,6 +98,20 @@ static int smode(CPURISCVState *env, int csrno)
return -!riscv_has_ext(env, RVS);
}
+static int hmode(CPURISCVState *env, int csrno)
+{
+ if (riscv_has_ext(env, RVS) &&
+ riscv_has_ext(env, RVH)) {
+ /* Hypervisor extension is supported */
+ if ((env->priv == PRV_S && !riscv_cpu_virt_enabled(env)) ||
+ env->priv == PRV_M) {
+ return 0;
+ }
+ }
+
+ return -1;
+}
+
static int pmp(CPURISCVState *env, int csrno)
{
return -!riscv_feature(env, RISCV_FEATURE_PMP);
@@ -224,13 +238,42 @@ static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val)
#else /* CONFIG_USER_ONLY */
+static int read_time(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
+
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+ *val = env->rdtime_fn() + delta;
+ return 0;
+}
+
+#if defined(TARGET_RISCV32)
+static int read_timeh(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ uint64_t delta = riscv_cpu_virt_enabled(env) ? env->htimedelta : 0;
+
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+ *val = (env->rdtime_fn() + delta) >> 32;
+ return 0;
+}
+#endif
+
/* Machine constants */
-#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP)
-#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP)
+#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP)
+#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP)
+#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)
-static const target_ulong delegable_ints = S_MODE_INTERRUPTS;
-static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS;
+static const target_ulong delegable_ints = S_MODE_INTERRUPTS |
+ VS_MODE_INTERRUPTS;
+static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
+ VS_MODE_INTERRUPTS;
static const target_ulong delegable_excps =
(1ULL << (RISCV_EXCP_INST_ADDR_MIS)) |
(1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) |
@@ -242,11 +285,14 @@ static const target_ulong delegable_excps =
(1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT)) |
(1ULL << (RISCV_EXCP_U_ECALL)) |
(1ULL << (RISCV_EXCP_S_ECALL)) |
- (1ULL << (RISCV_EXCP_H_ECALL)) |
+ (1ULL << (RISCV_EXCP_VS_ECALL)) |
(1ULL << (RISCV_EXCP_M_ECALL)) |
(1ULL << (RISCV_EXCP_INST_PAGE_FAULT)) |
(1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT)) |
- (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT));
+ (1ULL << (RISCV_EXCP_STORE_PAGE_FAULT)) |
+ (1ULL << (RISCV_EXCP_INST_GUEST_PAGE_FAULT)) |
+ (1ULL << (RISCV_EXCP_LOAD_GUEST_ACCESS_FAULT)) |
+ (1ULL << (RISCV_EXCP_STORE_GUEST_AMO_ACCESS_FAULT));
static const target_ulong sstatus_v1_9_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
SSTATUS_SUM | SSTATUS_SD;
@@ -254,6 +300,8 @@ static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
SSTATUS_SUM | SSTATUS_MXR | SSTATUS_SD;
static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP;
+static const target_ulong hip_writable_mask = MIP_VSSIP | MIP_VSTIP | MIP_VSEIP;
+static const target_ulong vsip_writable_mask = MIP_VSSIP;
#if defined(TARGET_RISCV32)
static const char valid_vm_1_09[16] = {
@@ -349,6 +397,27 @@ static int write_mstatus(CPURISCVState *env, int csrno, target_ulong val)
return 0;
}
+#ifdef TARGET_RISCV32
+static int read_mstatush(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->mstatush;
+ return 0;
+}
+
+static int write_mstatush(CPURISCVState *env, int csrno, target_ulong val)
+{
+ if ((val ^ env->mstatush) & (MSTATUS_MPV)) {
+ tlb_flush(env_cpu(env));
+ }
+
+ val &= MSTATUS_MPV | MSTATUS_MTL;
+
+ env->mstatush = val;
+
+ return 0;
+}
+#endif
+
static int read_misa(CPURISCVState *env, int csrno, target_ulong *val)
{
*val = env->misa;
@@ -428,6 +497,9 @@ static int read_mideleg(CPURISCVState *env, int csrno, target_ulong *val)
static int write_mideleg(CPURISCVState *env, int csrno, target_ulong val)
{
env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints);
+ if (riscv_has_ext(env, RVH)) {
+ env->mideleg |= VS_MODE_INTERRUPTS;
+ }
return 0;
}
@@ -607,13 +679,27 @@ static int write_sstatus(CPURISCVState *env, int csrno, target_ulong val)
static int read_sie(CPURISCVState *env, int csrno, target_ulong *val)
{
- *val = env->mie & env->mideleg;
+ if (riscv_cpu_virt_enabled(env)) {
+ /* Tell the guest the VS bits, shifted to the S bit locations */
+ *val = (env->mie & env->mideleg & VS_MODE_INTERRUPTS) >> 1;
+ } else {
+ *val = env->mie & env->mideleg;
+ }
return 0;
}
static int write_sie(CPURISCVState *env, int csrno, target_ulong val)
{
- target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg);
+ target_ulong newval;
+
+ if (riscv_cpu_virt_enabled(env)) {
+ /* Shift the guests S bits to VS */
+ newval = (env->mie & ~VS_MODE_INTERRUPTS) |
+ ((val << 1) & VS_MODE_INTERRUPTS);
+ } else {
+ newval = (env->mie & ~S_MODE_INTERRUPTS) | (val & S_MODE_INTERRUPTS);
+ }
+
return write_mie(env, CSR_MIE, newval);
}
@@ -704,8 +790,19 @@ static int write_sbadaddr(CPURISCVState *env, int csrno, target_ulong val)
static int rmw_sip(CPURISCVState *env, int csrno, target_ulong *ret_value,
target_ulong new_value, target_ulong write_mask)
{
- int ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
+ int ret;
+
+ if (riscv_cpu_virt_enabled(env)) {
+ /* Shift the new values to line up with the VS bits */
+ ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value << 1,
+ (write_mask & sip_writable_mask) << 1 & env->mideleg);
+ ret &= vsip_writable_mask;
+ ret >>= 1;
+ } else {
+ ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value,
write_mask & env->mideleg & sip_writable_mask);
+ }
+
*ret_value &= env->mideleg;
return ret;
}
@@ -753,6 +850,291 @@ static int write_satp(CPURISCVState *env, int csrno, target_ulong val)
return 0;
}
+/* Hypervisor Extensions */
+static int read_hstatus(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hstatus;
+ return 0;
+}
+
+static int write_hstatus(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hstatus = val;
+ return 0;
+}
+
+static int read_hedeleg(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hedeleg;
+ return 0;
+}
+
+static int write_hedeleg(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hedeleg = val;
+ return 0;
+}
+
+static int read_hideleg(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hideleg;
+ return 0;
+}
+
+static int write_hideleg(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hideleg = val;
+ return 0;
+}
+
+static int rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ int ret = rmw_mip(env, 0, ret_value, new_value,
+ write_mask & hip_writable_mask);
+
+ return ret;
+}
+
+static int read_hie(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->mie & VS_MODE_INTERRUPTS;
+ return 0;
+}
+
+static int write_hie(CPURISCVState *env, int csrno, target_ulong val)
+{
+ target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS);
+ return write_mie(env, CSR_MIE, newval);
+}
+
+static int read_hcounteren(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hcounteren;
+ return 0;
+}
+
+static int write_hcounteren(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hcounteren = val;
+ return 0;
+}
+
+static int read_htval(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->htval;
+ return 0;
+}
+
+static int write_htval(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->htval = val;
+ return 0;
+}
+
+static int read_htinst(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->htinst;
+ return 0;
+}
+
+static int write_htinst(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->htinst = val;
+ return 0;
+}
+
+static int read_hgatp(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->hgatp;
+ return 0;
+}
+
+static int write_hgatp(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->hgatp = val;
+ return 0;
+}
+
+static int read_htimedelta(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+#if defined(TARGET_RISCV32)
+ *val = env->htimedelta & 0xffffffff;
+#else
+ *val = env->htimedelta;
+#endif
+ return 0;
+}
+
+static int write_htimedelta(CPURISCVState *env, int csrno, target_ulong val)
+{
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+#if defined(TARGET_RISCV32)
+ env->htimedelta = deposit64(env->htimedelta, 0, 32, (uint64_t)val);
+#else
+ env->htimedelta = val;
+#endif
+ return 0;
+}
+
+#if defined(TARGET_RISCV32)
+static int read_htimedeltah(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+ *val = env->htimedelta >> 32;
+ return 0;
+}
+
+static int write_htimedeltah(CPURISCVState *env, int csrno, target_ulong val)
+{
+ if (!env->rdtime_fn) {
+ return -1;
+ }
+
+ env->htimedelta = deposit64(env->htimedelta, 32, 32, (uint64_t)val);
+ return 0;
+}
+#endif
+
+/* Virtual CSR Registers */
+static int read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vsstatus;
+ return 0;
+}
+
+static int write_vsstatus(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vsstatus = val;
+ return 0;
+}
+
+static int rmw_vsip(CPURISCVState *env, int csrno, target_ulong *ret_value,
+ target_ulong new_value, target_ulong write_mask)
+{
+ int ret = rmw_mip(env, 0, ret_value, new_value,
+ write_mask & env->mideleg & vsip_writable_mask);
+ return ret;
+}
+
+static int read_vsie(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->mie & env->mideleg & VS_MODE_INTERRUPTS;
+ return 0;
+}
+
+static int write_vsie(CPURISCVState *env, int csrno, target_ulong val)
+{
+ target_ulong newval = (env->mie & ~env->mideleg) | (val & env->mideleg & MIP_VSSIP);
+ return write_mie(env, CSR_MIE, newval);
+}
+
+static int read_vstvec(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vstvec;
+ return 0;
+}
+
+static int write_vstvec(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vstvec = val;
+ return 0;
+}
+
+static int read_vsscratch(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vsscratch;
+ return 0;
+}
+
+static int write_vsscratch(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vsscratch = val;
+ return 0;
+}
+
+static int read_vsepc(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vsepc;
+ return 0;
+}
+
+static int write_vsepc(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vsepc = val;
+ return 0;
+}
+
+static int read_vscause(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vscause;
+ return 0;
+}
+
+static int write_vscause(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vscause = val;
+ return 0;
+}
+
+static int read_vstval(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vstval;
+ return 0;
+}
+
+static int write_vstval(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vstval = val;
+ return 0;
+}
+
+static int read_vsatp(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->vsatp;
+ return 0;
+}
+
+static int write_vsatp(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->vsatp = val;
+ return 0;
+}
+
+static int read_mtval2(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->mtval2;
+ return 0;
+}
+
+static int write_mtval2(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->mtval2 = val;
+ return 0;
+}
+
+static int read_mtinst(CPURISCVState *env, int csrno, target_ulong *val)
+{
+ *val = env->mtinst;
+ return 0;
+}
+
+static int write_mtinst(CPURISCVState *env, int csrno, target_ulong val)
+{
+ env->mtinst = val;
+ return 0;
+}
+
/* Physical Memory Protection */
static int read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val)
{
@@ -798,12 +1180,22 @@ int riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value,
/* check privileges and return -1 if check fails */
#if !defined(CONFIG_USER_ONLY)
- int csr_priv = get_field(csrno, 0x300);
+ int effective_priv = env->priv;
int read_only = get_field(csrno, 0xC00) == 3;
- if ((!env->debugger) && (env->priv < csr_priv)) {
- return -1;
+
+ if (riscv_has_ext(env, RVH) &&
+ env->priv == PRV_S &&
+ !riscv_cpu_virt_enabled(env)) {
+ /*
+ * We are in S mode without virtualisation, therefore we are in HS Mode.
+ * Add 1 to the effective privledge level to allow us to access the
+ * Hypervisor CSRs.
+ */
+ effective_priv++;
}
- if (write_mask && read_only) {
+
+ if ((write_mask && read_only) ||
+ (!env->debugger && (effective_priv < get_field(csrno, 0x300)))) {
return -1;
}
#endif
@@ -886,14 +1278,12 @@ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_INSTRETH] = { ctr, read_instreth },
#endif
- /* User-level time CSRs are only available in linux-user
- * In privileged mode, the monitor emulates these CSRs */
-#if defined(CONFIG_USER_ONLY)
+ /* In privileged mode, the monitor will have to emulate TIME CSRs only if
+ * rdtime callback is not provided by machine/platform emulation */
[CSR_TIME] = { ctr, read_time },
#if defined(TARGET_RISCV32)
[CSR_TIMEH] = { ctr, read_timeh },
#endif
-#endif
#if !defined(CONFIG_USER_ONLY)
/* Machine Timers and Counters */
@@ -919,6 +1309,10 @@ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
[CSR_MTVEC] = { any, read_mtvec, write_mtvec },
[CSR_MCOUNTEREN] = { any, read_mcounteren, write_mcounteren },
+#if defined(TARGET_RISCV32)
+ [CSR_MSTATUSH] = { any, read_mstatush, write_mstatush },
+#endif
+
/* Legacy Counter Setup (priv v1.9.1) */
[CSR_MUCOUNTEREN] = { any, read_mucounteren, write_mucounteren },
[CSR_MSCOUNTEREN] = { any, read_mscounteren, write_mscounteren },
@@ -946,6 +1340,33 @@ static riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
/* Supervisor Protection and Translation */
[CSR_SATP] = { smode, read_satp, write_satp },
+ [CSR_HSTATUS] = { hmode, read_hstatus, write_hstatus },
+ [CSR_HEDELEG] = { hmode, read_hedeleg, write_hedeleg },
+ [CSR_HIDELEG] = { hmode, read_hideleg, write_hideleg },
+ [CSR_HIP] = { hmode, NULL, NULL, rmw_hip },
+ [CSR_HIE] = { hmode, read_hie, write_hie },
+ [CSR_HCOUNTEREN] = { hmode, read_hcounteren, write_hcounteren },
+ [CSR_HTVAL] = { hmode, read_htval, write_htval },
+ [CSR_HTINST] = { hmode, read_htinst, write_htinst },
+ [CSR_HGATP] = { hmode, read_hgatp, write_hgatp },
+ [CSR_HTIMEDELTA] = { hmode, read_htimedelta, write_htimedelta },
+#if defined(TARGET_RISCV32)
+ [CSR_HTIMEDELTAH] = { hmode, read_htimedeltah, write_htimedeltah},
+#endif
+
+ [CSR_VSSTATUS] = { hmode, read_vsstatus, write_vsstatus },
+ [CSR_VSIP] = { hmode, NULL, NULL, rmw_vsip },
+ [CSR_VSIE] = { hmode, read_vsie, write_vsie },
+ [CSR_VSTVEC] = { hmode, read_vstvec, write_vstvec },
+ [CSR_VSSCRATCH] = { hmode, read_vsscratch, write_vsscratch },
+ [CSR_VSEPC] = { hmode, read_vsepc, write_vsepc },
+ [CSR_VSCAUSE] = { hmode, read_vscause, write_vscause },
+ [CSR_VSTVAL] = { hmode, read_vstval, write_vstval },
+ [CSR_VSATP] = { hmode, read_vsatp, write_vsatp },
+
+ [CSR_MTVAL2] = { hmode, read_mtval2, write_mtval2 },
+ [CSR_MTINST] = { hmode, read_mtinst, write_mtinst },
+
/* Physical Memory Protection */
[CSR_PMPCFG0 ... CSR_PMPADDR9] = { pmp, read_pmpcfg, write_pmpcfg },
[CSR_PMPADDR0 ... CSR_PMPADDR15] = { pmp, read_pmpaddr, write_pmpaddr },
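
Editor's note on the riscv_csrrw() change above: bits [9:8] of a CSR number encode its minimum privilege level (the 0x300 mask), which is why HS mode needs its effective privilege bumped to reach the new hypervisor CSRs. A worked example, not a quote from the patch:

    /* hgatp is CSR 0x680, so get_field(0x680, 0x300) == 2.  Plain S mode
     * has env->priv == 1 (< 2), so the access would be rejected; with RVH
     * present and V=0 we are really in HS mode, and effective_priv becomes
     * 2, which satisfies the check. */
    int csr_priv       = get_field(CSR_HGATP, 0x300);  /* == 2 */
    int effective_priv = env->priv;                    /* PRV_S == 1 */
    if (riscv_has_ext(env, RVH) && env->priv == PRV_S &&
        !riscv_cpu_virt_enabled(env)) {
        effective_priv++;                              /* now 2 */
    }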
diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c
index 1a72f7be9c..2f32750f2f 100644
--- a/target/riscv/gdbstub.c
+++ b/target/riscv/gdbstub.c
@@ -130,6 +130,8 @@ static int csr_register_map[] = {
CSR_MCAUSE,
CSR_MTVAL,
CSR_MIP,
+ CSR_MTINST,
+ CSR_MTVAL2,
CSR_PMPCFG0,
CSR_PMPCFG1,
CSR_PMPCFG2,
@@ -252,12 +254,11 @@ static int csr_register_map[] = {
CSR_HEDELEG,
CSR_HIDELEG,
CSR_HIE,
- CSR_HTVEC,
- CSR_HSCRATCH,
- CSR_HEPC,
- CSR_HCAUSE,
- CSR_HBADADDR,
+ CSR_HCOUNTEREN,
+ CSR_HTVAL,
CSR_HIP,
+ CSR_HTINST,
+ CSR_HGATP,
CSR_MBASE,
CSR_MBOUND,
CSR_MIBASE,
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 77f794ed70..b883672e63 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -63,20 +63,24 @@
@r2_rm ....... ..... ..... ... ..... ....... %rs1 %rm %rd
@r2 ....... ..... ..... ... ..... ....... %rs1 %rd
+@hfence_gvma ....... ..... ..... ... ..... ....... %rs2 %rs1
+@hfence_bvma ....... ..... ..... ... ..... ....... %rs2 %rs1
+
@sfence_vma ....... ..... ..... ... ..... ....... %rs2 %rs1
@sfence_vm ....... ..... ..... ... ..... ....... %rs1
# *** Privileged Instructions ***
-ecall 000000000000 00000 000 00000 1110011
-ebreak 000000000001 00000 000 00000 1110011
-uret 0000000 00010 00000 000 00000 1110011
-sret 0001000 00010 00000 000 00000 1110011
-hret 0010000 00010 00000 000 00000 1110011
-mret 0011000 00010 00000 000 00000 1110011
-wfi 0001000 00101 00000 000 00000 1110011
-sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
-sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
+ecall 000000000000 00000 000 00000 1110011
+ebreak 000000000001 00000 000 00000 1110011
+uret 0000000 00010 00000 000 00000 1110011
+sret 0001000 00010 00000 000 00000 1110011
+mret 0011000 00010 00000 000 00000 1110011
+wfi 0001000 00101 00000 000 00000 1110011
+hfence_gvma 0110001 ..... ..... 000 00000 1110011 @hfence_gvma
+hfence_bvma 0010001 ..... ..... 000 00000 1110011 @hfence_bvma
+sfence_vma 0001001 ..... ..... 000 00000 1110011 @sfence_vma
+sfence_vm 0001000 00100 ..... 000 00000 1110011 @sfence_vm
# *** RV32I Base Instruction Set ***
lui .................... ..... 0110111 @u
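
A worked example of the draft encodings added above (an editor's illustration, not text from the patch): hfence.gvma with rs1 = rs2 = x0 packs funct7 0110001, rs2 00000, rs1 00000, funct3 000, rd 00000 and opcode 1110011 into the 32-bit word 0x62000073, and hfence.bvma x0, x0 is 0x22000073 by the same construction with funct7 0010001. These opcodes come from the draft hypervisor specification and may change before it is frozen, as the cover letter above warns.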
diff --git a/target/riscv/insn_trans/trans_privileged.inc.c b/target/riscv/insn_trans/trans_privileged.inc.c
index c5e4b3e49a..76c2fad71c 100644
--- a/target/riscv/insn_trans/trans_privileged.inc.c
+++ b/target/riscv/insn_trans/trans_privileged.inc.c
@@ -58,11 +58,6 @@ static bool trans_sret(DisasContext *ctx, arg_sret *a)
#endif
}
-static bool trans_hret(DisasContext *ctx, arg_hret *a)
-{
- return false;
-}
-
static bool trans_mret(DisasContext *ctx, arg_mret *a)
{
#ifndef CONFIG_USER_ONLY
@@ -108,3 +103,43 @@ static bool trans_sfence_vm(DisasContext *ctx, arg_sfence_vm *a)
#endif
return false;
}
+
+static bool trans_hfence_gvma(DisasContext *ctx, arg_sfence_vma *a)
+{
+#ifndef CONFIG_USER_ONLY
+ if (ctx->priv_ver >= PRIV_VERSION_1_10_0 &&
+ has_ext(ctx, RVH)) {
+ /* Hpervisor extensions exist */
+ /*
+ * if (env->priv == PRV_M ||
+ * (env->priv == PRV_S &&
+ * !riscv_cpu_virt_enabled(env) &&
+ * get_field(ctx->mstatus_fs, MSTATUS_TVM))) {
+ */
+ gen_helper_tlb_flush(cpu_env);
+ return true;
+ /* } */
+ }
+#endif
+ return false;
+}
+
+static bool trans_hfence_bvma(DisasContext *ctx, arg_sfence_vma *a)
+{
+#ifndef CONFIG_USER_ONLY
+ if (ctx->priv_ver >= PRIV_VERSION_1_10_0 &&
+ has_ext(ctx, RVH)) {
+ /* Hpervisor extensions exist */
+ /*
+ * if (env->priv == PRV_M ||
+ * (env->priv == PRV_S &&
+ * !riscv_cpu_virt_enabled(env) &&
+ * get_field(ctx->mstatus_fs, MSTATUS_TVM))) {
+ */
+ gen_helper_tlb_flush(cpu_env);
+ return true;
+ /* } */
+ }
+#endif
+ return false;
+}
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index e87c9115bc..8736f689c2 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -73,6 +73,8 @@ target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
+ target_ulong prev_priv, prev_virt, mstatus;
+
if (!(env->priv >= PRV_S)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
@@ -87,16 +89,46 @@ target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
- target_ulong mstatus = env->mstatus;
- target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
- mstatus = set_field(mstatus,
- env->priv_ver >= PRIV_VERSION_1_10_0 ?
- MSTATUS_SIE : MSTATUS_UIE << prev_priv,
- get_field(mstatus, MSTATUS_SPIE));
- mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
- mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+ mstatus = env->mstatus;
+
+ if (riscv_has_ext(env, RVH) && !riscv_cpu_virt_enabled(env)) {
+ /* We support Hypervisor extensions and virtulisation is disabled */
+ target_ulong hstatus = env->hstatus;
+
+ prev_priv = get_field(mstatus, MSTATUS_SPP);
+ prev_virt = get_field(hstatus, HSTATUS_SPV);
+
+ hstatus = set_field(hstatus, HSTATUS_SPV,
+ get_field(hstatus, HSTATUS_SP2V));
+ mstatus = set_field(mstatus, MSTATUS_SPP,
+ get_field(hstatus, HSTATUS_SP2P));
+ hstatus = set_field(hstatus, HSTATUS_SP2V, 0);
+ hstatus = set_field(hstatus, HSTATUS_SP2P, 0);
+ mstatus = set_field(mstatus, SSTATUS_SIE,
+ get_field(mstatus, SSTATUS_SPIE));
+ mstatus = set_field(mstatus, SSTATUS_SPIE, 1);
+
+ env->mstatus = mstatus;
+ env->hstatus = hstatus;
+
+ if (prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_virt_enabled(env, prev_virt);
+ } else {
+ prev_priv = get_field(mstatus, MSTATUS_SPP);
+
+ mstatus = set_field(mstatus,
+ env->priv_ver >= PRIV_VERSION_1_10_0 ?
+ MSTATUS_SIE : MSTATUS_UIE << prev_priv,
+ get_field(mstatus, MSTATUS_SPIE));
+ mstatus = set_field(mstatus, MSTATUS_SPIE, 1);
+ mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
+ env->mstatus = mstatus;
+ }
+
riscv_cpu_set_mode(env, prev_priv);
- env->mstatus = mstatus;
return retpc;
}
@@ -114,14 +146,28 @@ target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
target_ulong mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
+ target_ulong prev_virt = MSTATUS_MPV_ISSET(env);
mstatus = set_field(mstatus,
env->priv_ver >= PRIV_VERSION_1_10_0 ?
MSTATUS_MIE : MSTATUS_UIE << prev_priv,
get_field(mstatus, MSTATUS_MPIE));
mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
- riscv_cpu_set_mode(env, prev_priv);
+#ifdef TARGET_RISCV32
+ env->mstatush = set_field(env->mstatush, MSTATUS_MPV, 0);
+#else
+ mstatus = set_field(mstatus, MSTATUS_MPV, 0);
+#endif
env->mstatus = mstatus;
+ riscv_cpu_set_mode(env, prev_priv);
+
+ if (riscv_has_ext(env, RVH)) {
+ if (prev_virt) {
+ riscv_cpu_swap_hypervisor_regs(env);
+ }
+
+ riscv_cpu_set_virt_enabled(env, prev_virt);
+ }
return retpc;
}
@@ -130,9 +176,10 @@ void helper_wfi(CPURISCVState *env)
{
CPUState *cs = env_cpu(env);
- if (env->priv == PRV_S &&
+ if ((env->priv == PRV_S &&
env->priv_ver >= PRIV_VERSION_1_10_0 &&
- get_field(env->mstatus, MSTATUS_TW)) {
+ get_field(env->mstatus, MSTATUS_TW)) ||
+ riscv_cpu_virt_enabled(env)) {
riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
} else {
cs->halted = 1;
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index d5de7f468a..43bf7e39a6 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -44,6 +44,8 @@ typedef struct DisasContext {
/* pc_succ_insn points to the instruction following base.pc_next */
target_ulong pc_succ_insn;
target_ulong priv_ver;
+ bool virt_enabled;
+ uint32_t opcode;
uint32_t mstatus_fs;
uint32_t misa;
uint32_t mem_idx;
@@ -395,6 +397,12 @@ static void mark_fs_dirty(DisasContext *ctx)
tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus));
+
+ if (ctx->virt_enabled) {
+ tcg_gen_ld_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ tcg_gen_ori_tl(tmp, tmp, MSTATUS_FS | MSTATUS_SD);
+ tcg_gen_st_tl(tmp, cpu_env, offsetof(CPURISCVState, mstatus_hs));
+ }
tcg_temp_free(tmp);
}
#else
@@ -742,6 +750,25 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->mem_idx = ctx->base.tb->flags & TB_FLAGS_MMU_MASK;
ctx->mstatus_fs = ctx->base.tb->flags & TB_FLAGS_MSTATUS_FS;
ctx->priv_ver = env->priv_ver;
+#if !defined(CONFIG_USER_ONLY)
+ if (riscv_has_ext(env, RVH)) {
+ ctx->virt_enabled = riscv_cpu_virt_enabled(env);
+ if (env->priv_ver == PRV_M &&
+ get_field(env->mstatus, MSTATUS_MPRV) &&
+ MSTATUS_MPV_ISSET(env)) {
+ ctx->virt_enabled = true;
+ } else if (env->priv == PRV_S &&
+ !riscv_cpu_virt_enabled(env) &&
+ get_field(env->hstatus, HSTATUS_SPRV) &&
+ get_field(env->hstatus, HSTATUS_SPV)) {
+ ctx->virt_enabled = true;
+ }
+ } else {
+ ctx->virt_enabled = false;
+ }
+#else
+ ctx->virt_enabled = false;
+#endif
ctx->misa = env->misa;
ctx->frm = -1; /* unknown rounding mode */
ctx->ext_ifencei = cpu->cfg.ext_ifencei;
@@ -810,7 +837,15 @@ static void riscv_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
static void riscv_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu)
{
+#ifndef CONFIG_USER_ONLY
+ RISCVCPU *rvcpu = RISCV_CPU(cpu);
+ CPURISCVState *env = &rvcpu->env;
+#endif
+
qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+#ifndef CONFIG_USER_ONLY
+ qemu_log("Priv: "TARGET_FMT_ld"; Virt: "TARGET_FMT_ld"\n", env->priv, env->virt);
+#endif
log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size);
}