Diffstat (limited to 'target/riscv/cpu_helper.c')
-rw-r--r--  target/riscv/cpu_helper.c  377
1 file changed, 351 insertions, 26 deletions
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index 327a2c4f1d..746335bfd6 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -152,32 +152,275 @@ void riscv_cpu_update_mask(CPURISCVState *env)
}
#ifndef CONFIG_USER_ONLY
+
+/*
+ * HS-mode is allowed to configure priorities only for the
+ * following VS-mode local interrupts:
+ *
+ * 0 (Reserved interrupt, reads as zero)
+ * 1 Supervisor software interrupt
+ * 4 (Reserved interrupt, reads as zero)
+ * 5 Supervisor timer interrupt
+ * 8 (Reserved interrupt, reads as zero)
+ * 13 (Reserved interrupt)
+ * 14 "
+ * 15 "
+ * 16 "
+ * 18 Debug/trace interrupt
+ * 20 (Reserved interrupt)
+ * 22 "
+ * 24 "
+ * 26 "
+ * 28 "
+ * 30 (Reserved for standard reporting of bus or system errors)
+ */
+
+static const int hviprio_index2irq[] = {
+ 0, 1, 4, 5, 8, 13, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30 };
+static const int hviprio_index2rdzero[] = {
+ 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+
+int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero)
+{
+ if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) {
+ return -EINVAL;
+ }
+
+ if (out_irq) {
+ *out_irq = hviprio_index2irq[index];
+ }
+
+ if (out_rdzero) {
+ *out_rdzero = hviprio_index2rdzero[index];
+ }
+
+ return 0;
+}
+
+/*
+ * Default priorities of local interrupts are defined in the
+ * RISC-V Advanced Interrupt Architecture specification.
+ *
+ * ----------------------------------------------------------------
+ * Default |
+ * Priority | Major Interrupt Numbers
+ * ----------------------------------------------------------------
+ * Highest | 63 (3f), 62 (3e), 31 (1f), 30 (1e), 61 (3d), 60 (3c),
+ * | 59 (3b), 58 (3a), 29 (1d), 28 (1c), 57 (39), 56 (38),
+ * | 55 (37), 54 (36), 27 (1b), 26 (1a), 53 (35), 52 (34),
+ * | 51 (33), 50 (32), 25 (19), 24 (18), 49 (31), 48 (30)
+ * |
+ * | 11 (0b), 3 (03), 7 (07)
+ * | 9 (09), 1 (01), 5 (05)
+ * | 12 (0c)
+ * | 10 (0a), 2 (02), 6 (06)
+ * |
+ * | 47 (2f), 46 (2e), 23 (17), 22 (16), 45 (2d), 44 (2c),
+ * | 43 (2b), 42 (2a), 21 (15), 20 (14), 41 (29), 40 (28),
+ * | 39 (27), 38 (26), 19 (13), 18 (12), 37 (25), 36 (24),
+ * Lowest | 35 (23), 34 (22), 17 (11), 16 (10), 33 (21), 32 (20)
+ * ----------------------------------------------------------------
+ */
+static const uint8_t default_iprio[64] = {
+ [63] = IPRIO_DEFAULT_UPPER,
+ [62] = IPRIO_DEFAULT_UPPER + 1,
+ [31] = IPRIO_DEFAULT_UPPER + 2,
+ [30] = IPRIO_DEFAULT_UPPER + 3,
+ [61] = IPRIO_DEFAULT_UPPER + 4,
+ [60] = IPRIO_DEFAULT_UPPER + 5,
+
+ [59] = IPRIO_DEFAULT_UPPER + 6,
+ [58] = IPRIO_DEFAULT_UPPER + 7,
+ [29] = IPRIO_DEFAULT_UPPER + 8,
+ [28] = IPRIO_DEFAULT_UPPER + 9,
+ [57] = IPRIO_DEFAULT_UPPER + 10,
+ [56] = IPRIO_DEFAULT_UPPER + 11,
+
+ [55] = IPRIO_DEFAULT_UPPER + 12,
+ [54] = IPRIO_DEFAULT_UPPER + 13,
+ [27] = IPRIO_DEFAULT_UPPER + 14,
+ [26] = IPRIO_DEFAULT_UPPER + 15,
+ [53] = IPRIO_DEFAULT_UPPER + 16,
+ [52] = IPRIO_DEFAULT_UPPER + 17,
+
+ [51] = IPRIO_DEFAULT_UPPER + 18,
+ [50] = IPRIO_DEFAULT_UPPER + 19,
+ [25] = IPRIO_DEFAULT_UPPER + 20,
+ [24] = IPRIO_DEFAULT_UPPER + 21,
+ [49] = IPRIO_DEFAULT_UPPER + 22,
+ [48] = IPRIO_DEFAULT_UPPER + 23,
+
+ [11] = IPRIO_DEFAULT_M,
+ [3] = IPRIO_DEFAULT_M + 1,
+ [7] = IPRIO_DEFAULT_M + 2,
+
+ [9] = IPRIO_DEFAULT_S,
+ [1] = IPRIO_DEFAULT_S + 1,
+ [5] = IPRIO_DEFAULT_S + 2,
+
+ [12] = IPRIO_DEFAULT_SGEXT,
+
+ [10] = IPRIO_DEFAULT_VS,
+ [2] = IPRIO_DEFAULT_VS + 1,
+ [6] = IPRIO_DEFAULT_VS + 2,
+
+ [47] = IPRIO_DEFAULT_LOWER,
+ [46] = IPRIO_DEFAULT_LOWER + 1,
+ [23] = IPRIO_DEFAULT_LOWER + 2,
+ [22] = IPRIO_DEFAULT_LOWER + 3,
+ [45] = IPRIO_DEFAULT_LOWER + 4,
+ [44] = IPRIO_DEFAULT_LOWER + 5,
+
+ [43] = IPRIO_DEFAULT_LOWER + 6,
+ [42] = IPRIO_DEFAULT_LOWER + 7,
+ [21] = IPRIO_DEFAULT_LOWER + 8,
+ [20] = IPRIO_DEFAULT_LOWER + 9,
+ [41] = IPRIO_DEFAULT_LOWER + 10,
+ [40] = IPRIO_DEFAULT_LOWER + 11,
+
+ [39] = IPRIO_DEFAULT_LOWER + 12,
+ [38] = IPRIO_DEFAULT_LOWER + 13,
+ [19] = IPRIO_DEFAULT_LOWER + 14,
+ [18] = IPRIO_DEFAULT_LOWER + 15,
+ [37] = IPRIO_DEFAULT_LOWER + 16,
+ [36] = IPRIO_DEFAULT_LOWER + 17,
+
+ [35] = IPRIO_DEFAULT_LOWER + 18,
+ [34] = IPRIO_DEFAULT_LOWER + 19,
+ [17] = IPRIO_DEFAULT_LOWER + 20,
+ [16] = IPRIO_DEFAULT_LOWER + 21,
+ [33] = IPRIO_DEFAULT_LOWER + 22,
+ [32] = IPRIO_DEFAULT_LOWER + 23,
+};
+
+uint8_t riscv_cpu_default_priority(int irq)
+{
+ if (irq < 0 || irq > 63) {
+ return IPRIO_MMAXIPRIO;
+ }
+
+ return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO;
+}
+
+static int riscv_cpu_pending_to_irq(CPURISCVState *env,
+ int extirq, unsigned int extirq_def_prio,
+ uint64_t pending, uint8_t *iprio)
+{
+ int irq, best_irq = RISCV_EXCP_NONE;
+ unsigned int prio, best_prio = UINT_MAX;
+
+ if (!pending) {
+ return RISCV_EXCP_NONE;
+ }
+
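+ /* Without AIA, the lowest pending interrupt number is simply taken as-is */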
+ irq = ctz64(pending);
+ if (!riscv_feature(env, RISCV_FEATURE_AIA)) {
+ return irq;
+ }
+
+ pending = pending >> irq;
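+ /* With AIA, scan every pending bit and keep the interrupt with the best (numerically lowest) priority */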
+ while (pending) {
+ prio = iprio[irq];
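+ /* A zero entry means no priority was explicitly programmed; derive one from the default priority */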
+ if (!prio) {
+ if (irq == extirq) {
+ prio = extirq_def_prio;
+ } else {
+ prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ?
+ 1 : IPRIO_MMAXIPRIO;
+ }
+ }
+ if ((pending & 0x1) && (prio <= best_prio)) {
+ best_irq = irq;
+ best_prio = prio;
+ }
+ irq++;
+ pending = pending >> 1;
+ }
+
+ return best_irq;
+}
+
+static uint64_t riscv_cpu_all_pending(CPURISCVState *env)
+{
+ uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN);
+ uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
+
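+ /* The guest external interrupt selected by hstatus.VGEIN is folded in as a pending VSEIP */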
+ return (env->mip | vsgein) & env->mie;
+}
+
+int riscv_cpu_mirq_pending(CPURISCVState *env)
+{
+ uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg &
+ ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+
+ return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
+ irqs, env->miprio);
+}
+
+int riscv_cpu_sirq_pending(CPURISCVState *env)
+{
+ uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
+ ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+
+ return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
+ irqs, env->siprio);
+}
+
+int riscv_cpu_vsirq_pending(CPURISCVState *env)
+{
+ uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg &
+ (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP);
+
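+ /* VS-level interrupt bits are presented to the guest shifted down by one */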
+ return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
+ irqs >> 1, env->hviprio);
+}
+
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
- target_ulong virt_enabled = riscv_cpu_virt_enabled(env);
+ int virq;
+ uint64_t irqs, pending, mie, hsie, vsie;
- target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
- target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
+ /* Determine interrupt enable state of all privilege modes */
+ if (riscv_cpu_virt_enabled(env)) {
+ mie = 1;
+ hsie = 1;
+ vsie = (env->priv < PRV_S) ||
+ (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
+ } else {
+ mie = (env->priv < PRV_M) ||
+ (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE));
+ hsie = (env->priv < PRV_S) ||
+ (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE));
+ vsie = 0;
+ }
- target_ulong pending = env->mip & env->mie;
+ /* Determine all pending interrupts */
+ pending = riscv_cpu_all_pending(env);
- target_ulong mie = env->priv < PRV_M ||
- (env->priv == PRV_M && mstatus_mie);
- target_ulong sie = env->priv < PRV_S ||
- (env->priv == PRV_S && mstatus_sie);
- target_ulong hsie = virt_enabled || sie;
- target_ulong vsie = virt_enabled && sie;
+ /* Check M-mode interrupts */
+ irqs = pending & ~env->mideleg & -mie;
+ if (irqs) {
+ return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M,
+ irqs, env->miprio);
+ }
- target_ulong irqs =
- (pending & ~env->mideleg & -mie) |
- (pending & env->mideleg & ~env->hideleg & -hsie) |
- (pending & env->mideleg & env->hideleg & -vsie);
+ /* Check HS-mode interrupts */
+ irqs = pending & env->mideleg & ~env->hideleg & -hsie;
+ if (irqs) {
+ return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
+ irqs, env->siprio);
+ }
+ /* Check VS-mode interrupts */
+ irqs = pending & env->mideleg & env->hideleg & -vsie;
if (irqs) {
- return ctz64(irqs); /* since non-zero */
- } else {
- return RISCV_EXCP_NONE; /* indicates no pending interrupt */
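+ /* hviprio[] uses the shifted (S-level) numbering, so shift the resulting IRQ back up by one */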
+ virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S,
+ irqs >> 1, env->hviprio);
+ return (virq <= 0) ? virq : virq + 1;
}
+
+ /* Indicate no pending interrupt */
+ return RISCV_EXCP_NONE;
}
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
@@ -279,6 +522,28 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env)
}
}
+target_ulong riscv_cpu_get_geilen(CPURISCVState *env)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return 0;
+ }
+
+ return env->geilen;
+}
+
+void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen)
+{
+ if (!riscv_has_ext(env, RVH)) {
+ return;
+ }
+
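+ /* At most TARGET_LONG_BITS - 1 guest external interrupt files can be configured */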
+ if (geilen > (TARGET_LONG_BITS - 1)) {
+ return;
+ }
+
+ env->geilen = geilen;
+}
+
bool riscv_cpu_virt_enabled(CPURISCVState *env)
{
if (!riscv_has_ext(env, RVH)) {
@@ -300,6 +565,19 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable)
}
env->virt = set_field(env->virt, VIRT_ONOFF, enable);
+
+ if (enable) {
+ /*
+ * Guest external interrupts from an interrupt controller are
+ * delivered only when the Guest/VM is running (i.e. V=1). This means
+ * any guest external interrupt which is triggered while the Guest/VM
+ * is not running (i.e. V=0) will be missed by QEMU, resulting in a
+ * guest with sluggish response to serial console input and other
+ * I/O events.
+ *
+ * To solve this, we check for and inject any pending interrupt
+ * after setting V=1.
+ */
+ riscv_cpu_update_mip(env_archcpu(env), 0, 0);
+ }
}
bool riscv_cpu_two_stage_lookup(int mmu_idx)
@@ -307,7 +585,7 @@ bool riscv_cpu_two_stage_lookup(int mmu_idx)
return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK;
}
-int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
+int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts)
{
CPURISCVState *env = &cpu->env;
if (env->miclaim & interrupts) {
@@ -318,13 +596,18 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts)
}
}
-uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
+uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value)
{
CPURISCVState *env = &cpu->env;
CPUState *cs = CPU(cpu);
- uint32_t old = env->mip;
+ uint64_t gein, vsgein = 0, old = env->mip;
bool locked = false;
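+ /* When V=1, the guest external interrupt selected by hstatus.VGEIN also keeps the interrupt line asserted */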
+ if (riscv_cpu_virt_enabled(env)) {
+ gein = get_field(env->hstatus, HSTATUS_VGEIN);
+ vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0;
+ }
+
if (!qemu_mutex_iothread_locked()) {
locked = true;
qemu_mutex_lock_iothread();
@@ -332,7 +615,7 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value)
env->mip = (env->mip & ~mask) | (value & mask);
- if (env->mip) {
+ if (env->mip | vsgein) {
cpu_interrupt(cs, CPU_INTERRUPT_HARD);
} else {
cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
@@ -352,6 +635,20 @@ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t),
env->rdtime_fn_arg = arg;
}
+void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv,
+ int (*rmw_fn)(void *arg,
+ target_ulong reg,
+ target_ulong *val,
+ target_ulong new_val,
+ target_ulong write_mask),
+ void *rmw_fn_arg)
+{
+ if (priv <= PRV_M) {
+ env->aia_ireg_rmw_fn[priv] = rmw_fn;
+ env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg;
+ }
+}
+
void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv)
{
if (newpriv > PRV_M) {
@@ -454,6 +751,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical,
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED;
int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK;
bool use_background = false;
+ hwaddr ppn;
+ RISCVCPU *cpu = env_archcpu(env);
+ int napot_bits = 0;
+ target_ulong napot_mask;
/*
* Check if we should use the background registers for the two
@@ -622,13 +923,27 @@ restart:
return TRANSLATE_FAIL;
}
- hwaddr ppn = pte >> PTE_PPN_SHIFT;
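+ /* Extract the PPN; with Svpbmt/Svnapot the upper attribute bits are masked off, otherwise set bits above the PPN field cause a fault */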
+ if (riscv_cpu_sxl(env) == MXL_RV32) {
+ ppn = pte >> PTE_PPN_SHIFT;
+ } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) {
+ ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT;
+ } else {
+ ppn = pte >> PTE_PPN_SHIFT;
+ if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) {
+ return TRANSLATE_FAIL;
+ }
+ }
if (!(pte & PTE_V)) {
/* Invalid PTE */
return TRANSLATE_FAIL;
+ } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) {
+ return TRANSLATE_FAIL;
} else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
/* Inner PTE, continue walking */
+ if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) {
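+ /* D, A, U or attribute bits must not be set in a non-leaf PTE */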
+ return TRANSLATE_FAIL;
+ }
base = ppn << PGSHIFT;
} else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
/* Reserved leaf PTE flags: PTE_W */
@@ -702,8 +1017,18 @@ restart:
/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
target_ulong vpn = addr >> PGSHIFT;
- *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) |
- (addr & ~TARGET_PAGE_MASK);
+
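+ /* Svnapot: only 64 KiB NAPOT mappings (napot_bits == 4) at the final level are supported */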
+ if (cpu->cfg.ext_svnapot && (pte & PTE_N)) {
+ napot_bits = ctzl(ppn) + 1;
+ if ((i != (levels - 1)) || (napot_bits != 4)) {
+ return TRANSLATE_FAIL;
+ }
+ }
+
+ napot_mask = (1 << napot_bits) - 1;
+ *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) |
+ (vpn & (((target_ulong)1 << ptshift) - 1))
+ ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK);
/* set permissions on the TLB entry */
if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
@@ -1009,7 +1334,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
*/
bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG);
target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK;
- target_ulong deleg = async ? env->mideleg : env->medeleg;
+ uint64_t deleg = async ? env->mideleg : env->medeleg;
target_ulong tval = 0;
target_ulong htval = 0;
target_ulong mtval2 = 0;
@@ -1076,7 +1401,7 @@ void riscv_cpu_do_interrupt(CPUState *cs)
cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) {
/* handle the trap in S-mode */
if (riscv_has_ext(env, RVH)) {
- target_ulong hdeleg = async ? env->hideleg : env->hedeleg;
+ uint64_t hdeleg = async ? env->hideleg : env->hedeleg;
if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) {
/* Trap to VS mode */