27 files changed, 3252 insertions(+), 370 deletions(-)
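As a usage sketch of the riscv_aplic_create() helper introduced by this series: a machine model would typically instantiate an M-mode root APLIC (which owns the wired input IRQ lines) and an S-mode child APLIC that receives delegated sources from the root. The base addresses, source count and priority width below are illustrative assumptions only, not values taken from these patches, and the CPUs for hartid_base .. hartid_base + num_harts - 1 are assumed to already exist.

/*
 * Hedged example: a wired-interrupt (non-MSI) APLIC pair for one socket.
 * The EXAMPLE_* constants are made up for illustration.
 */
#include "hw/intc/riscv_aplic.h"

#define EXAMPLE_APLIC_M_BASE 0x0c000000 /* hypothetical base address */
#define EXAMPLE_APLIC_S_BASE 0x0d000000 /* hypothetical base address */
#define EXAMPLE_NUM_SOURCES  96         /* wired interrupt sources */
#define EXAMPLE_IPRIO_BITS   3          /* priority bits per source */

static void example_create_aplics(uint32_t hartid_base, uint32_t num_harts)
{
    /* Root (M-mode) domain: no parent, owns the board's input IRQ lines. */
    DeviceState *aplic_m = riscv_aplic_create(EXAMPLE_APLIC_M_BASE,
                                              APLIC_SIZE(num_harts),
                                              hartid_base, num_harts,
                                              EXAMPLE_NUM_SOURCES,
                                              EXAMPLE_IPRIO_BITS,
                                              false, /* msimode */
                                              true,  /* mmode */
                                              NULL); /* root domain */

    /*
     * Child (S-mode) domain: sources are delegated from the root via the
     * sourcecfg D bit; riscv_aplic_create() links it to its parent with
     * riscv_aplic_add_child().
     */
    riscv_aplic_create(EXAMPLE_APLIC_S_BASE,
                       APLIC_SIZE(num_harts),
                       hartid_base, num_harts,
                       EXAMPLE_NUM_SOURCES,
                       EXAMPLE_IPRIO_BITS,
                       false,    /* msimode */
                       false,    /* mmode */
                       aplic_m); /* delegate from the M-mode root */
}

In non-MSI mode riscv_aplic_create() also claims MEIP/SEIP on each hart and connects its output lines to the per-CPU IRQ_M_EXT/IRQ_S_EXT GPIO inputs, so no extra wiring is needed beyond the two calls above.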
diff --git a/MAINTAINERS b/MAINTAINERS index 4b3ae2ab08..81aa31b5e1 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -286,6 +286,13 @@ F: include/hw/riscv/ F: linux-user/host/riscv32/ F: linux-user/host/riscv64/ +RISC-V XVentanaCondOps extension +M: Philipp Tomsich <philipp.tomsich@vrull.eu> +L: qemu-riscv@nongnu.org +S: Supported +F: target/riscv/XVentanaCondOps.decode +F: target/riscv/insn_trans/trans_xventanacondops.c.inc + RENESAS RX CPUs R: Yoshinori Sato <ysato@users.sourceforge.jp> S: Orphan diff --git a/docs/system/riscv/virt.rst b/docs/system/riscv/virt.rst index fa016584bf..08ce3c4177 100644 --- a/docs/system/riscv/virt.rst +++ b/docs/system/riscv/virt.rst @@ -23,9 +23,9 @@ The ``virt`` machine supports the following devices: * 1 generic PCIe host bridge * The fw_cfg device that allows a guest to obtain data from QEMU -Note that the default CPU is a generic RV32GC/RV64GC. Optional extensions -can be enabled via command line parameters, e.g.: ``-cpu rv64,x-h=true`` -enables the hypervisor extension for RV64. +The hypervisor extension has been enabled for the default CPU, so virtual +machines with hypervisor extension can simply be used without explicitly +declaring. Hardware configuration information ---------------------------------- diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index 9a24ffb880..504ed7ca72 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -56,7 +56,7 @@ static void generic_loader_reset(void *opaque) } if (s->data_len) { - assert(s->data_len < sizeof(s->data)); + assert(s->data_len <= sizeof(s->data)); dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len, MEMTXATTRS_UNSPECIFIED); } diff --git a/hw/intc/Kconfig b/hw/intc/Kconfig index 010ded7eae..528e77b4a6 100644 --- a/hw/intc/Kconfig +++ b/hw/intc/Kconfig @@ -70,6 +70,9 @@ config LOONGSON_LIOINTC config RISCV_ACLINT bool +config RISCV_APLIC + bool + config SIFIVE_PLIC bool diff --git a/hw/intc/meson.build b/hw/intc/meson.build index 70080bc161..7466024402 100644 --- a/hw/intc/meson.build +++ b/hw/intc/meson.build @@ -50,6 +50,7 @@ specific_ss.add(when: 'CONFIG_S390_FLIC', if_true: files('s390_flic.c')) specific_ss.add(when: 'CONFIG_S390_FLIC_KVM', if_true: files('s390_flic_kvm.c')) specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c')) specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c')) +specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c')) specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c')) specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c')) specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'], diff --git a/hw/intc/riscv_aplic.c b/hw/intc/riscv_aplic.c new file mode 100644 index 0000000000..e7809fb6b2 --- /dev/null +++ b/hw/intc/riscv_aplic.c @@ -0,0 +1,978 @@ +/* + * RISC-V APLIC (Advanced Platform Level Interrupt Controller) + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. 
If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/log.h" +#include "qemu/module.h" +#include "qemu/error-report.h" +#include "qemu/bswap.h" +#include "exec/address-spaces.h" +#include "hw/sysbus.h" +#include "hw/pci/msi.h" +#include "hw/boards.h" +#include "hw/qdev-properties.h" +#include "hw/intc/riscv_aplic.h" +#include "hw/irq.h" +#include "target/riscv/cpu.h" +#include "sysemu/sysemu.h" +#include "migration/vmstate.h" + +#define APLIC_MAX_IDC (1UL << 14) +#define APLIC_MAX_SOURCE 1024 +#define APLIC_MIN_IPRIO_BITS 1 +#define APLIC_MAX_IPRIO_BITS 8 +#define APLIC_MAX_CHILDREN 1024 + +#define APLIC_DOMAINCFG 0x0000 +#define APLIC_DOMAINCFG_RDONLY 0x80000000 +#define APLIC_DOMAINCFG_IE (1 << 8) +#define APLIC_DOMAINCFG_DM (1 << 2) +#define APLIC_DOMAINCFG_BE (1 << 0) + +#define APLIC_SOURCECFG_BASE 0x0004 +#define APLIC_SOURCECFG_D (1 << 10) +#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff +#define APLIC_SOURCECFG_SM_MASK 0x00000007 +#define APLIC_SOURCECFG_SM_INACTIVE 0x0 +#define APLIC_SOURCECFG_SM_DETACH 0x1 +#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4 +#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5 +#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6 +#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7 + +#define APLIC_MMSICFGADDR 0x1bc0 +#define APLIC_MMSICFGADDRH 0x1bc4 +#define APLIC_SMSICFGADDR 0x1bc8 +#define APLIC_SMSICFGADDRH 0x1bcc + +#define APLIC_xMSICFGADDRH_L (1UL << 31) +#define APLIC_xMSICFGADDRH_HHXS_MASK 0x1f +#define APLIC_xMSICFGADDRH_HHXS_SHIFT 24 +#define APLIC_xMSICFGADDRH_LHXS_MASK 0x7 +#define APLIC_xMSICFGADDRH_LHXS_SHIFT 20 +#define APLIC_xMSICFGADDRH_HHXW_MASK 0x7 +#define APLIC_xMSICFGADDRH_HHXW_SHIFT 16 +#define APLIC_xMSICFGADDRH_LHXW_MASK 0xf +#define APLIC_xMSICFGADDRH_LHXW_SHIFT 12 +#define APLIC_xMSICFGADDRH_BAPPN_MASK 0xfff + +#define APLIC_xMSICFGADDR_PPN_SHIFT 12 + +#define APLIC_xMSICFGADDR_PPN_HART(__lhxs) \ + ((1UL << (__lhxs)) - 1) + +#define APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) \ + ((1UL << (__lhxw)) - 1) +#define APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs) \ + ((__lhxs)) +#define APLIC_xMSICFGADDR_PPN_LHX(__lhxw, __lhxs) \ + (APLIC_xMSICFGADDR_PPN_LHX_MASK(__lhxw) << \ + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(__lhxs)) + +#define APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) \ + ((1UL << (__hhxw)) - 1) +#define APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs) \ + ((__hhxs) + APLIC_xMSICFGADDR_PPN_SHIFT) +#define APLIC_xMSICFGADDR_PPN_HHX(__hhxw, __hhxs) \ + (APLIC_xMSICFGADDR_PPN_HHX_MASK(__hhxw) << \ + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(__hhxs)) + +#define APLIC_xMSICFGADDRH_VALID_MASK \ + (APLIC_xMSICFGADDRH_L | \ + (APLIC_xMSICFGADDRH_HHXS_MASK << APLIC_xMSICFGADDRH_HHXS_SHIFT) | \ + (APLIC_xMSICFGADDRH_LHXS_MASK << APLIC_xMSICFGADDRH_LHXS_SHIFT) | \ + (APLIC_xMSICFGADDRH_HHXW_MASK << APLIC_xMSICFGADDRH_HHXW_SHIFT) | \ + (APLIC_xMSICFGADDRH_LHXW_MASK << APLIC_xMSICFGADDRH_LHXW_SHIFT) | \ + APLIC_xMSICFGADDRH_BAPPN_MASK) + +#define APLIC_SETIP_BASE 0x1c00 +#define APLIC_SETIPNUM 0x1cdc + +#define APLIC_CLRIP_BASE 0x1d00 +#define APLIC_CLRIPNUM 0x1ddc + +#define APLIC_SETIE_BASE 0x1e00 +#define APLIC_SETIENUM 0x1edc + +#define APLIC_CLRIE_BASE 0x1f00 +#define APLIC_CLRIENUM 0x1fdc + +#define APLIC_SETIPNUM_LE 0x2000 +#define APLIC_SETIPNUM_BE 0x2004 + +#define APLIC_ISTATE_PENDING (1U << 0) +#define APLIC_ISTATE_ENABLED (1U << 1) +#define APLIC_ISTATE_ENPEND (APLIC_ISTATE_ENABLED | \ + APLIC_ISTATE_PENDING) +#define APLIC_ISTATE_INPUT (1U << 8) + +#define APLIC_GENMSI 0x3000 + +#define APLIC_TARGET_BASE 0x3004 +#define 
APLIC_TARGET_HART_IDX_SHIFT 18 +#define APLIC_TARGET_HART_IDX_MASK 0x3fff +#define APLIC_TARGET_GUEST_IDX_SHIFT 12 +#define APLIC_TARGET_GUEST_IDX_MASK 0x3f +#define APLIC_TARGET_IPRIO_MASK 0xff +#define APLIC_TARGET_EIID_MASK 0x7ff + +#define APLIC_IDC_BASE 0x4000 +#define APLIC_IDC_SIZE 32 + +#define APLIC_IDC_IDELIVERY 0x00 + +#define APLIC_IDC_IFORCE 0x04 + +#define APLIC_IDC_ITHRESHOLD 0x08 + +#define APLIC_IDC_TOPI 0x18 +#define APLIC_IDC_TOPI_ID_SHIFT 16 +#define APLIC_IDC_TOPI_ID_MASK 0x3ff +#define APLIC_IDC_TOPI_PRIO_MASK 0xff + +#define APLIC_IDC_CLAIMI 0x1c + +static uint32_t riscv_aplic_read_input_word(RISCVAPLICState *aplic, + uint32_t word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_INPUT) ? 1 : 0) << i; + } + + return ret; +} + +static uint32_t riscv_aplic_read_pending_word(RISCVAPLICState *aplic, + uint32_t word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_PENDING) ? 1 : 0) << i; + } + + return ret; +} + +static void riscv_aplic_set_pending_raw(RISCVAPLICState *aplic, + uint32_t irq, bool pending) +{ + if (pending) { + aplic->state[irq] |= APLIC_ISTATE_PENDING; + } else { + aplic->state[irq] &= ~APLIC_ISTATE_PENDING; + } +} + +static void riscv_aplic_set_pending(RISCVAPLICState *aplic, + uint32_t irq, bool pending) +{ + uint32_t sourcecfg, sm; + + if ((irq <= 0) || (aplic->num_irqs <= irq)) { + return; + } + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + return; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if ((sm == APLIC_SOURCECFG_SM_INACTIVE) || + ((!aplic->msimode || (aplic->msimode && !pending)) && + ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || + (sm == APLIC_SOURCECFG_SM_LEVEL_LOW)))) { + return; + } + + riscv_aplic_set_pending_raw(aplic, irq, pending); +} + +static void riscv_aplic_set_pending_word(RISCVAPLICState *aplic, + uint32_t word, uint32_t value, + bool pending) +{ + uint32_t i, irq; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + if (value & (1U << i)) { + riscv_aplic_set_pending(aplic, irq, pending); + } + } +} + +static uint32_t riscv_aplic_read_enabled_word(RISCVAPLICState *aplic, + int word) +{ + uint32_t i, irq, ret = 0; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + ret |= ((aplic->state[irq] & APLIC_ISTATE_ENABLED) ? 
1 : 0) << i; + } + + return ret; +} + +static void riscv_aplic_set_enabled_raw(RISCVAPLICState *aplic, + uint32_t irq, bool enabled) +{ + if (enabled) { + aplic->state[irq] |= APLIC_ISTATE_ENABLED; + } else { + aplic->state[irq] &= ~APLIC_ISTATE_ENABLED; + } +} + +static void riscv_aplic_set_enabled(RISCVAPLICState *aplic, + uint32_t irq, bool enabled) +{ + uint32_t sourcecfg, sm; + + if ((irq <= 0) || (aplic->num_irqs <= irq)) { + return; + } + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + return; + } + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) { + return; + } + + riscv_aplic_set_enabled_raw(aplic, irq, enabled); +} + +static void riscv_aplic_set_enabled_word(RISCVAPLICState *aplic, + uint32_t word, uint32_t value, + bool enabled) +{ + uint32_t i, irq; + + for (i = 0; i < 32; i++) { + irq = word * 32 + i; + if (!irq || aplic->num_irqs <= irq) { + continue; + } + + if (value & (1U << i)) { + riscv_aplic_set_enabled(aplic, irq, enabled); + } + } +} + +static void riscv_aplic_msi_send(RISCVAPLICState *aplic, + uint32_t hart_idx, uint32_t guest_idx, + uint32_t eiid) +{ + uint64_t addr; + MemTxResult result; + RISCVAPLICState *aplic_m; + uint32_t lhxs, lhxw, hhxs, hhxw, group_idx, msicfgaddr, msicfgaddrH; + + aplic_m = aplic; + while (aplic_m && !aplic_m->mmode) { + aplic_m = aplic_m->parent; + } + if (!aplic_m) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: m-level APLIC not found\n", + __func__); + return; + } + + if (aplic->mmode) { + msicfgaddr = aplic_m->mmsicfgaddr; + msicfgaddrH = aplic_m->mmsicfgaddrH; + } else { + msicfgaddr = aplic_m->smsicfgaddr; + msicfgaddrH = aplic_m->smsicfgaddrH; + } + + lhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXS_SHIFT) & + APLIC_xMSICFGADDRH_LHXS_MASK; + lhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_LHXW_SHIFT) & + APLIC_xMSICFGADDRH_LHXW_MASK; + hhxs = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXS_SHIFT) & + APLIC_xMSICFGADDRH_HHXS_MASK; + hhxw = (msicfgaddrH >> APLIC_xMSICFGADDRH_HHXW_SHIFT) & + APLIC_xMSICFGADDRH_HHXW_MASK; + + group_idx = hart_idx >> lhxw; + hart_idx &= APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw); + + addr = msicfgaddr; + addr |= ((uint64_t)(msicfgaddrH & APLIC_xMSICFGADDRH_BAPPN_MASK)) << 32; + addr |= ((uint64_t)(group_idx & APLIC_xMSICFGADDR_PPN_HHX_MASK(hhxw))) << + APLIC_xMSICFGADDR_PPN_HHX_SHIFT(hhxs); + addr |= ((uint64_t)(hart_idx & APLIC_xMSICFGADDR_PPN_LHX_MASK(lhxw))) << + APLIC_xMSICFGADDR_PPN_LHX_SHIFT(lhxs); + addr |= (uint64_t)(guest_idx & APLIC_xMSICFGADDR_PPN_HART(lhxs)); + addr <<= APLIC_xMSICFGADDR_PPN_SHIFT; + + address_space_stl_le(&address_space_memory, addr, + eiid, MEMTXATTRS_UNSPECIFIED, &result); + if (result != MEMTX_OK) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: MSI write failed for " + "hart_index=%d guest_index=%d eiid=%d\n", + __func__, hart_idx, guest_idx, eiid); + } +} + +static void riscv_aplic_msi_irq_update(RISCVAPLICState *aplic, uint32_t irq) +{ + uint32_t hart_idx, guest_idx, eiid; + + if (!aplic->msimode || (aplic->num_irqs <= irq) || + !(aplic->domaincfg & APLIC_DOMAINCFG_IE)) { + return; + } + + if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) != APLIC_ISTATE_ENPEND) { + return; + } + + riscv_aplic_set_pending_raw(aplic, irq, false); + + hart_idx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + hart_idx &= APLIC_TARGET_HART_IDX_MASK; + if (aplic->mmode) { + /* M-level APLIC ignores guest_index */ + guest_idx = 0; + } else { + guest_idx = aplic->target[irq] >> APLIC_TARGET_GUEST_IDX_SHIFT; + guest_idx &= APLIC_TARGET_GUEST_IDX_MASK; + } + eiid = 
aplic->target[irq] & APLIC_TARGET_EIID_MASK; + riscv_aplic_msi_send(aplic, hart_idx, guest_idx, eiid); +} + +static uint32_t riscv_aplic_idc_topi(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t best_irq, best_iprio; + uint32_t irq, iprio, ihartidx, ithres; + + if (aplic->num_harts <= idc) { + return 0; + } + + ithres = aplic->ithreshold[idc]; + best_irq = best_iprio = UINT32_MAX; + for (irq = 1; irq < aplic->num_irqs; irq++) { + if ((aplic->state[irq] & APLIC_ISTATE_ENPEND) != + APLIC_ISTATE_ENPEND) { + continue; + } + + ihartidx = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + ihartidx &= APLIC_TARGET_HART_IDX_MASK; + if (ihartidx != idc) { + continue; + } + + iprio = aplic->target[irq] & aplic->iprio_mask; + if (ithres && iprio >= ithres) { + continue; + } + + if (iprio < best_iprio) { + best_irq = irq; + best_iprio = iprio; + } + } + + if (best_irq < aplic->num_irqs && best_iprio <= aplic->iprio_mask) { + return (best_irq << APLIC_IDC_TOPI_ID_SHIFT) | best_iprio; + } + + return 0; +} + +static void riscv_aplic_idc_update(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t topi; + + if (aplic->msimode || aplic->num_harts <= idc) { + return; + } + + topi = riscv_aplic_idc_topi(aplic, idc); + if ((aplic->domaincfg & APLIC_DOMAINCFG_IE) && + aplic->idelivery[idc] && + (aplic->iforce[idc] || topi)) { + qemu_irq_raise(aplic->external_irqs[idc]); + } else { + qemu_irq_lower(aplic->external_irqs[idc]); + } +} + +static uint32_t riscv_aplic_idc_claimi(RISCVAPLICState *aplic, uint32_t idc) +{ + uint32_t irq, state, sm, topi = riscv_aplic_idc_topi(aplic, idc); + + if (!topi) { + aplic->iforce[idc] = 0; + return 0; + } + + irq = (topi >> APLIC_IDC_TOPI_ID_SHIFT) & APLIC_IDC_TOPI_ID_MASK; + sm = aplic->sourcecfg[irq] & APLIC_SOURCECFG_SM_MASK; + state = aplic->state[irq]; + riscv_aplic_set_pending_raw(aplic, irq, false); + if ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) && + (state & APLIC_ISTATE_INPUT)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + } else if ((sm == APLIC_SOURCECFG_SM_LEVEL_LOW) && + !(state & APLIC_ISTATE_INPUT)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + } + riscv_aplic_idc_update(aplic, idc); + + return topi; +} + +static void riscv_aplic_request(void *opaque, int irq, int level) +{ + bool update = false; + RISCVAPLICState *aplic = opaque; + uint32_t sourcecfg, childidx, state, idc; + + assert((0 < irq) && (irq < aplic->num_irqs)); + + sourcecfg = aplic->sourcecfg[irq]; + if (sourcecfg & APLIC_SOURCECFG_D) { + childidx = sourcecfg & APLIC_SOURCECFG_CHILDIDX_MASK; + if (childidx < aplic->num_children) { + riscv_aplic_request(aplic->children[childidx], irq, level); + } + return; + } + + state = aplic->state[irq]; + switch (sourcecfg & APLIC_SOURCECFG_SM_MASK) { + case APLIC_SOURCECFG_SM_EDGE_RISE: + if ((level > 0) && !(state & APLIC_ISTATE_INPUT) && + !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_EDGE_FALL: + if ((level <= 0) && (state & APLIC_ISTATE_INPUT) && + !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_LEVEL_HIGH: + if ((level > 0) && !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + case APLIC_SOURCECFG_SM_LEVEL_LOW: + if ((level <= 0) && !(state & APLIC_ISTATE_PENDING)) { + riscv_aplic_set_pending_raw(aplic, irq, true); + update = true; + } + break; + default: + break; + } + + if (level <= 0) { + 
aplic->state[irq] &= ~APLIC_ISTATE_INPUT; + } else { + aplic->state[irq] |= APLIC_ISTATE_INPUT; + } + + if (update) { + if (aplic->msimode) { + riscv_aplic_msi_irq_update(aplic, irq); + } else { + idc = aplic->target[irq] >> APLIC_TARGET_HART_IDX_SHIFT; + idc &= APLIC_TARGET_HART_IDX_MASK; + riscv_aplic_idc_update(aplic, idc); + } + } +} + +static uint64_t riscv_aplic_read(void *opaque, hwaddr addr, unsigned size) +{ + uint32_t irq, word, idc; + RISCVAPLICState *aplic = opaque; + + /* Reads must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + if (addr == APLIC_DOMAINCFG) { + return APLIC_DOMAINCFG_RDONLY | aplic->domaincfg | + (aplic->msimode ? APLIC_DOMAINCFG_DM : 0); + } else if ((APLIC_SOURCECFG_BASE <= addr) && + (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1; + return aplic->sourcecfg[irq]; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDR)) { + return aplic->mmsicfgaddr; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDRH)) { + return aplic->mmsicfgaddrH; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDR)) { + /* + * Registers SMSICFGADDR and SMSICFGADDRH are implemented only if: + * (a) the interrupt domain is at machine level + * (b) the domain's harts implement supervisor mode + * (c) the domain has one or more child supervisor-level domains + * that support MSI delivery mode (domaincfg.DM is not read- + * only zero in at least one of the supervisor-level child + * domains). + */ + return (aplic->num_children) ? aplic->smsicfgaddr : 0; + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDRH)) { + return (aplic->num_children) ? aplic->smsicfgaddrH : 0; + } else if ((APLIC_SETIP_BASE <= addr) && + (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIP_BASE) >> 2; + return riscv_aplic_read_pending_word(aplic, word); + } else if (addr == APLIC_SETIPNUM) { + return 0; + } else if ((APLIC_CLRIP_BASE <= addr) && + (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIP_BASE) >> 2; + return riscv_aplic_read_input_word(aplic, word); + } else if (addr == APLIC_CLRIPNUM) { + return 0; + } else if ((APLIC_SETIE_BASE <= addr) && + (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIE_BASE) >> 2; + return riscv_aplic_read_enabled_word(aplic, word); + } else if (addr == APLIC_SETIENUM) { + return 0; + } else if ((APLIC_CLRIE_BASE <= addr) && + (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) { + return 0; + } else if (addr == APLIC_CLRIENUM) { + return 0; + } else if (addr == APLIC_SETIPNUM_LE) { + return 0; + } else if (addr == APLIC_SETIPNUM_BE) { + return 0; + } else if (addr == APLIC_GENMSI) { + return (aplic->msimode) ? 
aplic->genmsi : 0; + } else if ((APLIC_TARGET_BASE <= addr) && + (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + return aplic->target[irq]; + } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && + (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { + idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE; + switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) { + case APLIC_IDC_IDELIVERY: + return aplic->idelivery[idc]; + case APLIC_IDC_IFORCE: + return aplic->iforce[idc]; + case APLIC_IDC_ITHRESHOLD: + return aplic->ithreshold[idc]; + case APLIC_IDC_TOPI: + return riscv_aplic_idc_topi(aplic, idc); + case APLIC_IDC_CLAIMI: + return riscv_aplic_idc_claimi(aplic, idc); + default: + goto err; + }; + } + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register read 0x%" HWADDR_PRIx "\n", + __func__, addr); + return 0; +} + +static void riscv_aplic_write(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + RISCVAPLICState *aplic = opaque; + uint32_t irq, word, idc = UINT32_MAX; + + /* Writes must be 4 byte words */ + if ((addr & 0x3) != 0) { + goto err; + } + + if (addr == APLIC_DOMAINCFG) { + /* Only IE bit writeable at the moment */ + value &= APLIC_DOMAINCFG_IE; + aplic->domaincfg = value; + } else if ((APLIC_SOURCECFG_BASE <= addr) && + (addr < (APLIC_SOURCECFG_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_SOURCECFG_BASE) >> 2) + 1; + if (!aplic->num_children && (value & APLIC_SOURCECFG_D)) { + value = 0; + } + if (value & APLIC_SOURCECFG_D) { + value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_CHILDIDX_MASK); + } else { + value &= (APLIC_SOURCECFG_D | APLIC_SOURCECFG_SM_MASK); + } + aplic->sourcecfg[irq] = value; + if ((aplic->sourcecfg[irq] & APLIC_SOURCECFG_D) || + (aplic->sourcecfg[irq] == 0)) { + riscv_aplic_set_pending_raw(aplic, irq, false); + riscv_aplic_set_enabled_raw(aplic, irq, false); + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDR)) { + if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->mmsicfgaddr = value; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_MMSICFGADDRH)) { + if (!(aplic->mmsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->mmsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDR)) { + /* + * Registers SMSICFGADDR and SMSICFGADDRH are implemented only if: + * (a) the interrupt domain is at machine level + * (b) the domain's harts implement supervisor mode + * (c) the domain has one or more child supervisor-level domains + * that support MSI delivery mode (domaincfg.DM is not read- + * only zero in at least one of the supervisor-level child + * domains). 
+ */ + if (aplic->num_children && + !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->smsicfgaddr = value; + } + } else if (aplic->mmode && aplic->msimode && + (addr == APLIC_SMSICFGADDRH)) { + if (aplic->num_children && + !(aplic->smsicfgaddrH & APLIC_xMSICFGADDRH_L)) { + aplic->smsicfgaddrH = value & APLIC_xMSICFGADDRH_VALID_MASK; + } + } else if ((APLIC_SETIP_BASE <= addr) && + (addr < (APLIC_SETIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIP_BASE) >> 2; + riscv_aplic_set_pending_word(aplic, word, value, true); + } else if (addr == APLIC_SETIPNUM) { + riscv_aplic_set_pending(aplic, value, true); + } else if ((APLIC_CLRIP_BASE <= addr) && + (addr < (APLIC_CLRIP_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIP_BASE) >> 2; + riscv_aplic_set_pending_word(aplic, word, value, false); + } else if (addr == APLIC_CLRIPNUM) { + riscv_aplic_set_pending(aplic, value, false); + } else if ((APLIC_SETIE_BASE <= addr) && + (addr < (APLIC_SETIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_SETIE_BASE) >> 2; + riscv_aplic_set_enabled_word(aplic, word, value, true); + } else if (addr == APLIC_SETIENUM) { + riscv_aplic_set_enabled(aplic, value, true); + } else if ((APLIC_CLRIE_BASE <= addr) && + (addr < (APLIC_CLRIE_BASE + aplic->bitfield_words * 4))) { + word = (addr - APLIC_CLRIE_BASE) >> 2; + riscv_aplic_set_enabled_word(aplic, word, value, false); + } else if (addr == APLIC_CLRIENUM) { + riscv_aplic_set_enabled(aplic, value, false); + } else if (addr == APLIC_SETIPNUM_LE) { + riscv_aplic_set_pending(aplic, value, true); + } else if (addr == APLIC_SETIPNUM_BE) { + riscv_aplic_set_pending(aplic, bswap32(value), true); + } else if (addr == APLIC_GENMSI) { + if (aplic->msimode) { + aplic->genmsi = value & ~(APLIC_TARGET_GUEST_IDX_MASK << + APLIC_TARGET_GUEST_IDX_SHIFT); + riscv_aplic_msi_send(aplic, + value >> APLIC_TARGET_HART_IDX_SHIFT, + 0, + value & APLIC_TARGET_EIID_MASK); + } + } else if ((APLIC_TARGET_BASE <= addr) && + (addr < (APLIC_TARGET_BASE + (aplic->num_irqs - 1) * 4))) { + irq = ((addr - APLIC_TARGET_BASE) >> 2) + 1; + if (aplic->msimode) { + aplic->target[irq] = value; + } else { + aplic->target[irq] = (value & ~APLIC_TARGET_IPRIO_MASK) | + ((value & aplic->iprio_mask) ? 
+ (value & aplic->iprio_mask) : 1); + } + } else if (!aplic->msimode && (APLIC_IDC_BASE <= addr) && + (addr < (APLIC_IDC_BASE + aplic->num_harts * APLIC_IDC_SIZE))) { + idc = (addr - APLIC_IDC_BASE) / APLIC_IDC_SIZE; + switch (addr - (APLIC_IDC_BASE + idc * APLIC_IDC_SIZE)) { + case APLIC_IDC_IDELIVERY: + aplic->idelivery[idc] = value & 0x1; + break; + case APLIC_IDC_IFORCE: + aplic->iforce[idc] = value & 0x1; + break; + case APLIC_IDC_ITHRESHOLD: + aplic->ithreshold[idc] = value & aplic->iprio_mask; + break; + default: + goto err; + }; + } else { + goto err; + } + + if (aplic->msimode) { + for (irq = 1; irq < aplic->num_irqs; irq++) { + riscv_aplic_msi_irq_update(aplic, irq); + } + } else { + if (idc == UINT32_MAX) { + for (idc = 0; idc < aplic->num_harts; idc++) { + riscv_aplic_idc_update(aplic, idc); + } + } else { + riscv_aplic_idc_update(aplic, idc); + } + } + + return; + +err: + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Invalid register write 0x%" HWADDR_PRIx "\n", + __func__, addr); +} + +static const MemoryRegionOps riscv_aplic_ops = { + .read = riscv_aplic_read, + .write = riscv_aplic_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 4 + } +}; + +static void riscv_aplic_realize(DeviceState *dev, Error **errp) +{ + uint32_t i; + RISCVAPLICState *aplic = RISCV_APLIC(dev); + + aplic->bitfield_words = (aplic->num_irqs + 31) >> 5; + aplic->sourcecfg = g_new0(uint32_t, aplic->num_irqs); + aplic->state = g_new(uint32_t, aplic->num_irqs); + aplic->target = g_new0(uint32_t, aplic->num_irqs); + if (!aplic->msimode) { + for (i = 0; i < aplic->num_irqs; i++) { + aplic->target[i] = 1; + } + } + aplic->idelivery = g_new0(uint32_t, aplic->num_harts); + aplic->iforce = g_new0(uint32_t, aplic->num_harts); + aplic->ithreshold = g_new0(uint32_t, aplic->num_harts); + + memory_region_init_io(&aplic->mmio, OBJECT(dev), &riscv_aplic_ops, aplic, + TYPE_RISCV_APLIC, aplic->aperture_size); + sysbus_init_mmio(SYS_BUS_DEVICE(dev), &aplic->mmio); + + /* + * Only root APLICs have hardware IRQ lines. All non-root APLICs + * have IRQ lines delegated by their parent APLIC. + */ + if (!aplic->parent) { + qdev_init_gpio_in(dev, riscv_aplic_request, aplic->num_irqs); + } + + /* Create output IRQ lines for non-MSI mode */ + if (!aplic->msimode) { + aplic->external_irqs = g_malloc(sizeof(qemu_irq) * aplic->num_harts); + qdev_init_gpio_out(dev, aplic->external_irqs, aplic->num_harts); + + /* Claim the CPU interrupt to be triggered by this APLIC */ + for (i = 0; i < aplic->num_harts; i++) { + RISCVCPU *cpu = RISCV_CPU(qemu_get_cpu(aplic->hartid_base + i)); + if (riscv_cpu_claim_interrupts(cpu, + (aplic->mmode) ? MIP_MEIP : MIP_SEIP) < 0) { + error_report("%s already claimed", + (aplic->mmode) ? 
"MEIP" : "SEIP"); + exit(1); + } + } + } + + msi_nonbroken = true; +} + +static Property riscv_aplic_properties[] = { + DEFINE_PROP_UINT32("aperture-size", RISCVAPLICState, aperture_size, 0), + DEFINE_PROP_UINT32("hartid-base", RISCVAPLICState, hartid_base, 0), + DEFINE_PROP_UINT32("num-harts", RISCVAPLICState, num_harts, 0), + DEFINE_PROP_UINT32("iprio-mask", RISCVAPLICState, iprio_mask, 0), + DEFINE_PROP_UINT32("num-irqs", RISCVAPLICState, num_irqs, 0), + DEFINE_PROP_BOOL("msimode", RISCVAPLICState, msimode, 0), + DEFINE_PROP_BOOL("mmode", RISCVAPLICState, mmode, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_riscv_aplic = { + .name = "riscv_aplic", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_UINT32(domaincfg, RISCVAPLICState), + VMSTATE_UINT32(mmsicfgaddr, RISCVAPLICState), + VMSTATE_UINT32(mmsicfgaddrH, RISCVAPLICState), + VMSTATE_UINT32(smsicfgaddr, RISCVAPLICState), + VMSTATE_UINT32(smsicfgaddrH, RISCVAPLICState), + VMSTATE_UINT32(genmsi, RISCVAPLICState), + VMSTATE_VARRAY_UINT32(sourcecfg, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(state, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(target, RISCVAPLICState, + num_irqs, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(idelivery, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(iforce, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_VARRAY_UINT32(ithreshold, RISCVAPLICState, + num_harts, 0, + vmstate_info_uint32, uint32_t), + VMSTATE_END_OF_LIST() + } +}; + +static void riscv_aplic_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + device_class_set_props(dc, riscv_aplic_properties); + dc->realize = riscv_aplic_realize; + dc->vmsd = &vmstate_riscv_aplic; +} + +static const TypeInfo riscv_aplic_info = { + .name = TYPE_RISCV_APLIC, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(RISCVAPLICState), + .class_init = riscv_aplic_class_init, +}; + +static void riscv_aplic_register_types(void) +{ + type_register_static(&riscv_aplic_info); +} + +type_init(riscv_aplic_register_types) + +/* + * Add a APLIC device to another APLIC device as child for + * interrupt delegation. + */ +void riscv_aplic_add_child(DeviceState *parent, DeviceState *child) +{ + RISCVAPLICState *caplic, *paplic; + + assert(parent && child); + caplic = RISCV_APLIC(child); + paplic = RISCV_APLIC(parent); + + assert(paplic->num_irqs == caplic->num_irqs); + assert(paplic->num_children <= QEMU_APLIC_MAX_CHILDREN); + + caplic->parent = paplic; + paplic->children[paplic->num_children] = caplic; + paplic->num_children++; +} + +/* + * Create APLIC device. 
+ */ +DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources, + uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent) +{ + DeviceState *dev = qdev_new(TYPE_RISCV_APLIC); + uint32_t i; + + assert(num_harts < APLIC_MAX_IDC); + assert((APLIC_IDC_BASE + (num_harts * APLIC_IDC_SIZE)) <= size); + assert(num_sources < APLIC_MAX_SOURCE); + assert(APLIC_MIN_IPRIO_BITS <= iprio_bits); + assert(iprio_bits <= APLIC_MAX_IPRIO_BITS); + + qdev_prop_set_uint32(dev, "aperture-size", size); + qdev_prop_set_uint32(dev, "hartid-base", hartid_base); + qdev_prop_set_uint32(dev, "num-harts", num_harts); + qdev_prop_set_uint32(dev, "iprio-mask", ((1U << iprio_bits) - 1)); + qdev_prop_set_uint32(dev, "num-irqs", num_sources + 1); + qdev_prop_set_bit(dev, "msimode", msimode); + qdev_prop_set_bit(dev, "mmode", mmode); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, addr); + + if (parent) { + riscv_aplic_add_child(parent, dev); + } + + if (!msimode) { + for (i = 0; i < num_harts; i++) { + CPUState *cpu = qemu_get_cpu(hartid_base + i); + + qdev_connect_gpio_out_named(dev, NULL, i, + qdev_get_gpio_in(DEVICE(cpu), + (mmode) ? IRQ_M_EXT : IRQ_S_EXT)); + } + } + + return dev; +} diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 2643c8bc37..e3068d6126 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -212,8 +212,17 @@ static void create_fdt_socket_cpus(RISCVVirtState *s, int socket, qemu_fdt_add_subnode(mc->fdt, intc_name); qemu_fdt_setprop_cell(mc->fdt, intc_name, "phandle", intc_phandles[cpu]); - qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible", - "riscv,cpu-intc"); + if (riscv_feature(&s->soc[socket].harts[cpu].env, + RISCV_FEATURE_AIA)) { + static const char * const compat[2] = { + "riscv,cpu-intc-aia", "riscv,cpu-intc" + }; + qemu_fdt_setprop_string_array(mc->fdt, intc_name, "compatible", + (char **)&compat, ARRAY_SIZE(compat)); + } else { + qemu_fdt_setprop_string(mc->fdt, intc_name, "compatible", + "riscv,cpu-intc"); + } qemu_fdt_setprop(mc->fdt, intc_name, "interrupt-controller", NULL, 0); qemu_fdt_setprop_cell(mc->fdt, intc_name, "#interrupt-cells", 1); diff --git a/include/hw/intc/ibex_plic.h b/include/hw/intc/ibex_plic.h deleted file mode 100644 index d596436e06..0000000000 --- a/include/hw/intc/ibex_plic.h +++ /dev/null @@ -1,67 +0,0 @@ -/* - * QEMU RISC-V lowRISC Ibex PLIC - * - * Copyright (c) 2020 Western Digital - * - * This program is free software; you can redistribute it and/or modify it - * under the terms and conditions of the GNU General Public License, - * version 2 or later, as published by the Free Software Foundation. - * - * This program is distributed in the hope it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see <http://www.gnu.org/licenses/>. 
- */ - -#ifndef HW_IBEX_PLIC_H -#define HW_IBEX_PLIC_H - -#include "hw/sysbus.h" -#include "qom/object.h" - -#define TYPE_IBEX_PLIC "ibex-plic" -OBJECT_DECLARE_SIMPLE_TYPE(IbexPlicState, IBEX_PLIC) - -struct IbexPlicState { - /*< private >*/ - SysBusDevice parent_obj; - - /*< public >*/ - MemoryRegion mmio; - - uint32_t *pending; - uint32_t *hidden_pending; - uint32_t *claimed; - uint32_t *source; - uint32_t *priority; - uint32_t *enable; - uint32_t threshold; - uint32_t claim; - - /* config */ - uint32_t num_cpus; - uint32_t num_sources; - - uint32_t pending_base; - uint32_t pending_num; - - uint32_t source_base; - uint32_t source_num; - - uint32_t priority_base; - uint32_t priority_num; - - uint32_t enable_base; - uint32_t enable_num; - - uint32_t threshold_base; - - uint32_t claim_base; - - qemu_irq *external_irqs; -}; - -#endif /* HW_IBEX_PLIC_H */ diff --git a/include/hw/intc/riscv_aplic.h b/include/hw/intc/riscv_aplic.h new file mode 100644 index 0000000000..de8532fbc3 --- /dev/null +++ b/include/hw/intc/riscv_aplic.h @@ -0,0 +1,79 @@ +/* + * RISC-V APLIC (Advanced Platform Level Interrupt Controller) interface + * + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef HW_RISCV_APLIC_H +#define HW_RISCV_APLIC_H + +#include "hw/sysbus.h" +#include "qom/object.h" + +#define TYPE_RISCV_APLIC "riscv.aplic" + +typedef struct RISCVAPLICState RISCVAPLICState; +DECLARE_INSTANCE_CHECKER(RISCVAPLICState, RISCV_APLIC, TYPE_RISCV_APLIC) + +#define APLIC_MIN_SIZE 0x4000 +#define APLIC_SIZE_ALIGN(__x) (((__x) + (APLIC_MIN_SIZE - 1)) & \ + ~(APLIC_MIN_SIZE - 1)) +#define APLIC_SIZE(__num_harts) (APLIC_MIN_SIZE + \ + APLIC_SIZE_ALIGN(32 * (__num_harts))) + +struct RISCVAPLICState { + /*< private >*/ + SysBusDevice parent_obj; + qemu_irq *external_irqs; + + /*< public >*/ + MemoryRegion mmio; + uint32_t bitfield_words; + uint32_t domaincfg; + uint32_t mmsicfgaddr; + uint32_t mmsicfgaddrH; + uint32_t smsicfgaddr; + uint32_t smsicfgaddrH; + uint32_t genmsi; + uint32_t *sourcecfg; + uint32_t *state; + uint32_t *target; + uint32_t *idelivery; + uint32_t *iforce; + uint32_t *ithreshold; + + /* topology */ +#define QEMU_APLIC_MAX_CHILDREN 16 + struct RISCVAPLICState *parent; + struct RISCVAPLICState *children[QEMU_APLIC_MAX_CHILDREN]; + uint16_t num_children; + + /* config */ + uint32_t aperture_size; + uint32_t hartid_base; + uint32_t num_harts; + uint32_t iprio_mask; + uint32_t num_irqs; + bool msimode; + bool mmode; +}; + +void riscv_aplic_add_child(DeviceState *parent, DeviceState *child); + +DeviceState *riscv_aplic_create(hwaddr addr, hwaddr size, + uint32_t hartid_base, uint32_t num_harts, uint32_t num_sources, + uint32_t iprio_bits, bool msimode, bool mmode, DeviceState *parent); + +#endif diff --git a/target/riscv/XVentanaCondOps.decode b/target/riscv/XVentanaCondOps.decode new file mode 100644 index 0000000000..5aef7c3d72 --- /dev/null +++ b/target/riscv/XVentanaCondOps.decode @@ -0,0 +1,25 @@ +# +# RISC-V translation routines for the XVentanaCondOps extension +# +# Copyright (c) 2022 Dr. Philipp Tomsich, philipp.tomsich@vrull.eu +# +# SPDX-License-Identifier: LGPL-2.1-or-later +# +# Reference: VTx-family custom instructions +# Custom ISA extensions for Ventana Micro Systems RISC-V cores +# (https://github.com/ventanamicro/ventana-custom-extensions/releases/download/v1.0.0/ventana-custom-extensions-v1.0.0.pdf) + +# Fields +%rs2 20:5 +%rs1 15:5 +%rd 7:5 + +# Argument sets +&r rd rs1 rs2 !extern + +# Formats +@r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd + +# *** RV64 Custom-3 Extension *** +vt_maskc 0000000 ..... ..... 110 ..... 1111011 @r +vt_maskcn 0000000 ..... ..... 111 ..... 1111011 @r diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 1cb0436187..b0a40b83e7 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -135,11 +135,6 @@ static void set_vext_version(CPURISCVState *env, int vext_ver) env->vext_ver = vext_ver; } -static void set_feature(CPURISCVState *env, int feature) -{ - env->features |= (1ULL << feature); -} - static void set_resetvec(CPURISCVState *env, target_ulong resetvec) { #ifndef CONFIG_USER_ONLY @@ -405,6 +400,10 @@ void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb, static void riscv_cpu_reset(DeviceState *dev) { +#ifndef CONFIG_USER_ONLY + uint8_t iprio; + int i, irq, rdzero; +#endif CPUState *cs = CPU(dev); RISCVCPU *cpu = RISCV_CPU(cs); RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu); @@ -434,8 +433,24 @@ static void riscv_cpu_reset(DeviceState *dev) } } env->mcause = 0; + env->miclaim = MIP_SGEIP; env->pc = env->resetvec; env->two_stage_lookup = false; + + /* Initialized default priorities of local interrupts. 
*/ + for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { + iprio = riscv_cpu_default_priority(i); + env->miprio[i] = (i == IRQ_M_EXT) ? 0 : iprio; + env->siprio[i] = (i == IRQ_S_EXT) ? 0 : iprio; + env->hviprio[i] = 0; + } + i = 0; + while (!riscv_cpu_hviprio_index2irq(i, &irq, &rdzero)) { + if (!rdzero) { + env->hviprio[irq] = env->miprio[irq]; + } + i++; + } /* mmte is supposed to have pm.current hardwired to 1 */ env->mmte |= (PM_EXT_INITIAL | MMTE_M_PM_CURRENT); #endif @@ -507,30 +522,33 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } if (cpu->cfg.mmu) { - set_feature(env, RISCV_FEATURE_MMU); + riscv_set_feature(env, RISCV_FEATURE_MMU); } if (cpu->cfg.pmp) { - set_feature(env, RISCV_FEATURE_PMP); + riscv_set_feature(env, RISCV_FEATURE_PMP); /* * Enhanced PMP should only be available * on harts with PMP support */ if (cpu->cfg.epmp) { - set_feature(env, RISCV_FEATURE_EPMP); + riscv_set_feature(env, RISCV_FEATURE_EPMP); } } + if (cpu->cfg.aia) { + riscv_set_feature(env, RISCV_FEATURE_AIA); + } + set_resetvec(env, cpu->cfg.resetvec); /* Validate that MISA_MXL is set properly. */ switch (env->misa_mxl_max) { #ifdef TARGET_RISCV64 case MXL_RV64: - cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; - break; case MXL_RV128: + cc->gdb_core_xml_file = "riscv-64bit-cpu.xml"; break; #endif case MXL_RV32: @@ -663,27 +681,53 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) static void riscv_cpu_set_irq(void *opaque, int irq, int level) { RISCVCPU *cpu = RISCV_CPU(opaque); + CPURISCVState *env = &cpu->env; - switch (irq) { - case IRQ_U_SOFT: - case IRQ_S_SOFT: - case IRQ_VS_SOFT: - case IRQ_M_SOFT: - case IRQ_U_TIMER: - case IRQ_S_TIMER: - case IRQ_VS_TIMER: - case IRQ_M_TIMER: - case IRQ_U_EXT: - case IRQ_S_EXT: - case IRQ_VS_EXT: - case IRQ_M_EXT: - if (kvm_enabled()) { - kvm_riscv_set_irq(cpu, irq, level); - } else { - riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level)); + if (irq < IRQ_LOCAL_MAX) { + switch (irq) { + case IRQ_U_SOFT: + case IRQ_S_SOFT: + case IRQ_VS_SOFT: + case IRQ_M_SOFT: + case IRQ_U_TIMER: + case IRQ_S_TIMER: + case IRQ_VS_TIMER: + case IRQ_M_TIMER: + case IRQ_U_EXT: + case IRQ_S_EXT: + case IRQ_VS_EXT: + case IRQ_M_EXT: + if (kvm_enabled()) { + kvm_riscv_set_irq(cpu, irq, level); + } else { + riscv_cpu_update_mip(cpu, 1 << irq, BOOL_TO_MASK(level)); + } + break; + default: + g_assert_not_reached(); } - break; - default: + } else if (irq < (IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX)) { + /* Require H-extension for handling guest local interrupts */ + if (!riscv_has_ext(env, RVH)) { + g_assert_not_reached(); + } + + /* Compute bit position in HGEIP CSR */ + irq = irq - IRQ_LOCAL_MAX + 1; + if (env->geilen < irq) { + g_assert_not_reached(); + } + + /* Update HGEIP CSR */ + env->hgeip &= ~((target_ulong)1 << irq); + if (level) { + env->hgeip |= (target_ulong)1 << irq; + } + + /* Update mip.SGEIP bit */ + riscv_cpu_update_mip(cpu, MIP_SGEIP, + BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); + } else { g_assert_not_reached(); } } @@ -696,7 +740,8 @@ static void riscv_cpu_init(Object *obj) cpu_set_cpustate_pointers(cpu); #ifndef CONFIG_USER_ONLY - qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, 12); + qdev_init_gpio_in(DEVICE(cpu), riscv_cpu_set_irq, + IRQ_LOCAL_MAX + IRQ_LOCAL_GUEST_MAX); #endif /* CONFIG_USER_ONLY */ } @@ -729,15 +774,23 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), + 
DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), + DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), + DEFINE_PROP_BOOL("zba", RISCVCPU, cfg.ext_zba, true), DEFINE_PROP_BOOL("zbb", RISCVCPU, cfg.ext_zbb, true), DEFINE_PROP_BOOL("zbc", RISCVCPU, cfg.ext_zbc, true), DEFINE_PROP_BOOL("zbs", RISCVCPU, cfg.ext_zbs, true), + /* Vendor-specific custom extensions */ + DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), + /* These are experimental so mark with 'x-' */ DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), /* ePMP 0.9.3 */ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), + DEFINE_PROP_BOOL("x-aia", RISCVCPU, cfg.aia, false), DEFINE_PROP_UINT64("resetvec", RISCVCPU, cfg.resetvec, DEFAULT_RSTVEC), DEFINE_PROP_END_OF_LIST(), diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 55635d68d5..8183fb86d5 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -78,7 +78,8 @@ enum { RISCV_FEATURE_MMU, RISCV_FEATURE_PMP, RISCV_FEATURE_EPMP, - RISCV_FEATURE_MISA + RISCV_FEATURE_MISA, + RISCV_FEATURE_AIA }; #define PRIV_VERSION_1_10_0 0x00011000 @@ -161,6 +162,7 @@ struct CPURISCVState { target_ulong priv; /* This contains QEMU specific information about the virt state. */ target_ulong virt; + target_ulong geilen; target_ulong resetvec; target_ulong mhartid; @@ -170,12 +172,12 @@ struct CPURISCVState { */ uint64_t mstatus; - target_ulong mip; + uint64_t mip; - uint32_t miclaim; + uint64_t miclaim; - target_ulong mie; - target_ulong mideleg; + uint64_t mie; + uint64_t mideleg; target_ulong satp; /* since: priv-1.10.0 */ target_ulong stval; @@ -190,16 +192,30 @@ struct CPURISCVState { target_ulong mcause; target_ulong mtval; /* since: priv-1.10.0 */ + /* Machine and Supervisor interrupt priorities */ + uint8_t miprio[64]; + uint8_t siprio[64]; + + /* AIA CSRs */ + target_ulong miselect; + target_ulong siselect; + /* Hypervisor CSRs */ target_ulong hstatus; target_ulong hedeleg; - target_ulong hideleg; + uint64_t hideleg; target_ulong hcounteren; target_ulong htval; target_ulong htinst; target_ulong hgatp; + target_ulong hgeie; + target_ulong hgeip; uint64_t htimedelta; + /* Hypervisor controlled virtual interrupt priorities */ + target_ulong hvictl; + uint8_t hviprio[64]; + /* Upper 64-bits of 128-bit CSRs */ uint64_t mscratchh; uint64_t sscratchh; @@ -217,6 +233,9 @@ struct CPURISCVState { target_ulong vstval; target_ulong vsatp; + /* AIA VS-mode CSRs */ + target_ulong vsiselect; + target_ulong mtval2; target_ulong mtinst; @@ -252,6 +271,22 @@ struct CPURISCVState { uint64_t (*rdtime_fn)(uint32_t); uint32_t rdtime_fn_arg; + /* machine specific AIA ireg read-modify-write callback */ +#define AIA_MAKE_IREG(__isel, __priv, __virt, __vgein, __xlen) \ + ((((__xlen) & 0xff) << 24) | \ + (((__vgein) & 0x3f) << 20) | \ + (((__virt) & 0x1) << 18) | \ + (((__priv) & 0x3) << 16) | \ + (__isel & 0xffff)) +#define AIA_IREG_ISEL(__ireg) ((__ireg) & 0xffff) +#define AIA_IREG_PRIV(__ireg) (((__ireg) >> 16) & 0x3) +#define AIA_IREG_VIRT(__ireg) (((__ireg) >> 18) & 0x1) +#define AIA_IREG_VGEIN(__ireg) (((__ireg) >> 20) & 0x3f) +#define AIA_IREG_XLEN(__ireg) (((__ireg) >> 24) & 0xff) + int (*aia_ireg_rmw_fn[4])(void *arg, target_ulong reg, + target_ulong *val, target_ulong new_val, target_ulong write_mask); + void *aia_ireg_rmw_fn_arg[4]; + /* True if in debugger mode. 
*/ bool debugger; @@ -303,6 +338,53 @@ struct RISCVCPUClass { DeviceReset parent_reset; }; +struct RISCVCPUConfig { + bool ext_i; + bool ext_e; + bool ext_g; + bool ext_m; + bool ext_a; + bool ext_f; + bool ext_d; + bool ext_c; + bool ext_s; + bool ext_u; + bool ext_h; + bool ext_j; + bool ext_v; + bool ext_zba; + bool ext_zbb; + bool ext_zbc; + bool ext_zbs; + bool ext_counters; + bool ext_ifencei; + bool ext_icsr; + bool ext_svinval; + bool ext_svnapot; + bool ext_svpbmt; + bool ext_zfh; + bool ext_zfhmin; + bool ext_zve32f; + bool ext_zve64f; + + /* Vendor-specific custom extensions */ + bool ext_XVentanaCondOps; + + char *priv_spec; + char *user_spec; + char *bext_spec; + char *vext_spec; + uint16_t vlen; + uint16_t elen; + bool mmu; + bool pmp; + bool epmp; + bool aia; + uint64_t resetvec; +}; + +typedef struct RISCVCPUConfig RISCVCPUConfig; + /** * RISCVCPU: * @env: #CPURISCVState @@ -320,43 +402,7 @@ struct RISCVCPU { char *dyn_vreg_xml; /* Configuration Settings */ - struct { - bool ext_i; - bool ext_e; - bool ext_g; - bool ext_m; - bool ext_a; - bool ext_f; - bool ext_d; - bool ext_c; - bool ext_s; - bool ext_u; - bool ext_h; - bool ext_j; - bool ext_v; - bool ext_zba; - bool ext_zbb; - bool ext_zbc; - bool ext_zbs; - bool ext_counters; - bool ext_ifencei; - bool ext_icsr; - bool ext_zfh; - bool ext_zfhmin; - bool ext_zve32f; - bool ext_zve64f; - - char *priv_spec; - char *user_spec; - char *bext_spec; - char *vext_spec; - uint16_t vlen; - uint16_t elen; - bool mmu; - bool pmp; - bool epmp; - uint64_t resetvec; - } cfg; + RISCVCPUConfig cfg; }; static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) @@ -369,6 +415,11 @@ static inline bool riscv_feature(CPURISCVState *env, int feature) return env->features & (1ULL << feature); } +static inline void riscv_set_feature(CPURISCVState *env, int feature) +{ + env->features |= (1ULL << feature); +} + #include "cpu_user.h" extern const char * const riscv_int_regnames[]; @@ -383,7 +434,14 @@ int riscv_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs, int cpuid, void *opaque); int riscv_cpu_gdb_read_register(CPUState *cpu, GByteArray *buf, int reg); int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); +int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero); +uint8_t riscv_cpu_default_priority(int irq); +int riscv_cpu_mirq_pending(CPURISCVState *env); +int riscv_cpu_sirq_pending(CPURISCVState *env); +int riscv_cpu_vsirq_pending(CPURISCVState *env); bool riscv_cpu_fp_enabled(CPURISCVState *env); +target_ulong riscv_cpu_get_geilen(CPURISCVState *env); +void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen); bool riscv_cpu_vector_enabled(CPURISCVState *env); bool riscv_cpu_virt_enabled(CPURISCVState *env); void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable); @@ -410,11 +468,18 @@ void riscv_cpu_list(void); #ifndef CONFIG_USER_ONLY bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request); void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env); -int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts); -uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value); +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts); +uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value); #define BOOL_TO_MASK(x) (-!!(x)) /* helper for riscv_cpu_update_mip value */ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t), uint32_t arg); +void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, 
uint32_t priv, + int (*rmw_fn)(void *arg, + target_ulong reg, + target_ulong *val, + target_ulong new_val, + target_ulong write_mask), + void *rmw_fn_arg); #endif void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); @@ -459,6 +524,7 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) return env->misa_mxl; } #endif +#define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env))) #if defined(TARGET_RISCV32) #define cpu_recompute_xl(env) ((void)(env), MXL_RV32) @@ -495,6 +561,19 @@ static inline int riscv_cpu_xlen(CPURISCVState *env) return 16 << env->xl; } +#ifdef TARGET_RISCV32 +#define riscv_cpu_sxl(env) ((void)(env), MXL_RV32) +#else +static inline RISCVMXL riscv_cpu_sxl(CPURISCVState *env) +{ +#ifdef CONFIG_USER_ONLY + return env->misa_mxl; +#else + return get_field(env->mstatus, MSTATUS64_SXL); +#endif +} +#endif + /* * Encode LMUL to lmul as follows: * LMUL vlmul lmul diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 7c87433645..0fe01d7da5 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -168,6 +168,31 @@ #define CSR_MTVAL 0x343 #define CSR_MIP 0x344 +/* Machine-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_MISELECT 0x350 +#define CSR_MIREG 0x351 + +/* Machine-Level Interrupts (AIA) */ +#define CSR_MTOPI 0xfb0 + +/* Machine-Level IMSIC Interface (AIA) */ +#define CSR_MSETEIPNUM 0x358 +#define CSR_MCLREIPNUM 0x359 +#define CSR_MSETEIENUM 0x35a +#define CSR_MCLREIENUM 0x35b +#define CSR_MTOPEI 0x35c + +/* Virtual Interrupts for Supervisor Level (AIA) */ +#define CSR_MVIEN 0x308 +#define CSR_MVIP 0x309 + +/* Machine-Level High-Half CSRs (AIA) */ +#define CSR_MIDELEGH 0x313 +#define CSR_MIEH 0x314 +#define CSR_MVIENH 0x318 +#define CSR_MVIPH 0x319 +#define CSR_MIPH 0x354 + /* Supervisor Trap Setup */ #define CSR_SSTATUS 0x100 #define CSR_SEDELEG 0x102 @@ -187,6 +212,24 @@ #define CSR_SPTBR 0x180 #define CSR_SATP 0x180 +/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ +#define CSR_SISELECT 0x150 +#define CSR_SIREG 0x151 + +/* Supervisor-Level Interrupts (AIA) */ +#define CSR_STOPI 0xdb0 + +/* Supervisor-Level IMSIC Interface (AIA) */ +#define CSR_SSETEIPNUM 0x158 +#define CSR_SCLREIPNUM 0x159 +#define CSR_SSETEIENUM 0x15a +#define CSR_SCLREIENUM 0x15b +#define CSR_STOPEI 0x15c + +/* Supervisor-Level High-Half CSRs (AIA) */ +#define CSR_SIEH 0x114 +#define CSR_SIPH 0x154 + /* Hpervisor CSRs */ #define CSR_HSTATUS 0x600 #define CSR_HEDELEG 0x602 @@ -217,6 +260,35 @@ #define CSR_MTINST 0x34a #define CSR_MTVAL2 0x34b +/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ +#define CSR_HVIEN 0x608 +#define CSR_HVICTL 0x609 +#define CSR_HVIPRIO1 0x646 +#define CSR_HVIPRIO2 0x647 + +/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */ +#define CSR_VSISELECT 0x250 +#define CSR_VSIREG 0x251 + +/* VS-Level Interrupts (H-extension with AIA) */ +#define CSR_VSTOPI 0xeb0 + +/* VS-Level IMSIC Interface (H-extension with AIA) */ +#define CSR_VSSETEIPNUM 0x258 +#define CSR_VSCLREIPNUM 0x259 +#define CSR_VSSETEIENUM 0x25a +#define CSR_VSCLREIENUM 0x25b +#define CSR_VSTOPEI 0x25c + +/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ +#define CSR_HIDELEGH 0x613 +#define CSR_HVIENH 0x618 +#define CSR_HVIPH 0x655 +#define CSR_HVIPRIO1H 0x656 +#define CSR_HVIPRIO2H 0x657 +#define CSR_VSIEH 0x214 +#define CSR_VSIPH 0x254 + /* Enhanced Physical Memory Protection (ePMP) */ #define CSR_MSECCFG 0x747 #define CSR_MSECCFGH 0x757 @@ -489,10 +561,16 @@ 
typedef enum { #define PTE_A 0x040 /* Accessed */ #define PTE_D 0x080 /* Dirty */ #define PTE_SOFT 0x300 /* Reserved for Software */ +#define PTE_PBMT 0x6000000000000000ULL /* Page-based memory types */ +#define PTE_N 0x8000000000000000ULL /* NAPOT translation */ +#define PTE_ATTR (PTE_N | PTE_PBMT) /* All attributes bits */ /* Page table PPN shift amount */ #define PTE_PPN_SHIFT 10 +/* Page table PPN mask */ +#define PTE_PPN_MASK 0x3FFFFFFFFFFC00ULL + /* Leaf page shift amount */ #define PGSHIFT 12 @@ -540,6 +618,9 @@ typedef enum RISCVException { #define IRQ_S_EXT 9 #define IRQ_VS_EXT 10 #define IRQ_M_EXT 11 +#define IRQ_S_GEXT 12 +#define IRQ_LOCAL_MAX 16 +#define IRQ_LOCAL_GUEST_MAX (TARGET_LONG_BITS - 1) /* mip masks */ #define MIP_USIP (1 << IRQ_U_SOFT) @@ -554,6 +635,7 @@ typedef enum RISCVException { #define MIP_SEIP (1 << IRQ_S_EXT) #define MIP_VSEIP (1 << IRQ_VS_EXT) #define MIP_MEIP (1 << IRQ_M_EXT) +#define MIP_SGEIP (1 << IRQ_S_GEXT) /* sip masks */ #define SIP_SSIP MIP_SSIP @@ -631,4 +713,51 @@ typedef enum RISCVException { #define UMTE_U_PM_INSN U_PM_INSN #define UMTE_MASK (UMTE_U_PM_ENABLE | MMTE_U_PM_CURRENT | UMTE_U_PM_INSN) +/* MISELECT, SISELECT, and VSISELECT bits (AIA) */ +#define ISELECT_IPRIO0 0x30 +#define ISELECT_IPRIO15 0x3f +#define ISELECT_IMSIC_EIDELIVERY 0x70 +#define ISELECT_IMSIC_EITHRESHOLD 0x72 +#define ISELECT_IMSIC_EIP0 0x80 +#define ISELECT_IMSIC_EIP63 0xbf +#define ISELECT_IMSIC_EIE0 0xc0 +#define ISELECT_IMSIC_EIE63 0xff +#define ISELECT_IMSIC_FIRST ISELECT_IMSIC_EIDELIVERY +#define ISELECT_IMSIC_LAST ISELECT_IMSIC_EIE63 +#define ISELECT_MASK 0x1ff + +/* Dummy [M|S|VS]ISELECT value for emulating [M|S|VS]TOPEI CSRs */ +#define ISELECT_IMSIC_TOPEI (ISELECT_MASK + 1) + +/* IMSIC bits (AIA) */ +#define IMSIC_TOPEI_IID_SHIFT 16 +#define IMSIC_TOPEI_IID_MASK 0x7ff +#define IMSIC_TOPEI_IPRIO_MASK 0x7ff +#define IMSIC_EIPx_BITS 32 +#define IMSIC_EIEx_BITS 32 + +/* MTOPI and STOPI bits (AIA) */ +#define TOPI_IID_SHIFT 16 +#define TOPI_IID_MASK 0xfff +#define TOPI_IPRIO_MASK 0xff + +/* Interrupt priority bits (AIA) */ +#define IPRIO_IRQ_BITS 8 +#define IPRIO_MMAXIPRIO 255 +#define IPRIO_DEFAULT_UPPER 4 +#define IPRIO_DEFAULT_MIDDLE (IPRIO_DEFAULT_UPPER + 24) +#define IPRIO_DEFAULT_M IPRIO_DEFAULT_MIDDLE +#define IPRIO_DEFAULT_S (IPRIO_DEFAULT_M + 3) +#define IPRIO_DEFAULT_SGEXT (IPRIO_DEFAULT_S + 3) +#define IPRIO_DEFAULT_VS (IPRIO_DEFAULT_SGEXT + 1) +#define IPRIO_DEFAULT_LOWER (IPRIO_DEFAULT_VS + 3) + +/* HVICTL bits (AIA) */ +#define HVICTL_VTI 0x40000000 +#define HVICTL_IID 0x0fff0000 +#define HVICTL_IPRIOM 0x00000100 +#define HVICTL_IPRIO 0x000000ff +#define HVICTL_VALID_MASK \ + (HVICTL_VTI | HVICTL_IID | HVICTL_IPRIOM | HVICTL_IPRIO) + #endif diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 327a2c4f1d..746335bfd6 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -152,32 +152,275 @@ void riscv_cpu_update_mask(CPURISCVState *env) } #ifndef CONFIG_USER_ONLY + +/* + * The HS-mode is allowed to configure priority only for the + * following VS-mode local interrupts: + * + * 0 (Reserved interrupt, reads as zero) + * 1 Supervisor software interrupt + * 4 (Reserved interrupt, reads as zero) + * 5 Supervisor timer interrupt + * 8 (Reserved interrupt, reads as zero) + * 13 (Reserved interrupt) + * 14 " + * 15 " + * 16 " + * 18 Debug/trace interrupt + * 20 (Reserved interrupt) + * 22 " + * 24 " + * 26 " + * 28 " + * 30 (Reserved for standard reporting of bus or system errors) + */ + +static const int 
hviprio_index2irq[] = { + 0, 1, 4, 5, 8, 13, 14, 15, 16, 18, 20, 22, 24, 26, 28, 30 }; +static const int hviprio_index2rdzero[] = { + 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; + +int riscv_cpu_hviprio_index2irq(int index, int *out_irq, int *out_rdzero) +{ + if (index < 0 || ARRAY_SIZE(hviprio_index2irq) <= index) { + return -EINVAL; + } + + if (out_irq) { + *out_irq = hviprio_index2irq[index]; + } + + if (out_rdzero) { + *out_rdzero = hviprio_index2rdzero[index]; + } + + return 0; +} + +/* + * Default priorities of local interrupts are defined in the + * RISC-V Advanced Interrupt Architecture specification. + * + * ---------------------------------------------------------------- + * Default | + * Priority | Major Interrupt Numbers + * ---------------------------------------------------------------- + * Highest | 63 (3f), 62 (3e), 31 (1f), 30 (1e), 61 (3d), 60 (3c), + * | 59 (3b), 58 (3a), 29 (1d), 28 (1c), 57 (39), 56 (38), + * | 55 (37), 54 (36), 27 (1b), 26 (1a), 53 (35), 52 (34), + * | 51 (33), 50 (32), 25 (19), 24 (18), 49 (31), 48 (30) + * | + * | 11 (0b), 3 (03), 7 (07) + * | 9 (09), 1 (01), 5 (05) + * | 12 (0c) + * | 10 (0a), 2 (02), 6 (06) + * | + * | 47 (2f), 46 (2e), 23 (17), 22 (16), 45 (2d), 44 (2c), + * | 43 (2b), 42 (2a), 21 (15), 20 (14), 41 (29), 40 (28), + * | 39 (27), 38 (26), 19 (13), 18 (12), 37 (25), 36 (24), + * Lowest | 35 (23), 34 (22), 17 (11), 16 (10), 33 (21), 32 (20) + * ---------------------------------------------------------------- + */ +static const uint8_t default_iprio[64] = { + [63] = IPRIO_DEFAULT_UPPER, + [62] = IPRIO_DEFAULT_UPPER + 1, + [31] = IPRIO_DEFAULT_UPPER + 2, + [30] = IPRIO_DEFAULT_UPPER + 3, + [61] = IPRIO_DEFAULT_UPPER + 4, + [60] = IPRIO_DEFAULT_UPPER + 5, + + [59] = IPRIO_DEFAULT_UPPER + 6, + [58] = IPRIO_DEFAULT_UPPER + 7, + [29] = IPRIO_DEFAULT_UPPER + 8, + [28] = IPRIO_DEFAULT_UPPER + 9, + [57] = IPRIO_DEFAULT_UPPER + 10, + [56] = IPRIO_DEFAULT_UPPER + 11, + + [55] = IPRIO_DEFAULT_UPPER + 12, + [54] = IPRIO_DEFAULT_UPPER + 13, + [27] = IPRIO_DEFAULT_UPPER + 14, + [26] = IPRIO_DEFAULT_UPPER + 15, + [53] = IPRIO_DEFAULT_UPPER + 16, + [52] = IPRIO_DEFAULT_UPPER + 17, + + [51] = IPRIO_DEFAULT_UPPER + 18, + [50] = IPRIO_DEFAULT_UPPER + 19, + [25] = IPRIO_DEFAULT_UPPER + 20, + [24] = IPRIO_DEFAULT_UPPER + 21, + [49] = IPRIO_DEFAULT_UPPER + 22, + [48] = IPRIO_DEFAULT_UPPER + 23, + + [11] = IPRIO_DEFAULT_M, + [3] = IPRIO_DEFAULT_M + 1, + [7] = IPRIO_DEFAULT_M + 2, + + [9] = IPRIO_DEFAULT_S, + [1] = IPRIO_DEFAULT_S + 1, + [5] = IPRIO_DEFAULT_S + 2, + + [12] = IPRIO_DEFAULT_SGEXT, + + [10] = IPRIO_DEFAULT_VS, + [2] = IPRIO_DEFAULT_VS + 1, + [6] = IPRIO_DEFAULT_VS + 2, + + [47] = IPRIO_DEFAULT_LOWER, + [46] = IPRIO_DEFAULT_LOWER + 1, + [23] = IPRIO_DEFAULT_LOWER + 2, + [22] = IPRIO_DEFAULT_LOWER + 3, + [45] = IPRIO_DEFAULT_LOWER + 4, + [44] = IPRIO_DEFAULT_LOWER + 5, + + [43] = IPRIO_DEFAULT_LOWER + 6, + [42] = IPRIO_DEFAULT_LOWER + 7, + [21] = IPRIO_DEFAULT_LOWER + 8, + [20] = IPRIO_DEFAULT_LOWER + 9, + [41] = IPRIO_DEFAULT_LOWER + 10, + [40] = IPRIO_DEFAULT_LOWER + 11, + + [39] = IPRIO_DEFAULT_LOWER + 12, + [38] = IPRIO_DEFAULT_LOWER + 13, + [19] = IPRIO_DEFAULT_LOWER + 14, + [18] = IPRIO_DEFAULT_LOWER + 15, + [37] = IPRIO_DEFAULT_LOWER + 16, + [36] = IPRIO_DEFAULT_LOWER + 17, + + [35] = IPRIO_DEFAULT_LOWER + 18, + [34] = IPRIO_DEFAULT_LOWER + 19, + [17] = IPRIO_DEFAULT_LOWER + 20, + [16] = IPRIO_DEFAULT_LOWER + 21, + [33] = IPRIO_DEFAULT_LOWER + 22, + [32] = IPRIO_DEFAULT_LOWER + 23, +}; + +uint8_t riscv_cpu_default_priority(int irq) 
+{ + if (irq < 0 || irq > 63) { + return IPRIO_MMAXIPRIO; + } + + return default_iprio[irq] ? default_iprio[irq] : IPRIO_MMAXIPRIO; +}; + +static int riscv_cpu_pending_to_irq(CPURISCVState *env, + int extirq, unsigned int extirq_def_prio, + uint64_t pending, uint8_t *iprio) +{ + int irq, best_irq = RISCV_EXCP_NONE; + unsigned int prio, best_prio = UINT_MAX; + + if (!pending) { + return RISCV_EXCP_NONE; + } + + irq = ctz64(pending); + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return irq; + } + + pending = pending >> irq; + while (pending) { + prio = iprio[irq]; + if (!prio) { + if (irq == extirq) { + prio = extirq_def_prio; + } else { + prio = (riscv_cpu_default_priority(irq) < extirq_def_prio) ? + 1 : IPRIO_MMAXIPRIO; + } + } + if ((pending & 0x1) && (prio <= best_prio)) { + best_irq = irq; + best_prio = prio; + } + irq++; + pending = pending >> 1; + } + + return best_irq; +} + +static uint64_t riscv_cpu_all_pending(CPURISCVState *env) +{ + uint32_t gein = get_field(env->hstatus, HSTATUS_VGEIN); + uint64_t vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; + + return (env->mip | vsgein) & env->mie; +} + +int riscv_cpu_mirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & ~env->mideleg & + ~(MIP_SGEIP | MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, + irqs, env->miprio); +} + +int riscv_cpu_sirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & + ~(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs, env->siprio); +} + +int riscv_cpu_vsirq_pending(CPURISCVState *env) +{ + uint64_t irqs = riscv_cpu_all_pending(env) & env->mideleg & + (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP); + + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs >> 1, env->hviprio); +} + static int riscv_cpu_local_irq_pending(CPURISCVState *env) { - target_ulong virt_enabled = riscv_cpu_virt_enabled(env); + int virq; + uint64_t irqs, pending, mie, hsie, vsie; - target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE); - target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE); + /* Determine interrupt enable state of all privilege modes */ + if (riscv_cpu_virt_enabled(env)) { + mie = 1; + hsie = 1; + vsie = (env->priv < PRV_S) || + (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); + } else { + mie = (env->priv < PRV_M) || + (env->priv == PRV_M && get_field(env->mstatus, MSTATUS_MIE)); + hsie = (env->priv < PRV_S) || + (env->priv == PRV_S && get_field(env->mstatus, MSTATUS_SIE)); + vsie = 0; + } - target_ulong pending = env->mip & env->mie; + /* Determine all pending interrupts */ + pending = riscv_cpu_all_pending(env); - target_ulong mie = env->priv < PRV_M || - (env->priv == PRV_M && mstatus_mie); - target_ulong sie = env->priv < PRV_S || - (env->priv == PRV_S && mstatus_sie); - target_ulong hsie = virt_enabled || sie; - target_ulong vsie = virt_enabled && sie; + /* Check M-mode interrupts */ + irqs = pending & ~env->mideleg & -mie; + if (irqs) { + return riscv_cpu_pending_to_irq(env, IRQ_M_EXT, IPRIO_DEFAULT_M, + irqs, env->miprio); + } - target_ulong irqs = - (pending & ~env->mideleg & -mie) | - (pending & env->mideleg & ~env->hideleg & -hsie) | - (pending & env->mideleg & env->hideleg & -vsie); + /* Check HS-mode interrupts */ + irqs = pending & env->mideleg & ~env->hideleg & -hsie; + if (irqs) { + return riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs, 
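For illustration only, not part of the patch: when RISCV_FEATURE_AIA is set and no explicit priority has been programmed (miprio[] still zero), riscv_cpu_pending_to_irq() effectively falls back to the default ordering tabled above, where a smaller number means a higher priority. A minimal standalone sketch of that scan, with the two example interrupts and their default values (28 for IRQ 11, 29 for IRQ 3) filled in by hand:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Defaults from the table above: IPRIO_DEFAULT_M == 28 for MEIP (11),
     * IPRIO_DEFAULT_M + 1 == 29 for MSIP (3). */
    uint8_t prio[64] = { [3] = 29, [11] = 28 };
    uint64_t pending = (1ULL << 3) | (1ULL << 11);  /* MSIP and MEIP pending */
    int best_irq = -1;
    unsigned best_prio = ~0U;

    /* Same selection rule as riscv_cpu_pending_to_irq(): the pending
     * interrupt with the lowest priority value wins. */
    for (int irq = 0; irq < 64; irq++) {
        if (((pending >> irq) & 1) && prio[irq] <= best_prio) {
            best_irq = irq;
            best_prio = prio[irq];
        }
    }

    printf("best irq = %d\n", best_irq);   /* prints 11: MEIP beats MSIP */
    return 0;
}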
env->siprio); + } + /* Check VS-mode interrupts */ + irqs = pending & env->mideleg & env->hideleg & -vsie; if (irqs) { - return ctz64(irqs); /* since non-zero */ - } else { - return RISCV_EXCP_NONE; /* indicates no pending interrupt */ + virq = riscv_cpu_pending_to_irq(env, IRQ_S_EXT, IPRIO_DEFAULT_S, + irqs >> 1, env->hviprio); + return (virq <= 0) ? virq : virq + 1; } + + /* Indicate no pending interrupt */ + return RISCV_EXCP_NONE; } bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request) @@ -279,6 +522,28 @@ void riscv_cpu_swap_hypervisor_regs(CPURISCVState *env) } } +target_ulong riscv_cpu_get_geilen(CPURISCVState *env) +{ + if (!riscv_has_ext(env, RVH)) { + return 0; + } + + return env->geilen; +} + +void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) +{ + if (!riscv_has_ext(env, RVH)) { + return; + } + + if (geilen > (TARGET_LONG_BITS - 1)) { + return; + } + + env->geilen = geilen; +} + bool riscv_cpu_virt_enabled(CPURISCVState *env) { if (!riscv_has_ext(env, RVH)) { @@ -300,6 +565,19 @@ void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) } env->virt = set_field(env->virt, VIRT_ONOFF, enable); + + if (enable) { + /* + * The guest external interrupts from an interrupt controller are + * delivered only when the Guest/VM is running (i.e. V=1). This means + * any guest external interrupt which is triggered while the Guest/VM + * is not running (i.e. V=0) will be missed on QEMU resulting in guest + * with sluggish response to serial console input and other I/O events. + * + * To solve this, we check and inject interrupt after setting V=1. + */ + riscv_cpu_update_mip(env_archcpu(env), 0, 0); + } } bool riscv_cpu_two_stage_lookup(int mmu_idx) @@ -307,7 +585,7 @@ bool riscv_cpu_two_stage_lookup(int mmu_idx) return mmu_idx & TB_FLAGS_PRIV_HYP_ACCESS_MASK; } -int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) +int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) { CPURISCVState *env = &cpu->env; if (env->miclaim & interrupts) { @@ -318,13 +596,18 @@ int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint32_t interrupts) } } -uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) +uint64_t riscv_cpu_update_mip(RISCVCPU *cpu, uint64_t mask, uint64_t value) { CPURISCVState *env = &cpu->env; CPUState *cs = CPU(cpu); - uint32_t old = env->mip; + uint64_t gein, vsgein = 0, old = env->mip; bool locked = false; + if (riscv_cpu_virt_enabled(env)) { + gein = get_field(env->hstatus, HSTATUS_VGEIN); + vsgein = (env->hgeip & (1ULL << gein)) ? 
MIP_VSEIP : 0; + } + if (!qemu_mutex_iothread_locked()) { locked = true; qemu_mutex_lock_iothread(); @@ -332,7 +615,7 @@ uint32_t riscv_cpu_update_mip(RISCVCPU *cpu, uint32_t mask, uint32_t value) env->mip = (env->mip & ~mask) | (value & mask); - if (env->mip) { + if (env->mip | vsgein) { cpu_interrupt(cs, CPU_INTERRUPT_HARD); } else { cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); @@ -352,6 +635,20 @@ void riscv_cpu_set_rdtime_fn(CPURISCVState *env, uint64_t (*fn)(uint32_t), env->rdtime_fn_arg = arg; } +void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, + int (*rmw_fn)(void *arg, + target_ulong reg, + target_ulong *val, + target_ulong new_val, + target_ulong write_mask), + void *rmw_fn_arg) +{ + if (priv <= PRV_M) { + env->aia_ireg_rmw_fn[priv] = rmw_fn; + env->aia_ireg_rmw_fn_arg[priv] = rmw_fn_arg; + } +} + void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) { if (newpriv > PRV_M) { @@ -454,6 +751,10 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; int mode = mmu_idx & TB_FLAGS_PRIV_MMU_MASK; bool use_background = false; + hwaddr ppn; + RISCVCPU *cpu = env_archcpu(env); + int napot_bits = 0; + target_ulong napot_mask; /* * Check if we should use the background registers for the two @@ -622,13 +923,27 @@ restart: return TRANSLATE_FAIL; } - hwaddr ppn = pte >> PTE_PPN_SHIFT; + if (riscv_cpu_sxl(env) == MXL_RV32) { + ppn = pte >> PTE_PPN_SHIFT; + } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) { + ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; + } else { + ppn = pte >> PTE_PPN_SHIFT; + if ((pte & ~(target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT) { + return TRANSLATE_FAIL; + } + } if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; + } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) { + return TRANSLATE_FAIL; } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { /* Inner PTE, continue walking */ + if (pte & (PTE_D | PTE_A | PTE_U | PTE_ATTR)) { + return TRANSLATE_FAIL; + } base = ppn << PGSHIFT; } else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) { /* Reserved leaf PTE flags: PTE_W */ @@ -702,8 +1017,18 @@ restart: /* for superpage mappings, make a fake leaf PTE for the TLB's benefit. */ target_ulong vpn = addr >> PGSHIFT; - *physical = ((ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT) | - (addr & ~TARGET_PAGE_MASK); + + if (cpu->cfg.ext_svnapot && (pte & PTE_N)) { + napot_bits = ctzl(ppn) + 1; + if ((i != (levels - 1)) || (napot_bits != 4)) { + return TRANSLATE_FAIL; + } + } + + napot_mask = (1 << napot_bits) - 1; + *physical = (((ppn & ~napot_mask) | (vpn & napot_mask) | + (vpn & (((target_ulong)1 << ptshift) - 1)) + ) << PGSHIFT) | (addr & ~TARGET_PAGE_MASK); /* set permissions on the TLB entry */ if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) { @@ -1009,7 +1334,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) */ bool async = !!(cs->exception_index & RISCV_EXCP_INT_FLAG); target_ulong cause = cs->exception_index & RISCV_EXCP_INT_MASK; - target_ulong deleg = async ? env->mideleg : env->medeleg; + uint64_t deleg = async ? env->mideleg : env->medeleg; target_ulong tval = 0; target_ulong htval = 0; target_ulong mtval2 = 0; @@ -1076,7 +1401,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) cause < TARGET_LONG_BITS && ((deleg >> cause) & 1)) { /* handle the trap in S-mode */ if (riscv_has_ext(env, RVH)) { - target_ulong hdeleg = async ? env->hideleg : env->hedeleg; + uint64_t hdeleg = async ? 
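Purely as an illustration of the Svnapot path added above, not part of the patch: the walker only accepts a NAPOT leaf at the last level with ctzl(ppn) + 1 == 4, i.e. a 64 KiB naturally-aligned range, and then takes the low four PPN bits from the virtual address rather than the PTE. A self-contained sketch of that address formation (the helper name and inputs are invented for the example, and a well-formed non-zero PPN is assumed):

#include <stdint.h>

#define PGSHIFT 12

/* 64 KiB NAPOT leaf: the PTE encodes ppn[3:0] == 0b1000. */
static uint64_t napot64k_translate(uint64_t pte_ppn, uint64_t vaddr)
{
    int napot_bits = __builtin_ctzll(pte_ppn) + 1;   /* must be 4 here */
    uint64_t napot_mask = (1ULL << napot_bits) - 1;
    uint64_t vpn = vaddr >> PGSHIFT;

    /* Upper PPN bits come from the PTE, the masked low bits from the VPN,
     * and the in-page offset is carried over unchanged. */
    return (((pte_ppn & ~napot_mask) | (vpn & napot_mask)) << PGSHIFT) |
           (vaddr & ((1ULL << PGSHIFT) - 1));
}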
env->hideleg : env->hedeleg; if (riscv_cpu_virt_enabled(env) && ((hdeleg >> cause) & 1)) { /* Trap to VS mode */ diff --git a/target/riscv/csr.c b/target/riscv/csr.c index e5f9d4ef93..fe2c8dd40e 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -158,6 +158,24 @@ static RISCVException any32(CPURISCVState *env, int csrno) } +static int aia_any(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any(env, csrno); +} + +static int aia_any32(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return any32(env, csrno); +} + static RISCVException smode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS)) { @@ -167,6 +185,33 @@ static RISCVException smode(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } +static int smode32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + +static int aia_smode(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode(env, csrno); +} + +static int aia_smode32(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smode32(env, csrno); +} + static RISCVException hmode(CPURISCVState *env, int csrno) { if (riscv_has_ext(env, RVS) && @@ -186,7 +231,7 @@ static RISCVException hmode(CPURISCVState *env, int csrno) static RISCVException hmode32(CPURISCVState *env, int csrno) { if (riscv_cpu_mxl(env) != MXL_RV32) { - if (riscv_cpu_virt_enabled(env)) { + if (!riscv_cpu_virt_enabled(env)) { return RISCV_EXCP_ILLEGAL_INST; } else { return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; @@ -207,6 +252,24 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) return RISCV_EXCP_ILLEGAL_INST; } +static int aia_hmode(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode(env, csrno); +} + +static int aia_hmode32(CPURISCVState *env, int csrno) +{ + if (!riscv_feature(env, RISCV_FEATURE_AIA)) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return hmode32(env, csrno); +} + static RISCVException pmp(CPURISCVState *env, int csrno) { if (riscv_feature(env, RISCV_FEATURE_PMP)) { @@ -458,15 +521,18 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, /* Machine constants */ -#define M_MODE_INTERRUPTS (MIP_MSIP | MIP_MTIP | MIP_MEIP) -#define S_MODE_INTERRUPTS (MIP_SSIP | MIP_STIP | MIP_SEIP) -#define VS_MODE_INTERRUPTS (MIP_VSSIP | MIP_VSTIP | MIP_VSEIP) +#define M_MODE_INTERRUPTS ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP)) +#define S_MODE_INTERRUPTS ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP)) +#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP)) +#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS)) -static const target_ulong delegable_ints = S_MODE_INTERRUPTS | +#define VSTOPI_NUM_SRCS 5 + +static const uint64_t delegable_ints = S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS; -static const target_ulong vs_delegable_ints = VS_MODE_INTERRUPTS; -static const target_ulong all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | - VS_MODE_INTERRUPTS; +static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS; +static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS | + HS_MODE_INTERRUPTS; #define DELEGABLE_EXCPS ((1ULL << 
(RISCV_EXCP_INST_ADDR_MIS)) | \ (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \ (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \ @@ -522,6 +588,12 @@ static RISCVException read_zero(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException write_ignore(CPURISCVState *env, int csrno, + target_ulong val) +{ + return RISCV_EXCP_NONE; +} + static RISCVException read_mhartid(CPURISCVState *env, int csrno, target_ulong *val) { @@ -736,34 +808,471 @@ static RISCVException write_medeleg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException read_mideleg(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_mideleg64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - *val = env->mideleg; + uint64_t mask = wr_mask & delegable_ints; + + if (ret_val) { + *ret_val = env->mideleg; + } + + env->mideleg = (env->mideleg & ~mask) | (new_val & mask); + + if (riscv_has_ext(env, RVH)) { + env->mideleg |= HS_MODE_INTERRUPTS; + } + return RISCV_EXCP_NONE; } -static RISCVException write_mideleg(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException rmw_mideleg(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - env->mideleg = (env->mideleg & ~delegable_ints) | (val & delegable_ints); - if (riscv_has_ext(env, RVH)) { - env->mideleg |= VS_MODE_INTERRUPTS; + uint64_t rval; + RISCVException ret; + + ret = rmw_mideleg64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; } + + return ret; +} + +static RISCVException rmw_midelegh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, + target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mideleg64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_mie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + uint64_t mask = wr_mask & all_ints; + + if (ret_val) { + *ret_val = env->mie; + } + + env->mie = (env->mie & ~mask) | (new_val & mask); + + if (!riscv_has_ext(env, RVH)) { + env->mie &= ~((uint64_t)MIP_SGEIP); + } + return RISCV_EXCP_NONE; } -static RISCVException read_mie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_mie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - *val = env->mie; + uint64_t rval; + RISCVException ret; + + ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_mieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq; + uint8_t iprio; + + irq = riscv_cpu_mirq_pending(env); + if (irq <= 0 || irq > 63) { + *val = 0; + } else { + iprio = env->miprio[irq]; + if (!iprio) { + if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_M) { + iprio = IPRIO_MMAXIPRIO; + } + } + *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + } + return RISCV_EXCP_NONE; } -static RISCVException write_mie(CPURISCVState *env, int csrno, - target_ulong 
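A side note on the pattern introduced here, shown as a toy sketch rather than patch code: each interrupt CSR is now backed by a single 64-bit value with a read-modify-write helper, and the RV32 high-half CSRs (midelegh, mieh, miph, ...) reuse it by shifting their value and write mask into bits 63:32. The struct and names below are stand-ins, not QEMU types:

#include <stdint.h>

typedef struct { uint64_t mie; } toy_state;   /* stand-in for CPURISCVState */

/* Full-width read-modify-write over the 64-bit backing state. */
static uint64_t toy_rmw_mie64(toy_state *s, uint64_t new_val, uint64_t wr_mask)
{
    uint64_t old = s->mie;
    s->mie = (s->mie & ~wr_mask) | (new_val & wr_mask);
    return old;
}

/* The low-half CSR passes its 32-bit arguments straight through... */
static uint32_t toy_rmw_mie(toy_state *s, uint32_t new_val, uint32_t wr_mask)
{
    return (uint32_t)toy_rmw_mie64(s, new_val, wr_mask);
}

/* ...while the high-half CSR shifts everything up and returns bits 63:32. */
static uint32_t toy_rmw_mieh(toy_state *s, uint32_t new_val, uint32_t wr_mask)
{
    return (uint32_t)(toy_rmw_mie64(s, (uint64_t)new_val << 32,
                                    (uint64_t)wr_mask << 32) >> 32);
}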
val) +static int aia_xlate_vs_csrno(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_virt_enabled(env)) { + return csrno; + } + + switch (csrno) { + case CSR_SISELECT: + return CSR_VSISELECT; + case CSR_SIREG: + return CSR_VSIREG; + case CSR_SSETEIPNUM: + return CSR_VSSETEIPNUM; + case CSR_SCLREIPNUM: + return CSR_VSCLREIPNUM; + case CSR_SSETEIENUM: + return CSR_VSSETEIENUM; + case CSR_SCLREIENUM: + return CSR_VSCLREIENUM; + case CSR_STOPEI: + return CSR_VSTOPEI; + default: + return csrno; + }; +} + +static int rmw_xiselect(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + target_ulong *iselect; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Find the iselect CSR based on CSR number */ + switch (csrno) { + case CSR_MISELECT: + iselect = &env->miselect; + break; + case CSR_SISELECT: + iselect = &env->siselect; + break; + case CSR_VSISELECT: + iselect = &env->vsiselect; + break; + default: + return RISCV_EXCP_ILLEGAL_INST; + }; + + if (val) { + *val = *iselect; + } + + wr_mask &= ISELECT_MASK; + if (wr_mask) { + *iselect = (*iselect & ~wr_mask) | (new_val & wr_mask); + } + + return RISCV_EXCP_NONE; +} + +static int rmw_iprio(target_ulong xlen, + target_ulong iselect, uint8_t *iprio, + target_ulong *val, target_ulong new_val, + target_ulong wr_mask, int ext_irq_no) +{ + int i, firq, nirqs; + target_ulong old_val; + + if (iselect < ISELECT_IPRIO0 || ISELECT_IPRIO15 < iselect) { + return -EINVAL; + } + if (xlen != 32 && iselect & 0x1) { + return -EINVAL; + } + + nirqs = 4 * (xlen / 32); + firq = ((iselect - ISELECT_IPRIO0) / (xlen / 32)) * (nirqs); + + old_val = 0; + for (i = 0; i < nirqs; i++) { + old_val |= ((target_ulong)iprio[firq + i]) << (IPRIO_IRQ_BITS * i); + } + + if (val) { + *val = old_val; + } + + if (wr_mask) { + new_val = (old_val & ~wr_mask) | (new_val & wr_mask); + for (i = 0; i < nirqs; i++) { + /* + * M-level and S-level external IRQ priority always read-only + * zero. This means default priority order is always preferred + * for M-level and S-level external IRQs. + */ + if ((firq + i) == ext_irq_no) { + continue; + } + iprio[firq + i] = (new_val >> (IPRIO_IRQ_BITS * i)) & 0xff; + } + } + + return 0; +} + +static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + bool virt; + uint8_t *iprio; + int ret = -EINVAL; + target_ulong priv, isel, vgein; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + virt = false; + switch (csrno) { + case CSR_MIREG: + iprio = env->miprio; + isel = env->miselect; + priv = PRV_M; + break; + case CSR_SIREG: + iprio = env->siprio; + isel = env->siselect; + priv = PRV_S; + break; + case CSR_VSIREG: + iprio = env->hviprio; + isel = env->vsiselect; + priv = PRV_S; + virt = true; + break; + default: + goto done; + }; + + /* Find the selected guest interrupt file */ + vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0; + + if (ISELECT_IPRIO0 <= isel && isel <= ISELECT_IPRIO15) { + /* Local interrupt priority registers not available for VS-mode */ + if (!virt) { + ret = rmw_iprio(riscv_cpu_mxl_bits(env), + isel, iprio, val, new_val, wr_mask, + (priv == PRV_M) ? IRQ_M_EXT : IRQ_S_EXT); + } + } else if (ISELECT_IMSIC_FIRST <= isel && isel <= ISELECT_IMSIC_LAST) { + /* IMSIC registers only available when machine implements it. 
*/ + if (env->aia_ireg_rmw_fn[priv]) { + /* Selected guest interrupt file should not be zero */ + if (virt && (!vgein || env->geilen < vgein)) { + goto done; + } + /* Call machine specific IMSIC register emulation */ + ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], + AIA_MAKE_IREG(isel, priv, virt, vgein, + riscv_cpu_mxl_bits(env)), + val, new_val, wr_mask); + } + } + +done: + if (ret) { + return (riscv_cpu_virt_enabled(env) && virt) ? + RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + return RISCV_EXCP_NONE; +} + +static int rmw_xsetclreinum(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) +{ + int ret = -EINVAL; + bool set, pend, virt; + target_ulong priv, isel, vgein, xlen, nval, wmask; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + virt = set = pend = false; + switch (csrno) { + case CSR_MSETEIPNUM: + priv = PRV_M; + set = true; + pend = true; + break; + case CSR_MCLREIPNUM: + priv = PRV_M; + pend = true; + break; + case CSR_MSETEIENUM: + priv = PRV_M; + set = true; + break; + case CSR_MCLREIENUM: + priv = PRV_M; + break; + case CSR_SSETEIPNUM: + priv = PRV_S; + set = true; + pend = true; + break; + case CSR_SCLREIPNUM: + priv = PRV_S; + pend = true; + break; + case CSR_SSETEIENUM: + priv = PRV_S; + set = true; + break; + case CSR_SCLREIENUM: + priv = PRV_S; + break; + case CSR_VSSETEIPNUM: + priv = PRV_S; + virt = true; + set = true; + pend = true; + break; + case CSR_VSCLREIPNUM: + priv = PRV_S; + virt = true; + pend = true; + break; + case CSR_VSSETEIENUM: + priv = PRV_S; + virt = true; + set = true; + break; + case CSR_VSCLREIENUM: + priv = PRV_S; + virt = true; + break; + default: + goto done; + }; + + /* IMSIC CSRs only available when machine implements IMSIC. */ + if (!env->aia_ireg_rmw_fn[priv]) { + goto done; + } + + /* Find the selected guest interrupt file */ + vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0; + + /* Selected guest interrupt file should be valid */ + if (virt && (!vgein || env->geilen < vgein)) { + goto done; + } + + /* Set/Clear CSRs always read zero */ + if (val) { + *val = 0; + } + + if (wr_mask) { + /* Get interrupt number */ + new_val &= wr_mask; + + /* Find target interrupt pending/enable register */ + xlen = riscv_cpu_mxl_bits(env); + isel = (new_val / xlen); + isel *= (xlen / IMSIC_EIPx_BITS); + isel += (pend) ? ISELECT_IMSIC_EIP0 : ISELECT_IMSIC_EIE0; + + /* Find the interrupt bit to be set/clear */ + wmask = ((target_ulong)1) << (new_val % xlen); + nval = (set) ? wmask : 0; + + /* Call machine specific IMSIC register emulation */ + ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], + AIA_MAKE_IREG(isel, priv, virt, + vgein, xlen), + NULL, nval, wmask); + } else { + ret = 0; + } + +done: + if (ret) { + return (riscv_cpu_virt_enabled(env) && virt) ? 
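As an aside, not part of the patch: the arithmetic in rmw_xsetclreinum() maps an external interrupt identity onto an IMSIC eip/eie register select plus bit position. A short worked example using the ISELECT_IMSIC_EIP0 and IMSIC_EIPx_BITS values defined earlier (interrupt number 70 on RV64 is made up for the demonstration):

#include <stdint.h>
#include <stdio.h>

#define ISELECT_IMSIC_EIP0 0x80
#define IMSIC_EIPx_BITS    32

int main(void)
{
    unsigned xlen = 64;   /* riscv_cpu_mxl_bits() on an RV64 hart */
    unsigned num  = 70;   /* identity written to mseteipnum */

    unsigned isel  = (num / xlen) * (xlen / IMSIC_EIPx_BITS) + ISELECT_IMSIC_EIP0;
    uint64_t wmask = 1ULL << (num % xlen);

    /* Prints "iselect=0x82 bit=6 wmask=0x40": bit 6 of the second eip pair. */
    printf("iselect=0x%x bit=%u wmask=0x%llx\n",
           isel, num % xlen, (unsigned long long)wmask);
    return 0;
}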
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + return RISCV_EXCP_NONE; +} + +static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val, + target_ulong new_val, target_ulong wr_mask) { - env->mie = (env->mie & ~all_ints) | (val & all_ints); + bool virt; + int ret = -EINVAL; + target_ulong priv, vgein; + + /* Translate CSR number for VS-mode */ + csrno = aia_xlate_vs_csrno(env, csrno); + + /* Decode register details from CSR number */ + virt = false; + switch (csrno) { + case CSR_MTOPEI: + priv = PRV_M; + break; + case CSR_STOPEI: + priv = PRV_S; + break; + case CSR_VSTOPEI: + priv = PRV_S; + virt = true; + break; + default: + goto done; + }; + + /* IMSIC CSRs only available when machine implements IMSIC. */ + if (!env->aia_ireg_rmw_fn[priv]) { + goto done; + } + + /* Find the selected guest interrupt file */ + vgein = (virt) ? get_field(env->hstatus, HSTATUS_VGEIN) : 0; + + /* Selected guest interrupt file should be valid */ + if (virt && (!vgein || env->geilen < vgein)) { + goto done; + } + + /* Call machine specific IMSIC register emulation for TOPEI */ + ret = env->aia_ireg_rmw_fn[priv](env->aia_ireg_rmw_fn_arg[priv], + AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, priv, virt, vgein, + riscv_cpu_mxl_bits(env)), + val, new_val, wr_mask); + +done: + if (ret) { + return (riscv_cpu_virt_enabled(env) && virt) ? + RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } return RISCV_EXCP_NONE; } @@ -872,28 +1381,64 @@ static RISCVException write_mtval(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException rmw_mip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_mip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { RISCVCPU *cpu = env_archcpu(env); /* Allow software control of delegable interrupts not claimed by hardware */ - target_ulong mask = write_mask & delegable_ints & ~env->miclaim; - uint32_t old_mip; + uint64_t old_mip, mask = wr_mask & delegable_ints & ~env->miclaim; + uint32_t gin; if (mask) { - old_mip = riscv_cpu_update_mip(cpu, mask, (new_value & mask)); + old_mip = riscv_cpu_update_mip(cpu, mask, (new_val & mask)); } else { old_mip = env->mip; } - if (ret_value) { - *ret_value = old_mip; + if (csrno != CSR_HVIP) { + gin = get_field(env->hstatus, HSTATUS_VGEIN); + old_mip |= (env->hgeip & ((target_ulong)1 << gin)) ? 
MIP_VSEIP : 0; + } + + if (ret_val) { + *ret_val = old_mip; } return RISCV_EXCP_NONE; } +static RISCVException rmw_mip(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_miph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_mip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + /* Supervisor Trap Setup */ static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno, Int128 *val) @@ -934,45 +1479,115 @@ static RISCVException write_sstatus(CPURISCVState *env, int csrno, return write_mstatus(env, CSR_MSTATUS, newval); } -static RISCVException read_vsie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_vsie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - /* Shift the VS bits to their S bit location in vsie */ - *val = (env->mie & env->hideleg & VS_MODE_INTERRUPTS) >> 1; - return RISCV_EXCP_NONE; + RISCVException ret; + uint64_t rval, vsbits, mask = env->hideleg & VS_MODE_INTERRUPTS; + + /* Bring VS-level bits to correct position */ + vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); + new_val &= ~(VS_MODE_INTERRUPTS >> 1); + new_val |= vsbits << 1; + vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); + wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); + wr_mask |= vsbits << 1; + + ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask); + if (ret_val) { + rval &= mask; + vsbits = rval & VS_MODE_INTERRUPTS; + rval &= ~VS_MODE_INTERRUPTS; + *ret_val = rval | (vsbits >> 1); + } + + return ret; } -static RISCVException read_sie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_vsie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - if (riscv_cpu_virt_enabled(env)) { - read_vsie(env, CSR_VSIE, val); - } else { - *val = env->mie & env->mideleg; + uint64_t rval; + RISCVException ret; + + ret = rmw_vsie64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; } - return RISCV_EXCP_NONE; + + return ret; } -static RISCVException write_vsie(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException rmw_vsieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - /* Shift the S bits to their VS bit location in mie */ - target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | - ((val << 1) & env->hideleg & VS_MODE_INTERRUPTS); - return write_mie(env, CSR_MIE, newval); + uint64_t rval; + RISCVException ret; + + ret = rmw_vsie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; } -static int write_sie(CPURISCVState *env, int csrno, target_ulong val) +static RISCVException rmw_sie64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { + RISCVException ret; + uint64_t mask = env->mideleg & S_MODE_INTERRUPTS; + if (riscv_cpu_virt_enabled(env)) { - write_vsie(env, CSR_VSIE, val); + if (env->hvictl & HVICTL_VTI) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, 
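For clarity, and only as an illustration: VS-level interrupts live at bits 2, 6 and 10 of mie/mip (VSSIP/VSTIP/VSEIP), but the guest reads and writes them through vsie/vsip at the usual S-mode positions 1, 5 and 9. That is all the vsbits shuffling in rmw_vsie64() above and rmw_vsip64() below does; reduced to two toy helpers:

#include <stdint.h>

#define VS_MODE_INTERRUPTS ((1ULL << 2) | (1ULL << 6) | (1ULL << 10))

/* Guest view (bits 1/5/9) -> host mie/mip layout (bits 2/6/10). */
static uint64_t vs_to_host(uint64_t guest_val)
{
    uint64_t vsbits = guest_val & (VS_MODE_INTERRUPTS >> 1);
    return (guest_val & ~(VS_MODE_INTERRUPTS >> 1)) | (vsbits << 1);
}

/* Host mie/mip layout -> guest view. */
static uint64_t host_to_vs(uint64_t host_val)
{
    uint64_t vsbits = host_val & VS_MODE_INTERRUPTS;
    return (host_val & ~VS_MODE_INTERRUPTS) | (vsbits >> 1);
}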
wr_mask); } else { - target_ulong newval = (env->mie & ~S_MODE_INTERRUPTS) | - (val & S_MODE_INTERRUPTS); - write_mie(env, CSR_MIE, newval); + ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask); } - return RISCV_EXCP_NONE; + if (ret_val) { + *ret_val &= mask; + } + + return ret; +} + +static RISCVException rmw_sie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sie64(env, csrno, &rval, new_val, wr_mask); + if (ret == RISCV_EXCP_NONE && ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_sieh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sie64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; } static RISCVException read_stvec(CPURISCVState *env, int csrno, @@ -1080,38 +1695,114 @@ static RISCVException write_stval(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static RISCVException rmw_vsip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + uint64_t rval, vsbits, mask = env->hideleg & vsip_writable_mask; + + /* Bring VS-level bits to correct position */ + vsbits = new_val & (VS_MODE_INTERRUPTS >> 1); + new_val &= ~(VS_MODE_INTERRUPTS >> 1); + new_val |= vsbits << 1; + vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1); + wr_mask &= ~(VS_MODE_INTERRUPTS >> 1); + wr_mask |= vsbits << 1; + + ret = rmw_mip64(env, csrno, &rval, new_val, wr_mask & mask); + if (ret_val) { + rval &= mask; + vsbits = rval & VS_MODE_INTERRUPTS; + rval &= ~VS_MODE_INTERRUPTS; + *ret_val = rval | (vsbits >> 1); + } + + return ret; +} + static RISCVException rmw_vsip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - /* Shift the S bits to their VS bit location in mip */ - int ret = rmw_mip(env, 0, ret_value, new_value << 1, - (write_mask << 1) & vsip_writable_mask & env->hideleg); + uint64_t rval; + RISCVException ret; - if (ret_value) { - *ret_value &= VS_MODE_INTERRUPTS; - /* Shift the VS bits to their S bit location in vsip */ - *ret_value >>= 1; + ret = rmw_vsip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; } + return ret; } -static RISCVException rmw_sip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) +static RISCVException rmw_vsiph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - int ret; + uint64_t rval; + RISCVException ret; + + ret = rmw_vsip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_sip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + uint64_t mask = env->mideleg & sip_writable_mask; if (riscv_cpu_virt_enabled(env)) { - ret = rmw_vsip(env, CSR_VSIP, ret_value, new_value, write_mask); + if (env->hvictl & HVICTL_VTI) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask); } else { - ret = rmw_mip(env, CSR_MSTATUS, ret_value, new_value, - write_mask & env->mideleg & 
sip_writable_mask); + ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask); } - if (ret_value) { - *ret_value &= env->mideleg; + if (ret_val) { + *ret_val &= env->mideleg & S_MODE_INTERRUPTS; + } + + return ret; +} + +static RISCVException rmw_sip(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; } + + return ret; +} + +static RISCVException rmw_siph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_sip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + return ret; } @@ -1165,6 +1856,120 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq, ret; + target_ulong topei; + uint64_t vseip, vsgein; + uint32_t iid, iprio, hviid, hviprio, gein; + uint32_t s, scount = 0, siid[VSTOPI_NUM_SRCS], siprio[VSTOPI_NUM_SRCS]; + + gein = get_field(env->hstatus, HSTATUS_VGEIN); + hviid = get_field(env->hvictl, HVICTL_IID); + hviprio = get_field(env->hvictl, HVICTL_IPRIO); + + if (gein) { + vsgein = (env->hgeip & (1ULL << gein)) ? MIP_VSEIP : 0; + vseip = env->mie & (env->mip | vsgein) & MIP_VSEIP; + if (gein <= env->geilen && vseip) { + siid[scount] = IRQ_S_EXT; + siprio[scount] = IPRIO_MMAXIPRIO + 1; + if (env->aia_ireg_rmw_fn[PRV_S]) { + /* + * Call machine specific IMSIC register emulation for + * reading TOPEI. + */ + ret = env->aia_ireg_rmw_fn[PRV_S]( + env->aia_ireg_rmw_fn_arg[PRV_S], + AIA_MAKE_IREG(ISELECT_IMSIC_TOPEI, PRV_S, true, gein, + riscv_cpu_mxl_bits(env)), + &topei, 0, 0); + if (!ret && topei) { + siprio[scount] = topei & IMSIC_TOPEI_IPRIO_MASK; + } + } + scount++; + } + } else { + if (hviid == IRQ_S_EXT && hviprio) { + siid[scount] = IRQ_S_EXT; + siprio[scount] = hviprio; + scount++; + } + } + + if (env->hvictl & HVICTL_VTI) { + if (hviid != IRQ_S_EXT) { + siid[scount] = hviid; + siprio[scount] = hviprio; + scount++; + } + } else { + irq = riscv_cpu_vsirq_pending(env); + if (irq != IRQ_S_EXT && 0 < irq && irq <= 63) { + siid[scount] = irq; + siprio[scount] = env->hviprio[irq]; + scount++; + } + } + + iid = 0; + iprio = UINT_MAX; + for (s = 0; s < scount; s++) { + if (siprio[s] < iprio) { + iid = siid[s]; + iprio = siprio[s]; + } + } + + if (iid) { + if (env->hvictl & HVICTL_IPRIOM) { + if (iprio > IPRIO_MMAXIPRIO) { + iprio = IPRIO_MMAXIPRIO; + } + if (!iprio) { + if (riscv_cpu_default_priority(iid) > IPRIO_DEFAULT_S) { + iprio = IPRIO_MMAXIPRIO; + } + } + } else { + iprio = 1; + } + } else { + iprio = 0; + } + + *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + return RISCV_EXCP_NONE; +} + +static int read_stopi(CPURISCVState *env, int csrno, target_ulong *val) +{ + int irq; + uint8_t iprio; + + if (riscv_cpu_virt_enabled(env)) { + return read_vstopi(env, CSR_VSTOPI, val); + } + + irq = riscv_cpu_sirq_pending(env); + if (irq <= 0 || irq > 63) { + *val = 0; + } else { + iprio = env->siprio[irq]; + if (!iprio) { + if (riscv_cpu_default_priority(irq) > IPRIO_DEFAULT_S) { + iprio = IPRIO_MMAXIPRIO; + } + } + *val = (irq & TOPI_IID_MASK) << TOPI_IID_SHIFT; + *val |= iprio; + } + + return RISCV_EXCP_NONE; +} + /* Hypervisor Extensions */ static RISCVException 
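A small illustrative decode, not part of the patch, of the value the read_*topi() handlers assemble: the interrupt identity sits in bits 27:16 and the priority in the low byte, per the TOPI_* constants defined earlier, with an all-zero value meaning nothing is pending and enabled:

#include <stdint.h>

#define TOPI_IID_SHIFT  16
#define TOPI_IID_MASK   0xfff
#define TOPI_IPRIO_MASK 0xff

static void decode_topi(uint64_t val, unsigned *iid, unsigned *iprio)
{
    *iid   = (val >> TOPI_IID_SHIFT) & TOPI_IID_MASK;   /* e.g. 9 for SEIP */
    *iprio = val & TOPI_IPRIO_MASK;
}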
read_hstatus(CPURISCVState *env, int csrno, target_ulong *val) @@ -1206,30 +2011,94 @@ static RISCVException write_hedeleg(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException read_hideleg(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) { - *val = env->hideleg; + uint64_t mask = wr_mask & vs_delegable_ints; + + if (ret_val) { + *ret_val = env->hideleg & vs_delegable_ints; + } + + env->hideleg = (env->hideleg & ~mask) | (new_val & mask); return RISCV_EXCP_NONE; } -static RISCVException write_hideleg(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException rmw_hideleg(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - env->hideleg = val & vs_delegable_ints; - return RISCV_EXCP_NONE; + uint64_t rval; + RISCVException ret; + + ret = rmw_hideleg64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; + } + + return ret; +} + +static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_hideleg64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + + return ret; +} + +static RISCVException rmw_hvip64(CPURISCVState *env, int csrno, + uint64_t *ret_val, + uint64_t new_val, uint64_t wr_mask) +{ + RISCVException ret; + + ret = rmw_mip64(env, csrno, ret_val, new_val, + wr_mask & hvip_writable_mask); + if (ret_val) { + *ret_val &= VS_MODE_INTERRUPTS; + } + + return ret; } static RISCVException rmw_hvip(CPURISCVState *env, int csrno, - target_ulong *ret_value, - target_ulong new_value, target_ulong write_mask) + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - int ret = rmw_mip(env, 0, ret_value, new_value, - write_mask & hvip_writable_mask); + uint64_t rval; + RISCVException ret; - if (ret_value) { - *ret_value &= hvip_writable_mask; + ret = rmw_hvip64(env, csrno, &rval, new_val, wr_mask); + if (ret_val) { + *ret_val = rval; } + + return ret; +} + +static RISCVException rmw_hviph(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) +{ + uint64_t rval; + RISCVException ret; + + ret = rmw_hvip64(env, csrno, &rval, + ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32); + if (ret_val) { + *ret_val = rval >> 32; + } + return ret; } @@ -1237,27 +2106,28 @@ static RISCVException rmw_hip(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { - int ret = rmw_mip(env, 0, ret_value, new_value, + int ret = rmw_mip(env, csrno, ret_value, new_value, write_mask & hip_writable_mask); if (ret_value) { - *ret_value &= hip_writable_mask; + *ret_value &= HS_MODE_INTERRUPTS; } return ret; } -static RISCVException read_hie(CPURISCVState *env, int csrno, - target_ulong *val) +static RISCVException rmw_hie(CPURISCVState *env, int csrno, + target_ulong *ret_val, + target_ulong new_val, target_ulong wr_mask) { - *val = env->mie & VS_MODE_INTERRUPTS; - return RISCV_EXCP_NONE; -} + uint64_t rval; + RISCVException ret; -static RISCVException write_hie(CPURISCVState *env, int csrno, - target_ulong val) -{ - target_ulong newval = (env->mie & ~VS_MODE_INTERRUPTS) | (val & VS_MODE_INTERRUPTS); - return write_mie(env, CSR_MIE, newval); + ret = rmw_mie64(env, csrno, 
&rval, new_val, wr_mask & HS_MODE_INTERRUPTS); + if (ret_val) { + *ret_val = rval & HS_MODE_INTERRUPTS; + } + + return ret; } static RISCVException read_hcounteren(CPURISCVState *env, int csrno, @@ -1274,15 +2144,27 @@ static RISCVException write_hcounteren(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException write_hgeie(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException read_hgeie(CPURISCVState *env, int csrno, + target_ulong *val) { if (val) { - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); + *val = env->hgeie; } return RISCV_EXCP_NONE; } +static RISCVException write_hgeie(CPURISCVState *env, int csrno, + target_ulong val) +{ + /* Only GEILEN:1 bits implemented and BIT0 is never implemented */ + val &= ((((target_ulong)1) << env->geilen) - 1) << 1; + env->hgeie = val; + /* Update mip.SGEIP bit */ + riscv_cpu_update_mip(env_archcpu(env), MIP_SGEIP, + BOOL_TO_MASK(!!(env->hgeie & env->hgeip))); + return RISCV_EXCP_NONE; +} + static RISCVException read_htval(CPURISCVState *env, int csrno, target_ulong *val) { @@ -1310,11 +2192,11 @@ static RISCVException write_htinst(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException write_hgeip(CPURISCVState *env, int csrno, - target_ulong val) +static RISCVException read_hgeip(CPURISCVState *env, int csrno, + target_ulong *val) { if (val) { - qemu_log_mask(LOG_UNIMP, "No support for a non-zero GEILEN."); + *val = env->hgeip; } return RISCV_EXCP_NONE; } @@ -1381,6 +2263,110 @@ static RISCVException write_htimedeltah(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static int read_hvictl(CPURISCVState *env, int csrno, target_ulong *val) +{ + *val = env->hvictl; + return RISCV_EXCP_NONE; +} + +static int write_hvictl(CPURISCVState *env, int csrno, target_ulong val) +{ + env->hvictl = val & HVICTL_VALID_MASK; + return RISCV_EXCP_NONE; +} + +static int read_hvipriox(CPURISCVState *env, int first_index, + uint8_t *iprio, target_ulong *val) +{ + int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); + + /* First index has to be a multiple of number of irqs per register */ + if (first_index % num_irqs) { + return (riscv_cpu_virt_enabled(env)) ? + RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + + /* Fill-up return value */ + *val = 0; + for (i = 0; i < num_irqs; i++) { + if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { + continue; + } + if (rdzero) { + continue; + } + *val |= ((target_ulong)iprio[irq]) << (i * 8); + } + + return RISCV_EXCP_NONE; +} + +static int write_hvipriox(CPURISCVState *env, int first_index, + uint8_t *iprio, target_ulong val) +{ + int i, irq, rdzero, num_irqs = 4 * (riscv_cpu_mxl_bits(env) / 32); + + /* First index has to be a multiple of number of irqs per register */ + if (first_index % num_irqs) { + return (riscv_cpu_virt_enabled(env)) ? 
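One more illustration, not taken from the patch: hviprio1/hviprio2 (and their *h halves) pack 4 * (XLEN / 32) one-byte priorities per register, with the byte-to-IRQ mapping going through hviprio_index2irq[] and the "rdzero" slots always reading as zero. A toy packing routine mirroring read_hvipriox(), with the two lookup tables copied from above (first_index is assumed to be one of 0, 4, 8 or 12, as the real code enforces):

#include <stdint.h>

static const int index2irq[16]    = { 0, 1, 4, 5, 8, 13, 14, 15,
                                      16, 18, 20, 22, 24, 26, 28, 30 };
static const int index2rdzero[16] = { 1, 0, 1, 0, 1, 0, 0, 0,
                                      0, 0, 0, 0, 0, 0, 0, 0 };

static uint64_t pack_hviprio(const uint8_t *iprio, int first_index, int xlen)
{
    int num_irqs = 4 * (xlen / 32);   /* 4 bytes on RV32, 8 on RV64 */
    uint64_t val = 0;

    for (int i = 0; i < num_irqs; i++) {
        int idx = first_index + i;
        if (idx > 15 || index2rdzero[idx]) {
            continue;                  /* reserved slot, reads as zero */
        }
        val |= (uint64_t)iprio[index2irq[idx]] << (i * 8);
    }
    return val;
}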
+ RISCV_EXCP_VIRT_INSTRUCTION_FAULT : RISCV_EXCP_ILLEGAL_INST; + } + + /* Fill-up priority arrary */ + for (i = 0; i < num_irqs; i++) { + if (riscv_cpu_hviprio_index2irq(first_index + i, &irq, &rdzero)) { + continue; + } + if (rdzero) { + iprio[irq] = 0; + } else { + iprio[irq] = (val >> (i * 8)) & 0xff; + } + } + + return RISCV_EXCP_NONE; +} + +static int read_hviprio1(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 0, env->hviprio, val); +} + +static int write_hviprio1(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 0, env->hviprio, val); +} + +static int read_hviprio1h(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 4, env->hviprio, val); +} + +static int write_hviprio1h(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 4, env->hviprio, val); +} + +static int read_hviprio2(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 8, env->hviprio, val); +} + +static int write_hviprio2(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 8, env->hviprio, val); +} + +static int read_hviprio2h(CPURISCVState *env, int csrno, target_ulong *val) +{ + return read_hvipriox(env, 12, env->hviprio, val); +} + +static int write_hviprio2h(CPURISCVState *env, int csrno, target_ulong val) +{ + return write_hvipriox(env, 12, env->hviprio, val); +} + /* Virtual CSR Registers */ static RISCVException read_vsstatus(CPURISCVState *env, int csrno, target_ulong *val) @@ -2103,9 +3089,9 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { read_mstatus_i128 }, [CSR_MISA] = { "misa", any, read_misa, write_misa, NULL, read_misa_i128 }, - [CSR_MIDELEG] = { "mideleg", any, read_mideleg, write_mideleg }, + [CSR_MIDELEG] = { "mideleg", any, NULL, NULL, rmw_mideleg }, [CSR_MEDELEG] = { "medeleg", any, read_medeleg, write_medeleg }, - [CSR_MIE] = { "mie", any, read_mie, write_mie }, + [CSR_MIE] = { "mie", any, NULL, NULL, rmw_mie }, [CSR_MTVEC] = { "mtvec", any, read_mtvec, write_mtvec }, [CSR_MCOUNTEREN] = { "mcounteren", any, read_mcounteren, write_mcounteren }, @@ -2119,10 +3105,35 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MTVAL] = { "mtval", any, read_mtval, write_mtval }, [CSR_MIP] = { "mip", any, NULL, NULL, rmw_mip }, + /* Machine-Level Window to Indirectly Accessed Registers (AIA) */ + [CSR_MISELECT] = { "miselect", aia_any, NULL, NULL, rmw_xiselect }, + [CSR_MIREG] = { "mireg", aia_any, NULL, NULL, rmw_xireg }, + + /* Machine-Level Interrupts (AIA) */ + [CSR_MTOPI] = { "mtopi", aia_any, read_mtopi }, + + /* Machine-Level IMSIC Interface (AIA) */ + [CSR_MSETEIPNUM] = { "mseteipnum", aia_any, NULL, NULL, rmw_xsetclreinum }, + [CSR_MCLREIPNUM] = { "mclreipnum", aia_any, NULL, NULL, rmw_xsetclreinum }, + [CSR_MSETEIENUM] = { "mseteienum", aia_any, NULL, NULL, rmw_xsetclreinum }, + [CSR_MCLREIENUM] = { "mclreienum", aia_any, NULL, NULL, rmw_xsetclreinum }, + [CSR_MTOPEI] = { "mtopei", aia_any, NULL, NULL, rmw_xtopei }, + + /* Virtual Interrupts for Supervisor Level (AIA) */ + [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore }, + [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore }, + + /* Machine-Level High-Half CSRs (AIA) */ + [CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh }, + [CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh }, + [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore }, + [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore }, + [CSR_MIPH] = { "miph", 
aia_any32, NULL, NULL, rmw_miph }, + /* Supervisor Trap Setup */ [CSR_SSTATUS] = { "sstatus", smode, read_sstatus, write_sstatus, NULL, read_sstatus_i128 }, - [CSR_SIE] = { "sie", smode, read_sie, write_sie }, + [CSR_SIE] = { "sie", smode, NULL, NULL, rmw_sie }, [CSR_STVEC] = { "stvec", smode, read_stvec, write_stvec }, [CSR_SCOUNTEREN] = { "scounteren", smode, read_scounteren, write_scounteren }, @@ -2137,24 +3148,42 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { /* Supervisor Protection and Translation */ [CSR_SATP] = { "satp", smode, read_satp, write_satp }, + /* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */ + [CSR_SISELECT] = { "siselect", aia_smode, NULL, NULL, rmw_xiselect }, + [CSR_SIREG] = { "sireg", aia_smode, NULL, NULL, rmw_xireg }, + + /* Supervisor-Level Interrupts (AIA) */ + [CSR_STOPI] = { "stopi", aia_smode, read_stopi }, + + /* Supervisor-Level IMSIC Interface (AIA) */ + [CSR_SSETEIPNUM] = { "sseteipnum", aia_smode, NULL, NULL, rmw_xsetclreinum }, + [CSR_SCLREIPNUM] = { "sclreipnum", aia_smode, NULL, NULL, rmw_xsetclreinum }, + [CSR_SSETEIENUM] = { "sseteienum", aia_smode, NULL, NULL, rmw_xsetclreinum }, + [CSR_SCLREIENUM] = { "sclreienum", aia_smode, NULL, NULL, rmw_xsetclreinum }, + [CSR_STOPEI] = { "stopei", aia_smode, NULL, NULL, rmw_xtopei }, + + /* Supervisor-Level High-Half CSRs (AIA) */ + [CSR_SIEH] = { "sieh", aia_smode32, NULL, NULL, rmw_sieh }, + [CSR_SIPH] = { "siph", aia_smode32, NULL, NULL, rmw_siph }, + [CSR_HSTATUS] = { "hstatus", hmode, read_hstatus, write_hstatus }, [CSR_HEDELEG] = { "hedeleg", hmode, read_hedeleg, write_hedeleg }, - [CSR_HIDELEG] = { "hideleg", hmode, read_hideleg, write_hideleg }, + [CSR_HIDELEG] = { "hideleg", hmode, NULL, NULL, rmw_hideleg }, [CSR_HVIP] = { "hvip", hmode, NULL, NULL, rmw_hvip }, [CSR_HIP] = { "hip", hmode, NULL, NULL, rmw_hip }, - [CSR_HIE] = { "hie", hmode, read_hie, write_hie }, + [CSR_HIE] = { "hie", hmode, NULL, NULL, rmw_hie }, [CSR_HCOUNTEREN] = { "hcounteren", hmode, read_hcounteren, write_hcounteren }, - [CSR_HGEIE] = { "hgeie", hmode, read_zero, write_hgeie }, + [CSR_HGEIE] = { "hgeie", hmode, read_hgeie, write_hgeie }, [CSR_HTVAL] = { "htval", hmode, read_htval, write_htval }, [CSR_HTINST] = { "htinst", hmode, read_htinst, write_htinst }, - [CSR_HGEIP] = { "hgeip", hmode, read_zero, write_hgeip }, + [CSR_HGEIP] = { "hgeip", hmode, read_hgeip, NULL }, [CSR_HGATP] = { "hgatp", hmode, read_hgatp, write_hgatp }, [CSR_HTIMEDELTA] = { "htimedelta", hmode, read_htimedelta, write_htimedelta }, [CSR_HTIMEDELTAH] = { "htimedeltah", hmode32, read_htimedeltah, write_htimedeltah }, [CSR_VSSTATUS] = { "vsstatus", hmode, read_vsstatus, write_vsstatus }, [CSR_VSIP] = { "vsip", hmode, NULL, NULL, rmw_vsip }, - [CSR_VSIE] = { "vsie", hmode, read_vsie, write_vsie }, + [CSR_VSIE] = { "vsie", hmode, NULL, NULL, rmw_vsie }, [CSR_VSTVEC] = { "vstvec", hmode, read_vstvec, write_vstvec }, [CSR_VSSCRATCH] = { "vsscratch", hmode, read_vsscratch, write_vsscratch }, [CSR_VSEPC] = { "vsepc", hmode, read_vsepc, write_vsepc }, @@ -2165,6 +3194,37 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MTVAL2] = { "mtval2", hmode, read_mtval2, write_mtval2 }, [CSR_MTINST] = { "mtinst", hmode, read_mtinst, write_mtinst }, + /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */ + [CSR_HVIEN] = { "hvien", aia_hmode, read_zero, write_ignore }, + [CSR_HVICTL] = { "hvictl", aia_hmode, read_hvictl, write_hvictl }, + [CSR_HVIPRIO1] = { "hviprio1", aia_hmode, read_hviprio1, write_hviprio1 }, + 
[CSR_HVIPRIO2] = { "hviprio2", aia_hmode, read_hviprio2, write_hviprio2 }, + + /* + * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) + */ + [CSR_VSISELECT] = { "vsiselect", aia_hmode, NULL, NULL, rmw_xiselect }, + [CSR_VSIREG] = { "vsireg", aia_hmode, NULL, NULL, rmw_xireg }, + + /* VS-Level Interrupts (H-extension with AIA) */ + [CSR_VSTOPI] = { "vstopi", aia_hmode, read_vstopi }, + + /* VS-Level IMSIC Interface (H-extension with AIA) */ + [CSR_VSSETEIPNUM] = { "vsseteipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum }, + [CSR_VSCLREIPNUM] = { "vsclreipnum", aia_hmode, NULL, NULL, rmw_xsetclreinum }, + [CSR_VSSETEIENUM] = { "vsseteienum", aia_hmode, NULL, NULL, rmw_xsetclreinum }, + [CSR_VSCLREIENUM] = { "vsclreienum", aia_hmode, NULL, NULL, rmw_xsetclreinum }, + [CSR_VSTOPEI] = { "vstopei", aia_hmode, NULL, NULL, rmw_xtopei }, + + /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */ + [CSR_HIDELEGH] = { "hidelegh", aia_hmode32, NULL, NULL, rmw_hidelegh }, + [CSR_HVIENH] = { "hvienh", aia_hmode32, read_zero, write_ignore }, + [CSR_HVIPH] = { "hviph", aia_hmode32, NULL, NULL, rmw_hviph }, + [CSR_HVIPRIO1H] = { "hviprio1h", aia_hmode32, read_hviprio1h, write_hviprio1h }, + [CSR_HVIPRIO2H] = { "hviprio2h", aia_hmode32, read_hviprio2h, write_hviprio2h }, + [CSR_VSIEH] = { "vsieh", aia_hmode32, NULL, NULL, rmw_vsieh }, + [CSR_VSIPH] = { "vsiph", aia_hmode32, NULL, NULL, rmw_vsiph }, + /* Physical Memory Protection */ [CSR_MSECCFG] = { "mseccfg", epmp, read_mseccfg, write_mseccfg }, [CSR_PMPCFG0] = { "pmpcfg0", pmp, read_pmpcfg, write_pmpcfg }, diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index f531a74c2f..9ed049c29e 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -64,6 +64,7 @@ int riscv_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) case MXL_RV32: return gdb_get_reg32(mem_buf, tmp); case MXL_RV64: + case MXL_RV128: return gdb_get_reg64(mem_buf, tmp); default: g_assert_not_reached(); @@ -84,6 +85,7 @@ int riscv_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) length = 4; break; case MXL_RV64: + case MXL_RV128: if (env->xl < MXL_RV64) { tmp = (int32_t)ldq_p(mem_buf); } else { @@ -420,6 +422,7 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) 1, "riscv-32bit-virtual.xml", 0); break; case MXL_RV64: + case MXL_RV128: gdb_register_coprocessor(cs, riscv_gdb_get_virtual, riscv_gdb_set_virtual, 1, "riscv-64bit-virtual.xml", 0); diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 5bbedc254c..1d3ff1efe1 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -809,3 +809,10 @@ fcvt_l_h 1100010 00010 ..... ... ..... 1010011 @r2_rm fcvt_lu_h 1100010 00011 ..... ... ..... 1010011 @r2_rm fcvt_h_l 1101010 00010 ..... ... ..... 1010011 @r2_rm fcvt_h_lu 1101010 00011 ..... ... ..... 1010011 @r2_rm + +# *** Svinval Standard Extension *** +sinval_vma 0001011 ..... ..... 000 00000 1110011 @sfence_vma +sfence_w_inval 0001100 00000 00000 000 00000 1110011 +sfence_inval_ir 0001100 00001 00000 000 00000 1110011 +hinval_vvma 0010011 ..... ..... 000 00000 1110011 @hfence_vvma +hinval_gvma 0110011 ..... ..... 
000 00000 1110011 @hfence_gvma diff --git a/target/riscv/insn_trans/trans_rvb.c.inc b/target/riscv/insn_trans/trans_rvb.c.inc index 810431a1d6..f9bd3b7ec4 100644 --- a/target/riscv/insn_trans/trans_rvb.c.inc +++ b/target/riscv/insn_trans/trans_rvb.c.inc @@ -19,25 +19,25 @@ */ #define REQUIRE_ZBA(ctx) do { \ - if (!RISCV_CPU(ctx->cs)->cfg.ext_zba) { \ + if (ctx->cfg_ptr->ext_zba) { \ return false; \ } \ } while (0) #define REQUIRE_ZBB(ctx) do { \ - if (!RISCV_CPU(ctx->cs)->cfg.ext_zbb) { \ + if (ctx->cfg_ptr->ext_zbb) { \ return false; \ } \ } while (0) #define REQUIRE_ZBC(ctx) do { \ - if (!RISCV_CPU(ctx->cs)->cfg.ext_zbc) { \ + if (ctx->cfg_ptr->ext_zbc) { \ return false; \ } \ } while (0) #define REQUIRE_ZBS(ctx) do { \ - if (!RISCV_CPU(ctx->cs)->cfg.ext_zbs) { \ + if (ctx->cfg_ptr->ext_zbs) { \ return false; \ } \ } while (0) diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index 3cd1b3f877..f1342f30f8 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -806,7 +806,7 @@ static bool trans_fence(DisasContext *ctx, arg_fence *a) static bool trans_fence_i(DisasContext *ctx, arg_fence_i *a) { - if (!ctx->ext_ifencei) { + if (!ctx->cfg_ptr->ext_ifencei) { return false; } diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index f85a9e83b4..275fded6e4 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -74,7 +74,7 @@ static bool require_zve32f(DisasContext *s) } /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->ext_zve32f ? s->sew <= MO_32 : true; + return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true; } static bool require_scale_zve32f(DisasContext *s) @@ -85,7 +85,7 @@ static bool require_scale_zve32f(DisasContext *s) } /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->ext_zve64f ? s->sew <= MO_16 : true; + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; } static bool require_zve64f(DisasContext *s) @@ -96,7 +96,7 @@ static bool require_zve64f(DisasContext *s) } /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->ext_zve64f ? s->sew <= MO_32 : true; + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true; } static bool require_scale_zve64f(DisasContext *s) @@ -107,7 +107,7 @@ static bool require_scale_zve64f(DisasContext *s) } /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->ext_zve64f ? s->sew <= MO_16 : true; + return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; } /* Destination vector register group cannot overlap source mask register. 
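For context, an illustrative sketch rather than patch code (trans_sh1add and its body are only stand-ins here): these REQUIRE_* guards exist to abort translation when the extension is absent, so the check keeps the negation visible in the removed lines and only switches the lookup to ctx->cfg_ptr:

#define REQUIRE_ZBA(ctx) do {            \
    if (!(ctx)->cfg_ptr->ext_zba) {      \
        return false;                    \
    }                                    \
} while (0)

static bool trans_sh1add(DisasContext *ctx, arg_sh1add *a)
{
    REQUIRE_ZBA(ctx);
    /* ... emit the TCG ops for rd = (rs1 << 1) + rs2 ... */
    return true;
}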
*/ @@ -174,7 +174,8 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) TCGv s1, dst; if (!require_rvv(s) || - !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) { + !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || + s->cfg_ptr->ext_zve64f)) { return false; } @@ -210,7 +211,8 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) TCGv dst; if (!require_rvv(s) || - !(has_ext(s, RVV) || s->ext_zve32f || s->ext_zve64f)) { + !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || + s->cfg_ptr->ext_zve64f)) { return false; } @@ -248,7 +250,7 @@ static bool trans_vsetivli(DisasContext *s, arg_vsetivli *a) /* vector register offset from env */ static uint32_t vreg_ofs(DisasContext *s, int reg) { - return offsetof(CPURISCVState, vreg) + reg * s->vlen / 8; + return offsetof(CPURISCVState, vreg) + reg * s->cfg_ptr->vlen / 8; } /* check functions */ @@ -318,7 +320,8 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf, * when XLEN=32. (Section 18.2) */ if (get_xl(s) == MXL_RV32) { - ret &= (!has_ext(s, RVV) && s->ext_zve64f ? eew != MO_64 : true); + ret &= (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? eew != MO_64 : true); } return ret; @@ -454,7 +457,7 @@ static bool vext_wide_check_common(DisasContext *s, int vd, int vm) { return (s->lmul <= 2) && (s->sew < MO_64) && - ((s->sew + 1) <= (s->elen >> 4)) && + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) && require_align(vd, s->lmul + 1) && require_vm(vm, vd); } @@ -482,7 +485,7 @@ static bool vext_narrow_check_common(DisasContext *s, int vd, int vs2, { return (s->lmul <= 2) && (s->sew < MO_64) && - ((s->sew + 1) <= (s->elen >> 4)) && + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)) && require_align(vs2, s->lmul + 1) && require_align(vd, s->lmul) && require_vm(vm, vd); @@ -661,7 +664,8 @@ static bool ldst_us_trans(uint32_t vd, uint32_t rs1, uint32_t data, * The first part is vlen in bytes, encoded in maxsz of simd_desc. * The second part is lmul, encoded in data of simd_desc. 
*/ - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -819,7 +823,8 @@ static bool ldst_stride_trans(uint32_t vd, uint32_t rs1, uint32_t rs2, mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); stride = get_gpr(s, rs2, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -925,7 +930,8 @@ static bool ldst_index_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, mask = tcg_temp_new_ptr(); index = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(index, cpu_env, vreg_ofs(s, vs2)); @@ -1065,7 +1071,8 @@ static bool ldff_trans(uint32_t vd, uint32_t rs1, uint32_t data, dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); base = get_gpr(s, rs1, EXT_NONE); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -1120,7 +1127,8 @@ static bool ldst_whole_trans(uint32_t vd, uint32_t rs1, uint32_t nf, uint32_t data = FIELD_DP32(0, VDATA, NF, nf); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); base = get_gpr(s, rs1, EXT_NONE); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); @@ -1185,7 +1193,7 @@ GEN_LDST_WHOLE_TRANS(vs8r_v, 8, true) static inline uint32_t MAXSZ(DisasContext *s) { int scale = s->lmul - 3; - return scale < 0 ? s->vlen >> -scale : s->vlen << scale; + return scale < 0 ? 
s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale; } static bool opivv_check(DisasContext *s, arg_rmrr *a) @@ -1220,7 +1228,8 @@ do_opivv_gvec(DisasContext *s, arg_rmrr *a, GVecGen3Fn *gvec_fn, data = FIELD_DP32(data, VDATA, LMUL, s->lmul); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, fn); + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); } mark_vs_dirty(s); gen_set_label(over); @@ -1262,7 +1271,8 @@ static bool opivx_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VM, vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); @@ -1425,7 +1435,8 @@ static bool opivi_trans(uint32_t vd, uint32_t imm, uint32_t vs2, uint32_t vm, data = FIELD_DP32(data, VDATA, VM, vm); data = FIELD_DP32(data, VDATA, LMUL, s->lmul); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); @@ -1508,7 +1519,8 @@ static bool do_opivv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); mark_vs_dirty(s); gen_set_label(over); @@ -1587,7 +1599,8 @@ static bool do_opiwv_widen(DisasContext *s, arg_rmrr *a, tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, fn); + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); mark_vs_dirty(s); gen_set_label(over); return true; @@ -1663,7 +1676,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -1843,7 +1857,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -1963,7 +1978,8 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); } static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) @@ -1976,7 +1992,8 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a) * are not included for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? 
s->sew != MO_64 : true); } GEN_OPIVV_GVEC_TRANS(vmul_vv, mul) @@ -2046,7 +2063,8 @@ static bool trans_vmv_v_v(DisasContext *s, arg_vmv_v_v *a) tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over); tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, a->rs1), - cpu_env, s->vlen / 8, s->vlen / 8, data, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); gen_set_label(over); } @@ -2083,7 +2101,8 @@ static bool trans_vmv_v_x(DisasContext *s, arg_vmv_v_x *a) }; tcg_gen_ext_tl_i64(s1_i64, s1); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1_i64, cpu_env, desc); @@ -2123,7 +2142,8 @@ static bool trans_vmv_v_i(DisasContext *s, arg_vmv_v_i *a) s1 = tcg_constant_i64(simm); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew](dest, s1, cpu_env, desc); @@ -2176,7 +2196,8 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivv_check(s, a) && - (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); } static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) @@ -2187,7 +2208,8 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a) * for EEW=64 in Zve64*. (Section 18.2) */ return opivx_check(s, a) && - (!has_ext(s, RVV) && s->ext_zve64f ? s->sew != MO_64 : true); + (!has_ext(s, RVV) && + s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true); } GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check) @@ -2275,7 +2297,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2302,7 +2325,8 @@ static bool opfvf_trans(uint32_t vd, uint32_t rs1, uint32_t vs2, dest = tcg_temp_new_ptr(); mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, vd)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, vs2)); @@ -2391,7 +2415,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2464,7 +2489,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmrr *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2583,7 +2609,8 @@ static bool do_opfv(DisasContext *s, arg_rmr *a, data = FIELD_DP32(data, VDATA, LMUL, s->lmul); tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), cpu_env, - s->vlen / 8, s->vlen / 8, data, fn); + 
s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); mark_vs_dirty(s); gen_set_label(over); return true; @@ -2696,7 +2723,8 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a) do_nanbox(s, t1, cpu_fpr[a->rs1]); dest = tcg_temp_new_ptr(); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(dest, cpu_env, vreg_ofs(s, a->rd)); fns[s->sew - 1](dest, t1, cpu_env, desc); @@ -2782,7 +2810,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2831,7 +2860,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2896,7 +2926,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew - 1]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2947,7 +2978,8 @@ static bool trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, VM, a->vm); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, \ fns[s->sew]); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -2986,7 +3018,7 @@ GEN_OPIVV_TRANS(vredxor_vs, reduction_check) static bool reduction_widen_check(DisasContext *s, arg_rmrr *a) { return reduction_check(s, a) && (s->sew < MO_64) && - ((s->sew + 1) <= (s->elen >> 4)); + ((s->sew + 1) <= (s->cfg_ptr->elen >> 4)); } GEN_OPIVV_WIDEN_TRANS(vwredsum_vs, reduction_widen_check) @@ -3034,7 +3066,8 @@ static bool trans_##NAME(DisasContext *s, arg_r *a) \ tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), \ vreg_ofs(s, a->rs1), \ vreg_ofs(s, a->rs2), cpu_env, \ - s->vlen / 8, s->vlen / 8, data, fn); \ + s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, data, fn); \ mark_vs_dirty(s); \ gen_set_label(over); \ return true; \ @@ -3067,7 +3100,8 @@ static bool trans_vcpop_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -3099,7 +3133,8 @@ static bool trans_vfirst_m(DisasContext *s, arg_rmr *a) mask = tcg_temp_new_ptr(); src2 = tcg_temp_new_ptr(); dst = dest_gpr(s, a->rd); - desc = tcg_constant_i32(simd_desc(s->vlen / 8, s->vlen / 8, data)); + desc = tcg_constant_i32(simd_desc(s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data)); tcg_gen_addi_ptr(src2, cpu_env, vreg_ofs(s, a->rs2)); tcg_gen_addi_ptr(mask, cpu_env, vreg_ofs(s, 0)); @@ -3134,7 +3169,8 @@ static bool 
trans_##NAME(DisasContext *s, arg_rmr *a) \ data = FIELD_DP32(data, VDATA, LMUL, s->lmul); \ tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), \ vreg_ofs(s, 0), vreg_ofs(s, a->rs2), \ - cpu_env, s->vlen / 8, s->vlen / 8, \ + cpu_env, s->cfg_ptr->vlen / 8, \ + s->cfg_ptr->vlen / 8, \ data, fn); \ mark_vs_dirty(s); \ gen_set_label(over); \ @@ -3174,7 +3210,8 @@ static bool trans_viota_m(DisasContext *s, arg_viota_m *a) }; tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), cpu_env, - s->vlen / 8, s->vlen / 8, data, fns[s->sew]); + s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); return true; @@ -3200,7 +3237,8 @@ static bool trans_vid_v(DisasContext *s, arg_vid_v *a) gen_helper_vid_v_w, gen_helper_vid_v_d, }; tcg_gen_gvec_2_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), - cpu_env, s->vlen / 8, s->vlen / 8, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); @@ -3554,7 +3592,8 @@ static bool trans_vrgather_vx(DisasContext *s, arg_rmrr *a) if (a->vm && s->vl_eq_vlmax) { int scale = s->lmul - (s->sew + 3); - int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale; + int vlmax = scale < 0 ? + s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale; TCGv_i64 dest = tcg_temp_new_i64(); if (a->rs1 == 0) { @@ -3586,7 +3625,8 @@ static bool trans_vrgather_vi(DisasContext *s, arg_rmrr *a) if (a->vm && s->vl_eq_vlmax) { int scale = s->lmul - (s->sew + 3); - int vlmax = scale < 0 ? s->vlen >> -scale : s->vlen << scale; + int vlmax = scale < 0 ? + s->cfg_ptr->vlen >> -scale : s->cfg_ptr->vlen << scale; if (a->rs1 >= vlmax) { tcg_gen_gvec_dup_imm(MO_64, vreg_ofs(s, a->rd), MAXSZ(s), MAXSZ(s), 0); @@ -3638,7 +3678,8 @@ static bool trans_vcompress_vm(DisasContext *s, arg_r *a) data = FIELD_DP32(data, VDATA, LMUL, s->lmul); tcg_gen_gvec_4_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs1), vreg_ofs(s, a->rs2), - cpu_env, s->vlen / 8, s->vlen / 8, data, + cpu_env, s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fns[s->sew]); mark_vs_dirty(s); gen_set_label(over); @@ -3657,7 +3698,7 @@ static bool trans_##NAME(DisasContext *s, arg_##NAME * a) \ if (require_rvv(s) && \ QEMU_IS_ALIGNED(a->rd, LEN) && \ QEMU_IS_ALIGNED(a->rs2, LEN)) { \ - uint32_t maxsz = (s->vlen >> 3) * LEN; \ + uint32_t maxsz = (s->cfg_ptr->vlen >> 3) * LEN; \ if (s->vstart == 0) { \ /* EEW = 8 */ \ tcg_gen_gvec_mov(MO_8, vreg_ofs(s, a->rd), \ @@ -3742,7 +3783,8 @@ static bool int_ext_op(DisasContext *s, arg_rmr *a, uint8_t seq) tcg_gen_gvec_3_ptr(vreg_ofs(s, a->rd), vreg_ofs(s, 0), vreg_ofs(s, a->rs2), cpu_env, - s->vlen / 8, s->vlen / 8, data, fn); + s->cfg_ptr->vlen / 8, + s->cfg_ptr->vlen / 8, data, fn); mark_vs_dirty(s); gen_set_label(over); diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc index 5a7cac8958..608c51da2c 100644 --- a/target/riscv/insn_trans/trans_rvzfh.c.inc +++ b/target/riscv/insn_trans/trans_rvzfh.c.inc @@ -17,13 +17,13 @@ */ #define REQUIRE_ZFH(ctx) do { \ - if (!ctx->ext_zfh) { \ + if (!ctx->cfg_ptr->ext_zfh) { \ return false; \ } \ } while (0) #define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \ - if (!(ctx->ext_zfh || ctx->ext_zfhmin)) { \ + if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \ return false; \ } \ } while (0) diff --git a/target/riscv/insn_trans/trans_svinval.c.inc b/target/riscv/insn_trans/trans_svinval.c.inc new file mode 100644 index 0000000000..2682bd969f --- /dev/null +++ 
b/target/riscv/insn_trans/trans_svinval.c.inc @@ -0,0 +1,75 @@ +/* + * RISC-V translation routines for the Svinval Standard Instruction Set. + * + * Copyright (c) 2020-2022 PLCT lab + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#define REQUIRE_SVINVAL(ctx) do { \ + if (!ctx->cfg_ptr->ext_svinval) { \ + return false; \ + } \ +} while (0) + +static bool trans_sinval_vma(DisasContext *ctx, arg_sinval_vma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as sfence.vma currently */ + REQUIRE_EXT(ctx, RVS); +#ifndef CONFIG_USER_ONLY + gen_helper_tlb_flush(cpu_env); + return true; +#endif + return false; +} + +static bool trans_sfence_w_inval(DisasContext *ctx, arg_sfence_w_inval *a) +{ + REQUIRE_SVINVAL(ctx); + REQUIRE_EXT(ctx, RVS); + /* Do nothing currently */ + return true; +} + +static bool trans_sfence_inval_ir(DisasContext *ctx, arg_sfence_inval_ir *a) +{ + REQUIRE_SVINVAL(ctx); + REQUIRE_EXT(ctx, RVS); + /* Do nothing currently */ + return true; +} + +static bool trans_hinval_vvma(DisasContext *ctx, arg_hinval_vvma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as hfence.vvma currently */ + REQUIRE_EXT(ctx, RVH); +#ifndef CONFIG_USER_ONLY + gen_helper_hyp_tlb_flush(cpu_env); + return true; +#endif + return false; +} + +static bool trans_hinval_gvma(DisasContext *ctx, arg_hinval_gvma *a) +{ + REQUIRE_SVINVAL(ctx); + /* Do the same as hfence.gvma currently */ + REQUIRE_EXT(ctx, RVH); +#ifndef CONFIG_USER_ONLY + gen_helper_hyp_gvma_tlb_flush(cpu_env); + return true; +#endif + return false; +} diff --git a/target/riscv/insn_trans/trans_xventanacondops.c.inc b/target/riscv/insn_trans/trans_xventanacondops.c.inc new file mode 100644 index 0000000000..16849e6d4e --- /dev/null +++ b/target/riscv/insn_trans/trans_xventanacondops.c.inc @@ -0,0 +1,39 @@ +/* + * RISC-V translation routines for the XVentanaCondOps extension. + * + * Copyright (c) 2021-2022 VRULL GmbH. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +static bool gen_vt_condmask(DisasContext *ctx, arg_r *a, TCGCond cond) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + tcg_gen_movcond_tl(cond, dest, src2, ctx->zero, src1, ctx->zero); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + +static bool trans_vt_maskc(DisasContext *ctx, arg_r *a) +{ + return gen_vt_condmask(ctx, a, TCG_COND_NE); +} + +static bool trans_vt_maskcn(DisasContext *ctx, arg_r *a) +{ + return gen_vt_condmask(ctx, a, TCG_COND_EQ); +} diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 740e11fcff..5178b3fec9 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -78,19 +78,24 @@ static bool hyper_needed(void *opaque) static const VMStateDescription vmstate_hyper = { .name = "cpu/hyper", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .needed = hyper_needed, .fields = (VMStateField[]) { VMSTATE_UINTTL(env.hstatus, RISCVCPU), VMSTATE_UINTTL(env.hedeleg, RISCVCPU), - VMSTATE_UINTTL(env.hideleg, RISCVCPU), + VMSTATE_UINT64(env.hideleg, RISCVCPU), VMSTATE_UINTTL(env.hcounteren, RISCVCPU), VMSTATE_UINTTL(env.htval, RISCVCPU), VMSTATE_UINTTL(env.htinst, RISCVCPU), VMSTATE_UINTTL(env.hgatp, RISCVCPU), + VMSTATE_UINTTL(env.hgeie, RISCVCPU), + VMSTATE_UINTTL(env.hgeip, RISCVCPU), VMSTATE_UINT64(env.htimedelta, RISCVCPU), + VMSTATE_UINTTL(env.hvictl, RISCVCPU), + VMSTATE_UINT8_ARRAY(env.hviprio, RISCVCPU, 64), + VMSTATE_UINT64(env.vsstatus, RISCVCPU), VMSTATE_UINTTL(env.vstvec, RISCVCPU), VMSTATE_UINTTL(env.vsscratch, RISCVCPU), @@ -98,6 +103,7 @@ static const VMStateDescription vmstate_hyper = { VMSTATE_UINTTL(env.vscause, RISCVCPU), VMSTATE_UINTTL(env.vstval, RISCVCPU), VMSTATE_UINTTL(env.vsatp, RISCVCPU), + VMSTATE_UINTTL(env.vsiselect, RISCVCPU), VMSTATE_UINTTL(env.mtval2, RISCVCPU), VMSTATE_UINTTL(env.mtinst, RISCVCPU), @@ -233,6 +239,8 @@ const VMStateDescription vmstate_riscv_cpu = { .fields = (VMStateField[]) { VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32), VMSTATE_UINT64_ARRAY(env.fpr, RISCVCPU, 32), + VMSTATE_UINT8_ARRAY(env.miprio, RISCVCPU, 64), + VMSTATE_UINT8_ARRAY(env.siprio, RISCVCPU, 64), VMSTATE_UINTTL(env.pc, RISCVCPU), VMSTATE_UINTTL(env.load_res, RISCVCPU), VMSTATE_UINTTL(env.load_val, RISCVCPU), @@ -251,10 +259,10 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.resetvec, RISCVCPU), VMSTATE_UINTTL(env.mhartid, RISCVCPU), VMSTATE_UINT64(env.mstatus, RISCVCPU), - VMSTATE_UINTTL(env.mip, RISCVCPU), - VMSTATE_UINT32(env.miclaim, RISCVCPU), - VMSTATE_UINTTL(env.mie, RISCVCPU), - VMSTATE_UINTTL(env.mideleg, RISCVCPU), + VMSTATE_UINT64(env.mip, RISCVCPU), + VMSTATE_UINT64(env.miclaim, RISCVCPU), + VMSTATE_UINT64(env.mie, RISCVCPU), + VMSTATE_UINT64(env.mideleg, RISCVCPU), VMSTATE_UINTTL(env.satp, RISCVCPU), VMSTATE_UINTTL(env.stval, RISCVCPU), VMSTATE_UINTTL(env.medeleg, RISCVCPU), @@ -265,6 +273,8 @@ const VMStateDescription vmstate_riscv_cpu = { VMSTATE_UINTTL(env.mepc, RISCVCPU), VMSTATE_UINTTL(env.mcause, RISCVCPU), VMSTATE_UINTTL(env.mtval, RISCVCPU), + VMSTATE_UINTTL(env.miselect, RISCVCPU), + VMSTATE_UINTTL(env.siselect, RISCVCPU), VMSTATE_UINTTL(env.scounteren, RISCVCPU), VMSTATE_UINTTL(env.mcounteren, RISCVCPU), VMSTATE_UINTTL(env.sscratch, RISCVCPU), diff --git a/target/riscv/meson.build b/target/riscv/meson.build index a3997ed580..91f0ac32ff 100644 --- a/target/riscv/meson.build +++ b/target/riscv/meson.build @@ -4,6 +4,7 @@ dir = meson.current_source_dir() gen = [ 
decodetree.process('insn16.decode', extra_args: ['--static-decode=decode_insn16', '--insnwidth=16']), decodetree.process('insn32.decode', extra_args: '--static-decode=decode_insn32'), + decodetree.process('XVentanaCondOps.decode', extra_args: '--static-decode=decode_XVentanaCodeOps'), ] riscv_ss = ss.source_set() diff --git a/target/riscv/translate.c b/target/riscv/translate.c index f0bbe80875..84dbfa6340 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -76,11 +76,7 @@ typedef struct DisasContext { int frm; RISCVMXL ol; bool virt_enabled; - bool ext_ifencei; - bool ext_zfh; - bool ext_zfhmin; - bool ext_zve32f; - bool ext_zve64f; + const RISCVCPUConfig *cfg_ptr; bool hlsx; /* vector extension */ bool vill; @@ -98,8 +94,6 @@ typedef struct DisasContext { */ int8_t lmul; uint8_t sew; - uint16_t vlen; - uint16_t elen; target_ulong vstart; bool vl_eq_vlmax; uint8_t ntemp; @@ -117,6 +111,19 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext) return ctx->misa_ext & ext; } +static bool always_true_p(DisasContext *ctx __attribute__((__unused__))) +{ + return true; +} + +#define MATERIALISE_EXT_PREDICATE(ext) \ + static bool has_ ## ext ## _p(DisasContext *ctx) \ + { \ + return ctx->cfg_ptr->ext_ ## ext ; \ + } + +MATERIALISE_EXT_PREDICATE(XVentanaCondOps); + #ifdef TARGET_RISCV32 #define get_xl(ctx) MXL_RV32 #elif defined(CONFIG_USER_ONLY) @@ -855,21 +862,37 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvb.c.inc" #include "insn_trans/trans_rvzfh.c.inc" #include "insn_trans/trans_privileged.c.inc" +#include "insn_trans/trans_svinval.c.inc" +#include "insn_trans/trans_xventanacondops.c.inc" /* Include the auto-generated decoder for 16 bit insn */ #include "decode-insn16.c.inc" +/* Include decoders for factored-out extensions */ +#include "decode-XVentanaCondOps.c.inc" static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) { - /* check for compressed insn */ + /* + * A table with predicate (i.e., guard) functions and decoder functions + * that are tested in-order until a decoder matches onto the opcode. 
+ */ + static const struct { + bool (*guard_func)(DisasContext *); + bool (*decode_func)(DisasContext *, uint32_t); + } decoders[] = { + { always_true_p, decode_insn32 }, + { has_XVentanaCondOps_p, decode_XVentanaCodeOps }, + }; + + /* Check for compressed insn */ if (extract16(opcode, 0, 2) != 3) { if (!has_ext(ctx, RVC)) { gen_exception_illegal(ctx); } else { ctx->opcode = opcode; ctx->pc_succ_insn = ctx->base.pc_next + 2; - if (!decode_insn16(ctx, opcode)) { - gen_exception_illegal(ctx); + if (decode_insn16(ctx, opcode)) { + return; } } } else { @@ -879,10 +902,16 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode) ctx->base.pc_next + 2)); ctx->opcode = opcode32; ctx->pc_succ_insn = ctx->base.pc_next + 4; - if (!decode_insn32(ctx, opcode32)) { - gen_exception_illegal(ctx); + + for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) { + if (decoders[i].guard_func(ctx) && + decoders[i].decode_func(ctx, opcode32)) { + return; + } } } + + gen_exception_illegal(ctx); } static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) @@ -908,13 +937,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #endif ctx->misa_ext = env->misa_ext; ctx->frm = -1; /* unknown rounding mode */ - ctx->ext_ifencei = cpu->cfg.ext_ifencei; - ctx->ext_zfh = cpu->cfg.ext_zfh; - ctx->ext_zfhmin = cpu->cfg.ext_zfhmin; - ctx->ext_zve32f = cpu->cfg.ext_zve32f; - ctx->ext_zve64f = cpu->cfg.ext_zve64f; - ctx->vlen = cpu->cfg.vlen; - ctx->elen = cpu->cfg.elen; + ctx->cfg_ptr = &(cpu->cfg); ctx->mstatus_hs_fs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_FS); ctx->mstatus_hs_vs = FIELD_EX32(tb_flags, TB_FLAGS, MSTATUS_HS_VS); ctx->hlsx = FIELD_EX32(tb_flags, TB_FLAGS, HLSX); diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c index 020d2e841f..3bd4aac9c9 100644 --- a/target/riscv/vector_helper.c +++ b/target/riscv/vector_helper.c @@ -71,6 +71,7 @@ target_ulong HELPER(vsetvl)(CPURISCVState *env, target_ulong s1, env->vl = vl; env->vtype = s2; env->vstart = 0; + env->vill = 0; return vl; } |
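
The recurring change across the trans_rvb/trans_rvi/trans_rvv/trans_rvzfh hunks above is mechanical: DisasContext stops carrying per-extension copies (ext_ifencei, ext_zfh, ext_zve32f, vlen, elen) and instead keeps one const RISCVCPUConfig pointer, set in riscv_tr_init_disas_context() via ctx->cfg_ptr = &(cpu->cfg). A minimal standalone sketch of that access pattern follows; CfgSketch and CtxSketch are simplified stand-ins for the QEMU structures, not the real types.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for RISCVCPUConfig and DisasContext; the names are illustrative. */
typedef struct {
    bool ext_zba;
    bool ext_ifencei;
    uint16_t vlen;                 /* vector register length in bits */
} CfgSketch;

typedef struct {
    const CfgSketch *cfg_ptr;      /* set once when translation of a block starts */
} CtxSketch;

/* Analogue of the REQUIRE_ZBA guard: callers bail out when this is false. */
static bool zba_available(const CtxSketch *ctx)
{
    return ctx->cfg_ptr->ext_zba;
}

int main(void)
{
    CfgSketch cfg = { .ext_zba = true, .ext_ifencei = true, .vlen = 128 };
    CtxSketch ctx = { .cfg_ptr = &cfg };

    printf("zba=%d vlen_bytes=%u\n", zba_available(&ctx), cfg.vlen / 8u);
    return 0;
}

A side effect visible in the new files is that extensions such as Svinval and XVentanaCondOps need no additional DisasContext fields; their REQUIRE_* guards and predicates read cfg_ptr directly.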
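
The MAXSZ() and vlmax computations in trans_rvv.c.inc derive byte counts from the configured vlen (given in bits) and the log2-encoded LMUL kept in the context. A standalone sketch of that arithmetic; the VLEN and LMUL values in main() are assumptions chosen only to show the scaling.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the shape of MAXSZ(): vlen is in bits, lmul is log2(LMUL), and the
 * result is the size of the register group in bytes (vlen / 8 * LMUL). */
static uint32_t group_bytes(uint16_t vlen, int8_t lmul)
{
    int scale = lmul - 3;          /* the -3 folds in the bits-to-bytes divide */
    return scale < 0 ? (uint32_t)vlen >> -scale : (uint32_t)vlen << scale;
}

int main(void)
{
    /* Assumed example: VLEN=128 bits. LMUL=1 -> 16 B, LMUL=8 -> 128 B, LMUL=1/2 -> 8 B. */
    printf("%u %u %u\n", group_bytes(128, 0), group_bytes(128, 3), group_bytes(128, -1));
    return 0;
}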
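
gen_vt_condmask() in trans_xventanacondops.c.inc lowers both XVentanaCondOps instructions to a single movcond: the destination receives rs1 when rs2 satisfies the condition against zero, and 0 otherwise. A standalone sketch of the resulting semantics, assuming XLEN=64 for the example values.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* vt.maskc  rd, rs1, rs2 : rd = (rs2 != 0) ? rs1 : 0 */
static uint64_t vt_maskc(uint64_t rs1, uint64_t rs2)
{
    return rs2 != 0 ? rs1 : 0;
}

/* vt.maskcn rd, rs1, rs2 : rd = (rs2 == 0) ? rs1 : 0 */
static uint64_t vt_maskcn(uint64_t rs1, uint64_t rs2)
{
    return rs2 == 0 ? rs1 : 0;
}

int main(void)
{
    printf("%" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
           vt_maskc(0x1234, 1),   /* 4660: condition holds, rs1 passes through */
           vt_maskc(0x1234, 0),   /* 0:    condition fails, result is zeroed   */
           vt_maskcn(0x1234, 0)); /* 4660: the complementary form              */
    return 0;
}

The two forms are complements; together they give a branchless select of rs1 against zero.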
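
decode_opc() in translate.c now walks a table of guard/decoder pairs, so decode_XVentanaCodeOps only runs when has_XVentanaCondOps_p reports the extension enabled, and the illegal-instruction exception is generated only after no decoder has accepted the opcode. A standalone sketch of that dispatch shape follows; the context type, opcode values, and decoder names are made up for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* The context, opcodes and decoder names below are made up for illustration. */
typedef struct { bool has_vendor_ext; } Ctx;

static bool always_true(Ctx *c)                 { (void)c; return true; }
static bool has_vendor(Ctx *c)                  { return c->has_vendor_ext; }
static bool decode_base(Ctx *c, uint32_t op)    { (void)c; return op == 0x13; }
static bool decode_vendor(Ctx *c, uint32_t op)  { (void)c; return op == 0x7b; }

static bool decode(Ctx *c, uint32_t op)
{
    static const struct {
        bool (*guard)(Ctx *);
        bool (*dec)(Ctx *, uint32_t);
    } decoders[] = {
        { always_true, decode_base },    /* standard ISA decoder is tried first  */
        { has_vendor,  decode_vendor },  /* vendor decoder only behind its guard */
    };

    for (size_t i = 0; i < sizeof(decoders) / sizeof(decoders[0]); i++) {
        if (decoders[i].guard(c) && decoders[i].dec(c, op)) {
            return true;                 /* first decoder that accepts wins */
        }
    }
    return false;                        /* caller raises illegal instruction */
}

int main(void)
{
    Ctx c = { .has_vendor_ext = false };
    printf("%d %d\n", decode(&c, 0x13), decode(&c, 0x7b)); /* 1 0 */
    c.has_vendor_ext = true;
    printf("%d\n", decode(&c, 0x7b));                      /* 1 */
    return 0;
}

Keeping the base decoder first mirrors the table in the patch, so vendor encodings can only claim opcode space that the standard decoder rejects.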