author     Richard Henderson <richard.henderson@linaro.org>   2023-07-10 21:42:50 +0100
committer  Richard Henderson <richard.henderson@linaro.org>   2023-07-10 21:42:50 +0100
commit     94d68c11362240a26ce425f56e2451d88f6814e1 (patch)
tree       2e345a15e1c0deecfd431d8a84f7c712272ad54c /target
parent     8d309a3a97e2d3734b74b07f355f860a9f2e880e (diff)
parent     a47842d16653b4f73b5d56ff0c252dd8a329481b (diff)
Merge tag 'pull-riscv-to-apply-20230710-1' of https://github.com/alistair23/qemu into staging
Third RISC-V PR for 8.1

* Use xl instead of mxl for disassemble
* Factor out extension tests to cpu_cfg.h
* disas/riscv: Add vendor extension support
* disas/riscv: Add support for XVentanaCondOps
* disas/riscv: Add support for XThead* instructions
* Fix mstatus related problems
* Fix veyron-v1 CPU properties
* Fix the xlen for data address when MPRV=1
* opensbi: Upgrade from v1.2 to v1.3
* Enable 32-bit Spike OpenSBI boot testing
* Support the watchdog timer of HiFive 1 rev b
* Only build qemu-system-riscv$$ on rv$$ host
* Add RVV registers to log
* Restrict ACLINT to TCG
* Add syscall riscv_hwprobe
* Add support for BF16 extensions
* KVM_RISCV_SET_TIMER macro is not configured correctly
* Generate devicetree only after machine initialization is complete
* virt: Convert fdt_load_addr to uint64_t
* KVM: fixes and enhancements
* Add support for the Zfa extension

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEaukCtqfKh31tZZKWr3yVEwxTgBMFAmSr+ekACgkQr3yVEwxT
# gBMMGg//ZCcyH3KXB49c2KUIFO6FKYUxN9uC3giZCtuGyEH8T2yDgZVVXnxwU+Ij
# +3Ej6T/ZdWMpePC9qf+xKzHWZk7Qc8Tcg+JgQbga573894yZInRwYl8HsSlEKA+Z
# vlqSBPxTlp9rlDwGP/LjGljyIFqL4konk9zi3FL4ZXTF1iHUGrh/953Y3wIreEfl
# KX5UznnWcgy2BqQT1vihMbM8qCVK6iryH+QZ6LiAsPMSX1rIzk8ectQryILzoIYh
# bMiwCLVMyr4ZrUXjmGTF+7/WcOWwhhyfpdstf2iotKALelZtVHit0wHcty2GYQde
# nvN83jJWu04DGXkPBUsqCUQXczGo1QHjJUH3RIRJzfOby/lGt4pSzHAfKA+iNUht
# ikM3SdBsXMO+ogjTtTcCMb7/m2vsMoQP60VRts9Mh3YVD0cgr7RqpqRoEMugVYnr
# ca8Vijf71mB+y+pq477eV1Q8BoKpr8xa1OlFkNKPC17uMD7HoDMI44QgFOgtYp10
# TMsqqyB75q6PZhSEwm63xbmH0Zpo8kSqT/E3MTtGTyPeuL8TNNNSkCmFaGYmRrbI
# XEp7vG2RaDJOvDomS3nUhA5ruc8SaXd0q25q2gLYQfCsehfFqZAwuNB5xf1zS0M0
# ov1/gwaqU93t6nLbo2cCbb0plkIFKwwJ9KKjD06wJ4KPe0TGFzk=
# =3XFD
# -----END PGP SIGNATURE-----
# gpg: Signature made Mon 10 Jul 2023 01:30:33 PM BST
# gpg: using RSA key 6AE902B6A7CA877D6D659296AF7C95130C538013
# gpg: Good signature from "Alistair Francis <alistair@alistair23.me>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 6AE9 02B6 A7CA 877D 6D65 9296 AF7C 9513 0C53 8013

* tag 'pull-riscv-to-apply-20230710-1' of https://github.com/alistair23/qemu: (54 commits)
  riscv: Add support for the Zfa extension
  target/riscv/kvm.c: read/write (cbom|cboz)_blocksize in KVM
  target/riscv/kvm.c: add kvmconfig_get_cfg_addr() helper
  target/riscv: update multi-letter extension KVM properties
  target/riscv/cpu.c: create KVM mock properties
  target/riscv/cpu.c: remove priv_ver check from riscv_isa_string_ext()
  target/riscv/cpu.c: add satp_mode properties earlier
  target/riscv/kvm.c: add multi-letter extension KVM properties
  target/riscv/kvm.c: update KVM MISA bits
  target/riscv: add KVM specific MISA properties
  target/riscv/cpu: add misa_ext_info_arr[]
  target/riscv/kvm.c: init 'misa_ext_mask' with scratch CPU
  target/riscv: handle mvendorid/marchid/mimpid for KVM CPUs
  target/riscv: read marchid/mimpid in kvm_riscv_init_machine_ids()
  target/riscv: use KVM scratch CPUs to init KVM properties
  target/riscv/cpu.c: restrict 'marchid' value
  target/riscv/cpu.c: restrict 'mimpid' value
  target/riscv/cpu.c: restrict 'mvendorid' value
  hw/riscv/virt.c: skip 'mmu-type' FDT if satp mode not set
  target/riscv: skip features setup for KVM CPUs
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/riscv/cpu.c                            439
-rw-r--r--  target/riscv/cpu.h                             56
-rw-r--r--  target/riscv/cpu_cfg.h                         41
-rw-r--r--  target/riscv/cpu_helper.c                      12
-rw-r--r--  target/riscv/csr.c                             41
-rw-r--r--  target/riscv/fpu_helper.c                     166
-rw-r--r--  target/riscv/helper.h                          29
-rw-r--r--  target/riscv/insn32.decode                     38
-rw-r--r--  target/riscv/insn_trans/trans_rvbf16.c.inc    175
-rw-r--r--  target/riscv/insn_trans/trans_rvzfa.c.inc     521
-rw-r--r--  target/riscv/insn_trans/trans_rvzfh.c.inc      12
-rw-r--r--  target/riscv/kvm.c                            501
-rw-r--r--  target/riscv/kvm_riscv.h                        1
-rw-r--r--  target/riscv/op_helper.c                        3
-rw-r--r--  target/riscv/translate.c                       42
-rw-r--r--  target/riscv/vector_helper.c                   17
16 files changed, 1966 insertions, 128 deletions
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c
index 4035fe0e62..9339c0241d 100644
--- a/target/riscv/cpu.c
+++ b/target/riscv/cpu.c
@@ -34,16 +34,11 @@
#include "migration/vmstate.h"
#include "fpu/softfloat-helpers.h"
#include "sysemu/kvm.h"
+#include "sysemu/tcg.h"
#include "kvm_riscv.h"
#include "tcg/tcg.h"
/* RISC-V CPU definitions */
-
-#define RISCV_CPU_MARCHID ((QEMU_VERSION_MAJOR << 16) | \
- (QEMU_VERSION_MINOR << 8) | \
- (QEMU_VERSION_MICRO))
-#define RISCV_CPU_MIMPID RISCV_CPU_MARCHID
-
static const char riscv_single_letter_exts[] = "IEMAFDQCPVH";
struct isa_ext_data {
@@ -56,6 +51,17 @@ struct isa_ext_data {
{#_name, _min_ver, offsetof(struct RISCVCPUConfig, _prop)}
/*
+ * From vector_helper.c
+ * Note that vector data is stored in host-endian 64-bit chunks,
+ * so addressing bytes needs a host-endian fixup.
+ */
+#if HOST_BIG_ENDIAN
+#define BYTE(x) ((x) ^ 7)
+#else
+#define BYTE(x) (x)
+#endif
+
+/*
* Here are the ordering rules of extension naming defined by RISC-V
* specification :
* 1. All extensions should be separated from other multi-letter extensions
@@ -83,6 +89,8 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zifencei, PRIV_VERSION_1_10_0, ext_ifencei),
ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause),
ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs),
+ ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa),
+ ISA_EXT_DATA_ENTRY(zfbfmin, PRIV_VERSION_1_12_0, ext_zfbfmin),
ISA_EXT_DATA_ENTRY(zfh, PRIV_VERSION_1_11_0, ext_zfh),
ISA_EXT_DATA_ENTRY(zfhmin, PRIV_VERSION_1_11_0, ext_zfhmin),
ISA_EXT_DATA_ENTRY(zfinx, PRIV_VERSION_1_12_0, ext_zfinx),
@@ -114,6 +122,8 @@ static const struct isa_ext_data isa_edata_arr[] = {
ISA_EXT_DATA_ENTRY(zve32f, PRIV_VERSION_1_10_0, ext_zve32f),
ISA_EXT_DATA_ENTRY(zve64f, PRIV_VERSION_1_10_0, ext_zve64f),
ISA_EXT_DATA_ENTRY(zve64d, PRIV_VERSION_1_10_0, ext_zve64d),
+ ISA_EXT_DATA_ENTRY(zvfbfmin, PRIV_VERSION_1_12_0, ext_zvfbfmin),
+ ISA_EXT_DATA_ENTRY(zvfbfwma, PRIV_VERSION_1_12_0, ext_zvfbfwma),
ISA_EXT_DATA_ENTRY(zvfh, PRIV_VERSION_1_12_0, ext_zvfh),
ISA_EXT_DATA_ENTRY(zvfhmin, PRIV_VERSION_1_12_0, ext_zvfhmin),
ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx),
@@ -183,6 +193,14 @@ const char * const riscv_fpr_regnames[] = {
"f30/ft10", "f31/ft11"
};
+const char * const riscv_rvv_regnames[] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v7", "v8", "v9", "v10", "v11", "v12", "v13",
+ "v14", "v15", "v16", "v17", "v18", "v19", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27",
+ "v28", "v29", "v30", "v31"
+};
+
static const char * const riscv_excp_names[] = {
"misaligned_fetch",
"fault_fetch",
@@ -412,6 +430,7 @@ static void rv64_thead_c906_cpu_init(Object *obj)
set_misa(env, MXL_RV64, RVG | RVC | RVS | RVU);
env->priv_ver = PRIV_VERSION_1_11_0;
+ cpu->cfg.ext_zfa = true;
cpu->cfg.ext_zfh = true;
cpu->cfg.mmu = true;
cpu->cfg.ext_xtheadba = true;
@@ -444,6 +463,9 @@ static void rv64_veyron_v1_cpu_init(Object *obj)
/* Enable ISA extensions */
cpu->cfg.mmu = true;
+ cpu->cfg.ext_ifencei = true;
+ cpu->cfg.ext_icsr = true;
+ cpu->cfg.pmp = true;
cpu->cfg.ext_icbom = true;
cpu->cfg.cbom_blocksize = 64;
cpu->cfg.cboz_blocksize = 64;
@@ -608,7 +630,8 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
- int i;
+ int i, j;
+ uint8_t *p;
#if !defined(CONFIG_USER_ONLY)
if (riscv_has_ext(env, RVH)) {
@@ -692,6 +715,41 @@ static void riscv_cpu_dump_state(CPUState *cs, FILE *f, int flags)
}
}
}
+ if (riscv_has_ext(env, RVV) && (flags & CPU_DUMP_VPU)) {
+ static const int dump_rvv_csrs[] = {
+ CSR_VSTART,
+ CSR_VXSAT,
+ CSR_VXRM,
+ CSR_VCSR,
+ CSR_VL,
+ CSR_VTYPE,
+ CSR_VLENB,
+ };
+ for (int i = 0; i < ARRAY_SIZE(dump_rvv_csrs); ++i) {
+ int csrno = dump_rvv_csrs[i];
+ target_ulong val = 0;
+ RISCVException res = riscv_csrrw_debug(env, csrno, &val, 0, 0);
+
+ /*
+ * Rely on the smode, hmode, etc, predicates within csr.c
+ * to do the filtering of the registers that are present.
+ */
+ if (res == RISCV_EXCP_NONE) {
+ qemu_fprintf(f, " %-8s " TARGET_FMT_lx "\n",
+ csr_ops[csrno].name, val);
+ }
+ }
+ uint16_t vlenb = cpu->cfg.vlen >> 3;
+
+ for (i = 0; i < 32; i++) {
+ qemu_fprintf(f, " %-8s ", riscv_rvv_regnames[i]);
+ p = (uint8_t *)env->vreg;
+ for (j = vlenb - 1 ; j >= 0; j--) {
+ qemu_fprintf(f, "%02x", *(p + i * vlenb + BYTE(j)));
+ }
+ qemu_fprintf(f, "\n");
+ }
+ }
}
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
@@ -858,9 +916,10 @@ static void riscv_cpu_reset_hold(Object *obj)
static void riscv_cpu_disas_set_info(CPUState *s, disassemble_info *info)
{
RISCVCPU *cpu = RISCV_CPU(s);
+ CPURISCVState *env = &cpu->env;
info->target_info = &cpu->cfg;
- switch (riscv_cpu_mxl(&cpu->env)) {
+ switch (env->xl) {
case MXL_RV32:
info->print_insn = print_insn_riscv32;
break;
@@ -1050,6 +1109,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
+ if (cpu->cfg.ext_zfa && !riscv_has_ext(env, RVF)) {
+ error_setg(errp, "Zfa extension requires F extension");
+ return;
+ }
+
if (cpu->cfg.ext_zfh) {
cpu->cfg.ext_zfhmin = true;
}
@@ -1059,6 +1123,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
+ if (cpu->cfg.ext_zfbfmin && !riscv_has_ext(env, RVF)) {
+ error_setg(errp, "Zfbfmin extension depends on F extension");
+ return;
+ }
+
if (riscv_has_ext(env, RVD) && !riscv_has_ext(env, RVF)) {
error_setg(errp, "D extension requires F extension");
return;
@@ -1109,6 +1178,21 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp)
return;
}
+ if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zfbfmin) {
+ error_setg(errp, "Zvfbfmin extension depends on Zfbfmin extension");
+ return;
+ }
+
+ if (cpu->cfg.ext_zvfbfmin && !cpu->cfg.ext_zve32f) {
+ error_setg(errp, "Zvfbfmin extension depends on Zve32f extension");
+ return;
+ }
+
+ if (cpu->cfg.ext_zvfbfwma && !cpu->cfg.ext_zvfbfmin) {
+ error_setg(errp, "Zvfbfwma extension depends on Zvfbfmin extension");
+ return;
+ }
+
/* Set the ISA extensions, checks should have happened above */
if (cpu->cfg.ext_zhinx) {
cpu->cfg.ext_zhinxmin = true;
@@ -1304,20 +1388,12 @@ static void riscv_cpu_validate_misa_priv(CPURISCVState *env, Error **errp)
}
}
-static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+static void riscv_cpu_realize_tcg(DeviceState *dev, Error **errp)
{
- CPUState *cs = CPU(dev);
RISCVCPU *cpu = RISCV_CPU(dev);
CPURISCVState *env = &cpu->env;
- RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
Error *local_err = NULL;
- cpu_exec_realizefn(cs, &local_err);
- if (local_err != NULL) {
- error_propagate(errp, local_err);
- return;
- }
-
riscv_cpu_validate_misa_mxl(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
@@ -1352,7 +1428,7 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
}
#ifndef CONFIG_USER_ONLY
- cs->tcg_cflags |= CF_PCREL;
+ CPU(dev)->tcg_cflags |= CF_PCREL;
if (cpu->cfg.ext_sstc) {
riscv_timer_init(cpu);
@@ -1365,6 +1441,28 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp)
}
}
#endif
+}
+
+static void riscv_cpu_realize(DeviceState *dev, Error **errp)
+{
+ CPUState *cs = CPU(dev);
+ RISCVCPU *cpu = RISCV_CPU(dev);
+ RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(dev);
+ Error *local_err = NULL;
+
+ cpu_exec_realizefn(cs, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ if (tcg_enabled()) {
+ riscv_cpu_realize_tcg(dev, &local_err);
+ if (local_err != NULL) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ }
riscv_cpu_finalize_features(cpu, &local_err);
if (local_err != NULL) {
@@ -1545,33 +1643,83 @@ static void cpu_get_misa_ext_cfg(Object *obj, Visitor *v, const char *name,
visit_type_bool(v, name, &value, errp);
}
-static const RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
- {.name = "a", .description = "Atomic instructions",
- .misa_bit = RVA, .enabled = true},
- {.name = "c", .description = "Compressed instructions",
- .misa_bit = RVC, .enabled = true},
- {.name = "d", .description = "Double-precision float point",
- .misa_bit = RVD, .enabled = true},
- {.name = "f", .description = "Single-precision float point",
- .misa_bit = RVF, .enabled = true},
- {.name = "i", .description = "Base integer instruction set",
- .misa_bit = RVI, .enabled = true},
- {.name = "e", .description = "Base integer instruction set (embedded)",
- .misa_bit = RVE, .enabled = false},
- {.name = "m", .description = "Integer multiplication and division",
- .misa_bit = RVM, .enabled = true},
- {.name = "s", .description = "Supervisor-level instructions",
- .misa_bit = RVS, .enabled = true},
- {.name = "u", .description = "User-level instructions",
- .misa_bit = RVU, .enabled = true},
- {.name = "h", .description = "Hypervisor",
- .misa_bit = RVH, .enabled = true},
- {.name = "x-j", .description = "Dynamic translated languages",
- .misa_bit = RVJ, .enabled = false},
- {.name = "v", .description = "Vector operations",
- .misa_bit = RVV, .enabled = false},
- {.name = "g", .description = "General purpose (IMAFD_Zicsr_Zifencei)",
- .misa_bit = RVG, .enabled = false},
+typedef struct misa_ext_info {
+ const char *name;
+ const char *description;
+} MISAExtInfo;
+
+#define MISA_INFO_IDX(_bit) \
+ __builtin_ctz(_bit)
+
+#define MISA_EXT_INFO(_bit, _propname, _descr) \
+ [MISA_INFO_IDX(_bit)] = {.name = _propname, .description = _descr}
+
+static const MISAExtInfo misa_ext_info_arr[] = {
+ MISA_EXT_INFO(RVA, "a", "Atomic instructions"),
+ MISA_EXT_INFO(RVC, "c", "Compressed instructions"),
+ MISA_EXT_INFO(RVD, "d", "Double-precision float point"),
+ MISA_EXT_INFO(RVF, "f", "Single-precision float point"),
+ MISA_EXT_INFO(RVI, "i", "Base integer instruction set"),
+ MISA_EXT_INFO(RVE, "e", "Base integer instruction set (embedded)"),
+ MISA_EXT_INFO(RVM, "m", "Integer multiplication and division"),
+ MISA_EXT_INFO(RVS, "s", "Supervisor-level instructions"),
+ MISA_EXT_INFO(RVU, "u", "User-level instructions"),
+ MISA_EXT_INFO(RVH, "h", "Hypervisor"),
+ MISA_EXT_INFO(RVJ, "x-j", "Dynamic translated languages"),
+ MISA_EXT_INFO(RVV, "v", "Vector operations"),
+ MISA_EXT_INFO(RVG, "g", "General purpose (IMAFD_Zicsr_Zifencei)"),
+};
+
+static int riscv_validate_misa_info_idx(uint32_t bit)
+{
+ int idx;
+
+ /*
+ * Our lowest valid input (RVA) is 1 and
+ * __builtin_ctz() is UB with zero.
+ */
+ g_assert(bit != 0);
+ idx = MISA_INFO_IDX(bit);
+
+ g_assert(idx < ARRAY_SIZE(misa_ext_info_arr));
+ return idx;
+}
+
+const char *riscv_get_misa_ext_name(uint32_t bit)
+{
+ int idx = riscv_validate_misa_info_idx(bit);
+ const char *val = misa_ext_info_arr[idx].name;
+
+ g_assert(val != NULL);
+ return val;
+}
+
+const char *riscv_get_misa_ext_description(uint32_t bit)
+{
+ int idx = riscv_validate_misa_info_idx(bit);
+ const char *val = misa_ext_info_arr[idx].description;
+
+ g_assert(val != NULL);
+ return val;
+}
+
+#define MISA_CFG(_bit, _enabled) \
+ {.misa_bit = _bit, .enabled = _enabled}
+
+static RISCVCPUMisaExtConfig misa_ext_cfgs[] = {
+ MISA_CFG(RVA, true),
+ MISA_CFG(RVC, true),
+ MISA_CFG(RVD, true),
+ MISA_CFG(RVF, true),
+ MISA_CFG(RVI, true),
+ MISA_CFG(RVE, false),
+ MISA_CFG(RVM, true),
+ MISA_CFG(RVS, true),
+ MISA_CFG(RVU, true),
+ MISA_CFG(RVH, true),
+ MISA_CFG(RVJ, false),
+ MISA_CFG(RVV, false),
+ MISA_CFG(RVG, false),
};
static void riscv_cpu_add_misa_properties(Object *cpu_obj)
@@ -1579,7 +1727,16 @@ static void riscv_cpu_add_misa_properties(Object *cpu_obj)
int i;
for (i = 0; i < ARRAY_SIZE(misa_ext_cfgs); i++) {
- const RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
+ RISCVCPUMisaExtConfig *misa_cfg = &misa_ext_cfgs[i];
+ int bit = misa_cfg->misa_bit;
+
+ misa_cfg->name = riscv_get_misa_ext_name(bit);
+ misa_cfg->description = riscv_get_misa_ext_description(bit);
+
+ /* Check if KVM already created the property */
+ if (object_property_find(cpu_obj, misa_cfg->name)) {
+ continue;
+ }
object_property_add(cpu_obj, misa_cfg->name, "bool",
cpu_get_misa_ext_cfg,
@@ -1600,6 +1757,7 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("Zicsr", RISCVCPU, cfg.ext_icsr, true),
DEFINE_PROP_BOOL("Zihintpause", RISCVCPU, cfg.ext_zihintpause, true),
DEFINE_PROP_BOOL("Zawrs", RISCVCPU, cfg.ext_zawrs, true),
+ DEFINE_PROP_BOOL("Zfa", RISCVCPU, cfg.ext_zfa, true),
DEFINE_PROP_BOOL("Zfh", RISCVCPU, cfg.ext_zfh, false),
DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false),
DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false),
@@ -1683,9 +1841,33 @@ static Property riscv_cpu_extensions[] = {
DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false),
DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false),
+ DEFINE_PROP_BOOL("x-zfbfmin", RISCVCPU, cfg.ext_zfbfmin, false),
+ DEFINE_PROP_BOOL("x-zvfbfmin", RISCVCPU, cfg.ext_zvfbfmin, false),
+ DEFINE_PROP_BOOL("x-zvfbfwma", RISCVCPU, cfg.ext_zvfbfwma, false),
+
DEFINE_PROP_END_OF_LIST(),
};
+
+#ifndef CONFIG_USER_ONLY
+static void cpu_set_cfg_unavailable(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ const char *propname = opaque;
+ bool value;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ if (value) {
+ error_setg(errp, "extension %s is not available with KVM",
+ propname);
+ }
+}
+#endif
+
/*
* Add CPU properties with user-facing flags.
*
@@ -1697,24 +1879,48 @@ static void riscv_cpu_add_user_properties(Object *obj)
Property *prop;
DeviceState *dev = DEVICE(obj);
- riscv_cpu_add_misa_properties(obj);
+#ifndef CONFIG_USER_ONLY
+ riscv_add_satp_mode_properties(obj);
- for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
- qdev_property_add_static(dev, prop);
+ if (kvm_enabled()) {
+ kvm_riscv_init_user_properties(obj);
}
+#endif
+ riscv_cpu_add_misa_properties(obj);
+
+ for (prop = riscv_cpu_extensions; prop && prop->name; prop++) {
#ifndef CONFIG_USER_ONLY
- riscv_add_satp_mode_properties(obj);
+ if (kvm_enabled()) {
+ /* Check if KVM created the property already */
+ if (object_property_find(obj, prop->name)) {
+ continue;
+ }
+
+ /*
+ * Set the default to disabled for every extension
+ * unknown to KVM and error out if the user attempts
+ * to enable any of them.
+ *
+ * We're giving a pass for non-bool properties since they're
+ * not related to the availability of extensions and can be
+ * safely ignored as is.
+ */
+ if (prop->info == &qdev_prop_bool) {
+ object_property_add(obj, prop->name, "bool",
+ NULL, cpu_set_cfg_unavailable,
+ NULL, (void *)prop->name);
+ continue;
+ }
+ }
#endif
+ qdev_property_add_static(dev, prop);
+ }
}
static Property riscv_cpu_properties[] = {
DEFINE_PROP_BOOL("debug", RISCVCPU, cfg.debug, true),
- DEFINE_PROP_UINT32("mvendorid", RISCVCPU, cfg.mvendorid, 0),
- DEFINE_PROP_UINT64("marchid", RISCVCPU, cfg.marchid, RISCV_CPU_MARCHID),
- DEFINE_PROP_UINT64("mimpid", RISCVCPU, cfg.mimpid, RISCV_CPU_MIMPID),
-
#ifndef CONFIG_USER_ONLY
DEFINE_PROP_UINT64("resetvec", RISCVCPU, env.resetvec, DEFAULT_RSTVEC),
#endif
@@ -1798,6 +2004,119 @@ static const struct TCGCPUOps riscv_tcg_ops = {
#endif /* !CONFIG_USER_ONLY */
};
+static bool riscv_cpu_is_dynamic(Object *cpu_obj)
+{
+ return object_dynamic_cast(cpu_obj, TYPE_RISCV_DYNAMIC_CPU) != NULL;
+}
+
+static void cpu_set_mvendorid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint32_t prev_val = cpu->cfg.mvendorid;
+ uint32_t value;
+
+ if (!visit_type_uint32(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s mvendorid (0x%x)",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ cpu->cfg.mvendorid = value;
+}
+
+static void cpu_get_mvendorid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint32_t value = RISCV_CPU(obj)->cfg.mvendorid;
+
+ visit_type_uint32(v, name, &value, errp);
+}
+
+static void cpu_set_mimpid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint64_t prev_val = cpu->cfg.mimpid;
+ uint64_t value;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s mimpid (0x%" PRIu64 ")",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ cpu->cfg.mimpid = value;
+}
+
+static void cpu_get_mimpid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = RISCV_CPU(obj)->cfg.mimpid;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
+static void cpu_set_marchid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ bool dynamic_cpu = riscv_cpu_is_dynamic(obj);
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint64_t prev_val = cpu->cfg.marchid;
+ uint64_t value, invalid_val;
+ uint32_t mxlen = 0;
+
+ if (!visit_type_uint64(v, name, &value, errp)) {
+ return;
+ }
+
+ if (!dynamic_cpu && prev_val != value) {
+ error_setg(errp, "Unable to change %s marchid (0x%" PRIu64 ")",
+ object_get_typename(obj), prev_val);
+ return;
+ }
+
+ switch (riscv_cpu_mxl(&cpu->env)) {
+ case MXL_RV32:
+ mxlen = 32;
+ break;
+ case MXL_RV64:
+ case MXL_RV128:
+ mxlen = 64;
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ invalid_val = 1LL << (mxlen - 1);
+
+ if (value == invalid_val) {
+ error_setg(errp, "Unable to set marchid with MSB (%u) bit set "
+ "and the remaining bits zero", mxlen);
+ return;
+ }
+
+ cpu->cfg.marchid = value;
+}
+
+static void cpu_get_marchid(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
+{
+ uint64_t value = RISCV_CPU(obj)->cfg.marchid;
+
+ visit_type_uint64(v, name, &value, errp);
+}
+
static void riscv_cpu_class_init(ObjectClass *c, void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(c);
@@ -1829,6 +2148,15 @@ static void riscv_cpu_class_init(ObjectClass *c, void *data)
cc->gdb_get_dynamic_xml = riscv_gdb_get_dynamic_xml;
cc->tcg_ops = &riscv_tcg_ops;
+ object_class_property_add(c, "mvendorid", "uint32", cpu_get_mvendorid,
+ cpu_set_mvendorid, NULL, NULL);
+
+ object_class_property_add(c, "mimpid", "uint64", cpu_get_mimpid,
+ cpu_set_mimpid, NULL, NULL);
+
+ object_class_property_add(c, "marchid", "uint64", cpu_get_marchid,
+ cpu_set_marchid, NULL, NULL);
+
device_class_set_props(dc, riscv_cpu_properties);
}
@@ -1840,8 +2168,7 @@ static void riscv_isa_string_ext(RISCVCPU *cpu, char **isa_str,
int i;
for (i = 0; i < ARRAY_SIZE(isa_edata_arr); i++) {
- if (cpu->env.priv_ver >= isa_edata_arr[i].min_version &&
- isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
+ if (isa_ext_is_enabled(cpu, &isa_edata_arr[i])) {
new = g_strconcat(old, "_", isa_edata_arr[i].name, NULL);
g_free(old);
old = new;
diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h
index 7adb8706ac..6ea22e0eea 100644
--- a/target/riscv/cpu.h
+++ b/target/riscv/cpu.h
@@ -41,7 +41,10 @@
#define RV(x) ((target_ulong)1 << (x - 'A'))
-/* Consider updating misa_ext_cfgs[] when adding new MISA bits here */
+/*
+ * Consider updating misa_ext_info_arr[] and misa_ext_cfgs[]
+ * when adding new MISA bits here.
+ */
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
@@ -56,6 +59,8 @@
#define RVJ RV('J')
#define RVG RV('G')
+const char *riscv_get_misa_ext_name(uint32_t bit);
+const char *riscv_get_misa_ext_description(uint32_t bit);
/* Privileged specification version */
enum {
@@ -500,6 +505,7 @@ FIELD(TB_FLAGS, ITRIGGER, 22, 1)
/* Virtual mode enabled */
FIELD(TB_FLAGS, VIRT_ENABLED, 23, 1)
FIELD(TB_FLAGS, PRIV, 24, 2)
+FIELD(TB_FLAGS, AXL, 26, 2)
#ifdef TARGET_RISCV32
#define riscv_cpu_mxl(env) ((void)(env), MXL_RV32)
@@ -516,13 +522,20 @@ static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
return &env_archcpu(env)->cfg;
}
-#if defined(TARGET_RISCV32)
-#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
-#else
-static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
+#if !defined(CONFIG_USER_ONLY)
+static inline int cpu_address_mode(CPURISCVState *env)
+{
+ int mode = env->priv;
+
+ if (mode == PRV_M && get_field(env->mstatus, MSTATUS_MPRV)) {
+ mode = get_field(env->mstatus, MSTATUS_MPP);
+ }
+ return mode;
+}
+
+static inline RISCVMXL cpu_get_xl(CPURISCVState *env, target_ulong mode)
{
RISCVMXL xl = env->misa_mxl;
-#if !defined(CONFIG_USER_ONLY)
/*
* When emulating a 32-bit-only cpu, use RV32.
* When emulating a 64-bit cpu, and MXL has been reduced to RV32,
@@ -530,7 +543,7 @@ static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
* back to RV64 for lower privs.
*/
if (xl != MXL_RV32) {
- switch (env->priv) {
+ switch (mode) {
case PRV_M:
break;
case PRV_U:
@@ -541,11 +554,38 @@ static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
break;
}
}
-#endif
return xl;
}
#endif
+#if defined(TARGET_RISCV32)
+#define cpu_recompute_xl(env) ((void)(env), MXL_RV32)
+#else
+static inline RISCVMXL cpu_recompute_xl(CPURISCVState *env)
+{
+#if !defined(CONFIG_USER_ONLY)
+ return cpu_get_xl(env, env->priv);
+#else
+ return env->misa_mxl;
+#endif
+}
+#endif
+
+#if defined(TARGET_RISCV32)
+#define cpu_address_xl(env) ((void)(env), MXL_RV32)
+#else
+static inline RISCVMXL cpu_address_xl(CPURISCVState *env)
+{
+#ifdef CONFIG_USER_ONLY
+ return env->xl;
+#else
+ int mode = cpu_address_mode(env);
+
+ return cpu_get_xl(env, mode);
+#endif
+}
+#endif
+
static inline int riscv_cpu_xlen(CPURISCVState *env)
{
return 16 << env->xl;
diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h
index c4a627d335..2bd9510ba3 100644
--- a/target/riscv/cpu_cfg.h
+++ b/target/riscv/cpu_cfg.h
@@ -75,6 +75,8 @@ struct RISCVCPUConfig {
bool ext_svpbmt;
bool ext_zdinx;
bool ext_zawrs;
+ bool ext_zfa;
+ bool ext_zfbfmin;
bool ext_zfh;
bool ext_zfhmin;
bool ext_zfinx;
@@ -84,6 +86,8 @@ struct RISCVCPUConfig {
bool ext_zve64f;
bool ext_zve64d;
bool ext_zmmul;
+ bool ext_zvfbfmin;
+ bool ext_zvfbfwma;
bool ext_zvfh;
bool ext_zvfhmin;
bool ext_smaia;
@@ -133,4 +137,41 @@ struct RISCVCPUConfig {
};
typedef struct RISCVCPUConfig RISCVCPUConfig;
+
+/* Helper functions to test for extensions. */
+
+static inline bool always_true_p(const RISCVCPUConfig *cfg __attribute__((__unused__)))
+{
+ return true;
+}
+
+static inline bool has_xthead_p(const RISCVCPUConfig *cfg)
+{
+ return cfg->ext_xtheadba || cfg->ext_xtheadbb ||
+ cfg->ext_xtheadbs || cfg->ext_xtheadcmo ||
+ cfg->ext_xtheadcondmov ||
+ cfg->ext_xtheadfmemidx || cfg->ext_xtheadfmv ||
+ cfg->ext_xtheadmac || cfg->ext_xtheadmemidx ||
+ cfg->ext_xtheadmempair || cfg->ext_xtheadsync;
+}
+
+#define MATERIALISE_EXT_PREDICATE(ext) \
+ static inline bool has_ ## ext ## _p(const RISCVCPUConfig *cfg) \
+ { \
+ return cfg->ext_ ## ext ; \
+ }
+
+MATERIALISE_EXT_PREDICATE(xtheadba)
+MATERIALISE_EXT_PREDICATE(xtheadbb)
+MATERIALISE_EXT_PREDICATE(xtheadbs)
+MATERIALISE_EXT_PREDICATE(xtheadcmo)
+MATERIALISE_EXT_PREDICATE(xtheadcondmov)
+MATERIALISE_EXT_PREDICATE(xtheadfmemidx)
+MATERIALISE_EXT_PREDICATE(xtheadfmv)
+MATERIALISE_EXT_PREDICATE(xtheadmac)
+MATERIALISE_EXT_PREDICATE(xtheadmemidx)
+MATERIALISE_EXT_PREDICATE(xtheadmempair)
+MATERIALISE_EXT_PREDICATE(xtheadsync)
+MATERIALISE_EXT_PREDICATE(XVentanaCondOps)
+
#endif
diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c
index a944f25694..9f611d89bb 100644
--- a/target/riscv/cpu_helper.c
+++ b/target/riscv/cpu_helper.c
@@ -47,7 +47,8 @@ int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
if (mode == PRV_M && get_field(status, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
- virt = get_field(env->mstatus, MSTATUS_MPV);
+ virt = get_field(env->mstatus, MSTATUS_MPV) &&
+ (mode != PRV_M);
if (virt) {
status = env->vsstatus;
}
@@ -134,6 +135,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
flags = FIELD_DP32(flags, TB_FLAGS, FS, fs);
flags = FIELD_DP32(flags, TB_FLAGS, VS, vs);
flags = FIELD_DP32(flags, TB_FLAGS, XL, env->xl);
+ flags = FIELD_DP32(flags, TB_FLAGS, AXL, cpu_address_xl(env));
if (env->cur_pmmask != 0) {
flags = FIELD_DP32(flags, TB_FLAGS, PM_MASK_ENABLED, 1);
}
@@ -147,13 +149,16 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc,
void riscv_cpu_update_mask(CPURISCVState *env)
{
target_ulong mask = 0, base = 0;
+ RISCVMXL xl = env->xl;
/*
* TODO: Current RVJ spec does not specify
* how the extension interacts with XLEN.
*/
#ifndef CONFIG_USER_ONLY
+ int mode = cpu_address_mode(env);
+ xl = cpu_get_xl(env, mode);
if (riscv_has_ext(env, RVJ)) {
- switch (env->priv) {
+ switch (mode) {
case PRV_M:
if (env->mmte & M_PM_ENABLE) {
mask = env->mpmmask;
@@ -177,7 +182,7 @@ void riscv_cpu_update_mask(CPURISCVState *env)
}
}
#endif
- if (env->xl == MXL_RV32) {
+ if (xl == MXL_RV32) {
env->cur_pmmask = mask & UINT32_MAX;
env->cur_pmbase = base & UINT32_MAX;
} else {
@@ -1277,7 +1282,6 @@ bool riscv_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
if (ret == TRANSLATE_G_STAGE_FAIL) {
first_stage_error = false;
two_stage_indirect_error = true;
- access_type = MMU_DATA_LOAD;
}
qemu_log_mask(CPU_LOG_MMU,
diff --git a/target/riscv/csr.c b/target/riscv/csr.c
index 58499b5afc..ea7585329e 100644
--- a/target/riscv/csr.c
+++ b/target/riscv/csr.c
@@ -1311,11 +1311,9 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
}
if (xl != MXL_RV32 || env->debugger) {
- /*
- * RV32: MPV and GVA are not in mstatus. The current plan is to
- * add them to mstatush. For now, we just don't support it.
- */
- mask |= MSTATUS_MPV | MSTATUS_GVA;
+ if (riscv_has_ext(env, RVH)) {
+ mask |= MSTATUS_MPV | MSTATUS_GVA;
+ }
if ((val & MSTATUS64_UXL) != 0) {
mask |= MSTATUS64_UXL;
}
@@ -1323,10 +1321,6 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
mstatus = (mstatus & ~mask) | (val & mask);
- if (xl > MXL_RV32) {
- /* SXL field is for now read only */
- mstatus = set_field(mstatus, MSTATUS64_SXL, xl);
- }
env->mstatus = mstatus;
/*
@@ -1335,8 +1329,9 @@ static RISCVException write_mstatus(CPURISCVState *env, int csrno,
*/
if (env->debugger) {
env->xl = cpu_recompute_xl(env);
- riscv_cpu_update_mask(env);
}
+
+ riscv_cpu_update_mask(env);
return RISCV_EXCP_NONE;
}
@@ -1351,7 +1346,7 @@ static RISCVException write_mstatush(CPURISCVState *env, int csrno,
target_ulong val)
{
uint64_t valh = (uint64_t)val << 32;
- uint64_t mask = MSTATUS_MPV | MSTATUS_GVA;
+ uint64_t mask = riscv_has_ext(env, RVH) ? MSTATUS_MPV | MSTATUS_GVA : 0;
env->mstatus = (env->mstatus & ~mask) | (valh & mask);
@@ -3639,7 +3634,7 @@ static RISCVException write_mpmmask(CPURISCVState *env, int csrno,
uint64_t mstatus;
env->mpmmask = val;
- if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
env->cur_pmmask = val;
}
env->mmte |= EXT_STATUS_DIRTY;
@@ -3667,8 +3662,11 @@ static RISCVException write_spmmask(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
env->spmmask = val;
- if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
env->cur_pmmask = val;
+ if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
+ env->cur_pmmask &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
@@ -3695,8 +3693,11 @@ static RISCVException write_upmmask(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
env->upmmask = val;
- if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
env->cur_pmmask = val;
+ if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
+ env->cur_pmmask &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
@@ -3719,7 +3720,7 @@ static RISCVException write_mpmbase(CPURISCVState *env, int csrno,
uint64_t mstatus;
env->mpmbase = val;
- if ((env->priv == PRV_M) && (env->mmte & M_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_M) && (env->mmte & M_PM_ENABLE)) {
env->cur_pmbase = val;
}
env->mmte |= EXT_STATUS_DIRTY;
@@ -3747,8 +3748,11 @@ static RISCVException write_spmbase(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
env->spmbase = val;
- if ((env->priv == PRV_S) && (env->mmte & S_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_S) && (env->mmte & S_PM_ENABLE)) {
env->cur_pmbase = val;
+ if (cpu_get_xl(env, PRV_S) == MXL_RV32) {
+ env->cur_pmbase &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
@@ -3775,8 +3779,11 @@ static RISCVException write_upmbase(CPURISCVState *env, int csrno,
return RISCV_EXCP_NONE;
}
env->upmbase = val;
- if ((env->priv == PRV_U) && (env->mmte & U_PM_ENABLE)) {
+ if ((cpu_address_mode(env) == PRV_U) && (env->mmte & U_PM_ENABLE)) {
env->cur_pmbase = val;
+ if (cpu_get_xl(env, PRV_U) == MXL_RV32) {
+ env->cur_pmbase &= UINT32_MAX;
+ }
}
env->mmte |= EXT_STATUS_DIRTY;
diff --git a/target/riscv/fpu_helper.c b/target/riscv/fpu_helper.c
index 5dd14d8390..871a70a316 100644
--- a/target/riscv/fpu_helper.c
+++ b/target/riscv/fpu_helper.c
@@ -252,6 +252,14 @@ uint64_t helper_fmin_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
float32_minimum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fminm_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ float32 ret = float32_min(frs1, frs2, &env->fp_status);
+ return nanbox_s(env, ret);
+}
+
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
@@ -261,6 +269,14 @@ uint64_t helper_fmax_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
float32_maximum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fmaxm_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ float32 ret = float32_max(frs1, frs2, &env->fp_status);
+ return nanbox_s(env, ret);
+}
+
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t rs1)
{
float32 frs1 = check_nanbox_s(env, rs1);
@@ -274,6 +290,13 @@ target_ulong helper_fle_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
return float32_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ return float32_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
@@ -281,6 +304,13 @@ target_ulong helper_flt_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
return float32_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ float32 frs2 = check_nanbox_s(env, rs2);
+ return float32_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_s(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float32 frs1 = check_nanbox_s(env, rs1);
@@ -338,6 +368,30 @@ target_ulong helper_fclass_s(CPURISCVState *env, uint64_t rs1)
return fclass_s(frs1);
}
+uint64_t helper_fround_s(CPURISCVState *env, uint64_t rs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+ float32 frs1 = check_nanbox_s(env, rs1);
+
+ frs1 = float32_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return nanbox_s(env, frs1);
+}
+
+uint64_t helper_froundnx_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ frs1 = float32_round_to_int(frs1, &env->fp_status);
+ return nanbox_s(env, frs1);
+}
+
uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_add(frs1, frs2, &env->fp_status);
@@ -365,6 +419,11 @@ uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
float64_minimum_number(frs1, frs2, &env->fp_status);
}
+uint64_t helper_fminm_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_min(frs1, frs2, &env->fp_status);
+}
+
uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return env->priv_ver < PRIV_VERSION_1_11_0 ?
@@ -372,6 +431,11 @@ uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
float64_maximum_number(frs1, frs2, &env->fp_status);
}
+uint64_t helper_fmaxm_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_max(frs1, frs2, &env->fp_status);
+}
+
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
{
return nanbox_s(env, float64_to_float32(rs1, &env->fp_status));
@@ -393,11 +457,21 @@ target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
return float64_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
+{
+ return float64_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_eq_quiet(frs1, frs2, &env->fp_status);
@@ -408,6 +482,11 @@ target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1)
return float64_to_int32(frs1, &env->fp_status);
}
+uint64_t helper_fcvtmod_w_d(CPURISCVState *env, uint64_t value)
+{
+ return float64_to_int32_modulo(value, float_round_to_zero, &env->fp_status);
+}
+
target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1)
{
return (int32_t)float64_to_uint32(frs1, &env->fp_status);
@@ -448,6 +527,27 @@ target_ulong helper_fclass_d(uint64_t frs1)
return fclass_d(frs1);
}
+uint64_t helper_fround_d(CPURISCVState *env, uint64_t frs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+
+ frs1 = float64_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return frs1;
+}
+
+uint64_t helper_froundnx_d(CPURISCVState *env, uint64_t frs1)
+{
+ return float64_round_to_int(frs1, &env->fp_status);
+}
+
uint64_t helper_fadd_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -485,6 +585,14 @@ uint64_t helper_fmin_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
float16_minimum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fminm_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ float16 ret = float16_min(frs1, frs2, &env->fp_status);
+ return nanbox_h(env, ret);
+}
+
uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -494,6 +602,14 @@ uint64_t helper_fmax_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
float16_maximum_number(frs1, frs2, &env->fp_status));
}
+uint64_t helper_fmaxm_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ float16 ret = float16_max(frs1, frs2, &env->fp_status);
+ return nanbox_h(env, ret);
+}
+
uint64_t helper_fsqrt_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -507,6 +623,13 @@ target_ulong helper_fle_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
return float16_le(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fleq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ return float16_le_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -514,6 +637,13 @@ target_ulong helper_flt_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
return float16_lt(frs1, frs2, &env->fp_status);
}
+target_ulong helper_fltq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ float16 frs2 = check_nanbox_h(env, rs2);
+ return float16_lt_quiet(frs1, frs2, &env->fp_status);
+}
+
target_ulong helper_feq_h(CPURISCVState *env, uint64_t rs1, uint64_t rs2)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -527,6 +657,30 @@ target_ulong helper_fclass_h(CPURISCVState *env, uint64_t rs1)
return fclass_h(frs1);
}
+uint64_t helper_fround_h(CPURISCVState *env, uint64_t rs1)
+{
+ float_status *fs = &env->fp_status;
+ uint16_t nx_old = get_float_exception_flags(fs) & float_flag_inexact;
+ float16 frs1 = check_nanbox_h(env, rs1);
+
+ frs1 = float16_round_to_int(frs1, fs);
+
+ /* Restore the original NX flag. */
+ uint16_t flags = get_float_exception_flags(fs);
+ flags &= ~float_flag_inexact;
+ flags |= nx_old;
+ set_float_exception_flags(flags, fs);
+
+ return nanbox_h(env, frs1);
+}
+
+uint64_t helper_froundnx_h(CPURISCVState *env, uint64_t rs1)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ frs1 = float16_round_to_int(frs1, &env->fp_status);
+ return nanbox_h(env, frs1);
+}
+
target_ulong helper_fcvt_w_h(CPURISCVState *env, uint64_t rs1)
{
float16 frs1 = check_nanbox_h(env, rs1);
@@ -593,3 +747,15 @@ uint64_t helper_fcvt_d_h(CPURISCVState *env, uint64_t rs1)
float16 frs1 = check_nanbox_h(env, rs1);
return float16_to_float64(frs1, true, &env->fp_status);
}
+
+uint64_t helper_fcvt_bf16_s(CPURISCVState *env, uint64_t rs1)
+{
+ float32 frs1 = check_nanbox_s(env, rs1);
+ return nanbox_h(env, float32_to_bfloat16(frs1, &env->fp_status));
+}
+
+uint64_t helper_fcvt_s_bf16(CPURISCVState *env, uint64_t rs1)
+{
+ float16 frs1 = check_nanbox_h(env, rs1);
+ return nanbox_s(env, bfloat16_to_float32(frs1, &env->fp_status));
+}
diff --git a/target/riscv/helper.h b/target/riscv/helper.h
index 98e97810fd..c95adaf08a 100644
--- a/target/riscv/helper.h
+++ b/target/riscv/helper.h
@@ -25,10 +25,14 @@ DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64)
@@ -39,6 +43,8 @@ DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fclass_s, TCG_CALL_NO_RWG_SE, tl, env, i64)
+DEF_HELPER_FLAGS_2(fround_s, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_s, TCG_CALL_NO_RWG_SE, i64, env, i64)
/* Floating Point - Double Precision */
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
@@ -46,14 +52,19 @@ DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64)
+DEF_HELPER_FLAGS_2(fcvtmod_w_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64)
@@ -62,6 +73,8 @@ DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
+DEF_HELPER_FLAGS_2(fround_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
/* Bitmanip */
DEF_HELPER_FLAGS_2(clmul, TCG_CALL_NO_RWG_SE, tl, tl, tl)
@@ -78,10 +91,14 @@ DEF_HELPER_FLAGS_3(fsub_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fminm_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
+DEF_HELPER_FLAGS_3(fmaxm_h, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fleq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
+DEF_HELPER_FLAGS_3(fltq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_h, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_h, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_h_s, TCG_CALL_NO_RWG, i64, env, i64)
@@ -96,6 +113,8 @@ DEF_HELPER_FLAGS_2(fcvt_h_wu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_h_lu, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fclass_h, TCG_CALL_NO_RWG_SE, tl, env, i64)
+DEF_HELPER_FLAGS_2(fround_h, TCG_CALL_NO_RWG_SE, i64, env, i64)
+DEF_HELPER_FLAGS_2(froundnx_h, TCG_CALL_NO_RWG_SE, i64, env, i64)
/* Cache-block operations */
DEF_HELPER_2(cbo_clean_flush, void, env, tl)
@@ -1153,3 +1172,13 @@ DEF_HELPER_FLAGS_3(sm4ks, TCG_CALL_NO_RWG_SE, tl, tl, tl, tl)
/* Zce helper */
DEF_HELPER_FLAGS_2(cm_jalt, TCG_CALL_NO_WG, tl, env, i32)
+
+/* BF16 functions */
+DEF_HELPER_FLAGS_2(fcvt_bf16_s, TCG_CALL_NO_RWG, i64, env, i64)
+DEF_HELPER_FLAGS_2(fcvt_s_bf16, TCG_CALL_NO_RWG, i64, env, i64)
+
+DEF_HELPER_5(vfncvtbf16_f_f_w, void, ptr, ptr, ptr, env, i32)
+DEF_HELPER_5(vfwcvtbf16_f_f_v, void, ptr, ptr, ptr, env, i32)
+
+DEF_HELPER_6(vfwmaccbf16_vv, void, ptr, ptr, ptr, ptr, env, i32)
+DEF_HELPER_6(vfwmaccbf16_vf, void, ptr, ptr, i64, ptr, env, i32)
diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode
index 73d5d1b045..e341fa9213 100644
--- a/target/riscv/insn32.decode
+++ b/target/riscv/insn32.decode
@@ -821,6 +821,32 @@ binvi 01101. ........... 001 ..... 0010011 @sh
bset 0010100 .......... 001 ..... 0110011 @r
bseti 00101. ........... 001 ..... 0010011 @sh
+# *** Zfa Standard Extension ***
+fli_s 1111000 00001 ..... 000 ..... 1010011 @r2
+fli_d 1111001 00001 ..... 000 ..... 1010011 @r2
+fli_h 1111010 00001 ..... 000 ..... 1010011 @r2
+fminm_s 0010100 ..... ..... 010 ..... 1010011 @r
+fmaxm_s 0010100 ..... ..... 011 ..... 1010011 @r
+fminm_d 0010101 ..... ..... 010 ..... 1010011 @r
+fmaxm_d 0010101 ..... ..... 011 ..... 1010011 @r
+fminm_h 0010110 ..... ..... 010 ..... 1010011 @r
+fmaxm_h 0010110 ..... ..... 011 ..... 1010011 @r
+fround_s 0100000 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_s 0100000 00101 ..... ... ..... 1010011 @r2_rm
+fround_d 0100001 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_d 0100001 00101 ..... ... ..... 1010011 @r2_rm
+fround_h 0100010 00100 ..... ... ..... 1010011 @r2_rm
+froundnx_h 0100010 00101 ..... ... ..... 1010011 @r2_rm
+fcvtmod_w_d 1100001 01000 ..... 001 ..... 1010011 @r2
+fmvh_x_d 1110001 00001 ..... 000 ..... 1010011 @r2
+fmvp_d_x 1011001 ..... ..... 000 ..... 1010011 @r
+fleq_s 1010000 ..... ..... 100 ..... 1010011 @r
+fltq_s 1010000 ..... ..... 101 ..... 1010011 @r
+fleq_d 1010001 ..... ..... 100 ..... 1010011 @r
+fltq_d 1010001 ..... ..... 101 ..... 1010011 @r
+fleq_h 1010010 ..... ..... 100 ..... 1010011 @r
+fltq_h 1010010 ..... ..... 101 ..... 1010011 @r
+
# *** RV32 Zfh Extension ***
flh ............ ..... 001 ..... 0000111 @i
fsh ....... ..... ..... 001 ..... 0100111 @s
@@ -908,3 +934,15 @@ sm4ks .. 11010 ..... ..... 000 ..... 0110011 @k_aes
# *** RV32 Zicond Standard Extension ***
czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r
czero_nez 0000111 ..... ..... 111 ..... 0110011 @r
+
+# *** Zfbfmin Standard Extension ***
+fcvt_bf16_s 0100010 01000 ..... ... ..... 1010011 @r2_rm
+fcvt_s_bf16 0100000 00110 ..... ... ..... 1010011 @r2_rm
+
+# *** Zvfbfmin Standard Extension ***
+vfncvtbf16_f_f_w 010010 . ..... 11101 001 ..... 1010111 @r2_vm
+vfwcvtbf16_f_f_v 010010 . ..... 01101 001 ..... 1010111 @r2_vm
+
+# *** Zvfbfwma Standard Extension ***
+vfwmaccbf16_vv 111011 . ..... ..... 001 ..... 1010111 @r_vm
+vfwmaccbf16_vf 111011 . ..... ..... 101 ..... 1010111 @r_vm
diff --git a/target/riscv/insn_trans/trans_rvbf16.c.inc b/target/riscv/insn_trans/trans_rvbf16.c.inc
new file mode 100644
index 0000000000..911bc29908
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvbf16.c.inc
@@ -0,0 +1,175 @@
+/*
+ * RISC-V translation routines for the BF16 Standard Extensions.
+ *
+ * Copyright (c) 2020-2023 PLCT Lab
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfbfmin) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZVFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zvfbfmin) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZVFBFWMA(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zvfbfwma) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_fcvt_bf16_s(DisasContext *ctx, arg_fcvt_bf16_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFBFMIN(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_bf16_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fcvt_s_bf16(DisasContext *ctx, arg_fcvt_s_bf16 *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFBFMIN(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fcvt_s_bf16(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_vfncvtbf16_f_f_w(DisasContext *ctx, arg_vfncvtbf16_f_f_w *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFMIN(ctx);
+
+ if (opfv_narrow_check(ctx, a) && (ctx->sew == MO_16)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfncvtbf16_f_f_w);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwcvtbf16_f_f_v(DisasContext *ctx, arg_vfwcvtbf16_f_f_v *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFMIN(ctx);
+
+ if (opfv_widen_check(ctx, a) && (ctx->sew == MO_16)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_3_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfwcvtbf16_f_f_v);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwmaccbf16_vv(DisasContext *ctx, arg_vfwmaccbf16_vv *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFWMA(ctx);
+
+ if (require_rvv(ctx) && vext_check_isa_ill(ctx) && (ctx->sew == MO_16) &&
+ vext_check_dss(ctx, a->rd, a->rs1, a->rs2, a->vm)) {
+ uint32_t data = 0;
+ TCGLabel *over = gen_new_label();
+
+ gen_set_rm_chkfrm(ctx, RISCV_FRM_DYN);
+ tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_vl, 0, over);
+ tcg_gen_brcond_tl(TCG_COND_GEU, cpu_vstart, cpu_vl, over);
+
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ tcg_gen_gvec_4_ptr(vreg_ofs(ctx, a->rd), vreg_ofs(ctx, 0),
+ vreg_ofs(ctx, a->rs1),
+ vreg_ofs(ctx, a->rs2), cpu_env,
+ ctx->cfg_ptr->vlen / 8,
+ ctx->cfg_ptr->vlen / 8, data,
+ gen_helper_vfwmaccbf16_vv);
+ mark_vs_dirty(ctx);
+ gen_set_label(over);
+ return true;
+ }
+ return false;
+}
+
+static bool trans_vfwmaccbf16_vf(DisasContext *ctx, arg_vfwmaccbf16_vf *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZVFBFWMA(ctx);
+
+ if (require_rvv(ctx) && (ctx->sew == MO_16) && vext_check_isa_ill(ctx) &&
+ vext_check_ds(ctx, a->rd, a->rs2, a->vm)) {
+ uint32_t data = 0;
+
+ gen_set_rm(ctx, RISCV_FRM_DYN);
+ data = FIELD_DP32(data, VDATA, VM, a->vm);
+ data = FIELD_DP32(data, VDATA, LMUL, ctx->lmul);
+ data = FIELD_DP32(data, VDATA, VTA, ctx->vta);
+ data = FIELD_DP32(data, VDATA, VMA, ctx->vma);
+ return opfvf_trans(a->rd, a->rs1, a->rs2, data,
+ gen_helper_vfwmaccbf16_vf, ctx);
+ }
+
+ return false;
+}
diff --git a/target/riscv/insn_trans/trans_rvzfa.c.inc b/target/riscv/insn_trans/trans_rvzfa.c.inc
new file mode 100644
index 0000000000..2c715af3e5
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzfa.c.inc
@@ -0,0 +1,521 @@
+/*
+ * RISC-V translation routines for the Zfa Standard Extension.
+ *
+ * Copyright (c) 2023 Christoph Müllner, christoph.muellner@vrull.eu
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZFA(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfa) { \
+ return false; \
+ } \
+} while (0)
+
+#define REQUIRE_ZFH(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfh) { \
+ return false; \
+ } \
+} while (0)
+
+static bool trans_fli_s(DisasContext *ctx, arg_fli_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ /* Values below are NaN-boxed to avoid a gen_nanbox_s(). */
+ static const uint64_t fli_s_table[] = {
+ 0xffffffffbf800000, /* -1.0 */
+ 0xffffffff00800000, /* minimum positive normal */
+ 0xffffffff37800000, /* 1.0 * 2^-16 */
+ 0xffffffff38000000, /* 1.0 * 2^-15 */
+ 0xffffffff3b800000, /* 1.0 * 2^-8 */
+ 0xffffffff3c000000, /* 1.0 * 2^-7 */
+ 0xffffffff3d800000, /* 1.0 * 2^-4 */
+ 0xffffffff3e000000, /* 1.0 * 2^-3 */
+ 0xffffffff3e800000, /* 0.25 */
+ 0xffffffff3ea00000, /* 0.3125 */
+ 0xffffffff3ec00000, /* 0.375 */
+ 0xffffffff3ee00000, /* 0.4375 */
+ 0xffffffff3f000000, /* 0.5 */
+ 0xffffffff3f200000, /* 0.625 */
+ 0xffffffff3f400000, /* 0.75 */
+ 0xffffffff3f600000, /* 0.875 */
+ 0xffffffff3f800000, /* 1.0 */
+ 0xffffffff3fa00000, /* 1.25 */
+ 0xffffffff3fc00000, /* 1.5 */
+ 0xffffffff3fe00000, /* 1.75 */
+ 0xffffffff40000000, /* 2.0 */
+ 0xffffffff40200000, /* 2.5 */
+ 0xffffffff40400000, /* 3 */
+ 0xffffffff40800000, /* 4 */
+ 0xffffffff41000000, /* 8 */
+ 0xffffffff41800000, /* 16 */
+ 0xffffffff43000000, /* 2^7 */
+ 0xffffffff43800000, /* 2^8 */
+ 0xffffffff47000000, /* 2^15 */
+ 0xffffffff47800000, /* 2^16 */
+ 0xffffffff7f800000, /* +inf */
+ 0xffffffff7fc00000, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_s_table[a->rs1]);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fli_d(DisasContext *ctx, arg_fli_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ static const uint64_t fli_d_table[] = {
+ 0xbff0000000000000, /* -1.0 */
+ 0x0010000000000000, /* minimum positive normal */
+ 0x3ef0000000000000, /* 1.0 * 2^-16 */
+ 0x3f00000000000000, /* 1.0 * 2^-15 */
+ 0x3f70000000000000, /* 1.0 * 2^-8 */
+ 0x3f80000000000000, /* 1.0 * 2^-7 */
+ 0x3fb0000000000000, /* 1.0 * 2^-4 */
+ 0x3fc0000000000000, /* 1.0 * 2^-3 */
+ 0x3fd0000000000000, /* 0.25 */
+ 0x3fd4000000000000, /* 0.3125 */
+ 0x3fd8000000000000, /* 0.375 */
+ 0x3fdc000000000000, /* 0.4375 */
+ 0x3fe0000000000000, /* 0.5 */
+ 0x3fe4000000000000, /* 0.625 */
+ 0x3fe8000000000000, /* 0.75 */
+ 0x3fec000000000000, /* 0.875 */
+ 0x3ff0000000000000, /* 1.0 */
+ 0x3ff4000000000000, /* 1.25 */
+ 0x3ff8000000000000, /* 1.5 */
+ 0x3ffc000000000000, /* 1.75 */
+ 0x4000000000000000, /* 2.0 */
+ 0x4004000000000000, /* 2.5 */
+ 0x4008000000000000, /* 3 */
+ 0x4010000000000000, /* 4 */
+ 0x4020000000000000, /* 8 */
+ 0x4030000000000000, /* 16 */
+ 0x4060000000000000, /* 2^7 */
+ 0x4070000000000000, /* 2^8 */
+ 0x40e0000000000000, /* 2^15 */
+ 0x40f0000000000000, /* 2^16 */
+ 0x7ff0000000000000, /* +inf */
+ 0x7ff8000000000000, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_d_table[a->rs1]);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fli_h(DisasContext *ctx, arg_fli_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ /* Values below are NaN-boxed to avoid a gen_nanbox_h(). */
+ static const uint64_t fli_h_table[] = {
+ 0xffffffffffffbc00, /* -1.0 */
+ 0xffffffffffff0400, /* minimum positive normal */
+ 0xffffffffffff0100, /* 1.0 * 2^-16 */
+ 0xffffffffffff0200, /* 1.0 * 2^-15 */
+ 0xffffffffffff1c00, /* 1.0 * 2^-8 */
+ 0xffffffffffff2000, /* 1.0 * 2^-7 */
+ 0xffffffffffff2c00, /* 1.0 * 2^-4 */
+ 0xffffffffffff3000, /* 1.0 * 2^-3 */
+ 0xffffffffffff3400, /* 0.25 */
+ 0xffffffffffff3500, /* 0.3125 */
+ 0xffffffffffff3600, /* 0.375 */
+ 0xffffffffffff3700, /* 0.4375 */
+ 0xffffffffffff3800, /* 0.5 */
+ 0xffffffffffff3900, /* 0.625 */
+ 0xffffffffffff3a00, /* 0.75 */
+ 0xffffffffffff3b00, /* 0.875 */
+ 0xffffffffffff3c00, /* 1.0 */
+ 0xffffffffffff3d00, /* 1.25 */
+ 0xffffffffffff3e00, /* 1.5 */
+ 0xffffffffffff3f00, /* 1.75 */
+ 0xffffffffffff4000, /* 2.0 */
+ 0xffffffffffff4100, /* 2.5 */
+ 0xffffffffffff4200, /* 3 */
+ 0xffffffffffff4400, /* 4 */
+ 0xffffffffffff4800, /* 8 */
+ 0xffffffffffff4c00, /* 16 */
+ 0xffffffffffff5800, /* 2^7 */
+ 0xffffffffffff5c00, /* 2^8 */
+ 0xffffffffffff7800, /* 2^15 */
+ 0xffffffffffff7c00, /* 2^16 */
+ 0xffffffffffff7c00, /* +inf */
+ 0xffffffffffff7e00, /* Canonical NaN */
+ };
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ tcg_gen_movi_i64(dest, fli_h_table[a->rs1]);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_s(DisasContext *ctx, arg_fminm_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fminm_s(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_s(DisasContext *ctx, arg_fmaxm_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fmaxm_s(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_d(DisasContext *ctx, arg_fminm_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fminm_d(dest, cpu_env, src1, src2);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_d(DisasContext *ctx, arg_fmaxm_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_d(ctx, a->rs2);
+
+ gen_helper_fmaxm_d(dest, cpu_env, src1, src2);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fminm_h(DisasContext *ctx, arg_fminm_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fminm_h(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fmaxm_h(DisasContext *ctx, arg_fmaxm_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fmaxm_h(dest, cpu_env, src1, src2);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_s(DisasContext *ctx, arg_fround_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_s(DisasContext *ctx, arg_froundnx_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_s(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_d(DisasContext *ctx, arg_fround_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_d(dest, cpu_env, src1);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_d(DisasContext *ctx, arg_froundnx_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_d(dest, cpu_env, src1);
+ gen_set_fpr_d(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_fround_h(DisasContext *ctx, arg_fround_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_fround_h(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+static bool trans_froundnx_h(DisasContext *ctx, arg_froundnx_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv_i64 dest = dest_fpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+
+ gen_set_rm(ctx, a->rm);
+ gen_helper_froundnx_h(dest, cpu_env, src1);
+ gen_set_fpr_hs(ctx, a->rd, dest);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+bool trans_fcvtmod_w_d(DisasContext *ctx, arg_fcvtmod_w_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dst = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_d(ctx, a->rs1);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+
+ /* Rounding mode is RTZ. */
+ gen_set_rm(ctx, RISCV_FRM_RTZ);
+ gen_helper_fcvtmod_w_d(t1, cpu_env, src1);
+ tcg_gen_trunc_i64_tl(dst, t1);
+ gen_set_gpr(ctx, a->rd, dst);
+
+ return true;
+}
+
+bool trans_fmvh_x_d(DisasContext *ctx, arg_fmvh_x_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+ REQUIRE_32BIT(ctx);
+
+ TCGv dst = dest_gpr(ctx, a->rd);
+ TCGv_i64 t1 = tcg_temp_new_i64();
+ tcg_gen_sari_i64(t1, cpu_fpr[a->rs1], 32);
+ tcg_gen_trunc_i64_tl(dst, t1);
+ gen_set_gpr(ctx, a->rd, dst);
+ return true;
+}
+
+bool trans_fmvp_d_x(DisasContext *ctx, arg_fmvp_d_x *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+ REQUIRE_32BIT(ctx);
+
+ TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+ TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+ tcg_gen_concat_tl_i64(cpu_fpr[a->rd], src1, src2);
+
+ mark_fs_dirty(ctx);
+ return true;
+}
+
+bool trans_fleq_s(DisasContext *ctx, arg_fleq_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fleq_s(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+bool trans_fltq_s(DisasContext *ctx, arg_fltq_s *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVF);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fltq_s(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+bool trans_fleq_d(DisasContext *ctx, arg_fleq_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fleq_d(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+bool trans_fltq_d(DisasContext *ctx, arg_fltq_d *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_EXT(ctx, RVD);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fltq_d(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+bool trans_fleq_h(DisasContext *ctx, arg_fleq_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fleq_h(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
+
+bool trans_fltq_h(DisasContext *ctx, arg_fltq_h *a)
+{
+ REQUIRE_FPU;
+ REQUIRE_ZFA(ctx);
+ REQUIRE_ZFH(ctx);
+
+ TCGv dest = dest_gpr(ctx, a->rd);
+ TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
+ TCGv_i64 src2 = get_fpr_hs(ctx, a->rs2);
+
+ gen_helper_fltq_h(dest, cpu_env, src1, src2);
+ gen_set_gpr(ctx, a->rd, dest);
+ return true;
+}
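The three constant tables above can be sanity-checked outside QEMU: the low 32 (or 16) bits are the IEEE encoding named in each comment, and the leading all-ones half is the NaN-box expected by the FP register file. A small standalone check for the single-precision table (illustrative C only; the helper name is made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <float.h>

/* Strip the NaN-box (upper 32 set bits) and reinterpret as binary32. */
static float unbox_fli_s(uint64_t boxed)
{
    uint32_t bits = (uint32_t)boxed;
    float f;
    memcpy(&f, &bits, sizeof(f));
    return f;
}

int main(void)
{
    printf("%g\n", unbox_fli_s(0xffffffffbf800000ull)); /* -1.0 */
    printf("%g\n", unbox_fli_s(0xffffffff3fa00000ull)); /* 1.25 */
    /* Index 1 of the table is the minimum positive normal, i.e. FLT_MIN. */
    printf("%d\n", unbox_fli_s(0xffffffff00800000ull) == FLT_MIN);
    return 0;
}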
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 74dde37ff7..8b1e2519bb 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -28,8 +28,8 @@
} \
} while (0)
-#define REQUIRE_ZFHMIN(ctx) do { \
- if (!ctx->cfg_ptr->ext_zfhmin) { \
+#define REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx) do { \
+ if (!ctx->cfg_ptr->ext_zfhmin && !ctx->cfg_ptr->ext_zfbfmin) { \
return false; \
} \
} while (0)
@@ -46,7 +46,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -69,7 +69,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
TCGv t0;
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
decode_save_opc(ctx);
t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -574,7 +574,7 @@ static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
{
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
TCGv dest = dest_gpr(ctx, a->rd);
@@ -594,7 +594,7 @@ static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
{
REQUIRE_FPU;
- REQUIRE_ZFHMIN(ctx);
+ REQUIRE_ZFHMIN_OR_ZFBFMIN(ctx);
TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
diff --git a/target/riscv/kvm.c b/target/riscv/kvm.c
index 0f932a5b96..9d8a8982f9 100644
--- a/target/riscv/kvm.c
+++ b/target/riscv/kvm.c
@@ -22,8 +22,10 @@
#include <linux/kvm.h>
#include "qemu/timer.h"
+#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
+#include "qapi/visitor.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
@@ -99,12 +101,280 @@ static uint64_t kvm_riscv_reg_id(CPURISCVState *env, uint64_t type,
#define KVM_RISCV_SET_TIMER(cs, env, name, reg) \
do { \
- int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, time), &reg); \
+ int ret = kvm_set_one_reg(cs, RISCV_TIMER_REG(env, name), &reg); \
if (ret) { \
abort(); \
} \
} while (0)
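The hunk above fixes a classic macro-parameter slip: the body referenced the literal token time instead of the name argument, so every expansion wrote the timer's time register. A self-contained demo of the same pattern (hypothetical DEMO_* macros, not QEMU code):

#include <stdio.h>

#define DEMO_REG(name)        ("timer." #name)
#define DEMO_SET_BUGGY(name)  puts(DEMO_REG(time))  /* ignores 'name' */
#define DEMO_SET_FIXED(name)  puts(DEMO_REG(name))  /* uses 'name'    */

int main(void)
{
    DEMO_SET_BUGGY(compare);  /* prints "timer.time"    -- wrong register */
    DEMO_SET_FIXED(compare);  /* prints "timer.compare" -- intended       */
    return 0;
}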
+typedef struct KVMCPUConfig {
+ const char *name;
+ const char *description;
+ target_ulong offset;
+ int kvm_reg_id;
+ bool user_set;
+ bool supported;
+} KVMCPUConfig;
+
+#define KVM_MISA_CFG(_bit, _reg_id) \
+ {.offset = _bit, .kvm_reg_id = _reg_id}
+
+/* KVM ISA extensions */
+static KVMCPUConfig kvm_misa_ext_cfgs[] = {
+ KVM_MISA_CFG(RVA, KVM_RISCV_ISA_EXT_A),
+ KVM_MISA_CFG(RVC, KVM_RISCV_ISA_EXT_C),
+ KVM_MISA_CFG(RVD, KVM_RISCV_ISA_EXT_D),
+ KVM_MISA_CFG(RVF, KVM_RISCV_ISA_EXT_F),
+ KVM_MISA_CFG(RVH, KVM_RISCV_ISA_EXT_H),
+ KVM_MISA_CFG(RVI, KVM_RISCV_ISA_EXT_I),
+ KVM_MISA_CFG(RVM, KVM_RISCV_ISA_EXT_M),
+};
+
+static void kvm_cpu_set_misa_ext_cfg(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *misa_ext_cfg = opaque;
+ target_ulong misa_bit = misa_ext_cfg->offset;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ CPURISCVState *env = &cpu->env;
+ bool value, host_bit;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ host_bit = env->misa_ext_mask & misa_bit;
+
+ if (value == host_bit) {
+ return;
+ }
+
+ if (!value) {
+ misa_ext_cfg->user_set = true;
+ return;
+ }
+
+ /*
+ * Forbid users to enable extensions that aren't
+ * available in the hart.
+ */
+ error_setg(errp, "Enabling MISA bit '%s' is not allowed: it's not "
+ "enabled in the host", misa_ext_cfg->name);
+}
+
+static void kvm_riscv_update_cpu_misa_ext(RISCVCPU *cpu, CPUState *cs)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t id, reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
+ KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
+ target_ulong misa_bit = misa_cfg->offset;
+
+ if (!misa_cfg->user_set) {
+ continue;
+ }
+
+ /* If we're here we're going to disable the MISA bit */
+ reg = 0;
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ misa_cfg->kvm_reg_id);
+ ret = kvm_set_one_reg(cs, id, &reg);
+ if (ret != 0) {
+ /*
+ * We're not checking for -EINVAL because if the bit is about
+ * to be disabled, it means that it was already enabled by
+ * KVM. We determined that by fetching the 'isa' register
+ * during init() time. Any error at this point is worth
+ * aborting.
+ */
+ error_report("Unable to set KVM reg %s, error %d",
+ misa_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+ env->misa_ext &= ~misa_bit;
+ }
+}
+
+#define CPUCFG(_prop) offsetof(struct RISCVCPUConfig, _prop)
+
+#define KVM_EXT_CFG(_name, _prop, _reg_id) \
+ {.name = _name, .offset = CPUCFG(_prop), \
+ .kvm_reg_id = _reg_id}
+
+static KVMCPUConfig kvm_multi_ext_cfgs[] = {
+ KVM_EXT_CFG("zicbom", ext_icbom, KVM_RISCV_ISA_EXT_ZICBOM),
+ KVM_EXT_CFG("zicboz", ext_icboz, KVM_RISCV_ISA_EXT_ZICBOZ),
+ KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE),
+ KVM_EXT_CFG("zbb", ext_zbb, KVM_RISCV_ISA_EXT_ZBB),
+ KVM_EXT_CFG("ssaia", ext_ssaia, KVM_RISCV_ISA_EXT_SSAIA),
+ KVM_EXT_CFG("sstc", ext_sstc, KVM_RISCV_ISA_EXT_SSTC),
+ KVM_EXT_CFG("svinval", ext_svinval, KVM_RISCV_ISA_EXT_SVINVAL),
+ KVM_EXT_CFG("svpbmt", ext_svpbmt, KVM_RISCV_ISA_EXT_SVPBMT),
+};
+
+static void *kvmconfig_get_cfg_addr(RISCVCPU *cpu, KVMCPUConfig *kvmcfg)
+{
+ return (void *)&cpu->cfg + kvmcfg->offset;
+}
+
+static void kvm_cpu_cfg_set(RISCVCPU *cpu, KVMCPUConfig *multi_ext,
+ uint32_t val)
+{
+ bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
+
+ *ext_enabled = val;
+}
+
+static uint32_t kvm_cpu_cfg_get(RISCVCPU *cpu,
+ KVMCPUConfig *multi_ext)
+{
+ bool *ext_enabled = kvmconfig_get_cfg_addr(cpu, multi_ext);
+
+ return *ext_enabled;
+}
+
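kvmconfig_get_cfg_addr() and the two accessors above locate a bool flag inside the CPU config purely by byte offset, so one setter can serve every multi-letter extension. The same pattern in standalone C, with a hypothetical stand-in struct and standard (char *) arithmetic instead of GCC's void-pointer arithmetic:

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical stand-in for the CPU config, for illustration only. */
struct demo_cfg {
    bool ext_zbb;
    bool ext_sstc;
};

struct demo_prop {
    const char *name;
    size_t offset;   /* offsetof() into struct demo_cfg */
};

static bool *demo_cfg_addr(struct demo_cfg *cfg, const struct demo_prop *p)
{
    /* Base address plus a per-property byte offset selects the flag. */
    return (bool *)((char *)cfg + p->offset);
}

/* Usage sketch:
 *   static const struct demo_prop zbb = { "zbb", offsetof(struct demo_cfg, ext_zbb) };
 *   struct demo_cfg cfg = {0};
 *   *demo_cfg_addr(&cfg, &zbb) = true;
 */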
+static void kvm_cpu_set_multi_ext_cfg(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *multi_ext_cfg = opaque;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ bool value, host_val;
+
+ if (!visit_type_bool(v, name, &value, errp)) {
+ return;
+ }
+
+ host_val = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
+
+ /*
+ * Ignore if the user is setting the same value
+ * as the host.
+ */
+ if (value == host_val) {
+ return;
+ }
+
+ if (!multi_ext_cfg->supported) {
+ /*
+ * Error out if the user is trying to enable an
+ * extension that KVM doesn't support. Ignore
+ * option otherwise.
+ */
+ if (value) {
+ error_setg(errp, "KVM does not support disabling extension %s",
+ multi_ext_cfg->name);
+ }
+
+ return;
+ }
+
+ multi_ext_cfg->user_set = true;
+ kvm_cpu_cfg_set(cpu, multi_ext_cfg, value);
+}
+
+static KVMCPUConfig kvm_cbom_blocksize = {
+ .name = "cbom_blocksize",
+ .offset = CPUCFG(cbom_blocksize),
+ .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicbom_block_size)
+};
+
+static KVMCPUConfig kvm_cboz_blocksize = {
+ .name = "cboz_blocksize",
+ .offset = CPUCFG(cboz_blocksize),
+ .kvm_reg_id = KVM_REG_RISCV_CONFIG_REG(zicboz_block_size)
+};
+
+static void kvm_cpu_set_cbomz_blksize(Object *obj, Visitor *v,
+ const char *name,
+ void *opaque, Error **errp)
+{
+ KVMCPUConfig *cbomz_cfg = opaque;
+ RISCVCPU *cpu = RISCV_CPU(obj);
+ uint16_t value, *host_val;
+
+ if (!visit_type_uint16(v, name, &value, errp)) {
+ return;
+ }
+
+ host_val = kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
+
+ if (value != *host_val) {
+ error_report("Unable to set %s to a different value than "
+ "the host (%u)",
+ cbomz_cfg->name, *host_val);
+ exit(EXIT_FAILURE);
+ }
+
+ cbomz_cfg->user_set = true;
+}
+
+static void kvm_riscv_update_cpu_cfg_isa_ext(RISCVCPU *cpu, CPUState *cs)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t id, reg;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
+
+ if (!multi_ext_cfg->user_set) {
+ continue;
+ }
+
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
+ reg = kvm_cpu_cfg_get(cpu, multi_ext_cfg);
+ ret = kvm_set_one_reg(cs, id, &reg);
+ if (ret != 0) {
+ error_report("Unable to %s extension %s in KVM, error %d",
+ reg ? "enable" : "disable",
+ multi_ext_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+ }
+}
+
+static void kvm_riscv_add_cpu_user_properties(Object *cpu_obj)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_misa_ext_cfgs); i++) {
+ KVMCPUConfig *misa_cfg = &kvm_misa_ext_cfgs[i];
+ int bit = misa_cfg->offset;
+
+ misa_cfg->name = riscv_get_misa_ext_name(bit);
+ misa_cfg->description = riscv_get_misa_ext_description(bit);
+
+ object_property_add(cpu_obj, misa_cfg->name, "bool",
+ NULL,
+ kvm_cpu_set_misa_ext_cfg,
+ NULL, misa_cfg);
+ object_property_set_description(cpu_obj, misa_cfg->name,
+ misa_cfg->description);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_cfg = &kvm_multi_ext_cfgs[i];
+
+ object_property_add(cpu_obj, multi_cfg->name, "bool",
+ NULL,
+ kvm_cpu_set_multi_ext_cfg,
+ NULL, multi_cfg);
+ }
+
+ object_property_add(cpu_obj, "cbom_blocksize", "uint16",
+ NULL, kvm_cpu_set_cbomz_blksize,
+ NULL, &kvm_cbom_blocksize);
+
+ object_property_add(cpu_obj, "cboz_blocksize", "uint16",
+ NULL, kvm_cpu_set_cbomz_blksize,
+ NULL, &kvm_cboz_blocksize);
+}
+
static int kvm_riscv_get_regs_core(CPUState *cs)
{
int ret = 0;
@@ -309,6 +579,191 @@ static void kvm_riscv_put_regs_timer(CPUState *cs)
env->kvm_timer_dirty = false;
}
+typedef struct KVMScratchCPU {
+ int kvmfd;
+ int vmfd;
+ int cpufd;
+} KVMScratchCPU;
+
+/*
+ * Heavily inspired by kvm_arm_create_scratch_host_vcpu()
+ * from target/arm/kvm.c.
+ */
+static bool kvm_riscv_create_scratch_vcpu(KVMScratchCPU *scratch)
+{
+ int kvmfd = -1, vmfd = -1, cpufd = -1;
+
+ kvmfd = qemu_open_old("/dev/kvm", O_RDWR);
+ if (kvmfd < 0) {
+ goto err;
+ }
+ do {
+ vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
+ } while (vmfd == -1 && errno == EINTR);
+ if (vmfd < 0) {
+ goto err;
+ }
+ cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
+ if (cpufd < 0) {
+ goto err;
+ }
+
+ scratch->kvmfd = kvmfd;
+ scratch->vmfd = vmfd;
+ scratch->cpufd = cpufd;
+
+ return true;
+
+ err:
+ if (cpufd >= 0) {
+ close(cpufd);
+ }
+ if (vmfd >= 0) {
+ close(vmfd);
+ }
+ if (kvmfd >= 0) {
+ close(kvmfd);
+ }
+
+ return false;
+}
+
+static void kvm_riscv_destroy_scratch_vcpu(KVMScratchCPU *scratch)
+{
+ close(scratch->cpufd);
+ close(scratch->vmfd);
+ close(scratch->kvmfd);
+}
+
+static void kvm_riscv_init_machine_ids(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ reg.addr = (uint64_t)&cpu->cfg.mvendorid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ error_report("Unable to retrieve mvendorid from host, error %d", ret);
+ }
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(marchid));
+ reg.addr = (uint64_t)&cpu->cfg.marchid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ error_report("Unable to retrieve marchid from host, error %d", ret);
+ }
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mimpid));
+ reg.addr = (uint64_t)&cpu->cfg.mimpid;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ error_report("Unable to retrieve mimpid from host, error %d", ret);
+ }
+}
+
+static void kvm_riscv_init_misa_ext_mask(RISCVCPU *cpu,
+ KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(isa));
+ reg.addr = (uint64_t)&env->misa_ext_mask;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+
+ if (ret) {
+ error_report("Unable to fetch ISA register from KVM, "
+ "error %d", ret);
+ kvm_riscv_destroy_scratch_vcpu(kvmcpu);
+ exit(EXIT_FAILURE);
+ }
+
+ env->misa_ext = env->misa_ext_mask;
+}
+
+static void kvm_riscv_read_cbomz_blksize(RISCVCPU *cpu, KVMScratchCPU *kvmcpu,
+ KVMCPUConfig *cbomz_cfg)
+{
+ CPURISCVState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ cbomz_cfg->kvm_reg_id);
+ reg.addr = (uint64_t)kvmconfig_get_cfg_addr(cpu, cbomz_cfg);
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ error_report("Unable to read KVM reg %s, error %d",
+ cbomz_cfg->name, ret);
+ exit(EXIT_FAILURE);
+ }
+}
+
+static void kvm_riscv_init_multiext_cfg(RISCVCPU *cpu, KVMScratchCPU *kvmcpu)
+{
+ CPURISCVState *env = &cpu->env;
+ uint64_t val;
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_multi_ext_cfgs); i++) {
+ KVMCPUConfig *multi_ext_cfg = &kvm_multi_ext_cfgs[i];
+ struct kvm_one_reg reg;
+
+ reg.id = kvm_riscv_reg_id(env, KVM_REG_RISCV_ISA_EXT,
+ multi_ext_cfg->kvm_reg_id);
+ reg.addr = (uint64_t)&val;
+ ret = ioctl(kvmcpu->cpufd, KVM_GET_ONE_REG, &reg);
+ if (ret != 0) {
+ if (errno == EINVAL) {
+ /* Silently default to 'false' if KVM does not support it. */
+ multi_ext_cfg->supported = false;
+ val = false;
+ } else {
+ error_report("Unable to read ISA_EXT KVM register %s, "
+ "error %d", multi_ext_cfg->name, ret);
+ kvm_riscv_destroy_scratch_vcpu(kvmcpu);
+ exit(EXIT_FAILURE);
+ }
+ } else {
+ multi_ext_cfg->supported = true;
+ }
+
+ kvm_cpu_cfg_set(cpu, multi_ext_cfg, val);
+ }
+
+ if (cpu->cfg.ext_icbom) {
+ kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cbom_blocksize);
+ }
+
+ if (cpu->cfg.ext_icboz) {
+ kvm_riscv_read_cbomz_blksize(cpu, kvmcpu, &kvm_cboz_blocksize);
+ }
+}
+
+void kvm_riscv_init_user_properties(Object *cpu_obj)
+{
+ RISCVCPU *cpu = RISCV_CPU(cpu_obj);
+ KVMScratchCPU kvmcpu;
+
+ if (!kvm_riscv_create_scratch_vcpu(&kvmcpu)) {
+ return;
+ }
+
+ kvm_riscv_add_cpu_user_properties(cpu_obj);
+ kvm_riscv_init_machine_ids(cpu, &kvmcpu);
+ kvm_riscv_init_misa_ext_mask(cpu, &kvmcpu);
+ kvm_riscv_init_multiext_cfg(cpu, &kvmcpu);
+
+ kvm_riscv_destroy_scratch_vcpu(&kvmcpu);
+}
+
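The scratch-vCPU helpers above follow the usual KVM probing recipe: open /dev/kvm, create a throwaway VM and vCPU, query registers, then tear everything down before the real vCPUs exist. A stripped-down standalone sketch of that lifecycle (illustrative only; the function name is invented and no registers are read here):

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int kvm_scratch_probe(void)
{
    int rc = -1;
    int kvmfd = open("/dev/kvm", O_RDWR);
    if (kvmfd < 0) {
        return -1;
    }
    int vmfd;
    do {
        vmfd = ioctl(kvmfd, KVM_CREATE_VM, 0);
    } while (vmfd == -1 && errno == EINTR);
    if (vmfd >= 0) {
        int cpufd = ioctl(vmfd, KVM_CREATE_VCPU, 0);
        if (cpufd >= 0) {
            /* KVM_GET_ONE_REG queries would go here. */
            rc = 0;
            close(cpufd);
        }
        close(vmfd);
    }
    close(kvmfd);
    return rc;
}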
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
};
@@ -394,23 +849,49 @@ void kvm_arch_init_irq_routing(KVMState *s)
{
}
-int kvm_arch_init_vcpu(CPUState *cs)
+static int kvm_vcpu_set_machine_ids(RISCVCPU *cpu, CPUState *cs)
{
- int ret = 0;
- target_ulong isa;
- RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
uint64_t id;
+ int ret;
- qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mvendorid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.mvendorid);
+ if (ret != 0) {
+ return ret;
+ }
id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
- KVM_REG_RISCV_CONFIG_REG(isa));
- ret = kvm_get_one_reg(cs, id, &isa);
- if (ret) {
+ KVM_REG_RISCV_CONFIG_REG(marchid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.marchid);
+ if (ret != 0) {
return ret;
}
- env->misa_ext = isa;
+
+ id = kvm_riscv_reg_id(env, KVM_REG_RISCV_CONFIG,
+ KVM_REG_RISCV_CONFIG_REG(mimpid));
+ ret = kvm_set_one_reg(cs, id, &cpu->cfg.mimpid);
+
+ return ret;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret = 0;
+ RISCVCPU *cpu = RISCV_CPU(cs);
+
+ qemu_add_vm_change_state_handler(kvm_riscv_vm_state_change, cs);
+
+ if (!object_dynamic_cast(OBJECT(cpu), TYPE_RISCV_CPU_HOST)) {
+ ret = kvm_vcpu_set_machine_ids(cpu, cs);
+ if (ret != 0) {
+ return ret;
+ }
+ }
+
+ kvm_riscv_update_cpu_misa_ext(cpu, cs);
+ kvm_riscv_update_cpu_cfg_isa_ext(cpu, cs);
return ret;
}
diff --git a/target/riscv/kvm_riscv.h b/target/riscv/kvm_riscv.h
index ed281bdce0..e3ba935808 100644
--- a/target/riscv/kvm_riscv.h
+++ b/target/riscv/kvm_riscv.h
@@ -19,6 +19,7 @@
#ifndef QEMU_KVM_RISCV_H
#define QEMU_KVM_RISCV_H
+void kvm_riscv_init_user_properties(Object *cpu_obj);
void kvm_riscv_reset_vcpu(RISCVCPU *cpu);
void kvm_riscv_set_irq(RISCVCPU *cpu, int irq, int level);
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index f563dc3981..9cdb9cdd06 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -335,7 +335,8 @@ target_ulong helper_mret(CPURISCVState *env)
riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
}
- target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV);
+ target_ulong prev_virt = get_field(env->mstatus, MSTATUS_MPV) &&
+ (prev_priv != PRV_M);
mstatus = set_field(mstatus, MSTATUS_MIE,
get_field(mstatus, MSTATUS_MPIE));
mstatus = set_field(mstatus, MSTATUS_MPIE, 1);
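The added condition matters when an mret returns to M-mode: as I read the hypervisor extension, MPV only selects a virtualized target when MPP is below M, so a stale MPV bit must not flag an M-to-M return as virtual. A hypothetical predicate spelling that out:

#include <stdbool.h>

/* Illustrative only, not the QEMU code: the mret target is a virtualized
 * (VS/VU) context only when mstatus.MPV is set and the privilege being
 * returned to (mstatus.MPP) is below M-mode. */
static bool mret_target_is_virt(bool mpv, unsigned prev_priv, unsigned prv_m)
{
    return mpv && (prev_priv != prv_m);
}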
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index 8a33da811e..697df1be9e 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -64,6 +64,7 @@ typedef struct DisasContext {
target_ulong priv_ver;
RISCVMXL misa_mxl_max;
RISCVMXL xl;
+ RISCVMXL address_xl;
uint32_t misa_ext;
uint32_t opcode;
RISCVExtStatus mstatus_fs;
@@ -121,29 +122,6 @@ static inline bool has_ext(DisasContext *ctx, uint32_t ext)
return ctx->misa_ext & ext;
}
-static bool always_true_p(DisasContext *ctx __attribute__((__unused__)))
-{
- return true;
-}
-
-static bool has_xthead_p(DisasContext *ctx __attribute__((__unused__)))
-{
- return ctx->cfg_ptr->ext_xtheadba || ctx->cfg_ptr->ext_xtheadbb ||
- ctx->cfg_ptr->ext_xtheadbs || ctx->cfg_ptr->ext_xtheadcmo ||
- ctx->cfg_ptr->ext_xtheadcondmov ||
- ctx->cfg_ptr->ext_xtheadfmemidx || ctx->cfg_ptr->ext_xtheadfmv ||
- ctx->cfg_ptr->ext_xtheadmac || ctx->cfg_ptr->ext_xtheadmemidx ||
- ctx->cfg_ptr->ext_xtheadmempair || ctx->cfg_ptr->ext_xtheadsync;
-}
-
-#define MATERIALISE_EXT_PREDICATE(ext) \
- static bool has_ ## ext ## _p(DisasContext *ctx) \
- { \
- return ctx->cfg_ptr->ext_ ## ext ; \
- }
-
-MATERIALISE_EXT_PREDICATE(XVentanaCondOps);
-
#ifdef TARGET_RISCV32
#define get_xl(ctx) MXL_RV32
#elif defined(CONFIG_USER_ONLY)
@@ -152,6 +130,14 @@ MATERIALISE_EXT_PREDICATE(XVentanaCondOps);
#define get_xl(ctx) ((ctx)->xl)
#endif
+#ifdef TARGET_RISCV32
+#define get_address_xl(ctx) MXL_RV32
+#elif defined(CONFIG_USER_ONLY)
+#define get_address_xl(ctx) MXL_RV64
+#else
+#define get_address_xl(ctx) ((ctx)->address_xl)
+#endif
+
/* The word size for this machine mode. */
static inline int __attribute__((unused)) get_xlen(DisasContext *ctx)
{
@@ -598,12 +584,13 @@ static TCGv get_address(DisasContext *ctx, int rs1, int imm)
tcg_gen_addi_tl(addr, src1, imm);
if (ctx->pm_mask_enabled) {
tcg_gen_andc_tl(addr, addr, pm_mask);
- } else if (get_xl(ctx) == MXL_RV32) {
+ } else if (get_address_xl(ctx) == MXL_RV32) {
tcg_gen_ext32u_tl(addr, addr);
}
if (ctx->pm_base_enabled) {
tcg_gen_or_tl(addr, addr, pm_base);
}
+
return addr;
}
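With the switch to get_address_xl(), the zero-extension of the computed address follows the effective address width rather than the current XLEN, which is what the MPRV=1 fix in this pull request needs. In plain terms (illustrative helper, not the TCG code):

#include <stdbool.h>
#include <stdint.h>

/* Base + offset for a load/store, ignoring pointer masking: when the
 * effective address width is 32 bits, the sum is zero-extended from
 * its low 32 bits; otherwise all 64 bits are kept. */
static uint64_t effective_address(uint64_t base, int64_t imm, bool addr_xl_is_32)
{
    uint64_t addr = base + (uint64_t)imm;
    return addr_xl_is_32 ? (uint32_t)addr : addr;
}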
@@ -1104,10 +1091,12 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
#include "insn_trans/trans_rvzicond.c.inc"
#include "insn_trans/trans_rvzawrs.c.inc"
#include "insn_trans/trans_rvzicbo.c.inc"
+#include "insn_trans/trans_rvzfa.c.inc"
#include "insn_trans/trans_rvzfh.c.inc"
#include "insn_trans/trans_rvk.c.inc"
#include "insn_trans/trans_privileged.c.inc"
#include "insn_trans/trans_svinval.c.inc"
+#include "insn_trans/trans_rvbf16.c.inc"
#include "decode-xthead.c.inc"
#include "insn_trans/trans_xthead.c.inc"
#include "insn_trans/trans_xventanacondops.c.inc"
@@ -1134,7 +1123,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
* that are tested in-order until a decoder matches onto the opcode.
*/
static const struct {
- bool (*guard_func)(DisasContext *);
+ bool (*guard_func)(const RISCVCPUConfig *);
bool (*decode_func)(DisasContext *, uint32_t);
} decoders[] = {
{ always_true_p, decode_insn32 },
@@ -1163,7 +1152,7 @@ static void decode_opc(CPURISCVState *env, DisasContext *ctx, uint16_t opcode)
ctx->opcode = opcode32;
for (size_t i = 0; i < ARRAY_SIZE(decoders); ++i) {
- if (decoders[i].guard_func(ctx) &&
+ if (decoders[i].guard_func(ctx->cfg_ptr) &&
decoders[i].decode_func(ctx, opcode32)) {
return;
}
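The guard functions now take the CPU configuration directly instead of a DisasContext, so the same table-of-decoders shape can be evaluated against a config alone. A minimal standalone sketch of that pattern (the demo_* names are hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct demo_cfg { bool ext_xvendor; };
struct demo_ctx { const struct demo_cfg *cfg_ptr; };

static bool demo_always_true_p(const struct demo_cfg *cfg) { (void)cfg; return true; }
static bool demo_has_xvendor_p(const struct demo_cfg *cfg) { return cfg->ext_xvendor; }

static bool demo_decode_base(struct demo_ctx *ctx, uint32_t insn)
{
    (void)ctx; (void)insn;
    return false;  /* pretend no base-ISA pattern matched */
}

static bool demo_decode_xvendor(struct demo_ctx *ctx, uint32_t insn)
{
    (void)ctx; (void)insn;
    return true;   /* pretend the vendor decoder matched */
}

/* Guards over the config decide whether a decoder is consulted at all. */
static const struct {
    bool (*guard_func)(const struct demo_cfg *);
    bool (*decode_func)(struct demo_ctx *, uint32_t);
} demo_decoders[] = {
    { demo_always_true_p, demo_decode_base },
    { demo_has_xvendor_p, demo_decode_xvendor },
};

static bool demo_decode(struct demo_ctx *ctx, uint32_t insn)
{
    for (size_t i = 0; i < sizeof(demo_decoders) / sizeof(demo_decoders[0]); i++) {
        if (demo_decoders[i].guard_func(ctx->cfg_ptr) &&
            demo_decoders[i].decode_func(ctx, insn)) {
            return true;
        }
    }
    return false;
}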
@@ -1200,6 +1189,7 @@ static void riscv_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
ctx->vl_eq_vlmax = FIELD_EX32(tb_flags, TB_FLAGS, VL_EQ_VLMAX);
ctx->misa_mxl_max = env->misa_mxl_max;
ctx->xl = FIELD_EX32(tb_flags, TB_FLAGS, XL);
+ ctx->address_xl = FIELD_EX32(tb_flags, TB_FLAGS, AXL);
ctx->cs = cs;
ctx->pm_mask_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_MASK_ENABLED);
ctx->pm_base_enabled = FIELD_EX32(tb_flags, TB_FLAGS, PM_BASE_ENABLED);
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1e06e7447c..71bb9b4457 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -3554,6 +3554,17 @@ RVVCALL(OPFVF3, vfwmacc_vf_w, WOP_UUU_W, H8, H4, fwmacc32)
GEN_VEXT_VF(vfwmacc_vf_h, 4)
GEN_VEXT_VF(vfwmacc_vf_w, 8)
+static uint32_t fwmaccbf16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
+{
+ return float32_muladd(bfloat16_to_float32(a, s),
+ bfloat16_to_float32(b, s), d, 0, s);
+}
+
+RVVCALL(OPFVV3, vfwmaccbf16_vv, WOP_UUU_H, H4, H2, H2, fwmaccbf16)
+GEN_VEXT_VV_ENV(vfwmaccbf16_vv, 4)
+RVVCALL(OPFVF3, vfwmaccbf16_vf, WOP_UUU_H, H4, H2, fwmaccbf16)
+GEN_VEXT_VF(vfwmaccbf16_vf, 4)
+
static uint32_t fwnmacc16(uint16_t a, uint16_t b, uint32_t d, float_status *s)
{
return float32_muladd(float16_to_float32(a, true, s),
@@ -4535,6 +4546,9 @@ RVVCALL(OPFVV1, vfwcvt_f_f_v_w, WOP_UU_W, H8, H4, float32_to_float64)
GEN_VEXT_V_ENV(vfwcvt_f_f_v_h, 4)
GEN_VEXT_V_ENV(vfwcvt_f_f_v_w, 8)
+RVVCALL(OPFVV1, vfwcvtbf16_f_f_v, WOP_UU_H, H4, H2, bfloat16_to_float32)
+GEN_VEXT_V_ENV(vfwcvtbf16_f_f_v, 4)
+
/* Narrowing Floating-Point/Integer Type-Convert Instructions */
/* (TD, T2, TX2) */
#define NOP_UU_B uint8_t, uint16_t, uint32_t
@@ -4581,6 +4595,9 @@ RVVCALL(OPFVV1, vfncvt_f_f_w_w, NOP_UU_W, H4, H8, float64_to_float32)
GEN_VEXT_V_ENV(vfncvt_f_f_w_h, 2)
GEN_VEXT_V_ENV(vfncvt_f_f_w_w, 4)
+RVVCALL(OPFVV1, vfncvtbf16_f_f_w, NOP_UU_H, H2, H4, float32_to_bfloat16)
+GEN_VEXT_V_ENV(vfncvtbf16_f_f_w, 2)
+
/*
* Vector Reduction Operations
*/