author | Peter Maydell <peter.maydell@linaro.org> | 2023-03-03 11:04:46 +0000 |
---|---|---|
committer | Peter Maydell <peter.maydell@linaro.org> | 2023-03-03 11:04:46 +0000 |
commit | 76116e28e1491a06966fd594fefdaacb084ada10 (patch) | |
tree | 710ca36e110f9597c9a2b613519cee161496046f /target | |
parent | c61d1a066cb6cf90662c82d0e35660fc0ccacbaf (diff) | |
parent | 37151032989ecf6e7ce8b65bc7bcb400d0318b2c (diff) |
Merge tag 'pull-riscv-to-apply-20230303' of https://gitlab.com/palmer-dabbelt/qemu into staging
Fifth RISC-V PR for QEMU 8.0
* Experimental support for writable misa.
* Support for Svadu extension.
* Support for the Zicond extension.
* Fixes to gdbstub, CSR accesses, dependencies between the various
floating-point extensions, and XTheadMemPair.
* Many cleanups.
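As a rough usage note (the property names come from the target/riscv/cpu.c hunk in this pull; the rest of the command line is illustrative): svadu defaults to on and can be toggled as a CPU property, while the experimental items are opt-in with an 'x-' prefix, e.g. qemu-system-riscv64 -cpu rv64,svadu=on,x-zicond=true,x-misa-w=true.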
# -----BEGIN PGP SIGNATURE-----
#
# iQJHBAABCAAxFiEEKzw3R0RoQ7JKlDp6LhMZ81+7GIkFAmQBrrUTHHBhbG1lckBk
# YWJiZWx0LmNvbQAKCRAuExnzX7sYicHrD/9dHBDOYNwyT/C2Q31NHMcEsVN6J0kW
# 0sVyDb2/TUFoXmClMwS6jZYQQwWD7tjjB7BDcvPJ0QKLblDoZFX5JyxpQypIKWcs
# It/E6mk7aG0epH1GoB/mbHFDbeCm4tbo7Vf6cQGpV/vGWBUaOS67c5nenUK7Tlqw
# NTr9qak+9NYVswvMHZ0lUKtO12W1g/1EVkict2/90P2snWbPZ+foWomifGNljmhy
# 5WtCNp27uBKF/uuD9xubLOxSEcqtZFTuKJy7U3azV4I0IKfd6Is83Kd0IwBOrTgT
# MYkFdtQE1jgbkXYVZjft6ymLuqJrcLFYwD8C2zdNAXJLk1Y+MCtGafgW6f6SkT6B
# FrNaSOqQ9xXiaNStF2FwYdmZ476zcY+eEg2rH1grTwCMewZ9r7m3+H8iat/tR0pt
# 9scYAre1oaL33LB6DGZi3JkssNYyj42sutcNao2hQXRHcsh+vv1dLR+Di2mO6Ji5
# MNfvEgCrWWZjNVSwvhwCXdJPqqpyTbkRf8HJEp0gWvjk6VoF8sWidDw/8oMLj+wW
# qZur7GNe+piJNvly85aFSL9J3SX7RyNeDzX/yK3b4k+g6I/ZziQaNgQtB9gYcm6w
# mj3snCwRbEMEhdhPH0+Chm0Wb97knHJS14Vq9wCe2xh16o3HM5FspboLFkGZMjDV
# tRDPFb7pitwdlA==
# =FMkl
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 03 Mar 2023 08:24:21 GMT
# gpg: using RSA key 2B3C3747446843B24A943A7A2E1319F35FBB1889
# gpg: issuer "palmer@dabbelt.com"
# gpg: Good signature from "Palmer Dabbelt <palmer@dabbelt.com>" [unknown]
# gpg: aka "Palmer Dabbelt <palmerdabbelt@google.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 00CE 76D1 8349 60DF CE88 6DF8 EF4C A150 2CCB AB41
# Subkey fingerprint: 2B3C 3747 4468 43B2 4A94 3A7A 2E13 19F3 5FBB 1889
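A number of the cleanup commits listed below (the csr.c "avoid env_archcpu()/env_cpu() when reading RISCVCPUConfig" and "use riscv_cpu_cfg() to avoid env_cpu() pointers" patches) converge on a single accessor added to cpu.h. The trimmed, stand-alone C sketch that follows shows the pattern; the struct layouts and pmp_csr_present() are illustrative stand-ins rather than QEMU code.

    #include <stdbool.h>

    /* Illustrative, trimmed-down stand-ins for the real QEMU types. */
    typedef struct { bool pmp; bool ext_smstateen; } RISCVCPUConfig;
    typedef struct { RISCVCPUConfig cfg; } RISCVCPU;
    typedef struct { RISCVCPU *cpu; } CPURISCVState;  /* upstream reaches the CPU via env_archcpu() */

    /* Mirrors the accessor the series adds to target/riscv/cpu.h. */
    static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env)
    {
        return &env->cpu->cfg;  /* upstream: return &env_archcpu(env)->cfg; */
    }

    /* A CSR predicate in the style of the reworked checks in csr.c. */
    static bool pmp_csr_present(CPURISCVState *env)
    {
        return riscv_cpu_cfg(env)->pmp;
    }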
* tag 'pull-riscv-to-apply-20230303' of https://gitlab.com/palmer-dabbelt/qemu: (59 commits)
target/riscv/vector_helper.c: avoid env_archcpu() when reading RISCVCPUConfig
target/riscv/vector_helper.c: create vext_set_tail_elems_1s()
target/riscv/csr.c: avoid env_archcpu() usages when reading RISCVCPUConfig
target/riscv/csr.c: use riscv_cpu_cfg() to avoid env_cpu() pointers
target/riscv/csr.c: simplify mctr()
target/riscv/csr.c: use env_archcpu() in ctr()
target/riscv: Export Svadu property
target/riscv: Add *envcfg.HADE related check in address translation
target/riscv: Add *envcfg.PBMTE related check in address translation
target/riscv: Add csr support for svadu
target/riscv: Fix the relationship of PBMTE/STCE fields between menvcfg and henvcfg
target/riscv: Fix the relationship between menvcfg.PBMTE/STCE and Svpbmt/Sstc extensions
hw/riscv: Move the dtb load bits outside of create_fdt()
hw/riscv: Skip re-generating DT nodes for a given DTB
target/riscv: Add support for Zicond extension
RISC-V: XTheadMemPair: Remove register restrictions for store-pair
target/riscv: Fix checking of whether instruction at 'pc_next' spans pages
target/riscv: Group all predicate() routines together
target/riscv: Drop priv level check in mseccfg predicate()
target/riscv: Allow debugger to access sstc CSRs
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
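Among the commits above, "target/riscv: Add support for Zicond extension" adds two conditional-zero instructions, decoded in insn32.decode and translated in the new trans_rvzicond.c.inc via tcg_gen_movcond_tl(). The stand-alone C model below is only a sketch of their semantics; the helper names are illustrative.

    #include <stdint.h>

    /* czero.eqz rd, rs1, rs2: rd = (rs2 == 0) ? 0 : rs1 */
    static uint64_t czero_eqz(uint64_t rs1, uint64_t rs2)
    {
        return (rs2 == 0) ? 0 : rs1;
    }

    /* czero.nez rd, rs1, rs2: rd = (rs2 != 0) ? 0 : rs1 */
    static uint64_t czero_nez(uint64_t rs1, uint64_t rs2)
    {
        return (rs2 != 0) ? 0 : rs1;
    }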
Diffstat (limited to 'target')
-rw-r--r-- | target/riscv/cpu.c | 134
-rw-r--r-- | target/riscv/cpu.h | 34
-rw-r--r-- | target/riscv/cpu_bits.h | 4
-rw-r--r-- | target/riscv/cpu_helper.c | 24
-rw-r--r-- | target/riscv/csr.c | 354
-rw-r--r-- | target/riscv/gdbstub.c | 100
-rw-r--r-- | target/riscv/insn32.decode | 4
-rw-r--r-- | target/riscv/insn_trans/trans_rvv.c.inc | 184
-rw-r--r-- | target/riscv/insn_trans/trans_rvzfh.c.inc | 25
-rw-r--r-- | target/riscv/insn_trans/trans_rvzicond.c.inc | 49
-rw-r--r-- | target/riscv/insn_trans/trans_xthead.c.inc | 4
-rw-r--r-- | target/riscv/machine.c | 11
-rw-r--r-- | target/riscv/monitor.c | 2
-rw-r--r-- | target/riscv/op_helper.c | 2
-rw-r--r-- | target/riscv/pmp.c | 8
-rw-r--r-- | target/riscv/translate.c | 3
-rw-r--r-- | target/riscv/vector_helper.c | 104
17 files changed, 504 insertions, 542 deletions
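In the cpu_helper.c hunk below, Svadu gates hardware A/D-bit updates during the page-table walk on menvcfg.HADE, and additionally on henvcfg.HADE for the first stage of a two-stage translation when virtualization is enabled; when the gate is off, a walk that would need to set A or D returns TRANSLATE_FAIL so software handles the update. A minimal sketch of that gate, reusing the bit definitions added to cpu_bits.h (the helper name is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    #define MENVCFG_HADE (1ULL << 61)   /* as added to target/riscv/cpu_bits.h */
    #define HENVCFG_HADE MENVCFG_HADE

    /* Returns true when hardware may update the PTE A/D bits. */
    static bool hw_ad_update_allowed(uint64_t menvcfg, uint64_t henvcfg,
                                     bool first_stage_under_virt)
    {
        bool hade = menvcfg & MENVCFG_HADE;

        if (first_stage_under_virt) {
            hade = hade && (henvcfg & HENVCFG_HADE);
        }
        return hade;  /* if false, the walk fails instead of setting A/D */
    }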
diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 9eb748a283..5bc0005cc7 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -75,6 +75,7 @@ struct isa_ext_data { static const struct isa_ext_data isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(h, false, PRIV_VERSION_1_12_0, ext_h), ISA_EXT_DATA_ENTRY(v, false, PRIV_VERSION_1_10_0, ext_v), + ISA_EXT_DATA_ENTRY(zicond, true, PRIV_VERSION_1_12_0, ext_zicond), ISA_EXT_DATA_ENTRY(zicsr, true, PRIV_VERSION_1_10_0, ext_icsr), ISA_EXT_DATA_ENTRY(zifencei, true, PRIV_VERSION_1_10_0, ext_ifencei), ISA_EXT_DATA_ENTRY(zihintpause, true, PRIV_VERSION_1_10_0, ext_zihintpause), @@ -102,12 +103,16 @@ static const struct isa_ext_data isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zkt, true, PRIV_VERSION_1_12_0, ext_zkt), ISA_EXT_DATA_ENTRY(zve32f, true, PRIV_VERSION_1_12_0, ext_zve32f), ISA_EXT_DATA_ENTRY(zve64f, true, PRIV_VERSION_1_12_0, ext_zve64f), + ISA_EXT_DATA_ENTRY(zve64d, true, PRIV_VERSION_1_12_0, ext_zve64d), + ISA_EXT_DATA_ENTRY(zvfh, true, PRIV_VERSION_1_12_0, ext_zvfh), + ISA_EXT_DATA_ENTRY(zvfhmin, true, PRIV_VERSION_1_12_0, ext_zvfhmin), ISA_EXT_DATA_ENTRY(zhinx, true, PRIV_VERSION_1_12_0, ext_zhinx), ISA_EXT_DATA_ENTRY(zhinxmin, true, PRIV_VERSION_1_12_0, ext_zhinxmin), ISA_EXT_DATA_ENTRY(smaia, true, PRIV_VERSION_1_12_0, ext_smaia), ISA_EXT_DATA_ENTRY(ssaia, true, PRIV_VERSION_1_12_0, ext_ssaia), ISA_EXT_DATA_ENTRY(sscofpmf, true, PRIV_VERSION_1_12_0, ext_sscofpmf), ISA_EXT_DATA_ENTRY(sstc, true, PRIV_VERSION_1_12_0, ext_sstc), + ISA_EXT_DATA_ENTRY(svadu, true, PRIV_VERSION_1_12_0, ext_svadu), ISA_EXT_DATA_ENTRY(svinval, true, PRIV_VERSION_1_12_0, ext_svinval), ISA_EXT_DATA_ENTRY(svnapot, true, PRIV_VERSION_1_12_0, ext_svnapot), ISA_EXT_DATA_ENTRY(svpbmt, true, PRIV_VERSION_1_12_0, ext_svpbmt), @@ -616,6 +621,11 @@ static void riscv_cpu_reset_hold(Object *obj) env->bins = 0; env->two_stage_lookup = false; + env->menvcfg = (cpu->cfg.ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? MENVCFG_HADE : 0); + env->henvcfg = (cpu->cfg.ext_svpbmt ? HENVCFG_PBMTE : 0) | + (cpu->cfg.ext_svadu ? HENVCFG_HADE : 0); + /* Initialized default priorities of local interrupts. 
*/ for (i = 0; i < ARRAY_SIZE(env->miprio); i++) { iprio = riscv_cpu_default_priority(i); @@ -640,7 +650,7 @@ static void riscv_cpu_reset_hold(Object *obj) set_default_nan_mode(1, &env->fp_status); #ifndef CONFIG_USER_ONLY - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (cpu->cfg.debug) { riscv_trigger_init(env); } @@ -732,7 +742,11 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if ((cpu->cfg.ext_zfh || cpu->cfg.ext_zfhmin) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zfh) { + cpu->cfg.ext_zfhmin = true; + } + + if (cpu->cfg.ext_zfhmin && !cpu->cfg.ext_f) { error_setg(errp, "Zfh/Zfhmin extensions require F extension"); return; } @@ -742,19 +756,51 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) return; } - if (cpu->cfg.ext_v && !cpu->cfg.ext_d) { - error_setg(errp, "V extension requires D extension"); + /* The V vector extension depends on the Zve64d extension */ + if (cpu->cfg.ext_v) { + cpu->cfg.ext_zve64d = true; + } + + /* The Zve64d extension depends on the Zve64f extension */ + if (cpu->cfg.ext_zve64d) { + cpu->cfg.ext_zve64f = true; + } + + /* The Zve64f extension depends on the Zve32f extension */ + if (cpu->cfg.ext_zve64f) { + cpu->cfg.ext_zve32f = true; + } + + if (cpu->cfg.ext_zve64d && !cpu->cfg.ext_d) { + error_setg(errp, "Zve64d/V extensions require D extension"); return; } - if ((cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) && !cpu->cfg.ext_f) { + if (cpu->cfg.ext_zve32f && !cpu->cfg.ext_f) { error_setg(errp, "Zve32f/Zve64f extensions require F extension"); return; } + if (cpu->cfg.ext_zvfh) { + cpu->cfg.ext_zvfhmin = true; + } + + if (cpu->cfg.ext_zvfhmin && !cpu->cfg.ext_zve32f) { + error_setg(errp, "Zvfh/Zvfhmin extensions require Zve32f extension"); + return; + } + + if (cpu->cfg.ext_zvfh && !cpu->cfg.ext_zfhmin) { + error_setg(errp, "Zvfh extensions requires Zfhmin extension"); + return; + } + /* Set the ISA extensions, checks should have happened above */ - if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinx || - cpu->cfg.ext_zhinxmin) { + if (cpu->cfg.ext_zhinx) { + cpu->cfg.ext_zhinxmin = true; + } + + if (cpu->cfg.ext_zdinx || cpu->cfg.ext_zhinxmin) { cpu->cfg.ext_zfinx = true; } @@ -765,7 +811,7 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) } if (cpu->cfg.ext_f) { error_setg(errp, - "Zfinx cannot be supported together with F extension"); + "Zfinx cannot be supported together with F extension"); return; } } @@ -828,40 +874,40 @@ static void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) ext |= RVV; if (!is_power_of_2(cpu->cfg.vlen)) { error_setg(errp, - "Vector extension VLEN must be power of 2"); + "Vector extension VLEN must be power of 2"); return; } if (cpu->cfg.vlen > RV_VLEN_MAX || cpu->cfg.vlen < 128) { error_setg(errp, - "Vector extension implementation only supports VLEN " - "in the range [128, %d]", RV_VLEN_MAX); + "Vector extension implementation only supports VLEN " + "in the range [128, %d]", RV_VLEN_MAX); return; } if (!is_power_of_2(cpu->cfg.elen)) { error_setg(errp, - "Vector extension ELEN must be power of 2"); + "Vector extension ELEN must be power of 2"); return; } - if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { - error_setg(errp, - "Vector extension implementation only supports ELEN " - "in the range [8, 64]"); - return; - } - if (cpu->cfg.vext_spec) { - if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { - vext_version = VEXT_VERSION_1_00_0; - } else { + if (cpu->cfg.elen > 64 || cpu->cfg.elen < 8) { error_setg(errp, - "Unsupported 
vector spec version '%s'", - cpu->cfg.vext_spec); + "Vector extension implementation only supports ELEN " + "in the range [8, 64]"); return; } - } else { - qemu_log("vector version is not specified, " - "use the default value v1.0\n"); - } - set_vext_version(env, vext_version); + if (cpu->cfg.vext_spec) { + if (!g_strcmp0(cpu->cfg.vext_spec, "v1.0")) { + vext_version = VEXT_VERSION_1_00_0; + } else { + error_setg(errp, + "Unsupported vector spec version '%s'", + cpu->cfg.vext_spec); + return; + } + } else { + qemu_log("vector version is not specified, " + "use the default value v1.0\n"); + } + set_vext_version(env, vext_version); } if (cpu->cfg.ext_j) { ext |= RVJ; @@ -922,24 +968,13 @@ static void riscv_cpu_realize(DeviceState *dev, Error **errp) } } - if (cpu->cfg.mmu) { - riscv_set_feature(env, RISCV_FEATURE_MMU); - } - - if (cpu->cfg.pmp) { - riscv_set_feature(env, RISCV_FEATURE_PMP); - + if (cpu->cfg.epmp && !cpu->cfg.pmp) { /* * Enhanced PMP should only be available * on harts with PMP support */ - if (cpu->cfg.epmp) { - riscv_set_feature(env, RISCV_FEATURE_EPMP); - } - } - - if (cpu->cfg.debug) { - riscv_set_feature(env, RISCV_FEATURE_DEBUG); + error_setg(errp, "Invalid configuration: EPMP requires PMP support"); + return; } @@ -1093,6 +1128,7 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("Zfhmin", RISCVCPU, cfg.ext_zfhmin, false), DEFINE_PROP_BOOL("Zve32f", RISCVCPU, cfg.ext_zve32f, false), DEFINE_PROP_BOOL("Zve64f", RISCVCPU, cfg.ext_zve64f, false), + DEFINE_PROP_BOOL("Zve64d", RISCVCPU, cfg.ext_zve64d, false), DEFINE_PROP_BOOL("mmu", RISCVCPU, cfg.mmu, true), DEFINE_PROP_BOOL("pmp", RISCVCPU, cfg.pmp, true), DEFINE_PROP_BOOL("sstc", RISCVCPU, cfg.ext_sstc, true), @@ -1102,6 +1138,8 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_UINT16("vlen", RISCVCPU, cfg.vlen, 128), DEFINE_PROP_UINT16("elen", RISCVCPU, cfg.elen, 64), + DEFINE_PROP_BOOL("svadu", RISCVCPU, cfg.ext_svadu, true), + DEFINE_PROP_BOOL("svinval", RISCVCPU, cfg.ext_svinval, false), DEFINE_PROP_BOOL("svnapot", RISCVCPU, cfg.ext_svnapot, false), DEFINE_PROP_BOOL("svpbmt", RISCVCPU, cfg.ext_svpbmt, false), @@ -1146,12 +1184,16 @@ static Property riscv_cpu_extensions[] = { DEFINE_PROP_BOOL("xventanacondops", RISCVCPU, cfg.ext_XVentanaCondOps, false), /* These are experimental so mark with 'x-' */ + DEFINE_PROP_BOOL("x-zicond", RISCVCPU, cfg.ext_zicond, false), DEFINE_PROP_BOOL("x-j", RISCVCPU, cfg.ext_j, false), /* ePMP 0.9.3 */ DEFINE_PROP_BOOL("x-epmp", RISCVCPU, cfg.epmp, false), DEFINE_PROP_BOOL("x-smaia", RISCVCPU, cfg.ext_smaia, false), DEFINE_PROP_BOOL("x-ssaia", RISCVCPU, cfg.ext_ssaia, false), + DEFINE_PROP_BOOL("x-zvfh", RISCVCPU, cfg.ext_zvfh, false), + DEFINE_PROP_BOOL("x-zvfhmin", RISCVCPU, cfg.ext_zvfhmin, false), + DEFINE_PROP_END_OF_LIST(), }; @@ -1213,6 +1255,12 @@ static Property riscv_cpu_properties[] = { DEFINE_PROP_BOOL("rvv_ta_all_1s", RISCVCPU, cfg.rvv_ta_all_1s, false), DEFINE_PROP_BOOL("rvv_ma_all_1s", RISCVCPU, cfg.rvv_ma_all_1s, false), + + /* + * write_misa() is marked as experimental for now so mark + * it with -x and default to 'false'. 
+ */ + DEFINE_PROP_BOOL("x-misa-w", RISCVCPU, cfg.misa_w, false), DEFINE_PROP_END_OF_LIST(), }; diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 31537fc05f..665b4c60b0 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -81,17 +81,6 @@ #define RVH RV('H') #define RVJ RV('J') -/* S extension denotes that Supervisor mode exists, however it is possible - to have a core that support S mode but does not have an MMU and there - is currently no bit in misa to indicate whether an MMU exists or not - so a cpu features bitfield is required, likewise for optional PMP support */ -enum { - RISCV_FEATURE_MMU, - RISCV_FEATURE_PMP, - RISCV_FEATURE_EPMP, - RISCV_FEATURE_MISA, - RISCV_FEATURE_DEBUG -}; /* Privileged specification version */ enum { @@ -186,8 +175,6 @@ struct CPUArchState { /* 128-bit helpers upper part return value */ target_ulong retxh; - uint32_t features; - #ifdef CONFIG_USER_ONLY uint32_t elf_flags; #endif @@ -447,9 +434,11 @@ struct RISCVCPUConfig { bool ext_zkt; bool ext_ifencei; bool ext_icsr; + bool ext_zicond; bool ext_zihintpause; bool ext_smstateen; bool ext_sstc; + bool ext_svadu; bool ext_svinval; bool ext_svnapot; bool ext_svpbmt; @@ -462,7 +451,10 @@ struct RISCVCPUConfig { bool ext_zhinxmin; bool ext_zve32f; bool ext_zve64f; + bool ext_zve64d; bool ext_zmmul; + bool ext_zvfh; + bool ext_zvfhmin; bool ext_smaia; bool ext_ssaia; bool ext_sscofpmf; @@ -498,6 +490,7 @@ struct RISCVCPUConfig { bool pmp; bool epmp; bool debug; + bool misa_w; bool short_isa_string; }; @@ -535,16 +528,6 @@ static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext) return (env->misa_ext & ext) != 0; } -static inline bool riscv_feature(CPURISCVState *env, int feature) -{ - return env->features & (1ULL << feature); -} - -static inline void riscv_set_feature(CPURISCVState *env, int feature) -{ - env->features |= (1ULL << feature); -} - #include "cpu_user.h" extern const char * const riscv_int_regnames[]; @@ -654,6 +637,11 @@ static inline RISCVMXL riscv_cpu_mxl(CPURISCVState *env) #endif #define riscv_cpu_mxl_bits(env) (1UL << (4 + riscv_cpu_mxl(env))) +static inline const RISCVCPUConfig *riscv_cpu_cfg(CPURISCVState *env) +{ + return &env_archcpu(env)->cfg; +} + #if defined(TARGET_RISCV32) #define cpu_recompute_xl(env) ((void)(env), MXL_RV32) #else diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 8b0d7e20ea..fca7ef0cef 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -747,10 +747,12 @@ typedef enum RISCVException { #define MENVCFG_CBIE (3UL << 4) #define MENVCFG_CBCFE BIT(6) #define MENVCFG_CBZE BIT(7) +#define MENVCFG_HADE (1ULL << 61) #define MENVCFG_PBMTE (1ULL << 62) #define MENVCFG_STCE (1ULL << 63) /* For RV32 */ +#define MENVCFGH_HADE BIT(29) #define MENVCFGH_PBMTE BIT(30) #define MENVCFGH_STCE BIT(31) @@ -763,10 +765,12 @@ typedef enum RISCVException { #define HENVCFG_CBIE MENVCFG_CBIE #define HENVCFG_CBCFE MENVCFG_CBCFE #define HENVCFG_CBZE MENVCFG_CBZE +#define HENVCFG_HADE MENVCFG_HADE #define HENVCFG_PBMTE MENVCFG_PBMTE #define HENVCFG_STCE MENVCFG_STCE /* For RV32 */ +#define HENVCFGH_HADE MENVCFGH_HADE #define HENVCFGH_PBMTE MENVCFGH_PBMTE #define HENVCFGH_STCE MENVCFGH_STCE diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 3a9472a2ff..f88c503cf4 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -51,7 +51,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, *pc = env->xl == MXL_RV32 ? 
env->pc & UINT32_MAX : env->pc; *cs_base = 0; - if (riscv_has_ext(env, RVV) || cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (cpu->cfg.ext_zve32f) { /* * If env->vl equals to VLMAX, we can use generic vector operation * expanders (GVEC) to accerlate the vector operations. @@ -105,7 +105,7 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc, flags = FIELD_DP32(flags, TB_FLAGS, MSTATUS_HS_VS, get_field(env->mstatus_hs, MSTATUS_VS)); } - if (riscv_feature(env, RISCV_FEATURE_DEBUG) && !icount_enabled()) { + if (cpu->cfg.debug && !icount_enabled()) { flags = FIELD_DP32(flags, TB_FLAGS, ITRIGGER, env->itrigger_enabled); } #endif @@ -706,7 +706,7 @@ static int get_physical_address_pmp(CPURISCVState *env, int *prot, pmp_priv_t pmp_priv; int pmp_index = -1; - if (!riscv_feature(env, RISCV_FEATURE_PMP)) { + if (!riscv_cpu_cfg(env)->pmp) { *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; } @@ -796,7 +796,7 @@ static int get_physical_address(CPURISCVState *env, hwaddr *physical, mode = PRV_U; } - if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) { + if (mode == PRV_M || !riscv_cpu_cfg(env)->mmu) { *physical = addr; *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; return TRANSLATE_SUCCESS; @@ -936,9 +936,17 @@ restart: return TRANSLATE_FAIL; } + bool pbmte = env->menvcfg & MENVCFG_PBMTE; + bool hade = env->menvcfg & MENVCFG_HADE; + + if (first_stage && two_stage && riscv_cpu_virt_enabled(env)) { + pbmte = pbmte && (env->henvcfg & HENVCFG_PBMTE); + hade = hade && (env->henvcfg & HENVCFG_HADE); + } + if (riscv_cpu_sxl(env) == MXL_RV32) { ppn = pte >> PTE_PPN_SHIFT; - } else if (cpu->cfg.ext_svpbmt || cpu->cfg.ext_svnapot) { + } else if (pbmte || cpu->cfg.ext_svnapot) { ppn = (pte & (target_ulong)PTE_PPN_MASK) >> PTE_PPN_SHIFT; } else { ppn = pte >> PTE_PPN_SHIFT; @@ -950,7 +958,7 @@ restart: if (!(pte & PTE_V)) { /* Invalid PTE */ return TRANSLATE_FAIL; - } else if (!cpu->cfg.ext_svpbmt && (pte & PTE_PBMT)) { + } else if (!pbmte && (pte & PTE_PBMT)) { return TRANSLATE_FAIL; } else if (!(pte & (PTE_R | PTE_W | PTE_X))) { /* Inner PTE, continue walking */ @@ -992,6 +1000,10 @@ restart: /* Page table updates need to be atomic with MTTCG enabled */ if (updated_pte != pte) { + if (!hade) { + return TRANSLATE_FAIL; + } + /* * - if accessed or dirty bits need updating, and the PTE is * in RAM, then we do so atomically with a compare and swap. 
diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 1b0a0c1693..3106f96212 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -46,10 +46,8 @@ static RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit) { bool virt = riscv_cpu_virt_enabled(env); - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - if (env->priv == PRV_M || !cpu->cfg.ext_smstateen) { + if (env->priv == PRV_M || !riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_NONE; } @@ -81,7 +79,7 @@ static RISCVException fs(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_fp_enabled(env) && - !RISCV_CPU(env_cpu(env))->cfg.ext_zfinx) { + !riscv_cpu_cfg(env)->ext_zfinx) { return RISCV_EXCP_ILLEGAL_INST; } #endif @@ -90,11 +88,9 @@ static RISCVException fs(CPURISCVState *env, int csrno) static RISCVException vs(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); - if (env->misa_ext & RVV || - cpu->cfg.ext_zve32f || cpu->cfg.ext_zve64f) { + if (cpu->cfg.ext_zve32f) { #if !defined(CONFIG_USER_ONLY) if (!env->debugger && !riscv_cpu_vector_enabled(env)) { return RISCV_EXCP_ILLEGAL_INST; @@ -108,8 +104,7 @@ static RISCVException vs(CPURISCVState *env, int csrno) static RISCVException ctr(CPURISCVState *env, int csrno) { #if !defined(CONFIG_USER_ONLY) - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); int ctr_index; target_ulong ctr_mask; int base_csrno = CSR_CYCLE; @@ -134,6 +129,10 @@ static RISCVException ctr(CPURISCVState *env, int csrno) skip_ext_pmu_check: + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M && !get_field(env->mcounteren, ctr_mask)) { return RISCV_EXCP_ILLEGAL_INST; } @@ -166,8 +165,7 @@ static RISCVException ctr32(CPURISCVState *env, int csrno) #if !defined(CONFIG_USER_ONLY) static RISCVException mctr(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + int pmu_num = riscv_cpu_cfg(env)->pmu_num; int ctr_index; int base_csrno = CSR_MHPMCOUNTER3; @@ -176,7 +174,7 @@ static RISCVException mctr(CPURISCVState *env, int csrno) base_csrno += 0x80; } ctr_index = csrno - base_csrno; - if (!cpu->cfg.pmu_num || ctr_index >= cpu->cfg.pmu_num) { + if (!pmu_num || ctr_index >= pmu_num) { /* The PMU is not enabled or counter is out of range*/ return RISCV_EXCP_ILLEGAL_INST; } @@ -195,8 +193,7 @@ static RISCVException mctr32(CPURISCVState *env, int csrno) static RISCVException sscofpmf(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_sscofpmf) { return RISCV_EXCP_ILLEGAL_INST; @@ -222,9 +219,7 @@ static RISCVException any32(CPURISCVState *env, int csrno) static int aia_any(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -233,9 +228,7 @@ static int aia_any(CPURISCVState *env, int csrno) static int aia_any32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_smaia) { + if (!riscv_cpu_cfg(env)->ext_smaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -262,9 +255,7 @@ static int smode32(CPURISCVState *env, int csrno) static int aia_smode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return 
RISCV_EXCP_ILLEGAL_INST; } @@ -273,9 +264,7 @@ static int aia_smode(CPURISCVState *env, int csrno) static int aia_smode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -321,8 +310,7 @@ static RISCVException umode32(CPURISCVState *env, int csrno) static RISCVException mstateen(CPURISCVState *env, int csrno) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; @@ -333,20 +321,28 @@ static RISCVException mstateen(CPURISCVState *env, int csrno) static RISCVException hstateen_pred(CPURISCVState *env, int csrno, int base) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); if (!cpu->cfg.ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = hmode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[csrno - base] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; } } - return hmode(env, csrno); + return RISCV_EXCP_NONE; } static RISCVException hstateen(CPURISCVState *env, int csrno) @@ -363,13 +359,20 @@ static RISCVException sstateen(CPURISCVState *env, int csrno) { bool virt = riscv_cpu_virt_enabled(env); int index = csrno - CSR_SSTATEEN0; - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - if (!cpu->cfg.ext_smstateen) { + if (!riscv_cpu_cfg(env)->ext_smstateen) { return RISCV_EXCP_ILLEGAL_INST; } + RISCVException ret = smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + if (env->priv < PRV_M) { if (!(env->mstateen[index] & SMSTATEEN_STATEEN)) { return RISCV_EXCP_ILLEGAL_INST; @@ -382,7 +385,61 @@ static RISCVException sstateen(CPURISCVState *env, int csrno) } } - return smode(env, csrno); + return RISCV_EXCP_NONE; +} + +static RISCVException sstc(CPURISCVState *env, int csrno) +{ + RISCVCPU *cpu = env_archcpu(env); + bool hmode_check = false; + + if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { + hmode_check = true; + } + + RISCVException ret = hmode_check ? hmode(env, csrno) : smode(env, csrno); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (env->debugger) { + return RISCV_EXCP_NONE; + } + + if (env->priv == PRV_M) { + return RISCV_EXCP_NONE; + } + + /* + * No need of separate function for rv32 as menvcfg stores both menvcfg + * menvcfgh for RV32. 
+ */ + if (!(get_field(env->mcounteren, COUNTEREN_TM) && + get_field(env->menvcfg, MENVCFG_STCE))) { + return RISCV_EXCP_ILLEGAL_INST; + } + + if (riscv_cpu_virt_enabled(env)) { + if (!(get_field(env->hcounteren, COUNTEREN_TM) && + get_field(env->henvcfg, HENVCFG_STCE))) { + return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; + } + } + + return RISCV_EXCP_NONE; +} + +static RISCVException sstc_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return sstc(env, csrno); } /* Checks if PointerMasking registers could be accessed */ @@ -397,9 +454,7 @@ static RISCVException pointer_masking(CPURISCVState *env, int csrno) static int aia_hmode(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -408,9 +463,7 @@ static int aia_hmode(CPURISCVState *env, int csrno) static int aia_hmode32(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_ssaia) { + if (!riscv_cpu_cfg(env)->ext_ssaia) { return RISCV_EXCP_ILLEGAL_INST; } @@ -419,7 +472,16 @@ static int aia_hmode32(CPURISCVState *env, int csrno) static RISCVException pmp(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_PMP)) { + if (riscv_cpu_cfg(env)->pmp) { + if (csrno <= CSR_PMPCFG3) { + uint32_t reg_index = csrno - CSR_PMPCFG0; + + /* TODO: RV128 restriction check */ + if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { + return RISCV_EXCP_ILLEGAL_INST; + } + } + return RISCV_EXCP_NONE; } @@ -428,7 +490,7 @@ static RISCVException pmp(CPURISCVState *env, int csrno) static RISCVException epmp(CPURISCVState *env, int csrno) { - if (env->priv == PRV_M && riscv_feature(env, RISCV_FEATURE_EPMP)) { + if (riscv_cpu_cfg(env)->epmp) { return RISCV_EXCP_NONE; } @@ -437,7 +499,7 @@ static RISCVException epmp(CPURISCVState *env, int csrno) static RISCVException debug(CPURISCVState *env, int csrno) { - if (riscv_feature(env, RISCV_FEATURE_DEBUG)) { + if (riscv_cpu_cfg(env)->debug) { return RISCV_EXCP_NONE; } @@ -447,13 +509,15 @@ static RISCVException debug(CPURISCVState *env, int csrno) static RISCVException seed(CPURISCVState *env, int csrno) { - RISCVCPU *cpu = env_archcpu(env); - - if (!cpu->cfg.ext_zkr) { + if (!riscv_cpu_cfg(env)->ext_zkr) { return RISCV_EXCP_ILLEGAL_INST; } #if !defined(CONFIG_USER_ONLY) + if (env->debugger) { + return RISCV_EXCP_NONE; + } + /* * With a CSR read-write instruction: * 1) The seed CSR is always available in machine mode as normal. @@ -572,7 +636,7 @@ static RISCVException read_vl(CPURISCVState *env, int csrno, static int read_vlenb(CPURISCVState *env, int csrno, target_ulong *val) { - *val = env_archcpu(env)->cfg.vlen >> 3; + *val = riscv_cpu_cfg(env)->vlen >> 3; return RISCV_EXCP_NONE; } @@ -627,7 +691,7 @@ static RISCVException write_vstart(CPURISCVState *env, int csrno, * The vstart CSR is defined to have only enough writable bits * to hold the largest element index, i.e. lg2(VLEN) bits. 
*/ - env->vstart = val & ~(~0ULL << ctzl(env_archcpu(env)->cfg.vlen)); + env->vstart = val & ~(~0ULL << ctzl(riscv_cpu_cfg(env)->vlen)); return RISCV_EXCP_NONE; } @@ -916,54 +980,8 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException sstc(CPURISCVState *env, int csrno) -{ - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); - bool hmode_check = false; - - if (!cpu->cfg.ext_sstc || !env->rdtime_fn) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (env->priv == PRV_M) { - return RISCV_EXCP_NONE; - } - - /* - * No need of separate function for rv32 as menvcfg stores both menvcfg - * menvcfgh for RV32. - */ - if (!(get_field(env->mcounteren, COUNTEREN_TM) && - get_field(env->menvcfg, MENVCFG_STCE))) { - return RISCV_EXCP_ILLEGAL_INST; - } - - if (riscv_cpu_virt_enabled(env)) { - if (!(get_field(env->hcounteren, COUNTEREN_TM) && - get_field(env->henvcfg, HENVCFG_STCE))) { - return RISCV_EXCP_VIRT_INSTRUCTION_FAULT; - } - } - - if ((csrno == CSR_VSTIMECMP) || (csrno == CSR_VSTIMECMPH)) { - hmode_check = true; - } - - return hmode_check ? hmode(env, csrno) : smode(env, csrno); -} - -static RISCVException sstc_32(CPURISCVState *env, int csrno) -{ - if (riscv_cpu_mxl(env) != MXL_RV32) { - return RISCV_EXCP_ILLEGAL_INST; - } - - return sstc(env, csrno); -} - static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp; @@ -971,7 +989,7 @@ static RISCVException read_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->vstimecmp >> 32; @@ -979,7 +997,7 @@ static RISCVException read_vstimecmph(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -996,7 +1014,7 @@ static RISCVException write_vstimecmp(CPURISCVState *env, int csrno, } static RISCVException write_vstimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1020,7 +1038,7 @@ static RISCVException read_stimecmp(CPURISCVState *env, int csrno, } static RISCVException read_stimecmph(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { if (riscv_cpu_virt_enabled(env)) { *val = env->vstimecmp >> 32; @@ -1032,7 +1050,7 @@ static RISCVException read_stimecmph(CPURISCVState *env, int csrno, } static RISCVException write_stimecmp(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1055,7 +1073,7 @@ static RISCVException write_stimecmp(CPURISCVState *env, int csrno, } static RISCVException write_stimecmph(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { RISCVCPU *cpu = env_archcpu(env); @@ -1152,8 +1170,7 @@ static RISCVException write_ignore(CPURISCVState *env, int csrno, static RISCVException read_mvendorid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.mvendorid; return RISCV_EXCP_NONE; @@ -1162,8 +1179,7 @@ static RISCVException read_mvendorid(CPURISCVState *env, int csrno, static RISCVException read_marchid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.marchid; 
return RISCV_EXCP_NONE; @@ -1172,8 +1188,7 @@ static RISCVException read_marchid(CPURISCVState *env, int csrno, static RISCVException read_mimpid(CPURISCVState *env, int csrno, target_ulong *val) { - CPUState *cs = env_cpu(env); - RISCVCPU *cpu = RISCV_CPU(cs); + RISCVCPU *cpu = env_archcpu(env); *val = cpu->cfg.mimpid; return RISCV_EXCP_NONE; @@ -1329,7 +1344,7 @@ static RISCVException read_misa(CPURISCVState *env, int csrno, static RISCVException write_misa(CPURISCVState *env, int csrno, target_ulong val) { - if (!riscv_feature(env, RISCV_FEATURE_MISA)) { + if (!riscv_cpu_cfg(env)->misa_w) { /* drop write to misa */ return RISCV_EXCP_NONE; } @@ -1342,7 +1357,8 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* 'E' excludes all other extensions */ if (val & RVE) { - /* when we support 'E' we can do "val = RVE;" however + /* + * when we support 'E' we can do "val = RVE;" however * for now we just drop writes if 'E' is present. */ return RISCV_EXCP_NONE; @@ -1356,15 +1372,13 @@ static RISCVException write_misa(CPURISCVState *env, int csrno, /* Mask extensions that are not supported by this hart */ val &= env->misa_ext_mask; - /* Mask extensions that are not supported by QEMU */ - val &= (RVI | RVE | RVM | RVA | RVF | RVD | RVC | RVS | RVU | RVV); - /* 'D' depends on 'F', so clear 'D' if 'F' is not present */ if ((val & RVD) && !(val & RVF)) { val &= ~RVD; } - /* Suppress 'C' if next instruction is not aligned + /* + * Suppress 'C' if next instruction is not aligned * TODO: this should check next_pc */ if ((val & RVC) && (GETPC() & ~3) != 0) { @@ -1833,28 +1847,28 @@ static RISCVException write_mscratch(CPURISCVState *env, int csrno, } static RISCVException read_mepc(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mepc; return RISCV_EXCP_NONE; } static RISCVException write_mepc(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mepc = val; return RISCV_EXCP_NONE; } static RISCVException read_mcause(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->mcause; return RISCV_EXCP_NONE; } static RISCVException write_mcause(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { env->mcause = val; return RISCV_EXCP_NONE; @@ -1876,19 +1890,22 @@ static RISCVException write_mtval(CPURISCVState *env, int csrno, /* Execution environment configuration setup */ static RISCVException read_menvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg; return RISCV_EXCP_NONE; } static RISCVException write_menvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { + RISCVCPUConfig *cfg = &env_archcpu(env)->cfg; uint64_t mask = MENVCFG_FIOM | MENVCFG_CBIE | MENVCFG_CBCFE | MENVCFG_CBZE; if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= MENVCFG_PBMTE | MENVCFG_STCE; + mask |= (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? 
MENVCFG_HADE : 0); } env->menvcfg = (env->menvcfg & ~mask) | (val & mask); @@ -1896,16 +1913,19 @@ static RISCVException write_menvcfg(CPURISCVState *env, int csrno, } static RISCVException read_menvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { *val = env->menvcfg >> 32; return RISCV_EXCP_NONE; } static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = MENVCFG_PBMTE | MENVCFG_STCE; + RISCVCPUConfig *cfg = &env_archcpu(env)->cfg; + uint64_t mask = (cfg->ext_svpbmt ? MENVCFG_PBMTE : 0) | + (cfg->ext_sstc ? MENVCFG_STCE : 0) | + (cfg->ext_svadu ? MENVCFG_HADE : 0); uint64_t valh = (uint64_t)val << 32; env->menvcfg = (env->menvcfg & ~mask) | (valh & mask); @@ -1914,7 +1934,7 @@ static RISCVException write_menvcfgh(CPURISCVState *env, int csrno, } static RISCVException read_senvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1928,7 +1948,7 @@ static RISCVException read_senvcfg(CPURISCVState *env, int csrno, } static RISCVException write_senvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = SENVCFG_FIOM | SENVCFG_CBIE | SENVCFG_CBCFE | SENVCFG_CBZE; RISCVException ret; @@ -1943,7 +1963,7 @@ static RISCVException write_senvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfg(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1952,12 +1972,18 @@ static RISCVException read_henvcfg(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg; + /* + * henvcfg.pbmte is read_only 0 when menvcfg.pbmte = 0 + * henvcfg.stce is read_only 0 when menvcfg.stce = 0 + * henvcfg.hade is read_only 0 when menvcfg.hade = 0 + */ + *val = env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg); return RISCV_EXCP_NONE; } static RISCVException write_henvcfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { uint64_t mask = HENVCFG_FIOM | HENVCFG_CBIE | HENVCFG_CBCFE | HENVCFG_CBZE; RISCVException ret; @@ -1968,7 +1994,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } if (riscv_cpu_mxl(env) == MXL_RV64) { - mask |= HENVCFG_PBMTE | HENVCFG_STCE; + mask |= env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE); } env->henvcfg = (env->henvcfg & ~mask) | (val & mask); @@ -1977,7 +2003,7 @@ static RISCVException write_henvcfg(CPURISCVState *env, int csrno, } static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, - target_ulong *val) + target_ulong *val) { RISCVException ret; @@ -1986,14 +2012,16 @@ static RISCVException read_henvcfgh(CPURISCVState *env, int csrno, return ret; } - *val = env->henvcfg >> 32; + *val = (env->henvcfg & (~(HENVCFG_PBMTE | HENVCFG_STCE | HENVCFG_HADE) | + env->menvcfg)) >> 32; return RISCV_EXCP_NONE; } static RISCVException write_henvcfgh(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { - uint64_t mask = HENVCFG_PBMTE | HENVCFG_STCE; + uint64_t mask = env->menvcfg & (HENVCFG_PBMTE | HENVCFG_STCE | + HENVCFG_HADE); uint64_t valh = (uint64_t)val << 32; RISCVException ret; @@ -2034,13 +2062,13 @@ static RISCVException write_mstateen0(CPURISCVState *env, int csrno, } static RISCVException write_mstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } static RISCVException read_mstateenh(CPURISCVState *env, int csrno, - 
target_ulong *val) + target_ulong *val) { *val = env->mstateen[csrno - CSR_MSTATEEN0H] >> 32; @@ -2061,7 +2089,7 @@ static RISCVException write_mstateenh(CPURISCVState *env, int csrno, } static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { uint64_t wr_mask = SMSTATEEN_STATEEN | SMSTATEEN0_HSENVCFG; @@ -2069,7 +2097,7 @@ static RISCVException write_mstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_mstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_mstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2106,7 +2134,7 @@ static RISCVException write_hstateen0(CPURISCVState *env, int csrno, } static RISCVException write_hstateen_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateen(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2145,7 +2173,7 @@ static RISCVException write_hstateen0h(CPURISCVState *env, int csrno, } static RISCVException write_hstateenh_1_3(CPURISCVState *env, int csrno, - target_ulong new_val) + target_ulong new_val) { return write_hstateenh(env, csrno, SMSTATEEN_STATEEN, new_val); } @@ -2624,7 +2652,7 @@ static RISCVException rmw_siph(CPURISCVState *env, int csrno, static RISCVException read_satp(CPURISCVState *env, int csrno, target_ulong *val) { - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { *val = 0; return RISCV_EXCP_NONE; } @@ -2643,7 +2671,7 @@ static RISCVException write_satp(CPURISCVState *env, int csrno, { target_ulong vm, mask; - if (!riscv_feature(env, RISCV_FEATURE_MMU)) { + if (!riscv_cpu_cfg(env)->mmu) { return RISCV_EXCP_NONE; } @@ -3338,30 +3366,18 @@ static RISCVException read_mseccfg(CPURISCVState *env, int csrno, } static RISCVException write_mseccfg(CPURISCVState *env, int csrno, - target_ulong val) + target_ulong val) { mseccfg_csr_write(env, val); return RISCV_EXCP_NONE; } -static bool check_pmp_reg_index(CPURISCVState *env, uint32_t reg_index) -{ - /* TODO: RV128 restriction check */ - if ((reg_index & 1) && (riscv_cpu_mxl(env) == MXL_RV64)) { - return false; - } - return true; -} - static RISCVException read_pmpcfg(CPURISCVState *env, int csrno, target_ulong *val) { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - *val = pmpcfg_csr_read(env, csrno - CSR_PMPCFG0); + *val = pmpcfg_csr_read(env, reg_index); return RISCV_EXCP_NONE; } @@ -3370,10 +3386,7 @@ static RISCVException write_pmpcfg(CPURISCVState *env, int csrno, { uint32_t reg_index = csrno - CSR_PMPCFG0; - if (!check_pmp_reg_index(env, reg_index)) { - return RISCV_EXCP_ILLEGAL_INST; - } - pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val); + pmpcfg_csr_write(env, reg_index, val); return RISCV_EXCP_NONE; } @@ -3776,27 +3789,32 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, RISCVCPU *cpu) { /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */ - int read_only = get_field(csrno, 0xC00) == 3; + bool read_only = get_field(csrno, 0xC00) == 3; int csr_min_priv = csr_ops[csrno].min_priv_ver; - /* ensure the CSR extension is enabled. 
*/ + /* ensure the CSR extension is enabled */ if (!cpu->cfg.ext_icsr) { return RISCV_EXCP_ILLEGAL_INST; } + /* privileged spec version check */ if (env->priv_ver < csr_min_priv) { return RISCV_EXCP_ILLEGAL_INST; } - /* check predicate */ - if (!csr_ops[csrno].predicate) { - return RISCV_EXCP_ILLEGAL_INST; - } - + /* read / write check */ if (write_mask && read_only) { return RISCV_EXCP_ILLEGAL_INST; } + /* + * The predicate() not only does existence check but also does some + * access control check which triggers for example virtual instruction + * exception in some cases. When writing read-only CSRs in those cases + * illegal instruction exception should be triggered instead of virtual + * instruction exception. Hence this comes after the read / write check. + */ + g_assert(csr_ops[csrno].predicate != NULL); RISCVException ret = csr_ops[csrno].predicate(env, csrno); if (ret != RISCV_EXCP_NONE) { return ret; diff --git a/target/riscv/gdbstub.c b/target/riscv/gdbstub.c index 6e7bbdbd5e..6048541606 100644 --- a/target/riscv/gdbstub.c +++ b/target/riscv/gdbstub.c @@ -127,40 +127,6 @@ static int riscv_gdb_set_fpu(CPURISCVState *env, uint8_t *mem_buf, int n) return 0; } -/* - * Convert register index number passed by GDB to the correspond - * vector CSR number. Vector CSRs are defined after vector registers - * in dynamic generated riscv-vector.xml, thus the starting register index - * of vector CSRs is 32. - * Return 0 if register index number is out of range. - */ -static int riscv_gdb_vector_csrno(int num_regs) -{ - /* - * The order of vector CSRs in the switch case - * should match with the order defined in csr_ops[]. - */ - switch (num_regs) { - case 32: - return CSR_VSTART; - case 33: - return CSR_VXSAT; - case 34: - return CSR_VXRM; - case 35: - return CSR_VCSR; - case 36: - return CSR_VL; - case 37: - return CSR_VTYPE; - case 38: - return CSR_VLENB; - default: - /* Unknown register. 
*/ - return 0; - } -} - static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) { uint16_t vlenb = env_archcpu(env)->cfg.vlen >> 3; @@ -174,19 +140,6 @@ static int riscv_gdb_get_vector(CPURISCVState *env, GByteArray *buf, int n) return cnt; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = 0; - int result = riscv_csrrw_debug(env, csrno, &val, 0, 0); - - if (result == RISCV_EXCP_NONE) { - return gdb_get_regl(buf, val); - } - return 0; } @@ -201,19 +154,6 @@ static int riscv_gdb_set_vector(CPURISCVState *env, uint8_t *mem_buf, int n) return vlenb; } - int csrno = riscv_gdb_vector_csrno(n); - - if (!csrno) { - return 0; - } - - target_ulong val = ldtul_p(mem_buf); - int result = riscv_csrrw_debug(env, csrno, NULL, val, -1); - - if (result == RISCV_EXCP_NONE) { - return sizeof(target_ulong); - } - return 0; } @@ -280,6 +220,10 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) int bitsize = 16 << env->misa_mxl_max; int i; +#if !defined(CONFIG_USER_ONLY) + env->debugger = true; +#endif + /* Until gdb knows about 128-bit registers */ if (bitsize > 64) { bitsize = 64; @@ -290,6 +234,9 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, "<feature name=\"org.gnu.gdb.riscv.csr\">"); for (i = 0; i < CSR_TABLE_SIZE; i++) { + if (env->priv_ver < csr_ops[i].min_priv_ver) { + continue; + } predicate = csr_ops[i].predicate; if (predicate && (predicate(env, i) == RISCV_EXCP_NONE)) { if (csr_ops[i].name) { @@ -305,6 +252,11 @@ static int riscv_gen_dynamic_csr_xml(CPUState *cs, int base_reg) g_string_append_printf(s, "</feature>"); cpu->dyn_csr_xml = g_string_free(s, false); + +#if !defined(CONFIG_USER_ONLY) + env->debugger = false; +#endif + return CSR_TABLE_SIZE; } @@ -349,21 +301,6 @@ static int ricsv_gen_dynamic_vector_xml(CPUState *cs, int base_reg) num_regs++; } - /* Define vector CSRs */ - const char *vector_csrs[7] = { - "vstart", "vxsat", "vxrm", "vcsr", - "vl", "vtype", "vlenb" - }; - - for (i = 0; i < 7; i++) { - g_string_append_printf(s, - "<reg name=\"%s\" bitsize=\"%d\"" - " regnum=\"%d\" group=\"vector\"" - " type=\"int\"/>", - vector_csrs[i], TARGET_LONG_BITS, base_reg++); - num_regs++; - } - g_string_append_printf(s, "</feature>"); cpu->dyn_vreg_xml = g_string_free(s, false); @@ -382,9 +319,9 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) 32, "riscv-32bit-fpu.xml", 0); } if (env->misa_ext & RVV) { + int base_reg = cs->gdb_num_regs; gdb_register_coprocessor(cs, riscv_gdb_get_vector, riscv_gdb_set_vector, - ricsv_gen_dynamic_vector_xml(cs, - cs->gdb_num_regs), + ricsv_gen_dynamic_vector_xml(cs, base_reg), "riscv-vector.xml", 0); } switch (env->misa_mxl_max) { @@ -403,7 +340,10 @@ void riscv_cpu_register_gdb_regs_for_features(CPUState *cs) g_assert_not_reached(); } - gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, - riscv_gen_dynamic_csr_xml(cs, cs->gdb_num_regs), - "riscv-csr.xml", 0); + if (cpu->cfg.ext_icsr) { + int base_reg = cs->gdb_num_regs; + gdb_register_coprocessor(cs, riscv_gdb_get_csr, riscv_gdb_set_csr, + riscv_gen_dynamic_csr_xml(cs, base_reg), + "riscv-csr.xml", 0); + } } diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index b7e7613ea2..fb537e922e 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -890,3 +890,7 @@ sm3p1 00 01000 01001 ..... 001 ..... 0010011 @r2 # *** RV32 Zksed Standard Extension *** sm4ed .. 11000 ..... ..... 000 ..... 0110011 @k_aes sm4ks .. 11010 ..... 
..... 000 ..... 0110011 @k_aes + +# *** RV32 Zicond Standard Extension *** +czero_eqz 0000111 ..... ..... 101 ..... 0110011 @r +czero_nez 0000111 ..... ..... 111 ..... 0110011 @r diff --git a/target/riscv/insn_trans/trans_rvv.c.inc b/target/riscv/insn_trans/trans_rvv.c.inc index bbb5c3a7b5..fc0d0d60e8 100644 --- a/target/riscv/insn_trans/trans_rvv.c.inc +++ b/target/riscv/insn_trans/trans_rvv.c.inc @@ -40,10 +40,11 @@ static bool require_rvf(DisasContext *s) switch (s->sew) { case MO_16: + return s->cfg_ptr->ext_zvfh; case MO_32: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_64: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } @@ -57,57 +58,32 @@ static bool require_scale_rvf(DisasContext *s) switch (s->sew) { case MO_8: + return s->cfg_ptr->ext_zvfh; case MO_16: - return has_ext(s, RVF); + return s->cfg_ptr->ext_zve32f; case MO_32: - return has_ext(s, RVD); + return s->cfg_ptr->ext_zve64d; default: return false; } } -static bool require_zve32f(DisasContext *s) +static bool require_scale_rvfmin(DisasContext *s) { - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve32f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve32f(DisasContext *s) -{ - /* RVV + Zve32f = RVV. */ - if (has_ext(s, RVV)) { - return true; - } - - /* Zve32f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; -} - -static bool require_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; + if (s->mstatus_fs == 0) { + return false; } - /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_32 : true; -} - -static bool require_scale_zve64f(DisasContext *s) -{ - /* RVV + Zve64f = RVV. */ - if (has_ext(s, RVV)) { - return true; + switch (s->sew) { + case MO_8: + return s->cfg_ptr->ext_zvfhmin; + case MO_16: + return s->cfg_ptr->ext_zve32f; + case MO_32: + return s->cfg_ptr->ext_zve64d; + default: + return false; } - - /* Zve64f doesn't support FP64. (Section 18.2) */ - return s->cfg_ptr->ext_zve64f ? s->sew <= MO_16 : true; } /* Destination vector register group cannot overlap source mask register. */ @@ -173,9 +149,7 @@ static bool do_vsetvl(DisasContext *s, int rd, int rs1, TCGv s2) { TCGv s1, dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -210,9 +184,7 @@ static bool do_vsetivli(DisasContext *s, int rd, TCGv s1, TCGv s2) { TCGv dst; - if (!require_rvv(s) || - !(has_ext(s, RVV) || s->cfg_ptr->ext_zve32f || - s->cfg_ptr->ext_zve64f)) { + if (!require_rvv(s) || !s->cfg_ptr->ext_zve32f) { return false; } @@ -315,13 +287,12 @@ static bool vext_check_st_index(DisasContext *s, int vd, int vs2, int nf, require_nf(vd, nf, s->lmul); /* - * All Zve* extensions support all vector load and store instructions, - * except Zve64* extensions do not support EEW=64 for index values - * when XLEN=32. (Section 18.2) + * V extension supports all vector load and store instructions, + * except V extension does not support EEW=64 for index values + * when XLEN=32. (Section 18.3) */ if (get_xl(s) == MXL_RV32) { - ret &= (!has_ext(s, RVV) && - s->cfg_ptr->ext_zve64f ? 
eew != MO_64 : true);
+        ret &= (eew != MO_64);
     }
     return ret;
@@ -2027,8 +1998,7 @@ static bool vmulh_vv_check(DisasContext *s, arg_rmrr *a)
      * are not included for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivv_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2041,8 +2011,7 @@ static bool vmulh_vx_check(DisasContext *s, arg_rmrr *a)
      * are not included for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivx_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 GEN_OPIVV_GVEC_TRANS(vmul_vv, mul)
@@ -2259,8 +2228,7 @@ static bool vsmul_vv_check(DisasContext *s, arg_rmrr *a)
      * for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivv_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
@@ -2271,8 +2239,7 @@ static bool vsmul_vx_check(DisasContext *s, arg_rmrr *a)
      * for EEW=64 in Zve64*. (Section 18.2)
      */
     return opivx_check(s, a) &&
-           (!has_ext(s, RVV) &&
-            s->cfg_ptr->ext_zve64f ? s->sew != MO_64 : true);
+           (!has_ext(s, RVV) ? s->sew != MO_64 : true);
 }
 
 GEN_OPIVV_TRANS(vsmul_vv, vsmul_vv_check)
@@ -2335,9 +2302,7 @@ static bool opfvv_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_sss(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* OPFVV without GVEC IR */
@@ -2425,9 +2390,7 @@ static bool opfvf_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ss(s, a->rd, a->rs2, a->vm);
 }
 
 /* OPFVF without GVEC IR */
@@ -2465,9 +2428,7 @@ static bool opfvv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dss(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* OPFVV with WIDEN */
@@ -2510,9 +2471,7 @@ static bool opfvf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_ds(s, a->rd, a->rs2, a->vm);
 }
 
 /* OPFVF with WIDEN */
@@ -2544,9 +2503,7 @@ static bool opfwv_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dds(s, a->rd, a->rs1, a->rs2, a->vm);
 }
 
 /* WIDEN OPFVV with WIDEN */
@@ -2589,9 +2546,7 @@ static bool opfwf_widen_check(DisasContext *s, arg_rmrr *a)
            require_scale_rvf(s) &&
            (s->sew != MO_8) &&
            vext_check_isa_ill(s) &&
-           vext_check_dd(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_dd(s, a->rd, a->rs2, a->vm);
 }
 
 /* WIDEN OPFVF with WIDEN */
@@ -2668,9 +2623,7 @@ static bool opfv_check(DisasContext *s, arg_rmr *a)
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV instructions ignore vs1 check */
-           vext_check_ss(s, a->rd, a->rs2, a->vm) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ss(s, a->rd, a->rs2, a->vm);
 }
 
 static bool do_opfv(DisasContext *s, arg_rmr *a,
@@ -2735,9 +2688,7 @@ static bool opfvv_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_mss(s, a->rd, a->rs1, a->rs2) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_mss(s, a->rd, a->rs1, a->rs2);
 }
 
 GEN_OPFVV_TRANS(vmfeq_vv, opfvv_cmp_check)
@@ -2750,9 +2701,7 @@ static bool opfvf_cmp_check(DisasContext *s, arg_rmrr *a)
     return require_rvv(s) &&
            require_rvf(s) &&
            vext_check_isa_ill(s) &&
-           vext_check_ms(s, a->rd, a->rs2) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           vext_check_ms(s, a->rd, a->rs2);
 }
 
 GEN_OPFVF_TRANS(vmfeq_vf, opfvf_cmp_check)
@@ -2773,9 +2722,7 @@ static bool trans_vfmv_v_f(DisasContext *s, arg_vfmv_v_f *a)
     if (require_rvv(s) &&
         require_rvf(s) &&
         vext_check_isa_ill(s) &&
-        require_align(a->rd, s->lmul) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        require_align(a->rd, s->lmul)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         TCGv_i64 t1;
@@ -2860,18 +2807,14 @@ static bool opfv_widen_check(DisasContext *s, arg_rmr *a)
 static bool opxfv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 static bool opffv_widen_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_widen_check(s, a) &&
-           require_scale_rvf(s) &&
-           (s->sew != MO_8) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           require_scale_rvfmin(s) &&
+           (s->sew != MO_8);
 }
 
 #define GEN_OPFV_WIDEN_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -2922,9 +2865,7 @@ static bool opfxv_widen_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV widening instructions ignore vs1 check */
-           vext_check_ds(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_ds(s, a->rd, a->rs2, a->vm);
 }
 
 #define GEN_OPFXV_WIDEN_TRANS(NAME) \
@@ -2979,18 +2920,21 @@ static bool opfxv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
            require_rvf(s) &&
-           (s->sew != MO_64) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           (s->sew != MO_64);
 }
 
 static bool opffv_narrow_check(DisasContext *s, arg_rmr *a)
 {
     return opfv_narrow_check(s, a) &&
+           require_scale_rvfmin(s) &&
+           (s->sew != MO_8);
+}
+
+static bool opffv_rod_narrow_check(DisasContext *s, arg_rmr *a)
+{
+    return opfv_narrow_check(s, a) &&
            require_scale_rvf(s) &&
-           (s->sew != MO_8) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           (s->sew != MO_8);
 }
 
 #define GEN_OPFV_NARROW_TRANS(NAME, CHECK, HELPER, FRM) \
@@ -3030,7 +2974,7 @@ GEN_OPFV_NARROW_TRANS(vfncvt_f_x_w, opfxv_narrow_check, vfncvt_f_x_w,
 GEN_OPFV_NARROW_TRANS(vfncvt_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
                       RISCV_FRM_DYN)
 /* Reuse the helper function from vfncvt.f.f.w */
-GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_narrow_check, vfncvt_f_f_w,
+GEN_OPFV_NARROW_TRANS(vfncvt_rod_f_f_w, opffv_rod_narrow_check, vfncvt_f_f_w,
                       RISCV_FRM_ROD)
 
 static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
@@ -3039,9 +2983,7 @@ static bool opxfv_narrow_check(DisasContext *s, arg_rmr *a)
            require_scale_rvf(s) &&
            vext_check_isa_ill(s) &&
            /* OPFV narrowing instructions ignore vs1 check */
-           vext_check_sd(s, a->rd, a->rs2, a->vm) &&
-           require_scale_zve32f(s) &&
-           require_scale_zve64f(s);
+           vext_check_sd(s, a->rd, a->rs2, a->vm);
 }
 
 #define GEN_OPXFV_NARROW_TRANS(NAME, HELPER, FRM) \
@@ -3115,9 +3057,7 @@ GEN_OPIVV_WIDEN_TRANS(vwredsumu_vs, reduction_widen_check)
 static bool freduction_check(DisasContext *s, arg_rmrr *a)
 {
     return reduction_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 GEN_OPFVV_TRANS(vfredusum_vs, freduction_check)
@@ -3544,9 +3484,7 @@ static bool trans_vfmv_f_s(DisasContext *s, arg_vfmv_f_s *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        vext_check_isa_ill(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         unsigned int ofs = (8 << s->sew);
@@ -3572,9 +3510,7 @@ static bool trans_vfmv_s_f(DisasContext *s, arg_vfmv_s_f *a)
 {
     if (require_rvv(s) &&
         require_rvf(s) &&
-        vext_check_isa_ill(s) &&
-        require_zve32f(s) &&
-        require_zve64f(s)) {
+        vext_check_isa_ill(s)) {
         gen_set_rm(s, RISCV_FRM_DYN);
 
         /* The instructions ignore LMUL and vector register group. */
@@ -3625,17 +3561,13 @@ GEN_OPIVI_TRANS(vslidedown_vi, IMM_ZX, vslidedown_vx, slidedown_check)
 static bool fslideup_check(DisasContext *s, arg_rmrr *a)
 {
     return slideup_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 static bool fslidedown_check(DisasContext *s, arg_rmrr *a)
 {
     return slidedown_check(s, a) &&
-           require_rvf(s) &&
-           require_zve32f(s) &&
-           require_zve64f(s);
+           require_rvf(s);
 }
 
 GEN_OPFVF_TRANS(vfslide1up_vf, fslideup_check)
diff --git a/target/riscv/insn_trans/trans_rvzfh.c.inc b/target/riscv/insn_trans/trans_rvzfh.c.inc
index 2ad5716312..85fc1aa822 100644
--- a/target/riscv/insn_trans/trans_rvzfh.c.inc
+++ b/target/riscv/insn_trans/trans_rvzfh.c.inc
@@ -28,15 +28,14 @@
     } \
 } while (0)
 
-#define REQUIRE_ZFH_OR_ZFHMIN(ctx) do { \
-    if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin)) { \
+#define REQUIRE_ZFHMIN(ctx) do { \
+    if (!ctx->cfg_ptr->ext_zfhmin) { \
         return false; \
     } \
 } while (0)
 
-#define REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx) do { \
-    if (!(ctx->cfg_ptr->ext_zfh || ctx->cfg_ptr->ext_zfhmin || \
-          ctx->cfg_ptr->ext_zhinx || ctx->cfg_ptr->ext_zhinxmin)) { \
+#define REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx) do { \
+    if (!(ctx->cfg_ptr->ext_zfhmin || ctx->cfg_ptr->ext_zhinxmin)) { \
         return false; \
     } \
 } while (0)
@@ -47,7 +46,7 @@ static bool trans_flh(DisasContext *ctx, arg_flh *a)
     TCGv t0;
 
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     decode_save_opc(ctx);
     t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -70,7 +69,7 @@ static bool trans_fsh(DisasContext *ctx, arg_fsh *a)
     TCGv t0;
 
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     decode_save_opc(ctx);
     t0 = get_gpr(ctx, a->rs1, EXT_NONE);
@@ -401,7 +400,7 @@ static bool trans_fmax_h(DisasContext *ctx, arg_fmax_h *a)
 static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -418,7 +417,7 @@ static bool trans_fcvt_s_h(DisasContext *ctx, arg_fcvt_s_h *a)
 static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
     REQUIRE_ZDINX_OR_D(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -436,7 +435,7 @@ static bool trans_fcvt_d_h(DisasContext *ctx, arg_fcvt_d_h *a)
 static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
     TCGv_i64 src1 = get_fpr_hs(ctx, a->rs1);
@@ -452,7 +451,7 @@ static bool trans_fcvt_h_s(DisasContext *ctx, arg_fcvt_h_s *a)
 static bool trans_fcvt_h_d(DisasContext *ctx, arg_fcvt_h_d *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN_OR_ZHINX_OR_ZHINXMIN(ctx);
+    REQUIRE_ZFHMIN_OR_ZHINXMIN(ctx);
     REQUIRE_ZDINX_OR_D(ctx);
 
     TCGv_i64 dest = dest_fpr(ctx, a->rd);
@@ -585,7 +584,7 @@ static bool trans_fcvt_h_wu(DisasContext *ctx, arg_fcvt_h_wu *a)
 static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     TCGv dest = dest_gpr(ctx, a->rd);
 
@@ -605,7 +604,7 @@ static bool trans_fmv_x_h(DisasContext *ctx, arg_fmv_x_h *a)
 static bool trans_fmv_h_x(DisasContext *ctx, arg_fmv_h_x *a)
 {
     REQUIRE_FPU;
-    REQUIRE_ZFH_OR_ZFHMIN(ctx);
+    REQUIRE_ZFHMIN(ctx);
 
     TCGv t0 = get_gpr(ctx, a->rs1, EXT_ZERO);
 
diff --git a/target/riscv/insn_trans/trans_rvzicond.c.inc b/target/riscv/insn_trans/trans_rvzicond.c.inc
new file mode 100644
index 0000000000..645260164e
--- /dev/null
+++ b/target/riscv/insn_trans/trans_rvzicond.c.inc
@@ -0,0 +1,49 @@
+/*
+ * RISC-V translation routines for the Zicond Standard Extension.
+ *
+ * Copyright (c) 2020-2023 PLCT Lab
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define REQUIRE_ZICOND(ctx) do { \
+    if (!ctx->cfg_ptr->ext_zicond) { \
+        return false; \
+    } \
+} while (0)
+
+static bool trans_czero_eqz(DisasContext *ctx, arg_czero_eqz *a)
+{
+    REQUIRE_ZICOND(ctx);
+
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+    tcg_gen_movcond_tl(TCG_COND_EQ, dest, src2, ctx->zero, ctx->zero, src1);
+    gen_set_gpr(ctx, a->rd, dest);
+    return true;
+}
+
+static bool trans_czero_nez(DisasContext *ctx, arg_czero_nez *a)
+{
+    REQUIRE_ZICOND(ctx);
+
+    TCGv dest = dest_gpr(ctx, a->rd);
+    TCGv src1 = get_gpr(ctx, a->rs1, EXT_NONE);
+    TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE);
+
+    tcg_gen_movcond_tl(TCG_COND_NE, dest, src2, ctx->zero, ctx->zero, src1);
+    gen_set_gpr(ctx, a->rd, dest);
+    return true;
+}
diff --git a/target/riscv/insn_trans/trans_xthead.c.inc b/target/riscv/insn_trans/trans_xthead.c.inc
index be87c34f56..cf1731b08d 100644
--- a/target/riscv/insn_trans/trans_xthead.c.inc
+++ b/target/riscv/insn_trans/trans_xthead.c.inc
@@ -980,10 +980,6 @@ static bool trans_th_lwud(DisasContext *ctx, arg_th_pair *a)
 static bool gen_storepair_tl(DisasContext *ctx, arg_th_pair *a, MemOp memop,
                              int shamt)
 {
-    if (a->rs == a->rd1 || a->rs == a->rd2 || a->rd1 == a->rd2) {
-        return false;
-    }
-
     TCGv data1 = get_gpr(ctx, a->rd1, EXT_NONE);
     TCGv data2 = get_gpr(ctx, a->rd2, EXT_NONE);
     TCGv addr1 = tcg_temp_new();
diff --git a/target/riscv/machine.c b/target/riscv/machine.c
index c6ce318cce..9c455931d8 100644
--- a/target/riscv/machine.c
+++ b/target/riscv/machine.c
@@ -27,9 +27,8 @@
 static bool pmp_needed(void *opaque)
 {
     RISCVCPU *cpu = opaque;
-    CPURISCVState *env = &cpu->env;
 
-    return riscv_feature(env, RISCV_FEATURE_PMP);
+    return cpu->cfg.pmp;
 }
 
 static int pmp_post_load(void *opaque, int version_id)
@@ -226,9 +225,8 @@ static const VMStateDescription vmstate_kvmtimer = {
 static bool debug_needed(void *opaque)
 {
     RISCVCPU *cpu = opaque;
-    CPURISCVState *env = &cpu->env;
 
-    return riscv_feature(env, RISCV_FEATURE_DEBUG);
+    return cpu->cfg.debug;
 }
 
 static int debug_post_load(void *opaque, int version_id)
@@ -333,8 +331,8 @@ static const VMStateDescription vmstate_pmu_ctr_state = {
 
 const VMStateDescription vmstate_riscv_cpu = {
     .name = "cpu",
-    .version_id = 6,
-    .minimum_version_id = 6,
+    .version_id = 7,
+    .minimum_version_id = 7,
     .post_load = riscv_cpu_post_load,
     .fields = (VMStateField[]) {
         VMSTATE_UINTTL_ARRAY(env.gpr, RISCVCPU, 32),
@@ -353,7 +351,6 @@ const VMStateDescription vmstate_riscv_cpu = {
         VMSTATE_UINT32(env.misa_ext, RISCVCPU),
         VMSTATE_UINT32(env.misa_mxl_max, RISCVCPU),
         VMSTATE_UINT32(env.misa_ext_mask, RISCVCPU),
-        VMSTATE_UINT32(env.features, RISCVCPU),
         VMSTATE_UINTTL(env.priv, RISCVCPU),
         VMSTATE_UINTTL(env.virt, RISCVCPU),
         VMSTATE_UINT64(env.resetvec, RISCVCPU),
diff --git a/target/riscv/monitor.c b/target/riscv/monitor.c
index 236f93b9f5..f36ddfa967 100644
--- a/target/riscv/monitor.c
+++ b/target/riscv/monitor.c
@@ -218,7 +218,7 @@ void hmp_info_mem(Monitor *mon, const QDict *qdict)
         return;
     }
 
-    if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
+    if (!riscv_cpu_cfg(env)->mmu) {
         monitor_printf(mon, "S-mode MMU unavailable\n");
         return;
     }
diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c
index 48f918b71b..9c0b91c88f 100644
--- a/target/riscv/op_helper.c
+++ b/target/riscv/op_helper.c
@@ -195,7 +195,7 @@ target_ulong helper_mret(CPURISCVState *env)
     uint64_t mstatus = env->mstatus;
     target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
 
-    if (riscv_feature(env, RISCV_FEATURE_PMP) &&
+    if (riscv_cpu_cfg(env)->pmp &&
         !pmp_get_num_rules(env) && (prev_priv != PRV_M)) {
         riscv_raise_exception(env, RISCV_EXCP_INST_ACCESS_FAULT, GETPC());
     }
diff --git a/target/riscv/pmp.c b/target/riscv/pmp.c
index 4bc4113531..a08cd95658 100644
--- a/target/riscv/pmp.c
+++ b/target/riscv/pmp.c
@@ -88,7 +88,7 @@ static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
     if (pmp_index < MAX_RISCV_PMPS) {
         bool locked = true;
 
-        if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+        if (riscv_cpu_cfg(env)->epmp) {
             /* mseccfg.RLB is set */
             if (MSECCFG_RLB_ISSET(env)) {
                 locked = false;
@@ -239,7 +239,7 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
 {
     bool ret;
 
-    if (riscv_feature(env, RISCV_FEATURE_EPMP)) {
+    if (riscv_cpu_cfg(env)->epmp) {
         if (MSECCFG_MMWP_ISSET(env)) {
             /*
              * The Machine Mode Whitelist Policy (mseccfg.MMWP) is set
@@ -265,7 +265,7 @@ static bool pmp_hart_has_privs_default(CPURISCVState *env, target_ulong addr,
         }
     }
 
-    if ((!riscv_feature(env, RISCV_FEATURE_PMP)) || (mode == PRV_M)) {
+    if (!riscv_cpu_cfg(env)->pmp || (mode == PRV_M)) {
         /*
          * Privileged spec v1.10 states if HW doesn't implement any PMP entry
          * or no PMP entry matches an M-Mode access, the access succeeds.
@@ -315,7 +315,7 @@ int pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
     }
 
     if (size == 0) {
-        if (riscv_feature(env, RISCV_FEATURE_MMU)) {
+        if (riscv_cpu_cfg(env)->mmu) {
             /*
              * If size is unknown (0), assume that all bytes
             * from addr to the end of the page will be accessed.
diff --git a/target/riscv/translate.c b/target/riscv/translate.c
index f9d5d1097e..a8d516ca3e 100644
--- a/target/riscv/translate.c
+++ b/target/riscv/translate.c
@@ -1103,6 +1103,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc)
 #include "insn_trans/trans_rvh.c.inc"
 #include "insn_trans/trans_rvv.c.inc"
 #include "insn_trans/trans_rvb.c.inc"
+#include "insn_trans/trans_rvzicond.c.inc"
 #include "insn_trans/trans_rvzawrs.c.inc"
 #include "insn_trans/trans_rvzfh.c.inc"
 #include "insn_trans/trans_rvk.c.inc"
@@ -1261,7 +1262,7 @@ static void riscv_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
         uint16_t next_insn = cpu_lduw_code(env, ctx->base.pc_next);
         int len = insn_len(next_insn);
 
-        if (!is_same_page(&ctx->base, ctx->base.pc_next + len)) {
+        if (!is_same_page(&ctx->base, ctx->base.pc_next + len - 1)) {
             ctx->base.is_jmp = DISAS_TOO_MANY;
         }
     }
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 3073c54871..2423affe37 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -267,6 +267,28 @@ GEN_VEXT_ST_ELEM(ste_h, int16_t, H2, stw)
 GEN_VEXT_ST_ELEM(ste_w, int32_t, H4, stl)
 GEN_VEXT_ST_ELEM(ste_d, int64_t, H8, stq)
 
+static void vext_set_tail_elems_1s(CPURISCVState *env, target_ulong vl,
+                                   void *vd, uint32_t desc, uint32_t nf,
+                                   uint32_t esz, uint32_t max_elems)
+{
+    uint32_t total_elems = vext_get_total_elems(env, desc, esz);
+    uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
+    uint32_t vta = vext_vta(desc);
+    uint32_t registers_used;
+    int k;
+
+    for (k = 0; k < nf; ++k) {
+        vext_set_elems_1s(vd, vta, (k * max_elems + vl) * esz,
+                          (k * max_elems + max_elems) * esz);
+    }
+
+    if (nf * max_elems % total_elems != 0) {
+        registers_used = ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
+        vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
+                          registers_used * vlenb);
+    }
+}
+
 /*
  *** stride: access vector element from strided memory
  */
@@ -281,8 +303,6 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
     uint32_t nf = vext_nf(desc);
     uint32_t max_elems = vext_max_elems(desc, log2_esz);
     uint32_t esz = 1 << log2_esz;
-    uint32_t total_elems = vext_get_total_elems(env, desc, esz);
-    uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
     for (i = env->vstart; i < env->vl; i++, env->vstart++) {
@@ -301,18 +321,8 @@ vext_ldst_stride(void *vd, void *v0, target_ulong base,
         }
     }
     env->vstart = 0;
-    /* set tail elements to 1s */
-    for (k = 0; k < nf; ++k) {
-        vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
-                          (k * max_elems + max_elems) * esz);
-    }
-    if (nf * max_elems % total_elems != 0) {
-        uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
-        uint32_t registers_used =
-            ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
-        vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
-                          registers_used * vlenb);
-    }
+
+    vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
 }
 
 #define GEN_VEXT_LD_STRIDE(NAME, ETYPE, LOAD_FN) \
@@ -359,8 +369,6 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
     uint32_t nf = vext_nf(desc);
     uint32_t max_elems = vext_max_elems(desc, log2_esz);
     uint32_t esz = 1 << log2_esz;
-    uint32_t total_elems = vext_get_total_elems(env, desc, esz);
-    uint32_t vta = vext_vta(desc);
 
     /* load bytes from guest memory */
     for (i = env->vstart; i < evl; i++, env->vstart++) {
@@ -372,18 +380,8 @@ vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
         }
     }
     env->vstart = 0;
-    /* set tail elements to 1s */
-    for (k = 0; k < nf; ++k) {
-        vext_set_elems_1s(vd, vta, (k * max_elems + evl) * esz,
-                          (k * max_elems + max_elems) * esz);
-    }
-    if (nf * max_elems % total_elems != 0) {
-        uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
-        uint32_t registers_used =
-            ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
-        vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
-                          registers_used * vlenb);
-    }
+
+    vext_set_tail_elems_1s(env, evl, vd, desc, nf, esz, max_elems);
 }
 
 /*
@@ -484,8 +482,6 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
     uint32_t vm = vext_vm(desc);
     uint32_t max_elems = vext_max_elems(desc, log2_esz);
     uint32_t esz = 1 << log2_esz;
-    uint32_t total_elems = vext_get_total_elems(env, desc, esz);
-    uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
 
     /* load bytes from guest memory */
@@ -505,18 +501,8 @@ vext_ldst_index(void *vd, void *v0, target_ulong base,
         }
     }
     env->vstart = 0;
-    /* set tail elements to 1s */
-    for (k = 0; k < nf; ++k) {
-        vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
-                          (k * max_elems + max_elems) * esz);
-    }
-    if (nf * max_elems % total_elems != 0) {
-        uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
-        uint32_t registers_used =
-            ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
-        vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
-                          registers_used * vlenb);
-    }
+
+    vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
 }
 
 #define GEN_VEXT_LD_INDEX(NAME, ETYPE, INDEX_FN, LOAD_FN) \
@@ -585,8 +571,6 @@ vext_ldff(void *vd, void *v0, target_ulong base,
     uint32_t vm = vext_vm(desc);
     uint32_t max_elems = vext_max_elems(desc, log2_esz);
     uint32_t esz = 1 << log2_esz;
-    uint32_t total_elems = vext_get_total_elems(env, desc, esz);
-    uint32_t vta = vext_vta(desc);
     uint32_t vma = vext_vma(desc);
     target_ulong addr, offset, remain;
 
@@ -647,18 +631,8 @@ ProbeSuccess:
         }
     }
     env->vstart = 0;
-    /* set tail elements to 1s */
-    for (k = 0; k < nf; ++k) {
-        vext_set_elems_1s(vd, vta, (k * max_elems + env->vl) * esz,
-                          (k * max_elems + max_elems) * esz);
-    }
-    if (nf * max_elems % total_elems != 0) {
-        uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
-        uint32_t registers_used =
-            ((nf * max_elems) * esz + (vlenb - 1)) / vlenb;
-        vext_set_elems_1s(vd, vta, (nf * max_elems) * esz,
-                          registers_used * vlenb);
-    }
+
+    vext_set_tail_elems_1s(env, env->vl, vd, desc, nf, esz, max_elems);
 }
 
 #define GEN_VEXT_LDFF(NAME, ETYPE, LOAD_FN) \
@@ -697,7 +671,7 @@ vext_ldst_whole(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
 {
     uint32_t i, k, off, pos;
     uint32_t nf = vext_nf(desc);
-    uint32_t vlenb = env_archcpu(env)->cfg.vlen >> 3;
+    uint32_t vlenb = riscv_cpu_cfg(env)->vlen >> 3;
     uint32_t max_elems = vlenb >> log2_esz;
 
     k = env->vstart / max_elems;
@@ -1167,7 +1141,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
 { \
     uint32_t vl = env->vl; \
     uint32_t vm = vext_vm(desc); \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
     \
@@ -1203,7 +1177,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, \
 { \
     uint32_t vl = env->vl; \
     uint32_t vm = vext_vm(desc); \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
     \
@@ -1402,7 +1376,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
 { \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
@@ -1465,7 +1439,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong s1, void *vs2, \
 { \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
@@ -4178,7 +4152,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, void *vs2, \
 { \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
@@ -4216,7 +4190,7 @@ void HELPER(NAME)(void *vd, void *v0, uint64_t s1, void *vs2, \
 { \
     uint32_t vm = vext_vm(desc); \
     uint32_t vl = env->vl; \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t vma = vext_vma(desc); \
     uint32_t i; \
@@ -4747,7 +4721,7 @@ void HELPER(NAME)(void *vd, void *v0, void *vs1, \
                   uint32_t desc) \
 { \
     uint32_t vl = env->vl; \
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen; \
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen; \
     uint32_t vta_all_1s = vext_vta_all_1s(desc); \
     uint32_t i; \
     int a, b; \
@@ -4834,7 +4808,7 @@ static void vmsetm(void *vd, void *v0, void *vs2, CPURISCVState *env,
 {
     uint32_t vm = vext_vm(desc);
     uint32_t vl = env->vl;
-    uint32_t total_elems = env_archcpu(env)->cfg.vlen;
+    uint32_t total_elems = riscv_cpu_cfg(env)->vlen;
     uint32_t vta_all_1s = vext_vta_all_1s(desc);
     uint32_t vma = vext_vma(desc);
     int i;