From 6eab278d389bc3569135773265e0d481d89ef82e Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:42 +0800 Subject: target/riscv: Add zimop extension Zimop extension defines an encoding space for 40 MOPs.The Zimop extension defines 32 MOP instructions named MOP.R.n, where n is an integer between 0 and 31, inclusive. The Zimop extension additionally defines 8 MOP instructions named MOP.RR.n, where n is an integer between 0 and 7. These 40 MOPs initially are defined to simply write zero to x[rd], but are designed to be redefined by later extensions to perform some other action. Signed-off-by: LIU Zhiwei Reviewed-by: Alistair Francis Reviewed-by: Deepak Gupta Message-ID: <20240709113652.1239-2-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 2 ++ target/riscv/cpu_cfg.h | 1 + target/riscv/insn32.decode | 11 +++++++++ target/riscv/insn_trans/trans_rvzimop.c.inc | 37 +++++++++++++++++++++++++++++ target/riscv/translate.c | 1 + 5 files changed, 52 insertions(+) create mode 100644 target/riscv/insn_trans/trans_rvzimop.c.inc (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index c53b0d5b40..ac503826ce 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -113,6 +113,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zihintntl, PRIV_VERSION_1_10_0, ext_zihintntl), ISA_EXT_DATA_ENTRY(zihintpause, PRIV_VERSION_1_10_0, ext_zihintpause), ISA_EXT_DATA_ENTRY(zihpm, PRIV_VERSION_1_12_0, ext_zihpm), + ISA_EXT_DATA_ENTRY(zimop, PRIV_VERSION_1_13_0, ext_zimop), ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), @@ -1471,6 +1472,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), + MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index fb7eebde52..9f53512053 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -71,6 +71,7 @@ struct RISCVCPUConfig { bool ext_zihintntl; bool ext_zihintpause; bool ext_zihpm; + bool ext_zimop; bool ext_ztso; bool ext_smstateen; bool ext_sstc; diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index f22df04cfd..60da673153 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -38,6 +38,8 @@ %imm_bs 30:2 !function=ex_shift_3 %imm_rnum 20:4 %imm_z6 26:1 15:5 +%imm_mop5 30:1 26:2 20:2 +%imm_mop3 30:1 26:2 # Argument sets: &empty @@ -56,6 +58,8 @@ &r2nfvm vm rd rs1 nf &rnfvm vm rd rs1 rs2 nf &k_aes shamt rs2 rs1 rd +&mop5 imm rd rs1 +&mop3 imm rd rs1 rs2 # Formats 32: @r ....... ..... ..... ... ..... ....... &r %rs2 %rs1 %rd @@ -98,6 +102,9 @@ @k_aes .. ..... ..... ..... ... ..... ....... &k_aes shamt=%imm_bs %rs2 %rs1 %rd @i_aes .. ..... ..... ..... ... ..... ....... &i imm=%imm_rnum %rs1 %rd +@mop5 . . .. .. .... .. ..... ... ..... ....... &mop5 imm=%imm_mop5 %rd %rs1 +@mop3 . . .. .. . ..... ..... ... ..... ....... &mop3 imm=%imm_mop3 %rd %rs1 %rs2 + # Formats 64: @sh5 ....... ..... ..... ... ..... ....... &shift shamt=%sh5 %rs1 %rd @@ -1010,3 +1017,7 @@ amocas_w 00101 . . ..... ..... 010 ..... 0101111 @atom_st amocas_d 00101 . . ..... 
..... 011 ..... 0101111 @atom_st # *** RV64 Zacas Standard Extension *** amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st + +# *** Zimop may-be-operation extension *** +mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5 +mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 diff --git a/target/riscv/insn_trans/trans_rvzimop.c.inc b/target/riscv/insn_trans/trans_rvzimop.c.inc new file mode 100644 index 0000000000..165aacd2b6 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzimop.c.inc @@ -0,0 +1,37 @@ +/* + * RISC-V translation routines for May-Be-Operation(zimop). + * + * Copyright (c) 2024 Alibaba Group. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + */ + +#define REQUIRE_ZIMOP(ctx) do { \ + if (!ctx->cfg_ptr->ext_zimop) { \ + return false; \ + } \ +} while (0) + +static bool trans_mop_r_n(DisasContext *ctx, arg_mop_r_n *a) +{ + REQUIRE_ZIMOP(ctx); + gen_set_gpr(ctx, a->rd, ctx->zero); + return true; +} + +static bool trans_mop_rr_n(DisasContext *ctx, arg_mop_rr_n *a) +{ + REQUIRE_ZIMOP(ctx); + gen_set_gpr(ctx, a->rd, ctx->zero); + return true; +} diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 0569224e53..379b68289f 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1099,6 +1099,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) #include "insn_trans/trans_rvzacas.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" #include "insn_trans/trans_rvzicbo.c.inc" +#include "insn_trans/trans_rvzimop.c.inc" #include "insn_trans/trans_rvzfa.c.inc" #include "insn_trans/trans_rvzfh.c.inc" #include "insn_trans/trans_rvk.c.inc" -- cgit v1.2.3 From 197e4d29889453760c74763662e3812d0ec7a645 Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:44 +0800 Subject: target/riscv: Add zcmop extension Zcmop defines eight 16-bit MOP instructions named C.MOP.n, where n is an odd integer between 1 and 15, inclusive. C.MOP.n is encoded in the reserved encoding space corresponding to C.LUI xn, 0. Unlike the MOPs defined in the Zimop extension, the C.MOP.n instructions are defined to not write any register. In current implementation, C.MOP.n only has an check function, without any other more behavior. 
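For illustration only, a minimal C sketch of how a C.MOP.n opcode lands in the reserved C.LUI encoding space, assuming nothing beyond the c_mop_n bit pattern added to insn16.decode below (funct3 = 011 in quadrant 1, the C.LUI rd field holding n with n[0] fixed to 1 and n[4] = 0). The helper name c_mop_encode is hypothetical and not part of this series.

#include <stdint.h>
#include <assert.h>

/* Build the 16-bit C.MOP.n encoding (n odd, 1..15) from the decode pattern
 * "011 0 0 n:3 1 00000 01". Bits 12, 11 and 6:2 stay zero. */
static uint16_t c_mop_encode(unsigned n)
{
    assert((n & 1) && n <= 15);
    return (0x3u << 13)               /* bits 15:13 = 011 (C.LUI space)   */
         | (((n >> 1) & 0x7u) << 8)   /* bits 10:8  = n[3:1]              */
         | (1u << 7)                  /* bit  7     = n[0] = 1            */
         | 0x1u;                      /* bits  1:0  = 01 (quadrant 1)     */
}
/* e.g. c_mop_encode(1) yields 0x6081 for C.MOP.1 */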
Signed-off-by: LIU Zhiwei Reviewed-by: Alistair Francis Reviewed-by: Deepak Gupta Message-ID: <20240709113652.1239-4-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 2 ++ target/riscv/cpu_cfg.h | 1 + target/riscv/insn16.decode | 1 + target/riscv/insn_trans/trans_rvzcmop.c.inc | 29 +++++++++++++++++++++++++++++ target/riscv/tcg/tcg-cpu.c | 5 +++++ target/riscv/translate.c | 1 + 6 files changed, 39 insertions(+) create mode 100644 target/riscv/insn_trans/trans_rvzcmop.c.inc (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index ac503826ce..f4f8287a6d 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -131,6 +131,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zcf, PRIV_VERSION_1_12_0, ext_zcf), ISA_EXT_DATA_ENTRY(zcd, PRIV_VERSION_1_12_0, ext_zcd), ISA_EXT_DATA_ENTRY(zce, PRIV_VERSION_1_12_0, ext_zce), + ISA_EXT_DATA_ENTRY(zcmop, PRIV_VERSION_1_13_0, ext_zcmop), ISA_EXT_DATA_ENTRY(zcmp, PRIV_VERSION_1_12_0, ext_zcmp), ISA_EXT_DATA_ENTRY(zcmt, PRIV_VERSION_1_12_0, ext_zcmt), ISA_EXT_DATA_ENTRY(zba, PRIV_VERSION_1_12_0, ext_zba), @@ -1473,6 +1474,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), MULTI_EXT_CFG_BOOL("zihintpause", ext_zihintpause, true), MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), + MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 9f53512053..d85e54b475 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -72,6 +72,7 @@ struct RISCVCPUConfig { bool ext_zihintpause; bool ext_zihpm; bool ext_zimop; + bool ext_zcmop; bool ext_ztso; bool ext_smstateen; bool ext_sstc; diff --git a/target/riscv/insn16.decode b/target/riscv/insn16.decode index b96c534e73..3953bcf82d 100644 --- a/target/riscv/insn16.decode +++ b/target/riscv/insn16.decode @@ -140,6 +140,7 @@ sw 110 ... ... .. ... 00 @cs_w addi 000 . ..... ..... 01 @ci addi 010 . ..... ..... 01 @c_li { + c_mop_n 011 0 0 n:3 1 00000 01 illegal 011 0 ----- 00000 01 # c.addi16sp and c.lui, RES nzimm=0 addi 011 . 00010 ..... 01 @c_addi16sp lui 011 . ..... ..... 01 @c_lui diff --git a/target/riscv/insn_trans/trans_rvzcmop.c.inc b/target/riscv/insn_trans/trans_rvzcmop.c.inc new file mode 100644 index 0000000000..7205586508 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzcmop.c.inc @@ -0,0 +1,29 @@ +/* + * RISC-V translation routines for compressed May-Be-Operation(zcmop). + * + * Copyright (c) 2024 Alibaba Group. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#define REQUIRE_ZCMOP(ctx) do { \ + if (!ctx->cfg_ptr->ext_zcmop) { \ + return false; \ + } \ +} while (0) + +static bool trans_c_mop_n(DisasContext *ctx, arg_c_mop_n *a) +{ + REQUIRE_ZCMOP(ctx); + return true; +} diff --git a/target/riscv/tcg/tcg-cpu.c b/target/riscv/tcg/tcg-cpu.c index ecf366d6c7..b8814ab753 100644 --- a/target/riscv/tcg/tcg-cpu.c +++ b/target/riscv/tcg/tcg-cpu.c @@ -549,6 +549,11 @@ void riscv_cpu_validate_set_extensions(RISCVCPU *cpu, Error **errp) } } + if (cpu->cfg.ext_zcmop && !cpu->cfg.ext_zca) { + error_setg(errp, "Zcmop extensions require Zca"); + return; + } + if (mcc->misa_mxl_max != MXL_RV32 && cpu->cfg.ext_zcf) { error_setg(errp, "Zcf extension is only relevant to RV32"); return; diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 379b68289f..8a546f4ece 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1114,6 +1114,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) /* Include the auto-generated decoder for 16 bit insn */ #include "decode-insn16.c.inc" #include "insn_trans/trans_rvzce.c.inc" +#include "insn_trans/trans_rvzcmop.c.inc" /* Include decoders for factored-out extensions */ #include "decode-XVentanaCondOps.c.inc" -- cgit v1.2.3 From a60ce58fd971bdcbec6ba96ce989fd399ca1f2d7 Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:46 +0800 Subject: target/riscv: Support Zama16b extension MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Zama16b is the property that misaligned load/stores/atomics within a naturally aligned 16-byte region are atomic. According to the specification, Zama16b applies only to AMOs, loads and stores defined in the base ISAs, and loads and stores of no more than XLEN bits defined in the F, D, and Q extensions. Thus it should not apply to zacas or RVC instructions. For an instruction in that set, if all accessed bytes lie within 16B granule, the instruction will not raise an exception for reasons of address alignment, and the instruction will give rise to only one memory operation for the purposes of RVWMO—i.e., it will execute atomically. 
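As a rough illustration of the "all accessed bytes lie within a 16B granule" condition, a self-contained sketch follows. The helper within_16b_granule is hypothetical; in the hunks below QEMU expresses the property through the MO_ATOM_WITHIN16 MemOp flag rather than an explicit address check.

#include <stdbool.h>
#include <stdint.h>

/* True when an access of `size` bytes starting at `addr` stays inside one
 * naturally aligned 16-byte region, i.e. the first and last byte fall in
 * the same 16-byte granule. Under Zama16b such an access is single-copy
 * atomic even if misaligned. */
static bool within_16b_granule(uint64_t addr, unsigned size)
{
    return (addr & ~UINT64_C(0xf)) == ((addr + size - 1) & ~UINT64_C(0xf));
}
/* within_16b_granule(0x1006, 8) is true; within_16b_granule(0x100a, 8) is not */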
Signed-off-by: LIU Zhiwei Reviewed-by: Alistair Francis Message-ID: <20240709113652.1239-6-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 2 ++ target/riscv/cpu_cfg.h | 1 + target/riscv/insn_trans/trans_rva.c.inc | 42 +++++++++++++++++++-------------- target/riscv/insn_trans/trans_rvd.c.inc | 14 +++++++++-- target/riscv/insn_trans/trans_rvf.c.inc | 14 +++++++++-- target/riscv/insn_trans/trans_rvi.c.inc | 6 +++++ 6 files changed, 57 insertions(+), 22 deletions(-) (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index f4f8287a6d..de9c06904f 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -118,6 +118,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), + ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), ISA_EXT_DATA_ENTRY(zawrs, PRIV_VERSION_1_12_0, ext_zawrs), ISA_EXT_DATA_ENTRY(zfa, PRIV_VERSION_1_12_0, ext_zfa), @@ -1476,6 +1477,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zimop", ext_zimop, false), MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), + MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index d85e54b475..ddbfae37e5 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -83,6 +83,7 @@ struct RISCVCPUConfig { bool ext_zdinx; bool ext_zaamo; bool ext_zacas; + bool ext_zama16b; bool ext_zalrsc; bool ext_zawrs; bool ext_zfa; diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index 4a9e4591d1..eb080baddd 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -103,6 +103,12 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, TCGv dest = dest_gpr(ctx, a->rd); TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + if (ctx->cfg_ptr->ext_zama16b) { + mop |= MO_ATOM_WITHIN16; + } else { + mop |= MO_ALIGN; + } + decode_save_opc(ctx); src1 = get_address(ctx, a->rs1, 0); func(dest, src1, src2, ctx->mem_idx, mop); @@ -126,55 +132,55 @@ static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESL); } static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESL); } static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESL); } static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESL); } static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a) { REQUIRE_A_OR_ZAAMO(ctx); 
- return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESL); } static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESL); } static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESL); } static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESL); } static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a) { REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESL); } static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a) @@ -195,61 +201,61 @@ static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TEUQ); } static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TEUQ); } static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TEUQ); } static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TEUQ); } static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TEUQ); } static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TEUQ); } static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TEUQ); } static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TEUQ); } static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a) { REQUIRE_64BIT(ctx); REQUIRE_A_OR_ZAAMO(ctx); - return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEUQ)); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TEUQ); } diff --git a/target/riscv/insn_trans/trans_rvd.c.inc 
b/target/riscv/insn_trans/trans_rvd.c.inc index d9ce9e407f..1f5fac65a2 100644 --- a/target/riscv/insn_trans/trans_rvd.c.inc +++ b/target/riscv/insn_trans/trans_rvd.c.inc @@ -42,13 +42,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) { TCGv addr; + MemOp memop = MO_TEUQ; REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_ld_i64(cpu_fpr[a->rd], addr, ctx->mem_idx, memop); mark_fs_dirty(ctx); return true; @@ -57,13 +62,18 @@ static bool trans_fld(DisasContext *ctx, arg_fld *a) static bool trans_fsd(DisasContext *ctx, arg_fsd *a) { TCGv addr; + MemOp memop = MO_TEUQ; REQUIRE_FPU; REQUIRE_EXT(ctx, RVD); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUQ); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; } diff --git a/target/riscv/insn_trans/trans_rvf.c.inc b/target/riscv/insn_trans/trans_rvf.c.inc index 97a368970b..f771aa1939 100644 --- a/target/riscv/insn_trans/trans_rvf.c.inc +++ b/target/riscv/insn_trans/trans_rvf.c.inc @@ -43,14 +43,19 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) { TCGv_i64 dest; TCGv addr; + MemOp memop = MO_TEUL; REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); dest = cpu_fpr[a->rd]; - tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_ld_i64(dest, addr, ctx->mem_idx, memop); gen_nanbox_s(dest, dest); mark_fs_dirty(ctx); @@ -60,13 +65,18 @@ static bool trans_flw(DisasContext *ctx, arg_flw *a) static bool trans_fsw(DisasContext *ctx, arg_fsw *a) { TCGv addr; + MemOp memop = MO_TEUL; REQUIRE_FPU; REQUIRE_EXT(ctx, RVF); + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } + decode_save_opc(ctx); addr = get_address(ctx, a->rs1, a->imm); - tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, MO_TEUL); + tcg_gen_qemu_st_i64(cpu_fpr[a->rs2], addr, ctx->mem_idx, memop); return true; } diff --git a/target/riscv/insn_trans/trans_rvi.c.inc b/target/riscv/insn_trans/trans_rvi.c.inc index ad40d3e87f..98e3806d5e 100644 --- a/target/riscv/insn_trans/trans_rvi.c.inc +++ b/target/riscv/insn_trans/trans_rvi.c.inc @@ -268,6 +268,9 @@ static bool gen_load(DisasContext *ctx, arg_lb *a, MemOp memop) { bool out; + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } decode_save_opc(ctx); if (get_xl(ctx) == MXL_RV128) { out = gen_load_i128(ctx, a, memop); @@ -366,6 +369,9 @@ static bool gen_store_i128(DisasContext *ctx, arg_sb *a, MemOp memop) static bool gen_store(DisasContext *ctx, arg_sb *a, MemOp memop) { + if (ctx->cfg_ptr->ext_zama16b && (ctx->cur_insn_len != 2)) { + memop |= MO_ATOM_WITHIN16; + } decode_save_opc(ctx); if (get_xl(ctx) == MXL_RV128) { return gen_store_i128(ctx, a, memop); -- cgit v1.2.3 From 24da9cbacacadaf95fbddc11da81ff68d3b1e795 Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:47 +0800 Subject: target/riscv: Move gen_amo before implement Zabha Signed-off-by: LIU Zhiwei Acked-by: Alistair Francis Message-ID: 
<20240709113652.1239-7-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/insn_trans/trans_rva.c.inc | 21 --------------------- target/riscv/translate.c | 21 +++++++++++++++++++++ 2 files changed, 21 insertions(+), 21 deletions(-) (limited to 'target') diff --git a/target/riscv/insn_trans/trans_rva.c.inc b/target/riscv/insn_trans/trans_rva.c.inc index eb080baddd..39bbf60f3c 100644 --- a/target/riscv/insn_trans/trans_rva.c.inc +++ b/target/riscv/insn_trans/trans_rva.c.inc @@ -96,27 +96,6 @@ static bool gen_sc(DisasContext *ctx, arg_atomic *a, MemOp mop) return true; } -static bool gen_amo(DisasContext *ctx, arg_atomic *a, - void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), - MemOp mop) -{ - TCGv dest = dest_gpr(ctx, a->rd); - TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); - - if (ctx->cfg_ptr->ext_zama16b) { - mop |= MO_ATOM_WITHIN16; - } else { - mop |= MO_ALIGN; - } - - decode_save_opc(ctx); - src1 = get_address(ctx, a->rs1, 0); - func(dest, src1, src2, ctx->mem_idx, mop); - - gen_set_gpr(ctx, a->rd, dest); - return true; -} - static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a) { REQUIRE_A_OR_ZALRSC(ctx); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 8a546f4ece..133550d6e2 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1077,6 +1077,27 @@ static bool gen_unary_per_ol(DisasContext *ctx, arg_r2 *a, DisasExtend ext, return gen_unary(ctx, a, ext, f_tl); } +static bool gen_amo(DisasContext *ctx, arg_atomic *a, + void(*func)(TCGv, TCGv, TCGv, TCGArg, MemOp), + MemOp mop) +{ + TCGv dest = dest_gpr(ctx, a->rd); + TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + if (ctx->cfg_ptr->ext_zama16b) { + mop |= MO_ATOM_WITHIN16; + } else { + mop |= MO_ALIGN; + } + + decode_save_opc(ctx); + src1 = get_address(ctx, a->rs1, 0); + func(dest, src1, src2, ctx->mem_idx, mop); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) { DisasContext *ctx = container_of(dcbase, DisasContext, base); -- cgit v1.2.3 From be4a8db7f304347395b081ae5848bad2f507d0c4 Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:48 +0800 Subject: target/riscv: Add AMO instructions for Zabha Signed-off-by: LIU Zhiwei Acked-by: Alistair Francis Message-ID: <20240709113652.1239-8-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/cpu_cfg.h | 1 + target/riscv/insn32.decode | 20 +++++ target/riscv/insn_trans/trans_rvzabha.c.inc | 131 ++++++++++++++++++++++++++++ target/riscv/translate.c | 4 +- 4 files changed, 155 insertions(+), 1 deletion(-) create mode 100644 target/riscv/insn_trans/trans_rvzabha.c.inc (limited to 'target') diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index ddbfae37e5..120905a254 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -84,6 +84,7 @@ struct RISCVCPUConfig { bool ext_zaamo; bool ext_zacas; bool ext_zama16b; + bool ext_zabha; bool ext_zalrsc; bool ext_zawrs; bool ext_zfa; diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 60da673153..3bad6372f2 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -1021,3 +1021,23 @@ amocas_q 00101 . . ..... ..... 100 ..... 0101111 @atom_st # *** Zimop may-be-operation extension *** mop_r_n 1 . 00 .. 0111 .. ..... 100 ..... 1110011 @mop5 mop_rr_n 1 . 00 .. 1 ..... ..... 100 ..... 1110011 @mop3 + +# *** Zabhb Standard Extension *** +amoswap_b 00001 . . ..... ..... 000 ..... 0101111 @atom_st +amoadd_b 00000 . 
. ..... ..... 000 ..... 0101111 @atom_st +amoxor_b 00100 . . ..... ..... 000 ..... 0101111 @atom_st +amoand_b 01100 . . ..... ..... 000 ..... 0101111 @atom_st +amoor_b 01000 . . ..... ..... 000 ..... 0101111 @atom_st +amomin_b 10000 . . ..... ..... 000 ..... 0101111 @atom_st +amomax_b 10100 . . ..... ..... 000 ..... 0101111 @atom_st +amominu_b 11000 . . ..... ..... 000 ..... 0101111 @atom_st +amomaxu_b 11100 . . ..... ..... 000 ..... 0101111 @atom_st +amoswap_h 00001 . . ..... ..... 001 ..... 0101111 @atom_st +amoadd_h 00000 . . ..... ..... 001 ..... 0101111 @atom_st +amoxor_h 00100 . . ..... ..... 001 ..... 0101111 @atom_st +amoand_h 01100 . . ..... ..... 001 ..... 0101111 @atom_st +amoor_h 01000 . . ..... ..... 001 ..... 0101111 @atom_st +amomin_h 10000 . . ..... ..... 001 ..... 0101111 @atom_st +amomax_h 10100 . . ..... ..... 001 ..... 0101111 @atom_st +amominu_h 11000 . . ..... ..... 001 ..... 0101111 @atom_st +amomaxu_h 11100 . . ..... ..... 001 ..... 0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_rvzabha.c.inc b/target/riscv/insn_trans/trans_rvzabha.c.inc new file mode 100644 index 0000000000..9093a1cfc1 --- /dev/null +++ b/target/riscv/insn_trans/trans_rvzabha.c.inc @@ -0,0 +1,131 @@ +/* + * RISC-V translation routines for the Zabha Standard Extension. + * + * Copyright (c) 2024 Alibaba Group + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ */ + +#define REQUIRE_ZABHA(ctx) do { \ + if (!ctx->cfg_ptr->ext_zabha) { \ + return false; \ + } \ +} while (0) + +static bool trans_amoswap_b(DisasContext *ctx, arg_amoswap_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_SB); +} + +static bool trans_amoadd_b(DisasContext *ctx, arg_amoadd_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_SB); +} + +static bool trans_amoxor_b(DisasContext *ctx, arg_amoxor_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_SB); +} + +static bool trans_amoand_b(DisasContext *ctx, arg_amoand_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_SB); +} + +static bool trans_amoor_b(DisasContext *ctx, arg_amoor_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_SB); +} + +static bool trans_amomin_b(DisasContext *ctx, arg_amomin_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_SB); +} + +static bool trans_amomax_b(DisasContext *ctx, arg_amomax_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_SB); +} + +static bool trans_amominu_b(DisasContext *ctx, arg_amominu_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_SB); +} + +static bool trans_amomaxu_b(DisasContext *ctx, arg_amomaxu_b *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_SB); +} + +static bool trans_amoswap_h(DisasContext *ctx, arg_amoswap_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, MO_TESW); +} + +static bool trans_amoadd_h(DisasContext *ctx, arg_amoadd_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, MO_TESW); +} + +static bool trans_amoxor_h(DisasContext *ctx, arg_amoxor_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, MO_TESW); +} + +static bool trans_amoand_h(DisasContext *ctx, arg_amoand_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, MO_TESW); +} + +static bool trans_amoor_h(DisasContext *ctx, arg_amoor_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, MO_TESW); +} + +static bool trans_amomin_h(DisasContext *ctx, arg_amomin_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, MO_TESW); +} + +static bool trans_amomax_h(DisasContext *ctx, arg_amomax_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, MO_TESW); +} + +static bool trans_amominu_h(DisasContext *ctx, arg_amominu_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, MO_TESW); +} + +static bool trans_amomaxu_h(DisasContext *ctx, arg_amomaxu_h *a) +{ + REQUIRE_ZABHA(ctx); + return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESW); +} diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 133550d6e2..4a3e786560 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1083,8 +1083,9 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, { TCGv dest = dest_gpr(ctx, a->rd); TCGv src1, src2 = get_gpr(ctx, a->rs2, EXT_NONE); + MemOp size = mop & MO_SIZE; - if (ctx->cfg_ptr->ext_zama16b) { + if (ctx->cfg_ptr->ext_zama16b && size >= MO_32) { mop |= MO_ATOM_WITHIN16; } else { mop |= MO_ALIGN; @@ -1118,6 +1119,7 @@ static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) 
#include "insn_trans/trans_rvb.c.inc" #include "insn_trans/trans_rvzicond.c.inc" #include "insn_trans/trans_rvzacas.c.inc" +#include "insn_trans/trans_rvzabha.c.inc" #include "insn_trans/trans_rvzawrs.c.inc" #include "insn_trans/trans_rvzicbo.c.inc" #include "insn_trans/trans_rvzimop.c.inc" -- cgit v1.2.3 From 8d07887bcbf13749224bd10b0303f7a79a959edb Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:49 +0800 Subject: target/riscv: Move gen_cmpxchg before adding amocas.[b|h] Signed-off-by: LIU Zhiwei Acked-by: Alistair Francis Message-ID: <20240709113652.1239-9-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/insn_trans/trans_rvzacas.c.inc | 13 ------------- target/riscv/translate.c | 13 +++++++++++++ 2 files changed, 13 insertions(+), 13 deletions(-) (limited to 'target') diff --git a/target/riscv/insn_trans/trans_rvzacas.c.inc b/target/riscv/insn_trans/trans_rvzacas.c.inc index 5d274d4c08..fcced99fc7 100644 --- a/target/riscv/insn_trans/trans_rvzacas.c.inc +++ b/target/riscv/insn_trans/trans_rvzacas.c.inc @@ -22,19 +22,6 @@ } \ } while (0) -static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) -{ - TCGv dest = get_gpr(ctx, a->rd, EXT_NONE); - TCGv src1 = get_address(ctx, a->rs1, 0); - TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); - - decode_save_opc(ctx); - tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); - - gen_set_gpr(ctx, a->rd, dest); - return true; -} - static bool trans_amocas_w(DisasContext *ctx, arg_amocas_w *a) { REQUIRE_ZACAS(ctx); diff --git a/target/riscv/translate.c b/target/riscv/translate.c index 4a3e786560..acba90f170 100644 --- a/target/riscv/translate.c +++ b/target/riscv/translate.c @@ -1099,6 +1099,19 @@ static bool gen_amo(DisasContext *ctx, arg_atomic *a, return true; } +static bool gen_cmpxchg(DisasContext *ctx, arg_atomic *a, MemOp mop) +{ + TCGv dest = get_gpr(ctx, a->rd, EXT_NONE); + TCGv src1 = get_address(ctx, a->rs1, 0); + TCGv src2 = get_gpr(ctx, a->rs2, EXT_NONE); + + decode_save_opc(ctx); + tcg_gen_atomic_cmpxchg_tl(dest, src1, dest, src2, ctx->mem_idx, mop); + + gen_set_gpr(ctx, a->rd, dest); + return true; +} + static uint32_t opcode_at(DisasContextBase *dcbase, target_ulong pc) { DisasContext *ctx = container_of(dcbase, DisasContext, base); -- cgit v1.2.3 From d34e4066025378dff8d444104100d3b07e127fe6 Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:50 +0800 Subject: target/riscv: Add amocas.[b|h] for Zabha Signed-off-by: LIU Zhiwei Reviewed-by: Alistair Francis Message-ID: <20240709113652.1239-10-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/insn32.decode | 2 ++ target/riscv/insn_trans/trans_rvzabha.c.inc | 14 ++++++++++++++ 2 files changed, 16 insertions(+) (limited to 'target') diff --git a/target/riscv/insn32.decode b/target/riscv/insn32.decode index 3bad6372f2..c45b8fa1d8 100644 --- a/target/riscv/insn32.decode +++ b/target/riscv/insn32.decode @@ -1041,3 +1041,5 @@ amomin_h 10000 . . ..... ..... 001 ..... 0101111 @atom_st amomax_h 10100 . . ..... ..... 001 ..... 0101111 @atom_st amominu_h 11000 . . ..... ..... 001 ..... 0101111 @atom_st amomaxu_h 11100 . . ..... ..... 001 ..... 0101111 @atom_st +amocas_b 00101 . . ..... ..... 000 ..... 0101111 @atom_st +amocas_h 00101 . . ..... ..... 001 ..... 
0101111 @atom_st diff --git a/target/riscv/insn_trans/trans_rvzabha.c.inc b/target/riscv/insn_trans/trans_rvzabha.c.inc index 9093a1cfc1..ce8edcba62 100644 --- a/target/riscv/insn_trans/trans_rvzabha.c.inc +++ b/target/riscv/insn_trans/trans_rvzabha.c.inc @@ -129,3 +129,17 @@ static bool trans_amomaxu_h(DisasContext *ctx, arg_amomaxu_h *a) REQUIRE_ZABHA(ctx); return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, MO_TESW); } + +static bool trans_amocas_b(DisasContext *ctx, arg_amocas_b *a) +{ + REQUIRE_ZACAS(ctx); + REQUIRE_ZABHA(ctx); + return gen_cmpxchg(ctx, a, MO_SB); +} + +static bool trans_amocas_h(DisasContext *ctx, arg_amocas_h *a) +{ + REQUIRE_ZACAS(ctx); + REQUIRE_ZABHA(ctx); + return gen_cmpxchg(ctx, a, MO_ALIGN | MO_TESW); +} -- cgit v1.2.3 From 8aebaa2591320619cbd3e75f91f6e088f2c3dcfa Mon Sep 17 00:00:00 2001 From: LIU Zhiwei Date: Tue, 9 Jul 2024 19:36:51 +0800 Subject: target/riscv: Expose zabha extension as a cpu property Signed-off-by: LIU Zhiwei Reviewed-by: Alistair Francis Message-ID: <20240709113652.1239-11-zhiwei_liu@linux.alibaba.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index de9c06904f..33ef4eb795 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -117,6 +117,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zmmul, PRIV_VERSION_1_12_0, ext_zmmul), ISA_EXT_DATA_ENTRY(za64rs, PRIV_VERSION_1_12_0, has_priv_1_11), ISA_EXT_DATA_ENTRY(zaamo, PRIV_VERSION_1_12_0, ext_zaamo), + ISA_EXT_DATA_ENTRY(zabha, PRIV_VERSION_1_13_0, ext_zabha), ISA_EXT_DATA_ENTRY(zacas, PRIV_VERSION_1_12_0, ext_zacas), ISA_EXT_DATA_ENTRY(zama16b, PRIV_VERSION_1_13_0, ext_zama16b), ISA_EXT_DATA_ENTRY(zalrsc, PRIV_VERSION_1_12_0, ext_zalrsc), @@ -1478,6 +1479,7 @@ const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { MULTI_EXT_CFG_BOOL("zcmop", ext_zcmop, false), MULTI_EXT_CFG_BOOL("zacas", ext_zacas, false), MULTI_EXT_CFG_BOOL("zama16b", ext_zama16b, false), + MULTI_EXT_CFG_BOOL("zabha", ext_zabha, false), MULTI_EXT_CFG_BOOL("zaamo", ext_zaamo, false), MULTI_EXT_CFG_BOOL("zalrsc", ext_zalrsc, false), MULTI_EXT_CFG_BOOL("zawrs", ext_zawrs, true), -- cgit v1.2.3 From 910c18a91738f9b77c5faf66e3534bb10d6e306e Mon Sep 17 00:00:00 2001 From: Jiayi Li Date: Mon, 1 Jul 2024 10:25:53 +0800 Subject: target/riscv: Validate the mode in write_vstvec Base on the riscv-privileged spec, vstvec substitutes for the usual stvec. Therefore, the encoding of the MODE should also be restricted to 0 and 1. 
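For reference, a small sketch of the xtvec layout that the check below relies on: bits [1:0] select the trap mode (0 = direct, 1 = vectored, 2 and 3 reserved) and the remaining bits hold the base address. The names tvec_mode, tvec_base and vectored_trap_pc are hypothetical, used purely for illustration.

#include <stdbool.h>
#include <stdint.h>

static unsigned tvec_mode(uint64_t tvec) { return tvec & 3; }            /* only 0/1 valid */
static uint64_t tvec_base(uint64_t tvec) { return tvec & ~(uint64_t)3; }

/* Trap target, in the same shape as cpu_helper.c's
 * "(tvec >> 2 << 2) + ((async && (tvec & 3) == 1) ? cause * 4 : 0)". */
static uint64_t vectored_trap_pc(uint64_t tvec, uint64_t cause, bool async)
{
    return tvec_base(tvec) + ((async && tvec_mode(tvec) == 1) ? cause * 4 : 0);
}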
Signed-off-by: Jiayi Li Reviewed-by: Alistair Francis Reviewed-by: LIU Zhiwei Message-ID: <20240701022553.1982-1-lijiayi@eswincomputing.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 432c59dc66..f9229d92ab 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -3791,7 +3791,12 @@ static RISCVException read_vstvec(CPURISCVState *env, int csrno, static RISCVException write_vstvec(CPURISCVState *env, int csrno, target_ulong val) { - env->vstvec = val; + /* bits [1:0] encode mode; 0 = direct, 1 = vectored, 2 >= reserved */ + if ((val & 3) < 2) { + env->vstvec = val; + } else { + qemu_log_mask(LOG_UNIMP, "CSR_VSTVEC: reserved mode not supported\n"); + } return RISCV_EXCP_NONE; } -- cgit v1.2.3 From 3cb9f20499cd7da644387a17a79230f8ffd89993 Mon Sep 17 00:00:00 2001 From: Daniel Henrique Barboza Date: Tue, 9 Jul 2024 05:54:31 -0300 Subject: target/riscv/kvm: update KVM regs to Linux 6.10-rc5 Two new regs added: ztso and zacas. Signed-off-by: Daniel Henrique Barboza Reviewed-by: Alistair Francis Message-ID: <20240709085431.455541-1-dbarboza@ventanamicro.com> Signed-off-by: Alistair Francis --- target/riscv/kvm/kvm-cpu.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'target') diff --git a/target/riscv/kvm/kvm-cpu.c b/target/riscv/kvm/kvm-cpu.c index 1047961fed..f6e3156b8d 100644 --- a/target/riscv/kvm/kvm-cpu.c +++ b/target/riscv/kvm/kvm-cpu.c @@ -281,6 +281,7 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zihintntl", ext_zihintntl, KVM_RISCV_ISA_EXT_ZIHINTNTL), KVM_EXT_CFG("zihintpause", ext_zihintpause, KVM_RISCV_ISA_EXT_ZIHINTPAUSE), KVM_EXT_CFG("zihpm", ext_zihpm, KVM_RISCV_ISA_EXT_ZIHPM), + KVM_EXT_CFG("zacas", ext_zacas, KVM_RISCV_ISA_EXT_ZACAS), KVM_EXT_CFG("zfa", ext_zfa, KVM_RISCV_ISA_EXT_ZFA), KVM_EXT_CFG("zfh", ext_zfh, KVM_RISCV_ISA_EXT_ZFH), KVM_EXT_CFG("zfhmin", ext_zfhmin, KVM_RISCV_ISA_EXT_ZFHMIN), @@ -298,6 +299,7 @@ static KVMCPUConfig kvm_multi_ext_cfgs[] = { KVM_EXT_CFG("zksed", ext_zksed, KVM_RISCV_ISA_EXT_ZKSED), KVM_EXT_CFG("zksh", ext_zksh, KVM_RISCV_ISA_EXT_ZKSH), KVM_EXT_CFG("zkt", ext_zkt, KVM_RISCV_ISA_EXT_ZKT), + KVM_EXT_CFG("ztso", ext_ztso, KVM_RISCV_ISA_EXT_ZTSO), KVM_EXT_CFG("zvbb", ext_zvbb, KVM_RISCV_ISA_EXT_ZVBB), KVM_EXT_CFG("zvbc", ext_zvbc, KVM_RISCV_ISA_EXT_ZVBC), KVM_EXT_CFG("zvfh", ext_zvfh, KVM_RISCV_ISA_EXT_ZVFH), -- cgit v1.2.3 From 68c05fb53036f59a55a54c943b713e8ac78fc627 Mon Sep 17 00:00:00 2001 From: Rajnesh Kanwal Date: Thu, 11 Jul 2024 15:31:04 -0700 Subject: target/riscv: Combine set_mode and set_virt functions. Combining riscv_cpu_set_virt_enabled() and riscv_cpu_set_mode() functions. This is to make complete mode change information available through a single function. This allows to easily differentiate between HS->VS, VS->HS and VS->VS transitions when executing state update codes. For example: One use-case which inspired this change is to update mode-specific instruction and cycle counters which requires information of both prev mode and current mode. 
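A minimal sketch of the kind of consumer this enables: with the old and new (priv, virt) pair visible at one call site, the HS->VS, VS->HS and VS->VS cases mentioned above can be told apart in plain code. The enum and helper names are hypothetical and only illustrate the idea; the real counter-filtering users arrive with the smcntrpmf patches later in this series.

#include <stdbool.h>

enum mode_transition { ENTER_GUEST, LEAVE_GUEST, WITHIN_GUEST, WITHIN_HOST };

/* Classify a mode change from its old and new virtualization state. */
static enum mode_transition classify_virt_change(bool old_virt, bool new_virt)
{
    if (!old_virt && new_virt) return ENTER_GUEST;   /* e.g. HS -> VS */
    if (old_virt && !new_virt) return LEAVE_GUEST;   /* e.g. VS -> HS */
    if (old_virt && new_virt)  return WITHIN_GUEST;  /* e.g. VS -> VS */
    return WITHIN_HOST;                              /* e.g. S  -> M  */
}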
Signed-off-by: Rajnesh Kanwal Reviewed-by: Alistair Francis Reviewed-by: Daniel Henrique Barboza Message-ID: <20240711-smcntrpmf_v7-v8-1-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.h | 2 +- target/riscv/cpu_helper.c | 57 +++++++++++++++++++++++------------------------ target/riscv/op_helper.c | 17 +++++--------- 3 files changed, 35 insertions(+), 41 deletions(-) (limited to 'target') diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 87742047ce..6520e0f5d5 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -567,7 +567,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, RISCVException smstateen_acc_ok(CPURISCVState *env, int index, uint64_t bit); #endif /* !CONFIG_USER_ONLY */ -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv); +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en); void riscv_translate_init(void); G_NORETURN void riscv_raise_exception(CPURISCVState *env, diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 6709622dd3..10d3fdaed3 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -619,30 +619,6 @@ void riscv_cpu_set_geilen(CPURISCVState *env, target_ulong geilen) env->geilen = geilen; } -/* This function can only be called to set virt when RVH is enabled */ -void riscv_cpu_set_virt_enabled(CPURISCVState *env, bool enable) -{ - /* Flush the TLB on all virt mode changes. */ - if (env->virt_enabled != enable) { - tlb_flush(env_cpu(env)); - } - - env->virt_enabled = enable; - - if (enable) { - /* - * The guest external interrupts from an interrupt controller are - * delivered only when the Guest/VM is running (i.e. V=1). This means - * any guest external interrupt which is triggered while the Guest/VM - * is not running (i.e. V=0) will be missed on QEMU resulting in guest - * with sluggish response to serial console input and other I/O events. - * - * To solve this, we check and inject interrupt after setting V=1. - */ - riscv_cpu_update_mip(env, 0, 0); - } -} - int riscv_cpu_claim_interrupts(RISCVCPU *cpu, uint64_t interrupts) { CPURISCVState *env = &cpu->env; @@ -715,7 +691,7 @@ void riscv_cpu_set_aia_ireg_rmw_fn(CPURISCVState *env, uint32_t priv, } } -void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) +void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en) { g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED); @@ -736,6 +712,28 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv) * preemptive context switch. As a result, do both. */ env->load_res = -1; + + if (riscv_has_ext(env, RVH)) { + /* Flush the TLB on all virt mode changes. */ + if (env->virt_enabled != virt_en) { + tlb_flush(env_cpu(env)); + } + + env->virt_enabled = virt_en; + if (virt_en) { + /* + * The guest external interrupts from an interrupt controller are + * delivered only when the Guest/VM is running (i.e. V=1). This + * means any guest external interrupt which is triggered while the + * Guest/VM is not running (i.e. V=0) will be missed on QEMU + * resulting in guest with sluggish response to serial console + * input and other I/O events. + * + * To solve this, we check and inject interrupt after setting V=1. 
+ */ + riscv_cpu_update_mip(env, 0, 0); + } + } } /* @@ -1648,6 +1646,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) { RISCVCPU *cpu = RISCV_CPU(cs); CPURISCVState *env = &cpu->env; + bool virt = env->virt_enabled; bool write_gva = false; uint64_t s; @@ -1778,7 +1777,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) htval = env->guest_phys_fault_addr; - riscv_cpu_set_virt_enabled(env, 0); + virt = false; } else { /* Trap into HS mode */ env->hstatus = set_field(env->hstatus, HSTATUS_SPV, false); @@ -1799,7 +1798,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->htinst = tinst; env->pc = (env->stvec >> 2 << 2) + ((async && (env->stvec & 3) == 1) ? cause * 4 : 0); - riscv_cpu_set_mode(env, PRV_S); + riscv_cpu_set_mode(env, PRV_S, virt); } else { /* handle the trap in M-mode */ if (riscv_has_ext(env, RVH)) { @@ -1815,7 +1814,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) mtval2 = env->guest_phys_fault_addr; /* Trapping to M mode, virt is disabled */ - riscv_cpu_set_virt_enabled(env, 0); + virt = false; } s = env->mstatus; @@ -1830,7 +1829,7 @@ void riscv_cpu_do_interrupt(CPUState *cs) env->mtinst = tinst; env->pc = (env->mtvec >> 2 << 2) + ((async && (env->mtvec & 3) == 1) ? cause * 4 : 0); - riscv_cpu_set_mode(env, PRV_M); + riscv_cpu_set_mode(env, PRV_M, virt); } /* diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index 2baf5bc3ca..ec1408ba0f 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -264,7 +264,7 @@ void helper_cbo_inval(CPURISCVState *env, target_ulong address) target_ulong helper_sret(CPURISCVState *env) { uint64_t mstatus; - target_ulong prev_priv, prev_virt; + target_ulong prev_priv, prev_virt = env->virt_enabled; if (!(env->priv >= PRV_S)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); @@ -307,11 +307,9 @@ target_ulong helper_sret(CPURISCVState *env) if (prev_virt) { riscv_cpu_swap_hypervisor_regs(env); } - - riscv_cpu_set_virt_enabled(env, prev_virt); } - riscv_cpu_set_mode(env, prev_priv); + riscv_cpu_set_mode(env, prev_priv, prev_virt); return retpc; } @@ -347,16 +345,13 @@ target_ulong helper_mret(CPURISCVState *env) mstatus = set_field(mstatus, MSTATUS_MPRV, 0); } env->mstatus = mstatus; - riscv_cpu_set_mode(env, prev_priv); - - if (riscv_has_ext(env, RVH)) { - if (prev_virt) { - riscv_cpu_swap_hypervisor_regs(env); - } - riscv_cpu_set_virt_enabled(env, prev_virt); + if (riscv_has_ext(env, RVH) && prev_virt) { + riscv_cpu_swap_hypervisor_regs(env); } + riscv_cpu_set_mode(env, prev_priv, prev_virt); + return retpc; } -- cgit v1.2.3 From be470e597708451e6fecb0631728fa759164d03e Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:05 -0700 Subject: target/riscv: Fix the predicate functions for mhpmeventhX CSRs mhpmeventhX CSRs are available for RV32. The predicate function should check that first before checking sscofpmf extension. 
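In other words, the width check has to gate the extension check. A simplified, boolean-valued sketch of the ordering that the sscofpmf_32 predicate below implements; the real predicates return RISCV_EXCP_ILLEGAL_INST or RISCV_EXCP_NONE, and mhpmeventh_accessible is a hypothetical name used only for illustration.

#include <stdbool.h>

/* The *h CSRs only exist on RV32, so reject RV64 first and only then
 * defer to the sscofpmf availability check. */
static bool mhpmeventh_accessible(bool is_rv32, bool has_sscofpmf)
{
    if (!is_rv32) {
        return false;
    }
    return has_sscofpmf;
}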
Fixes: 14664483457b ("target/riscv: Add sscofpmf extension support") Reviewed-by: Daniel Henrique Barboza Reviewed-by: Alistair Francis Signed-off-by: Atish Patra Message-ID: <20240711-smcntrpmf_v7-v8-2-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 67 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 38 insertions(+), 29 deletions(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index f9229d92ab..1bcf75f91f 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -227,6 +227,15 @@ static RISCVException sscofpmf(CPURISCVState *env, int csrno) return RISCV_EXCP_NONE; } +static RISCVException sscofpmf_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return sscofpmf(env, csrno); +} + static RISCVException any(CPURISCVState *env, int csrno) { return RISCV_EXCP_NONE; @@ -5106,91 +5115,91 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent, write_mhpmevent }, - [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT4H] = { "mhpmevent4h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT5H] = { "mhpmevent5h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT6H] = { "mhpmevent6h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT7H] = { "mhpmevent7h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT8H] = { "mhpmevent8h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT9H] = { "mhpmevent9h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT10H] = { "mhpmevent10h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT11H] = { "mhpmevent11h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT12H] = { "mhpmevent12h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT13H] = { "mhpmevent13h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT14H] = { "mhpmevent14h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT15H] = { "mhpmevent15h", sscofpmf_32, read_mhpmeventh, 
write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT16H] = { "mhpmevent16h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT17H] = { "mhpmevent17h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT18H] = { "mhpmevent18h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT19H] = { "mhpmevent19h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT20H] = { "mhpmevent20h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT21H] = { "mhpmevent21h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT22H] = { "mhpmevent22h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT23H] = { "mhpmevent23h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT24H] = { "mhpmevent24h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT25H] = { "mhpmevent25h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT26H] = { "mhpmevent26h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT27H] = { "mhpmevent27h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT28H] = { "mhpmevent28h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT29H] = { "mhpmevent29h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT30H] = { "mhpmevent30h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, - [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf, read_mhpmeventh, + [CSR_MHPMEVENT31H] = { "mhpmevent31h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, -- cgit v1.2.3 From 251dccc09af363900436656461151681687e2470 Mon Sep 17 00:00:00 2001 From: Kaiwen Xue Date: Thu, 11 Jul 2024 15:31:06 -0700 Subject: target/riscv: Add cycle & instret privilege mode filtering properties This adds the properties for ISA extension smcntrpmf. Patches implementing it will follow. 
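To give a feel for what the extension does before the implementation patches land: Smcntrpmf lets mcyclecfg/minstretcfg inhibit the cycle and instret counters per privilege mode. A self-contained sketch follows, using the MINH/SINH/UINH/VSINH/VUINH bit positions (62 down to 58) that the next patch defines; counter_inhibited and the literal priv encoding (M = 3, S = 1, U = 0) are spelled out here only for illustration.

#include <stdbool.h>
#include <stdint.h>

/* Return true when counting is suppressed for the current mode, given a
 * 64-bit mcyclecfg/minstretcfg value. */
static bool counter_inhibited(uint64_t cfg, unsigned priv, bool virt)
{
    unsigned bit;

    if (priv == 3) {                  /* M-mode            */
        bit = 62;                     /* MINH              */
    } else if (priv == 1) {           /* S- or VS-mode     */
        bit = virt ? 59 : 61;         /* VSINH : SINH      */
    } else {                          /* U- or VU-mode     */
        bit = virt ? 58 : 60;         /* VUINH : UINH      */
    }
    return (cfg >> bit) & 1;
}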
Signed-off-by: Kaiwen Xue Reviewed-by: Daniel Henrique Barboza Signed-off-by: Atish Patra Reviewed-by: Alistair Francis Message-ID: <20240711-smcntrpmf_v7-v8-3-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 1 + target/riscv/cpu_cfg.h | 1 + 2 files changed, 2 insertions(+) (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 33ef4eb795..4efe7ee3b0 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -182,6 +182,7 @@ const RISCVIsaExtData isa_edata_arr[] = { ISA_EXT_DATA_ENTRY(zhinx, PRIV_VERSION_1_12_0, ext_zhinx), ISA_EXT_DATA_ENTRY(zhinxmin, PRIV_VERSION_1_12_0, ext_zhinxmin), ISA_EXT_DATA_ENTRY(smaia, PRIV_VERSION_1_12_0, ext_smaia), + ISA_EXT_DATA_ENTRY(smcntrpmf, PRIV_VERSION_1_12_0, ext_smcntrpmf), ISA_EXT_DATA_ENTRY(smepmp, PRIV_VERSION_1_12_0, ext_smepmp), ISA_EXT_DATA_ENTRY(smstateen, PRIV_VERSION_1_12_0, ext_smstateen), ISA_EXT_DATA_ENTRY(ssaia, PRIV_VERSION_1_12_0, ext_ssaia), diff --git a/target/riscv/cpu_cfg.h b/target/riscv/cpu_cfg.h index 120905a254..8b272fb826 100644 --- a/target/riscv/cpu_cfg.h +++ b/target/riscv/cpu_cfg.h @@ -76,6 +76,7 @@ struct RISCVCPUConfig { bool ext_ztso; bool ext_smstateen; bool ext_sstc; + bool ext_smcntrpmf; bool ext_svadu; bool ext_svinval; bool ext_svnapot; -- cgit v1.2.3 From 6d1e3893cfaeedb47a16edbb766fcc0c7907ab94 Mon Sep 17 00:00:00 2001 From: Kaiwen Xue Date: Thu, 11 Jul 2024 15:31:07 -0700 Subject: target/riscv: Add cycle & instret privilege mode filtering definitions This adds the definitions for ISA extension smcntrpmf. Signed-off-by: Kaiwen Xue Reviewed-by: Daniel Henrique Barboza Reviewed-by: Alistair Francis Signed-off-by: Atish Patra Message-ID: <20240711-smcntrpmf_v7-v8-4-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.h | 6 ++++++ target/riscv/cpu_bits.h | 29 +++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) (limited to 'target') diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 6520e0f5d5..980e2154cd 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -362,6 +362,12 @@ struct CPUArchState { uint32_t mcountinhibit; + /* PMU cycle & instret privilege mode filtering */ + target_ulong mcyclecfg; + target_ulong mcyclecfgh; + target_ulong minstretcfg; + target_ulong minstretcfgh; + /* PMU counter state */ PMUCTRState pmu_ctrs[RV_MAX_MHPMCOUNTERS]; diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index c257c5ed7d..5faa817453 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -397,6 +397,10 @@ /* Machine counter-inhibit register */ #define CSR_MCOUNTINHIBIT 0x320 +/* Machine counter configuration registers */ +#define CSR_MCYCLECFG 0x321 +#define CSR_MINSTRETCFG 0x322 + #define CSR_MHPMEVENT3 0x323 #define CSR_MHPMEVENT4 0x324 #define CSR_MHPMEVENT5 0x325 @@ -427,6 +431,9 @@ #define CSR_MHPMEVENT30 0x33e #define CSR_MHPMEVENT31 0x33f +#define CSR_MCYCLECFGH 0x721 +#define CSR_MINSTRETCFGH 0x722 + #define CSR_MHPMEVENT3H 0x723 #define CSR_MHPMEVENT4H 0x724 #define CSR_MHPMEVENT5H 0x725 @@ -884,6 +891,28 @@ typedef enum RISCVException { /* PMU related bits */ #define MIE_LCOFIE (1 << IRQ_PMU_OVF) +#define MCYCLECFG_BIT_MINH BIT_ULL(62) +#define MCYCLECFGH_BIT_MINH BIT(30) +#define MCYCLECFG_BIT_SINH BIT_ULL(61) +#define MCYCLECFGH_BIT_SINH BIT(29) +#define MCYCLECFG_BIT_UINH BIT_ULL(60) +#define MCYCLECFGH_BIT_UINH BIT(28) +#define MCYCLECFG_BIT_VSINH BIT_ULL(59) +#define MCYCLECFGH_BIT_VSINH BIT(27) +#define MCYCLECFG_BIT_VUINH BIT_ULL(58) +#define MCYCLECFGH_BIT_VUINH 
BIT(26) + +#define MINSTRETCFG_BIT_MINH BIT_ULL(62) +#define MINSTRETCFGH_BIT_MINH BIT(30) +#define MINSTRETCFG_BIT_SINH BIT_ULL(61) +#define MINSTRETCFGH_BIT_SINH BIT(29) +#define MINSTRETCFG_BIT_UINH BIT_ULL(60) +#define MINSTRETCFGH_BIT_UINH BIT(28) +#define MINSTRETCFG_BIT_VSINH BIT_ULL(59) +#define MINSTRETCFGH_BIT_VSINH BIT(27) +#define MINSTRETCFG_BIT_VUINH BIT_ULL(58) +#define MINSTRETCFGH_BIT_VUINH BIT(26) + #define MHPMEVENT_BIT_OF BIT_ULL(63) #define MHPMEVENTH_BIT_OF BIT(31) #define MHPMEVENT_BIT_MINH BIT_ULL(62) -- cgit v1.2.3 From b54a84c15e389a4795022cc7edfe3a7e32dc065d Mon Sep 17 00:00:00 2001 From: Kaiwen Xue Date: Thu, 11 Jul 2024 15:31:08 -0700 Subject: target/riscv: Add cycle & instret privilege mode filtering support QEMU only calculates dummy cycles and instructions, so there is no actual means to stop the icount in QEMU. Hence this patch merely adds the functionality of accessing the cfg registers, and cause no actual effects on the counting of cycle and instret counters. Signed-off-by: Atish Patra Reviewed-by: Daniel Henrique Barboza Signed-off-by: Kaiwen Xue Acked-by: Alistair Francis Message-ID: <20240711-smcntrpmf_v7-v8-5-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu_bits.h | 12 +++++ target/riscv/csr.c | 138 +++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 149 insertions(+), 1 deletion(-) (limited to 'target') diff --git a/target/riscv/cpu_bits.h b/target/riscv/cpu_bits.h index 5faa817453..32b068f18a 100644 --- a/target/riscv/cpu_bits.h +++ b/target/riscv/cpu_bits.h @@ -926,6 +926,18 @@ typedef enum RISCVException { #define MHPMEVENT_BIT_VUINH BIT_ULL(58) #define MHPMEVENTH_BIT_VUINH BIT(26) +#define MHPMEVENT_FILTER_MASK (MHPMEVENT_BIT_MINH | \ + MHPMEVENT_BIT_SINH | \ + MHPMEVENT_BIT_UINH | \ + MHPMEVENT_BIT_VSINH | \ + MHPMEVENT_BIT_VUINH) + +#define MHPMEVENTH_FILTER_MASK (MHPMEVENTH_BIT_MINH | \ + MHPMEVENTH_BIT_SINH | \ + MHPMEVENTH_BIT_UINH | \ + MHPMEVENTH_BIT_VSINH | \ + MHPMEVENTH_BIT_VUINH) + #define MHPMEVENT_SSCOF_MASK _ULL(0xFFFF000000000000) #define MHPMEVENT_IDX_MASK 0xFFFFF #define MHPMEVENT_SSCOF_RESVD 16 diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 1bcf75f91f..8831d4f5ec 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -30,7 +30,6 @@ #include "qemu/guest-random.h" #include "qapi/error.h" - /* CSR function table public API */ void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops) { @@ -236,6 +235,24 @@ static RISCVException sscofpmf_32(CPURISCVState *env, int csrno) return sscofpmf(env, csrno); } +static RISCVException smcntrpmf(CPURISCVState *env, int csrno) +{ + if (!riscv_cpu_cfg(env)->ext_smcntrpmf) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException smcntrpmf_32(CPURISCVState *env, int csrno) +{ + if (riscv_cpu_mxl(env) != MXL_RV32) { + return RISCV_EXCP_ILLEGAL_INST; + } + + return smcntrpmf(env, csrno); +} + static RISCVException any(CPURISCVState *env, int csrno) { return RISCV_EXCP_NONE; @@ -830,6 +847,111 @@ static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, #else /* CONFIG_USER_ONLY */ +static RISCVException read_mcyclecfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mcyclecfg; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mcyclecfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t inh_avail_mask; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + env->mcyclecfg = val; + } else { + /* Set xINH fields if priv mode supported */ + inh_avail_mask = 
~MHPMEVENT_FILTER_MASK | MCYCLECFG_BIT_MINH; + inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFG_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFG_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MCYCLECFG_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? MCYCLECFG_BIT_VSINH : 0; + env->mcyclecfg = val & inh_avail_mask; + } + + return RISCV_EXCP_NONE; +} + +static RISCVException read_mcyclecfgh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->mcyclecfgh; + return RISCV_EXCP_NONE; +} + +static RISCVException write_mcyclecfgh(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | + MCYCLECFGH_BIT_MINH); + + /* Set xINH fields if priv mode supported */ + inh_avail_mask |= riscv_has_ext(env, RVU) ? MCYCLECFGH_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? MCYCLECFGH_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MCYCLECFGH_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? MCYCLECFGH_BIT_VSINH : 0; + + env->mcyclecfgh = val & inh_avail_mask; + return RISCV_EXCP_NONE; +} + +static RISCVException read_minstretcfg(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->minstretcfg; + return RISCV_EXCP_NONE; +} + +static RISCVException write_minstretcfg(CPURISCVState *env, int csrno, + target_ulong val) +{ + uint64_t inh_avail_mask; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + env->minstretcfg = val; + } else { + inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MINSTRETCFG_BIT_MINH; + inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFG_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFG_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MINSTRETCFG_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? MINSTRETCFG_BIT_VSINH : 0; + env->minstretcfg = val & inh_avail_mask; + } + return RISCV_EXCP_NONE; +} + +static RISCVException read_minstretcfgh(CPURISCVState *env, int csrno, + target_ulong *val) +{ + *val = env->minstretcfgh; + return RISCV_EXCP_NONE; +} + +static RISCVException write_minstretcfgh(CPURISCVState *env, int csrno, + target_ulong val) +{ + target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | + MINSTRETCFGH_BIT_MINH); + + inh_avail_mask |= riscv_has_ext(env, RVU) ? MINSTRETCFGH_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? MINSTRETCFGH_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MINSTRETCFGH_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? 
MINSTRETCFGH_BIT_VSINH : 0; + + env->minstretcfgh = val & inh_avail_mask; + return RISCV_EXCP_NONE; +} + static RISCVException read_mhpmevent(CPURISCVState *env, int csrno, target_ulong *val) { @@ -5056,6 +5178,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { write_mcountinhibit, .min_priv_ver = PRIV_VERSION_1_11_0 }, + [CSR_MCYCLECFG] = { "mcyclecfg", smcntrpmf, read_mcyclecfg, + write_mcyclecfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MINSTRETCFG] = { "minstretcfg", smcntrpmf, read_minstretcfg, + write_minstretcfg, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT3] = { "mhpmevent3", any, read_mhpmevent, write_mhpmevent }, [CSR_MHPMEVENT4] = { "mhpmevent4", any, read_mhpmevent, @@ -5115,6 +5244,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = { [CSR_MHPMEVENT31] = { "mhpmevent31", any, read_mhpmevent, write_mhpmevent }, + [CSR_MCYCLECFGH] = { "mcyclecfgh", smcntrpmf_32, read_mcyclecfgh, + write_mcyclecfgh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MINSTRETCFGH] = { "minstretcfgh", smcntrpmf_32, read_minstretcfgh, + write_minstretcfgh, + .min_priv_ver = PRIV_VERSION_1_12_0 }, + [CSR_MHPMEVENT3H] = { "mhpmevent3h", sscofpmf_32, read_mhpmeventh, write_mhpmeventh, .min_priv_ver = PRIV_VERSION_1_12_0 }, -- cgit v1.2.3 From 3b31b7baff02b357a6c921b26ae953e04d0cfdbb Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:09 -0700 Subject: target/riscv: Only set INH fields if priv mode is available Currently, the INH fields are set in mhpmevent uncoditionally without checking if a particular priv mode is supported or not. Suggested-by: Alistair Francis Signed-off-by: Atish Patra Acked-by: Alistair Francis Message-ID: <20240711-smcntrpmf_v7-v8-6-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 29 +++++++++++++++++++++++++---- 1 file changed, 25 insertions(+), 4 deletions(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 8831d4f5ec..364583dc03 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -967,13 +967,24 @@ static RISCVException write_mhpmevent(CPURISCVState *env, int csrno, { int evt_index = csrno - CSR_MCOUNTINHIBIT; uint64_t mhpmevt_val = val; - - env->mhpmevent_val[evt_index] = val; + uint64_t inh_avail_mask; if (riscv_cpu_mxl(env) == MXL_RV32) { + env->mhpmevent_val[evt_index] = val; mhpmevt_val = mhpmevt_val | ((uint64_t)env->mhpmeventh_val[evt_index] << 32); + } else { + inh_avail_mask = ~MHPMEVENT_FILTER_MASK | MHPMEVENT_BIT_MINH; + inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENT_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? MHPMEVENT_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MHPMEVENT_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? MHPMEVENT_BIT_VSINH : 0; + mhpmevt_val = val & inh_avail_mask; + env->mhpmevent_val[evt_index] = mhpmevt_val; } + riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); return RISCV_EXCP_NONE; @@ -993,11 +1004,21 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, target_ulong val) { int evt_index = csrno - CSR_MHPMEVENT3H + 3; - uint64_t mhpmevth_val = val; + uint64_t mhpmevth_val; uint64_t mhpmevt_val = env->mhpmevent_val[evt_index]; + target_ulong inh_avail_mask = (target_ulong)(~MHPMEVENTH_FILTER_MASK | + MHPMEVENTH_BIT_MINH); + + inh_avail_mask |= riscv_has_ext(env, RVU) ? MHPMEVENTH_BIT_UINH : 0; + inh_avail_mask |= riscv_has_ext(env, RVS) ? 
MHPMEVENTH_BIT_SINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVU)) ? MHPMEVENTH_BIT_VUINH : 0; + inh_avail_mask |= (riscv_has_ext(env, RVH) && + riscv_has_ext(env, RVS)) ? MHPMEVENTH_BIT_VSINH : 0; + mhpmevth_val = val & inh_avail_mask; mhpmevt_val = mhpmevt_val | (mhpmevth_val << 32); - env->mhpmeventh_val[evt_index] = val; + env->mhpmeventh_val[evt_index] = mhpmevth_val; riscv_pmu_update_event_map(env, mhpmevt_val, evt_index); -- cgit v1.2.3 From b2d7a7c7e4e30fb5341d38deac968de675f9419c Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:10 -0700 Subject: target/riscv: Implement privilege mode filtering for cycle/instret Privilege mode filtering can also be emulated for cycle/instret by tracking host_ticks/icount during each privilege mode switch. This patch implements that for both cycle/instret and mhpmcounters. The first one requires Smcntrpmf while the other one requires Sscofpmf to be enabled. The cycle/instret are still computed using host ticks when icount is not enabled. Otherwise, they are computed using raw icount which is more accurate in icount mode. Co-Developed-by: Rajnesh Kanwal Signed-off-by: Rajnesh Kanwal Reviewed-by: Daniel Henrique Barboza Acked-by: Alistair Francis Signed-off-by: Atish Patra Message-ID: <20240711-smcntrpmf_v7-v8-7-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.h | 11 +++++ target/riscv/cpu_helper.c | 9 +++- target/riscv/csr.c | 117 ++++++++++++++++++++++++++++++++-------------- target/riscv/pmu.c | 92 ++++++++++++++++++++++++++++++++++++ target/riscv/pmu.h | 2 + 5 files changed, 194 insertions(+), 37 deletions(-) (limited to 'target') diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 980e2154cd..f515ad072b 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -181,6 +181,15 @@ typedef struct PMUCTRState { target_ulong irq_overflow_left; } PMUCTRState; +typedef struct PMUFixedCtrState { + /* Track cycle and icount for each privilege mode */ + uint64_t counter[4]; + uint64_t counter_prev[4]; + /* Track cycle and icount for each privilege mode when V = 1*/ + uint64_t counter_virt[2]; + uint64_t counter_virt_prev[2]; +} PMUFixedCtrState; + struct CPUArchState { target_ulong gpr[32]; target_ulong gprh[32]; /* 64 top bits of the 128-bit registers */ @@ -377,6 +386,8 @@ struct CPUArchState { /* PMU event selector configured values for RV32 */ target_ulong mhpmeventh_val[RV_MAX_MHPMEVENTS]; + PMUFixedCtrState pmu_fixed_ctrs[2]; + target_ulong sscratch; target_ulong mscratch; diff --git a/target/riscv/cpu_helper.c b/target/riscv/cpu_helper.c index 10d3fdaed3..395a1d9140 100644 --- a/target/riscv/cpu_helper.c +++ b/target/riscv/cpu_helper.c @@ -695,9 +695,14 @@ void riscv_cpu_set_mode(CPURISCVState *env, target_ulong newpriv, bool virt_en) { g_assert(newpriv <= PRV_M && newpriv != PRV_RESERVED); - if (icount_enabled() && newpriv != env->priv) { - riscv_itrigger_update_priv(env); + if (newpriv != env->priv || env->virt_enabled != virt_en) { + if (icount_enabled()) { + riscv_itrigger_update_priv(env); + } + + riscv_pmu_update_fixed_ctrs(env, newpriv, virt_en); } + /* tlb_flush is unnecessary as mode is contained in mmu_idx */ env->priv = newpriv; env->xl = cpu_recompute_xl(env); diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 364583dc03..85d3f0aa3f 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -787,36 +787,16 @@ static RISCVException write_vcsr(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +#if defined(CONFIG_USER_ONLY) /* User 
Timers and Counters */ -static target_ulong get_ticks(bool shift, bool instructions) +static target_ulong get_ticks(bool shift) { - int64_t val; - target_ulong result; - -#if !defined(CONFIG_USER_ONLY) - if (icount_enabled()) { - if (instructions) { - val = icount_get_raw(); - } else { - val = icount_get(); - } - } else { - val = cpu_get_host_ticks(); - } -#else - val = cpu_get_host_ticks(); -#endif - - if (shift) { - result = val >> 32; - } else { - result = val; - } + int64_t val = cpu_get_host_ticks(); + target_ulong result = shift ? val >> 32 : val; return result; } -#if defined(CONFIG_USER_ONLY) static RISCVException read_time(CPURISCVState *env, int csrno, target_ulong *val) { @@ -834,14 +814,14 @@ static RISCVException read_timeh(CPURISCVState *env, int csrno, static RISCVException read_hpmcounter(CPURISCVState *env, int csrno, target_ulong *val) { - *val = get_ticks(false, (csrno == CSR_INSTRET)); + *val = get_ticks(false); return RISCV_EXCP_NONE; } static RISCVException read_hpmcounterh(CPURISCVState *env, int csrno, target_ulong *val) { - *val = get_ticks(true, (csrno == CSR_INSTRETH)); + *val = get_ticks(true); return RISCV_EXCP_NONE; } @@ -1025,17 +1005,82 @@ static RISCVException write_mhpmeventh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env, + int counter_idx, + bool upper_half) +{ + int inst = riscv_pmu_ctr_monitor_instructions(env, counter_idx); + uint64_t *counter_arr_virt = env->pmu_fixed_ctrs[inst].counter_virt; + uint64_t *counter_arr = env->pmu_fixed_ctrs[inst].counter; + target_ulong result = 0; + uint64_t curr_val = 0; + uint64_t cfg_val = 0; + + if (counter_idx == 0) { + cfg_val = upper_half ? ((uint64_t)env->mcyclecfgh << 32) : + env->mcyclecfg; + } else if (counter_idx == 2) { + cfg_val = upper_half ? ((uint64_t)env->minstretcfgh << 32) : + env->minstretcfg; + } else { + cfg_val = upper_half ? + ((uint64_t)env->mhpmeventh_val[counter_idx] << 32) : + env->mhpmevent_val[counter_idx]; + cfg_val &= MHPMEVENT_FILTER_MASK; + } + + if (!cfg_val) { + if (icount_enabled()) { + curr_val = inst ? icount_get_raw() : icount_get(); + } else { + curr_val = cpu_get_host_ticks(); + } + + goto done; + } + + if (!(cfg_val & MCYCLECFG_BIT_MINH)) { + curr_val += counter_arr[PRV_M]; + } + + if (!(cfg_val & MCYCLECFG_BIT_SINH)) { + curr_val += counter_arr[PRV_S]; + } + + if (!(cfg_val & MCYCLECFG_BIT_UINH)) { + curr_val += counter_arr[PRV_U]; + } + + if (!(cfg_val & MCYCLECFG_BIT_VSINH)) { + curr_val += counter_arr_virt[PRV_S]; + } + + if (!(cfg_val & MCYCLECFG_BIT_VUINH)) { + curr_val += counter_arr_virt[PRV_U]; + } + +done: + if (riscv_cpu_mxl(env) == MXL_RV32) { + result = upper_half ? 
curr_val >> 32 : curr_val; + } else { + result = curr_val; + } + + return result; +} + static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, target_ulong val) { int ctr_idx = csrno - CSR_MCYCLE; PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; uint64_t mhpmctr_val = val; - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx); counter->mhpmcounter_val = val; - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) { - counter->mhpmcounter_prev = get_ticks(false, instr); + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env, + ctr_idx, false); if (ctr_idx > 2) { if (riscv_cpu_mxl(env) == MXL_RV32) { mhpmctr_val = mhpmctr_val | @@ -1058,12 +1103,13 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; uint64_t mhpmctr_val = counter->mhpmcounter_val; uint64_t mhpmctrh_val = val; - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx); counter->mhpmcounterh_val = val; mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32); - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) { - counter->mhpmcounterh_prev = get_ticks(true, instr); + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env, + ctr_idx, true); if (ctr_idx > 2) { riscv_pmu_setup_timer(env, mhpmctr_val, ctr_idx); } @@ -1082,7 +1128,6 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, counter->mhpmcounter_prev; target_ulong ctr_val = upper_half ? counter->mhpmcounterh_val : counter->mhpmcounter_val; - bool instr = riscv_pmu_ctr_monitor_instructions(env, ctr_idx); if (get_field(env->mcountinhibit, BIT(ctr_idx))) { /* @@ -1103,8 +1148,10 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, * The kernel computes the perf delta by subtracting the current value from * the value it initialized previously (ctr_val). */ - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || instr) { - *val = get_ticks(upper_half, instr) - ctr_prev + ctr_val; + if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + *val = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, upper_half) - + ctr_prev + ctr_val; } else { *val = ctr_val; } diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index 0e7d58b8a5..ac648cff8d 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -19,6 +19,7 @@ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/error-report.h" +#include "qemu/timer.h" #include "cpu.h" #include "pmu.h" #include "sysemu/cpu-timers.h" @@ -176,6 +177,97 @@ static int riscv_pmu_incr_ctr_rv64(RISCVCPU *cpu, uint32_t ctr_idx) return 0; } +/* + * Information needed to update counters: + * new_priv, new_virt: To correctly save starting snapshot for the newly + * started mode. Look at array being indexed with newprv. + * old_priv, old_virt: To correctly select previous snapshot for old priv + * and compute delta. Also to select correct counter + * to inc. Look at arrays being indexed with env->priv. + * + * To avoid the complexity of calling this function, we assume that + * env->priv and env->virt_enabled contain old priv and old virt and + * new priv and new virt values are passed in as arguments. 
+ */ +static void riscv_pmu_icount_update_priv(CPURISCVState *env, + target_ulong newpriv, bool new_virt) +{ + uint64_t *snapshot_prev, *snapshot_new; + uint64_t current_icount; + uint64_t *counter_arr; + uint64_t delta; + + if (icount_enabled()) { + current_icount = icount_get_raw(); + } else { + current_icount = cpu_get_host_ticks(); + } + + if (env->virt_enabled) { + counter_arr = env->pmu_fixed_ctrs[1].counter_virt; + snapshot_prev = env->pmu_fixed_ctrs[1].counter_virt_prev; + } else { + counter_arr = env->pmu_fixed_ctrs[1].counter; + snapshot_prev = env->pmu_fixed_ctrs[1].counter_prev; + } + + if (new_virt) { + snapshot_new = env->pmu_fixed_ctrs[1].counter_virt_prev; + } else { + snapshot_new = env->pmu_fixed_ctrs[1].counter_prev; + } + + /* + * new_priv can be same as env->priv. So we need to calculate + * delta first before updating snapshot_new[new_priv]. + */ + delta = current_icount - snapshot_prev[env->priv]; + snapshot_new[newpriv] = current_icount; + + counter_arr[env->priv] += delta; +} + +static void riscv_pmu_cycle_update_priv(CPURISCVState *env, + target_ulong newpriv, bool new_virt) +{ + uint64_t *snapshot_prev, *snapshot_new; + uint64_t current_ticks; + uint64_t *counter_arr; + uint64_t delta; + + if (icount_enabled()) { + current_ticks = icount_get(); + } else { + current_ticks = cpu_get_host_ticks(); + } + + if (env->virt_enabled) { + counter_arr = env->pmu_fixed_ctrs[0].counter_virt; + snapshot_prev = env->pmu_fixed_ctrs[0].counter_virt_prev; + } else { + counter_arr = env->pmu_fixed_ctrs[0].counter; + snapshot_prev = env->pmu_fixed_ctrs[0].counter_prev; + } + + if (new_virt) { + snapshot_new = env->pmu_fixed_ctrs[0].counter_virt_prev; + } else { + snapshot_new = env->pmu_fixed_ctrs[0].counter_prev; + } + + delta = current_ticks - snapshot_prev[env->priv]; + snapshot_new[newpriv] = current_ticks; + + counter_arr[env->priv] += delta; +} + +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv, + bool new_virt) +{ + riscv_pmu_cycle_update_priv(env, newpriv, new_virt); + riscv_pmu_icount_update_priv(env, newpriv, new_virt); +} + int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx) { uint32_t ctr_idx; diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h index 7c0ad661e0..ca40cfeed6 100644 --- a/target/riscv/pmu.h +++ b/target/riscv/pmu.h @@ -34,5 +34,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx); void riscv_pmu_generate_fdt_node(void *fdt, uint32_t cmask, char *pmu_name); int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx); +void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv, + bool new_virt); #endif /* RISCV_PMU_H */ -- cgit v1.2.3 From 46023470e014234f3e15ec4497e003550bd7da0d Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:11 -0700 Subject: target/riscv: Save counter values during countinhibit update Currently, if a counter monitoring cycle/instret is stopped via mcountinhibit we just update the state while the value is saved during the next read. This is not accurate as the read may happen many cycles after the counter is stopped. Ideally, the read should return the value saved when the counter is stopped. Thus, save the value of the counter during the inhibit update operation and return that value during the read if corresponding bit in mcountihibit is set. 
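In other words, the architectural counter value is now frozen at the moment the inhibit bit is written, rather than at the next CSR read. A condensed sketch of that computation (simplified from the write_mcountinhibit() hunk below; the RV32 high-half handling is omitted):

    /* When a cycle/instret counter is being inhibited, fold the ticks
     * elapsed since the last snapshot into the stored value so later
     * reads return the value at stop time. */
    counter->mhpmcounter_val =
        riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
        counter->mhpmcounter_prev + counter->mhpmcounter_val;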
Acked-by: Alistair Francis Reviewed-by: Daniel Henrique Barboza Signed-off-by: Atish Patra Message-ID: <20240711-smcntrpmf_v7-v8-8-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.h | 1 - target/riscv/csr.c | 34 ++++++++++++++++++++++------------ target/riscv/machine.c | 5 ++--- 3 files changed, 24 insertions(+), 16 deletions(-) (limited to 'target') diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index f515ad072b..093c86b8b9 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -176,7 +176,6 @@ typedef struct PMUCTRState { target_ulong mhpmcounter_prev; /* Snapshort value of a counter in RV32 */ target_ulong mhpmcounterh_prev; - bool started; /* Value beyond UINT32_MAX/UINT64_MAX before overflow interrupt trigger */ target_ulong irq_overflow_left; } PMUCTRState; diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 85d3f0aa3f..b7a24f9c60 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -1131,17 +1131,11 @@ static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, if (get_field(env->mcountinhibit, BIT(ctr_idx))) { /* - * Counter should not increment if inhibit bit is set. We can't really - * stop the icount counting. Just return the counter value written by - * the supervisor to indicate that counter was not incremented. + * Counter should not increment if inhibit bit is set. Just return the + * current counter value. */ - if (!counter->started) { - *val = ctr_val; - return RISCV_EXCP_NONE; - } else { - /* Mark that the counter has been stopped */ - counter->started = false; - } + *val = ctr_val; + return RISCV_EXCP_NONE; } /* @@ -2183,9 +2177,25 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, /* Check if any other counter is also monitoring cycles/instructions */ for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) { - if (!get_field(env->mcountinhibit, BIT(cidx))) { counter = &env->pmu_ctrs[cidx]; - counter->started = true; + if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) { + /* + * Update the counter value for cycle/instret as we can't stop the + * host ticks. But we should show the current value at this moment. 
+ */ + if (riscv_pmu_ctr_monitor_cycles(env, cidx) || + riscv_pmu_ctr_monitor_instructions(env, cidx)) { + counter->mhpmcounter_val = + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) - + counter->mhpmcounter_prev + + counter->mhpmcounter_val; + if (riscv_cpu_mxl(env) == MXL_RV32) { + counter->mhpmcounterh_val = + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) - + counter->mhpmcounterh_prev + + counter->mhpmcounterh_val; + } + } } } diff --git a/target/riscv/machine.c b/target/riscv/machine.c index 76f2150f78..492c2c6d9d 100644 --- a/target/riscv/machine.c +++ b/target/riscv/machine.c @@ -320,15 +320,14 @@ static bool pmu_needed(void *opaque) static const VMStateDescription vmstate_pmu_ctr_state = { .name = "cpu/pmu", - .version_id = 1, - .minimum_version_id = 1, + .version_id = 2, + .minimum_version_id = 2, .needed = pmu_needed, .fields = (const VMStateField[]) { VMSTATE_UINTTL(mhpmcounter_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounterh_val, PMUCTRState), VMSTATE_UINTTL(mhpmcounter_prev, PMUCTRState), VMSTATE_UINTTL(mhpmcounterh_prev, PMUCTRState), - VMSTATE_BOOL(started, PMUCTRState), VMSTATE_END_OF_LIST() } }; -- cgit v1.2.3 From 8cff74c26dbdfc746d8f0165c233be3d396d4572 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:12 -0700 Subject: target/riscv: Enforce WARL behavior for scounteren/hcounteren scounteren/hcounteren are also WARL registers, similar to mcounteren. Only set the bits for the available counters during the write to preserve the WARL behavior. Signed-off-by: Atish Patra Reviewed-by: Daniel Henrique Barboza Reviewed-by: Alistair Francis Message-ID: <20240711-smcntrpmf_v7-v8-9-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index b7a24f9c60..d6c5b73afd 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -3063,7 +3063,11 @@ static RISCVException read_scounteren(CPURISCVState *env, int csrno, static RISCVException write_scounteren(CPURISCVState *env, int csrno, target_ulong val) { - env->scounteren = val; + RISCVCPU *cpu = env_archcpu(env); + + /* WARL register - disable unavailable counters */ + env->scounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM | + COUNTEREN_IR); return RISCV_EXCP_NONE; } @@ -3722,7 +3726,11 @@ static RISCVException read_hcounteren(CPURISCVState *env, int csrno, static RISCVException write_hcounteren(CPURISCVState *env, int csrno, target_ulong val) { - env->hcounteren = val; + RISCVCPU *cpu = env_archcpu(env); + + /* WARL register - disable unavailable counters */ + env->hcounteren = val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_TM | + COUNTEREN_IR); return RISCV_EXCP_NONE; } -- cgit v1.2.3 From 22c721c34c1609b5ac53dfc0d34125ec479205a0 Mon Sep 17 00:00:00 2001 From: Rajnesh Kanwal Date: Thu, 11 Jul 2024 15:31:13 -0700 Subject: target/riscv: Start counters from both mhpmcounter and mcountinhibit Currently we start the timer counter from the write_mhpmcounter path only, without checking the mcountinhibit bit. This change adds an mcountinhibit check and also programs the counter from write_mcountinhibit as well. When a counter is stopped using mcountinhibit, we simply update the value of the counter based on the current host ticks and save it for future reads. We don't need to disable the running timer, as pmu_timer_trigger_irq will discard the interrupt if the counter has been inhibited.
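The transition handling added to write_mcountinhibit() boils down to two cases; a condensed sketch follows (simplified from the hunk below — the RV32 high-half handling and the cidx > 2 timer programming are omitted):

    if (!get_field(env->mcountinhibit, BIT(cidx))) {
        /* Counter (re)started: take a fresh snapshot so future reads
         * count from here, and re-arm the overflow timer. */
        counter->mhpmcounter_prev =
            riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false);
        riscv_pmu_setup_timer(env, counter->mhpmcounter_val, cidx);
    } else {
        /* Counter stopped: fold the elapsed delta into the stored value
         * so later reads return the value at stop time. */
        counter->mhpmcounter_val +=
            riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) -
            counter->mhpmcounter_prev;
    }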
Signed-off-by: Rajnesh Kanwal Reviewed-by: Daniel Henrique Barboza Message-ID: <20240711-smcntrpmf_v7-v8-10-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 75 ++++++++++++++++++++++++++++++++++++++---------------- target/riscv/pmu.c | 3 +-- 2 files changed, 54 insertions(+), 24 deletions(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index d6c5b73afd..bb6ac33ac2 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -1077,8 +1077,9 @@ static RISCVException write_mhpmcounter(CPURISCVState *env, int csrno, uint64_t mhpmctr_val = val; counter->mhpmcounter_val = val; - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || - riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + if (!get_field(env->mcountinhibit, BIT(ctr_idx)) && + (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) { counter->mhpmcounter_prev = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, false); if (ctr_idx > 2) { @@ -1106,8 +1107,9 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, counter->mhpmcounterh_val = val; mhpmctr_val = mhpmctr_val | (mhpmctrh_val << 32); - if (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || - riscv_pmu_ctr_monitor_instructions(env, ctr_idx)) { + if (!get_field(env->mcountinhibit, BIT(ctr_idx)) && + (riscv_pmu_ctr_monitor_cycles(env, ctr_idx) || + riscv_pmu_ctr_monitor_instructions(env, ctr_idx))) { counter->mhpmcounterh_prev = riscv_pmu_ctr_get_fixed_counters_val(env, ctr_idx, true); if (ctr_idx > 2) { @@ -2170,31 +2172,60 @@ static RISCVException write_mcountinhibit(CPURISCVState *env, int csrno, int cidx; PMUCTRState *counter; RISCVCPU *cpu = env_archcpu(env); + uint32_t present_ctrs = cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR; + target_ulong updated_ctrs = (env->mcountinhibit ^ val) & present_ctrs; + uint64_t mhpmctr_val, prev_count, curr_count; /* WARL register - disable unavailable counters; TM bit is always 0 */ - env->mcountinhibit = - val & (cpu->pmu_avail_ctrs | COUNTEREN_CY | COUNTEREN_IR); + env->mcountinhibit = val & present_ctrs; /* Check if any other counter is also monitoring cycles/instructions */ for (cidx = 0; cidx < RV_MAX_MHPMCOUNTERS; cidx++) { - counter = &env->pmu_ctrs[cidx]; - if (get_field(env->mcountinhibit, BIT(cidx)) && (val & BIT(cidx))) { - /* - * Update the counter value for cycle/instret as we can't stop the - * host ticks. But we should show the current value at this moment. 
- */ - if (riscv_pmu_ctr_monitor_cycles(env, cidx) || - riscv_pmu_ctr_monitor_instructions(env, cidx)) { - counter->mhpmcounter_val = - riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false) - - counter->mhpmcounter_prev + - counter->mhpmcounter_val; + if (!(updated_ctrs & BIT(cidx)) || + (!riscv_pmu_ctr_monitor_cycles(env, cidx) && + !riscv_pmu_ctr_monitor_instructions(env, cidx))) { + continue; + } + + counter = &env->pmu_ctrs[cidx]; + + if (!get_field(env->mcountinhibit, BIT(cidx))) { + counter->mhpmcounter_prev = + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false); + if (riscv_cpu_mxl(env) == MXL_RV32) { + counter->mhpmcounterh_prev = + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true); + } + + if (cidx > 2) { + mhpmctr_val = counter->mhpmcounter_val; if (riscv_cpu_mxl(env) == MXL_RV32) { - counter->mhpmcounterh_val = - riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true) - - counter->mhpmcounterh_prev + - counter->mhpmcounterh_val; + mhpmctr_val = mhpmctr_val | + ((uint64_t)counter->mhpmcounterh_val << 32); } + riscv_pmu_setup_timer(env, mhpmctr_val, cidx); + } + } else { + curr_count = riscv_pmu_ctr_get_fixed_counters_val(env, cidx, false); + + mhpmctr_val = counter->mhpmcounter_val; + prev_count = counter->mhpmcounter_prev; + if (riscv_cpu_mxl(env) == MXL_RV32) { + uint64_t tmp = + riscv_pmu_ctr_get_fixed_counters_val(env, cidx, true); + + curr_count = curr_count | (tmp << 32); + mhpmctr_val = mhpmctr_val | + ((uint64_t)counter->mhpmcounterh_val << 32); + prev_count = prev_count | + ((uint64_t)counter->mhpmcounterh_prev << 32); + } + + /* Adjust the counter for later reads. */ + mhpmctr_val = curr_count - prev_count + mhpmctr_val; + counter->mhpmcounter_val = mhpmctr_val; + if (riscv_cpu_mxl(env) == MXL_RV32) { + counter->mhpmcounterh_val = mhpmctr_val >> 32; } } } diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index ac648cff8d..63420d9f36 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -285,8 +285,7 @@ int riscv_pmu_incr_ctr(RISCVCPU *cpu, enum riscv_pmu_event_idx event_idx) } ctr_idx = GPOINTER_TO_UINT(value); - if (!riscv_pmu_counter_enabled(cpu, ctr_idx) || - get_field(env->mcountinhibit, BIT(ctr_idx))) { + if (!riscv_pmu_counter_enabled(cpu, ctr_idx)) { return -1; } -- cgit v1.2.3 From 74112400df659b0edb1da35db5c948313ffeffd0 Mon Sep 17 00:00:00 2001 From: Rajnesh Kanwal Date: Thu, 11 Jul 2024 15:31:14 -0700 Subject: target/riscv: More accurately model priv mode filtering. In case of programmable counters configured to count inst/cycles we often end-up with counter not incrementing at all from kernel's perspective. For example: - Kernel configures hpm3 to count instructions and sets hpmcounter to -10000 and all modes except U mode are inhibited. - In QEMU we configure a timer to expire after ~10000 instructions. - Problem is, it's often the case that kernel might not even schedule Umode task and we hit the timer callback in QEMU. - In the timer callback we inject the interrupt into kernel, kernel runs the handler and reads hpmcounter3 value. - Given QEMU maintains individual counters to count for each privilege mode, and given umode never ran, the umode counter didn't increment and QEMU returns same value as was programmed by the kernel when starting the counter. - Kernel checks for overflow using previous and current value of the counter and reprograms the counter given there wasn't an overflow as per the counter value. (Which itself is a problem. 
We have QEMU telling kernel that counter3 overflowed but the counter value returned by QEMU doesn't seem to reflect that.). This change makes sure that timer is reprogrammed from the handler if the counter didn't overflow based on the counter value. Second, this change makes sure that whenever the counter is read, it's value is updated to reflect the latest count. Signed-off-by: Rajnesh Kanwal Reviewed-by: Daniel Henrique Barboza Message-ID: <20240711-smcntrpmf_v7-v8-11-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/csr.c | 5 ++++- target/riscv/pmu.c | 30 +++++++++++++++++++++++++++--- target/riscv/pmu.h | 2 ++ 3 files changed, 33 insertions(+), 4 deletions(-) (limited to 'target') diff --git a/target/riscv/csr.c b/target/riscv/csr.c index bb6ac33ac2..781ef27eba 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -1039,6 +1039,9 @@ static target_ulong riscv_pmu_ctr_get_fixed_counters_val(CPURISCVState *env, goto done; } + /* Update counter before reading. */ + riscv_pmu_update_fixed_ctrs(env, env->priv, env->virt_enabled); + if (!(cfg_val & MCYCLECFG_BIT_MINH)) { curr_val += counter_arr[PRV_M]; } @@ -1122,7 +1125,7 @@ static RISCVException write_mhpmcounterh(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } -static RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, +RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, bool upper_half, uint32_t ctr_idx) { PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index 63420d9f36..a4729f6c53 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -425,6 +425,8 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu, target_ulong *mhpmevent_val; uint64_t of_bit_mask; int64_t irq_trigger_at; + uint64_t curr_ctr_val, curr_ctrh_val; + uint64_t ctr_val; if (evt_idx != RISCV_PMU_EVENT_HW_CPU_CYCLES && evt_idx != RISCV_PMU_EVENT_HW_INSTRUCTIONS) { @@ -454,6 +456,26 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu, return; } + riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctr_val, false, ctr_idx); + ctr_val = counter->mhpmcounter_val; + if (riscv_cpu_mxl(env) == MXL_RV32) { + riscv_pmu_read_ctr(env, (target_ulong *)&curr_ctrh_val, true, ctr_idx); + curr_ctr_val = curr_ctr_val | (curr_ctrh_val << 32); + ctr_val = ctr_val | + ((uint64_t)counter->mhpmcounterh_val << 32); + } + + /* + * We can not accommodate for inhibited modes when setting up timer. Check + * if the counter has actually overflowed or not by comparing current + * counter value (accommodated for inhibited modes) with software written + * counter value. 
+ */ + if (curr_ctr_val >= ctr_val) { + riscv_pmu_setup_timer(env, curr_ctr_val, ctr_idx); + return; + } + if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) { /* Generate interrupt only if OF bit is clear */ if (!(*mhpmevent_val & of_bit_mask)) { @@ -475,7 +497,7 @@ void riscv_pmu_timer_cb(void *priv) int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) { - uint64_t overflow_delta, overflow_at; + uint64_t overflow_delta, overflow_at, curr_ns; int64_t overflow_ns, overflow_left = 0; RISCVCPU *cpu = env_archcpu(env); PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; @@ -506,8 +528,10 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) } else { return -1; } - overflow_at = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + - overflow_ns; + curr_ns = (uint64_t)qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + overflow_at = curr_ns + overflow_ns; + if (overflow_at <= curr_ns) + overflow_at = UINT64_MAX; if (overflow_at > INT64_MAX) { overflow_left += overflow_at - INT64_MAX; diff --git a/target/riscv/pmu.h b/target/riscv/pmu.h index ca40cfeed6..3853d0e262 100644 --- a/target/riscv/pmu.h +++ b/target/riscv/pmu.h @@ -36,5 +36,7 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx); void riscv_pmu_update_fixed_ctrs(CPURISCVState *env, target_ulong newpriv, bool new_virt); +RISCVException riscv_pmu_read_ctr(CPURISCVState *env, target_ulong *val, + bool upper_half, uint32_t ctr_idx); #endif /* RISCV_PMU_H */ -- cgit v1.2.3 From dd4c123636dbdabd7dcc4f76c97af90271cd5fb6 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:15 -0700 Subject: target/riscv: Do not setup pmu timer if OF is disabled The timer setup function is invoked in both the hpmcounter write and the mcountinhibit write paths. If the OF bit is set, the LCOFI interrupt is disabled. There is no benefit in setting up the qemu timer until LCOFI is cleared to indicate that interrupts can be fired again.
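Concretely, the early-exit condition in riscv_pmu_setup_timer() gains an overflow-bit check; a condensed form of the gate (from the hunk below):

    /* No need to set up a timer when the OF bit is already set: no new
     * LCOFI will be raised until software clears it. */
    if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf ||
        pmu_hpmevent_is_of_set(env, ctr_idx)) {
        return -1;
    }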
Reviewed-by: Daniel Henrique Barboza Signed-off-by: Atish Patra Message-ID: <20240711-smcntrpmf_v7-v8-12-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/pmu.c | 56 ++++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 44 insertions(+), 12 deletions(-) (limited to 'target') diff --git a/target/riscv/pmu.c b/target/riscv/pmu.c index a4729f6c53..3cc0b3648c 100644 --- a/target/riscv/pmu.c +++ b/target/riscv/pmu.c @@ -416,14 +416,49 @@ int riscv_pmu_update_event_map(CPURISCVState *env, uint64_t value, return 0; } +static bool pmu_hpmevent_is_of_set(CPURISCVState *env, uint32_t ctr_idx) +{ + target_ulong mhpmevent_val; + uint64_t of_bit_mask; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmevent_val = env->mhpmeventh_val[ctr_idx]; + of_bit_mask = MHPMEVENTH_BIT_OF; + } else { + mhpmevent_val = env->mhpmevent_val[ctr_idx]; + of_bit_mask = MHPMEVENT_BIT_OF; + } + + return get_field(mhpmevent_val, of_bit_mask); +} + +static bool pmu_hpmevent_set_of_if_clear(CPURISCVState *env, uint32_t ctr_idx) +{ + target_ulong *mhpmevent_val; + uint64_t of_bit_mask; + + if (riscv_cpu_mxl(env) == MXL_RV32) { + mhpmevent_val = &env->mhpmeventh_val[ctr_idx]; + of_bit_mask = MHPMEVENTH_BIT_OF; + } else { + mhpmevent_val = &env->mhpmevent_val[ctr_idx]; + of_bit_mask = MHPMEVENT_BIT_OF; + } + + if (!get_field(*mhpmevent_val, of_bit_mask)) { + *mhpmevent_val |= of_bit_mask; + return true; + } + + return false; +} + static void pmu_timer_trigger_irq(RISCVCPU *cpu, enum riscv_pmu_event_idx evt_idx) { uint32_t ctr_idx; CPURISCVState *env = &cpu->env; PMUCTRState *counter; - target_ulong *mhpmevent_val; - uint64_t of_bit_mask; int64_t irq_trigger_at; uint64_t curr_ctr_val, curr_ctrh_val; uint64_t ctr_val; @@ -439,12 +474,9 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu, return; } - if (riscv_cpu_mxl(env) == MXL_RV32) { - mhpmevent_val = &env->mhpmeventh_val[ctr_idx]; - of_bit_mask = MHPMEVENTH_BIT_OF; - } else { - mhpmevent_val = &env->mhpmevent_val[ctr_idx]; - of_bit_mask = MHPMEVENT_BIT_OF; + /* Generate interrupt only if OF bit is clear */ + if (pmu_hpmevent_is_of_set(env, ctr_idx)) { + return; } counter = &env->pmu_ctrs[ctr_idx]; @@ -477,9 +509,7 @@ static void pmu_timer_trigger_irq(RISCVCPU *cpu, } if (cpu->pmu_avail_ctrs & BIT(ctr_idx)) { - /* Generate interrupt only if OF bit is clear */ - if (!(*mhpmevent_val & of_bit_mask)) { - *mhpmevent_val |= of_bit_mask; + if (pmu_hpmevent_set_of_if_clear(env, ctr_idx)) { riscv_cpu_update_mip(env, MIP_LCOFIP, BOOL_TO_MASK(1)); } } @@ -502,7 +532,9 @@ int riscv_pmu_setup_timer(CPURISCVState *env, uint64_t value, uint32_t ctr_idx) RISCVCPU *cpu = env_archcpu(env); PMUCTRState *counter = &env->pmu_ctrs[ctr_idx]; - if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf) { + /* No need to setup a timer if LCOFI is disabled when OF is set */ + if (!riscv_pmu_counter_valid(cpu, ctr_idx) || !cpu->cfg.ext_sscofpmf || + pmu_hpmevent_is_of_set(env, ctr_idx)) { return -1; } -- cgit v1.2.3 From 6f6592d62ebaffff353bdd27ec4480972695d24b Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 11 Jul 2024 15:31:16 -0700 Subject: target/riscv: Expose the Smcntrpmf config Create a new config for Smcntrpmf extension so that it can be enabled/ disabled from the qemu commandline. 
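Once exposed, the property can be toggled like any other extension flag, for example (assuming the generic rv64 CPU model):

    qemu-system-riscv64 -cpu rv64,smcntrpmf=on ...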
Signed-off-by: Atish Patra Acked-by: Alistair Francis Message-ID: <20240711-smcntrpmf_v7-v8-13-b7c38ae7b263@rivosinc.com> Signed-off-by: Alistair Francis --- target/riscv/cpu.c | 1 + 1 file changed, 1 insertion(+) (limited to 'target') diff --git a/target/riscv/cpu.c b/target/riscv/cpu.c index 4efe7ee3b0..a90808a3ba 100644 --- a/target/riscv/cpu.c +++ b/target/riscv/cpu.c @@ -1472,6 +1472,7 @@ const char *riscv_get_misa_ext_description(uint32_t bit) const RISCVCPUMultiExtConfig riscv_cpu_extensions[] = { /* Defaults for standard extensions */ MULTI_EXT_CFG_BOOL("sscofpmf", ext_sscofpmf, false), + MULTI_EXT_CFG_BOOL("smcntrpmf", ext_smcntrpmf, false), MULTI_EXT_CFG_BOOL("zifencei", ext_zifencei, true), MULTI_EXT_CFG_BOOL("zicsr", ext_zicsr, true), MULTI_EXT_CFG_BOOL("zihintntl", ext_zihintntl, true), -- cgit v1.2.3 From 38c83e8d3a333b8b377367756a2b6c700c7d0084 Mon Sep 17 00:00:00 2001 From: Yu-Ming Chang Date: Fri, 8 Mar 2024 15:48:03 +0800 Subject: target/riscv: raise an exception when CSRRS/CSRRC writes a read-only CSR Both CSRRS and CSRRC always read the addressed CSR and cause any read side effects regardless of rs1 and rd fields. Note that if rs1 specifies a register holding a zero value other than x0, the instruction will still attempt to write the unmodified value back to the CSR and will cause any attendant side effects. So if CSRRS or CSRRC tries to write a read-only CSR with rs1 which specifies a register holding a zero value, an illegal instruction exception should be raised. Signed-off-by: Yu-Ming Chang Signed-off-by: Alvin Chang Reviewed-by: Alistair Francis Message-ID: <172100444279.18077.6893072378718059541-0@git.sr.ht> Signed-off-by: Alistair Francis --- target/riscv/cpu.h | 4 ++++ target/riscv/csr.c | 57 +++++++++++++++++++++++++++++++++++++++++++----- target/riscv/op_helper.c | 6 ++--- 3 files changed, 58 insertions(+), 9 deletions(-) (limited to 'target') diff --git a/target/riscv/cpu.h b/target/riscv/cpu.h index 093c86b8b9..1619c3acb6 100644 --- a/target/riscv/cpu.h +++ b/target/riscv/cpu.h @@ -751,6 +751,8 @@ void cpu_get_tb_cpu_state(CPURISCVState *env, vaddr *pc, void riscv_cpu_update_mask(CPURISCVState *env); bool riscv_cpu_is_32bit(RISCVCPU *cpu); +RISCVException riscv_csrr(CPURISCVState *env, int csrno, + target_ulong *ret_value); RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask); @@ -783,6 +785,8 @@ typedef RISCVException (*riscv_csr_op_fn)(CPURISCVState *env, int csrno, target_ulong new_value, target_ulong write_mask); +RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, + Int128 *ret_value); RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, Int128 *ret_value, Int128 new_value, Int128 write_mask); diff --git a/target/riscv/csr.c b/target/riscv/csr.c index 781ef27eba..ea3560342c 100644 --- a/target/riscv/csr.c +++ b/target/riscv/csr.c @@ -4624,7 +4624,7 @@ static RISCVException rmw_seed(CPURISCVState *env, int csrno, static inline RISCVException riscv_csrrw_check(CPURISCVState *env, int csrno, - bool write_mask) + bool write) { /* check privileges and return RISCV_EXCP_ILLEGAL_INST if check fails */ bool read_only = get_field(csrno, 0xC00) == 3; @@ -4646,7 +4646,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env, } /* read / write check */ - if (write_mask && read_only) { + if (write && read_only) { return RISCV_EXCP_ILLEGAL_INST; } @@ -4733,11 +4733,22 @@ static RISCVException riscv_csrrw_do64(CPURISCVState *env, int csrno, return 
RISCV_EXCP_NONE; } +RISCVException riscv_csrr(CPURISCVState *env, int csrno, + target_ulong *ret_value) +{ + RISCVException ret = riscv_csrrw_check(env, csrno, false); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + return riscv_csrrw_do64(env, csrno, ret_value, 0, 0); +} + RISCVException riscv_csrrw(CPURISCVState *env, int csrno, target_ulong *ret_value, target_ulong new_value, target_ulong write_mask) { - RISCVException ret = riscv_csrrw_check(env, csrno, write_mask); + RISCVException ret = riscv_csrrw_check(env, csrno, true); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -4785,13 +4796,45 @@ static RISCVException riscv_csrrw_do128(CPURISCVState *env, int csrno, return RISCV_EXCP_NONE; } +RISCVException riscv_csrr_i128(CPURISCVState *env, int csrno, + Int128 *ret_value) +{ + RISCVException ret; + + ret = riscv_csrrw_check(env, csrno, false); + if (ret != RISCV_EXCP_NONE) { + return ret; + } + + if (csr_ops[csrno].read128) { + return riscv_csrrw_do128(env, csrno, ret_value, + int128_zero(), int128_zero()); + } + + /* + * Fall back to 64-bit version for now, if the 128-bit alternative isn't + * at all defined. + * Note, some CSRs don't need to extend to MXLEN (64 upper bits non + * significant), for those, this fallback is correctly handling the + * accesses + */ + target_ulong old_value; + ret = riscv_csrrw_do64(env, csrno, &old_value, + (target_ulong)0, + (target_ulong)0); + if (ret == RISCV_EXCP_NONE && ret_value) { + *ret_value = int128_make64(old_value); + } + return ret; +} + RISCVException riscv_csrrw_i128(CPURISCVState *env, int csrno, Int128 *ret_value, Int128 new_value, Int128 write_mask) { RISCVException ret; - ret = riscv_csrrw_check(env, csrno, int128_nz(write_mask)); + ret = riscv_csrrw_check(env, csrno, true); if (ret != RISCV_EXCP_NONE) { return ret; } @@ -4830,7 +4873,11 @@ RISCVException riscv_csrrw_debug(CPURISCVState *env, int csrno, #if !defined(CONFIG_USER_ONLY) env->debugger = true; #endif - ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); + if (!write_mask) { + ret = riscv_csrr(env, csrno, ret_value); + } else { + ret = riscv_csrrw(env, csrno, ret_value, new_value, write_mask); + } #if !defined(CONFIG_USER_ONLY) env->debugger = false; #endif diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index ec1408ba0f..25a5263573 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -51,7 +51,7 @@ target_ulong helper_csrr(CPURISCVState *env, int csr) } target_ulong val = 0; - RISCVException ret = riscv_csrrw(env, csr, &val, 0, 0); + RISCVException ret = riscv_csrr(env, csr, &val); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); @@ -84,9 +84,7 @@ target_ulong helper_csrrw(CPURISCVState *env, int csr, target_ulong helper_csrr_i128(CPURISCVState *env, int csr) { Int128 rv = int128_zero(); - RISCVException ret = riscv_csrrw_i128(env, csr, &rv, - int128_zero(), - int128_zero()); + RISCVException ret = riscv_csrr_i128(env, csr, &rv); if (ret != RISCV_EXCP_NONE) { riscv_raise_exception(env, ret, GETPC()); -- cgit v1.2.3
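As an illustration of the rule the CSRRS/CSRRC change above enforces, the write decision depends on the rs1 register index, not on the value that register holds (pseudo-code only; csr_is_read_only() and raise_illegal_instruction() are hypothetical helpers, not QEMU functions):

    bool is_write = (rs1_index != 0);   /* CSRRS/CSRRC write iff rs1 is not x0 */
    if (is_write && csr_is_read_only(csrno)) {
        /* e.g. csrrs t0, mvendorid, t1 traps even when t1 holds zero,
         * while csrrs t0, mvendorid, x0 is a plain read and succeeds */
        raise_illegal_instruction();
    }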