author    LIU Zhiwei <zhiwei_liu@c-sky.com>          2020-07-01 23:24:57 +0800
committer Alistair Francis <alistair.francis@wdc.com>  2020-07-02 09:19:33 -0700
commit    268fcca66bde62257960ec8d859de374315a5e3d (patch)
tree      95386744c36b65e491c5e6a828546332f6154e7b /target/riscv/vector_helper.c
parent    022b4ecf775ffeff522eaea4f0d94edcfe00a0a9 (diff)
target/riscv: add vector amo operations
Vector AMOs operate as if aq and rl bits were zero on each element with
regard to ordering relative to other instructions in the same hart.
Vector AMOs provide no ordering guarantee between element operations in
the same vector AMO instruction.

Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200701152549.1218-10-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
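For context, a minimal sketch (not QEMU code) of the per-element behaviour the
patch implements below, assuming a single 32-bit vamoadd element and hypothetical
names: the memory value is loaded, combined with the register operand, stored
back, and the old memory value is written to the destination element only when
the wd bit is set.

    #include <stdint.h>

    /* Hypothetical model of one vamoaddw element; wd mirrors the vext_wd bit. */
    static int32_t vamoadd_element(int32_t *mem, int32_t *vd_elem, int wd)
    {
        int32_t old = *mem;        /* read the addressed memory word */
        *mem = old + *vd_elem;     /* store old value + register operand */
        if (wd) {
            *vd_elem = old;        /* write the old memory value back to vd */
        }
        return old;
    }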
Diffstat (limited to 'target/riscv/vector_helper.c')
-rw-r--r--  target/riscv/vector_helper.c | 147
1 file changed, 147 insertions(+), 0 deletions(-)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index cbe87265a1..7c3b2bfd12 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -95,6 +95,11 @@ static inline uint32_t vext_lmul(uint32_t desc)
return FIELD_EX32(simd_data(desc), VDATA, LMUL);
}
+static uint32_t vext_wd(uint32_t desc)
+{
+ return (simd_data(desc) >> 11) & 0x1;
+}
+
/*
* Get vector group length in bytes. Its range is [64, 2048].
*
@@ -687,3 +692,145 @@ GEN_VEXT_LDFF(vlhuff_v_w, uint16_t, uint32_t, ldhu_w, clearl)
GEN_VEXT_LDFF(vlhuff_v_d, uint16_t, uint64_t, ldhu_d, clearq)
GEN_VEXT_LDFF(vlwuff_v_w, uint32_t, uint32_t, ldwu_w, clearl)
GEN_VEXT_LDFF(vlwuff_v_d, uint32_t, uint64_t, ldwu_d, clearq)
+
+/*
+ *** Vector AMO Operations (Zvamo)
+ */
+typedef void vext_amo_noatomic_fn(void *vs3, target_ulong addr,
+ uint32_t wd, uint32_t idx, CPURISCVState *env,
+ uintptr_t retaddr);
+
+/* no atomic operation for vector atomic instructions */
+#define DO_SWAP(N, M) (M)
+#define DO_AND(N, M) (N & M)
+#define DO_XOR(N, M) (N ^ M)
+#define DO_OR(N, M) (N | M)
+#define DO_ADD(N, M) (N + M)
+
+#define GEN_VEXT_AMO_NOATOMIC_OP(NAME, ESZ, MSZ, H, DO_OP, SUF) \
+static void \
+vext_##NAME##_noatomic_op(void *vs3, target_ulong addr, \
+ uint32_t wd, uint32_t idx, \
+ CPURISCVState *env, uintptr_t retaddr)\
+{ \
+ typedef int##ESZ##_t ETYPE; \
+ typedef int##MSZ##_t MTYPE; \
+ typedef uint##MSZ##_t UMTYPE __attribute__((unused)); \
+ ETYPE *pe3 = (ETYPE *)vs3 + H(idx); \
+ MTYPE a = cpu_ld##SUF##_data(env, addr), b = *pe3; \
+ \
+ cpu_st##SUF##_data(env, addr, DO_OP(a, b)); \
+ if (wd) { \
+ *pe3 = a; \
+ } \
+}
+
+/* Signed min/max */
+#define DO_MAX(N, M) ((N) >= (M) ? (N) : (M))
+#define DO_MIN(N, M) ((N) >= (M) ? (M) : (N))
+
+/* Unsigned min/max */
+#define DO_MAXU(N, M) DO_MAX((UMTYPE)N, (UMTYPE)M)
+#define DO_MINU(N, M) DO_MIN((UMTYPE)N, (UMTYPE)M)
+
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_w, 32, 32, H4, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_w, 32, 32, H4, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_w, 32, 32, H4, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_w, 32, 32, H4, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_w, 32, 32, H4, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_w, 32, 32, H4, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_w, 32, 32, H4, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_w, 32, 32, H4, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_w, 32, 32, H4, DO_MAXU, l)
+#ifdef TARGET_RISCV64
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapw_v_d, 64, 32, H8, DO_SWAP, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoswapd_v_d, 64, 64, H8, DO_SWAP, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddw_v_d, 64, 32, H8, DO_ADD, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoaddd_v_d, 64, 64, H8, DO_ADD, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxorw_v_d, 64, 32, H8, DO_XOR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoxord_v_d, 64, 64, H8, DO_XOR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandw_v_d, 64, 32, H8, DO_AND, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoandd_v_d, 64, 64, H8, DO_AND, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoorw_v_d, 64, 32, H8, DO_OR, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamoord_v_d, 64, 64, H8, DO_OR, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominw_v_d, 64, 32, H8, DO_MIN, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomind_v_d, 64, 64, H8, DO_MIN, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxw_v_d, 64, 32, H8, DO_MAX, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxd_v_d, 64, 64, H8, DO_MAX, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominuw_v_d, 64, 32, H8, DO_MINU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamominud_v_d, 64, 64, H8, DO_MINU, q)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxuw_v_d, 64, 32, H8, DO_MAXU, l)
+GEN_VEXT_AMO_NOATOMIC_OP(vamomaxud_v_d, 64, 64, H8, DO_MAXU, q)
+#endif
+
+static inline void
+vext_amo_noatomic(void *vs3, void *v0, target_ulong base,
+ void *vs2, CPURISCVState *env, uint32_t desc,
+ vext_get_index_addr get_index_addr,
+ vext_amo_noatomic_fn *noatomic_op,
+ clear_fn *clear_elem,
+ uint32_t esz, uint32_t msz, uintptr_t ra)
+{
+ uint32_t i;
+ target_long addr;
+ uint32_t wd = vext_wd(desc);
+ uint32_t vm = vext_vm(desc);
+ uint32_t mlen = vext_mlen(desc);
+ uint32_t vlmax = vext_maxsz(desc) / esz;
+
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_LOAD);
+ probe_pages(env, get_index_addr(base, i, vs2), msz, ra, MMU_DATA_STORE);
+ }
+ for (i = 0; i < env->vl; i++) {
+ if (!vm && !vext_elem_mask(v0, mlen, i)) {
+ continue;
+ }
+ addr = get_index_addr(base, i, vs2);
+ noatomic_op(vs3, addr, wd, i, env, ra);
+ }
+ clear_elem(vs3, env->vl, env->vl * esz, vlmax * esz);
+}
+
+#define GEN_VEXT_AMO(NAME, MTYPE, ETYPE, INDEX_FN, CLEAR_FN) \
+void HELPER(NAME)(void *vs3, void *v0, target_ulong base, \
+ void *vs2, CPURISCVState *env, uint32_t desc) \
+{ \
+ vext_amo_noatomic(vs3, v0, base, vs2, env, desc, \
+ INDEX_FN, vext_##NAME##_noatomic_op, \
+ CLEAR_FN, sizeof(ETYPE), sizeof(MTYPE), \
+ GETPC()); \
+}
+
+#ifdef TARGET_RISCV64
+GEN_VEXT_AMO(vamoswapw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoswapd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoaddw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoaddd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoxorw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoxord_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoandw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoandd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoorw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamoord_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomind_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxw_v_d, int32_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxd_v_d, int64_t, int64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamominud_v_d, uint64_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxuw_v_d, uint32_t, uint64_t, idx_d, clearq)
+GEN_VEXT_AMO(vamomaxud_v_d, uint64_t, uint64_t, idx_d, clearq)
+#endif
+GEN_VEXT_AMO(vamoswapw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoxorw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoandw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamoorw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamominw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamomaxw_v_w, int32_t, int32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamominuw_v_w, uint32_t, uint32_t, idx_w, clearl)
+GEN_VEXT_AMO(vamomaxuw_v_w, uint32_t, uint32_t, idx_w, clearl)
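
For reference, each GEN_VEXT_AMO line above stamps out one helper entry point.
As an illustration (not part of the patch itself),
GEN_VEXT_AMO(vamoaddw_v_w, int32_t, int32_t, idx_w, clearl) expands to roughly:

    void HELPER(vamoaddw_v_w)(void *vs3, void *v0, target_ulong base,
                              void *vs2, CPURISCVState *env, uint32_t desc)
    {
        /* per-element non-atomic AMO over the indexed addresses */
        vext_amo_noatomic(vs3, v0, base, vs2, env, desc,
                          idx_w, vext_vamoaddw_v_w_noatomic_op,
                          clearl, sizeof(int32_t), sizeof(int32_t),
                          GETPC());
    }

where idx_w computes the indexed element address from vs2 and clearl clears the
tail elements past vl (both defined earlier in this file).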