author     Peter Maydell <peter.maydell@linaro.org>  2021-08-13 17:11:53 +0100
committer  Peter Maydell <peter.maydell@linaro.org>  2021-08-25 10:48:49 +0100
commit     7f061c0ab9289cb0ed55eaf09bec1b6cb474e6ee (patch)
tree       1607e36c8172bba1379b6c6c994903aa567503ed /target
parent     688ba4cf33f4976e26124c4c24e9eb738615b0bf (diff)
target/arm: Implement MVE VABAV

Implement the MVE VABAV insn, which computes absolute differences
between elements of two vectors and accumulates the result into
a general purpose register.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
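The operation is simple to state in scalar C. A minimal reference model
(illustrative only, not QEMU code; shown for the 16-bit signed case and
ignoring MVE predication):

    #include <stdint.h>

    /* Illustrative reference model of VABAV.S16: accumulate the absolute
     * differences of corresponding elements of two 8-element vectors
     * into the general-purpose accumulator Rda.
     */
    static uint32_t vabav_s16_ref(const int16_t *n, const int16_t *m,
                                  uint32_t rda)
    {
        for (int e = 0; e < 8; e++) {          /* 16 bytes / 2 bytes per elem */
            int64_t d = (int64_t)n[e] - m[e];  /* widen first: |d| can exceed
                                                  the element range */
            rda += (uint32_t)(d >= 0 ? d : -d);
        }
        return rda;                            /* wraps modulo 2^32 */
    }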
Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper-mve.h      7
-rw-r--r--  target/arm/mve.decode        6
-rw-r--r--  target/arm/mve_helper.c     26
-rw-r--r--  target/arm/translate-mve.c  43
4 files changed, 82 insertions(+), 0 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 2c66fcba79..c7e7aab2cb 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -402,6 +402,13 @@ DEF_HELPER_FLAGS_3(mve_vminavw, TCG_CALL_NO_WG, i32, env, ptr, i32)
DEF_HELPER_FLAGS_3(mve_vaddlv_s, TCG_CALL_NO_WG, i64, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vaddlv_u, TCG_CALL_NO_WG, i64, env, ptr, i64)
+DEF_HELPER_FLAGS_4(mve_vabavsb, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavsh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavsw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavub, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavuh, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vabavuw, TCG_CALL_NO_WG, i32, env, ptr, ptr, i32)
+
DEF_HELPER_FLAGS_3(mve_vmovi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vandi, TCG_CALL_NO_WG, void, env, ptr, i64)
DEF_HELPER_FLAGS_3(mve_vorri, TCG_CALL_NO_WG, void, env, ptr, i64)
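Each of the new DEF_HELPER_FLAGS_4 lines declares a four-argument helper
callable from generated TCG code. A sketch of what QEMU's helper machinery
produces for the first one, assuming the usual type mapping (i32 ->
uint32_t, env -> CPUARMState *, ptr -> void *; the parameter names here
are illustrative):

    /* Approximate expansion of the vabavsb declaration above: returns i32,
     * takes env plus pointers to the Qn and Qm registers and the i32 Rda
     * input. TCG_CALL_NO_WG tells TCG the helper does not write TCG
     * globals, so cached global values need not be discarded at the call.
     */
    uint32_t helper_mve_vabavsb(CPUARMState *env, void *vn, void *vm,
                                uint32_t ra);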
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 83dc0300d6..c8a06edca7 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -41,6 +41,7 @@
&vcmp_scalar qn rm size mask
&shl_scalar qda rm size
&vmaxv qm rda size
+&vabav qn qm rda size
@vldr_vstr ....... . . . . l:1 rn:4 ... ...... imm:7 &vldr_vstr qd=%qd u=0
# Note that both Rn and Qd are 3 bits only (no D bit)
@@ -386,6 +387,11 @@ VMLAS 111- 1110 0 . .. ... 1 ... 1 1110 . 100 .... @2scalar
rdahi=%rdahi rdalo=%rdalo
}
+@vabav .... .... .. size:2 .... rda:4 .... .... .... &vabav qn=%qn qm=%qm
+
+VABAV_S 111 0 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
+VABAV_U 111 1 1110 10 .. ... 0 .... 1111 . 0 . 0 ... 1 @vabav
+
# Logical immediate operations (1 reg and modified-immediate)
# The cmode/op bits here decode VORR/VBIC/VMOV/VMVN, but
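The @vabav format line pins size to bits [21:20] and rda to bits [15:12]
of the instruction word; qn and qm come from the %qn/%qm field macros
defined earlier in mve.decode. A sketch of the equivalent manual
extraction (the %qn/%qm positions are assumptions, based on the usual MVE
layout of Qn in insn[19:17] and Qm in insn[3:1]):

    /* Field extraction equivalent to the @vabav format (sketch only;
     * qn/qm bit positions assumed, see above).
     */
    static void vabav_fields(uint32_t insn, unsigned *size, unsigned *rda,
                             unsigned *qn, unsigned *qm)
    {
        *size = (insn >> 20) & 0x3;   /* size:2 at [21:20] */
        *rda  = (insn >> 12) & 0xf;   /* rda:4  at [15:12] */
        *qn   = (insn >> 17) & 0x7;   /* assumed %qn field */
        *qm   = (insn >> 1)  & 0x7;   /* assumed %qm field */
    }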
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 924ad7f2bd..fed0f3cd61 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1320,6 +1320,32 @@ DO_VMAXMINV(vminavb, 1, int8_t, uint8_t, do_mina)
DO_VMAXMINV(vminavh, 2, int16_t, uint16_t, do_mina)
DO_VMAXMINV(vminavw, 4, int32_t, uint32_t, do_mina)
+#define DO_VABAV(OP, ESIZE, TYPE) \
+ uint32_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \
+ void *vm, uint32_t ra) \
+ { \
+ uint16_t mask = mve_element_mask(env); \
+ unsigned e; \
+ TYPE *m = vm, *n = vn; \
+ for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \
+ if (mask & 1) { \
+ int64_t n0 = n[H##ESIZE(e)]; \
+ int64_t m0 = m[H##ESIZE(e)]; \
+ uint32_t r = n0 >= m0 ? (n0 - m0) : (m0 - n0); \
+ ra += r; \
+ } \
+ } \
+ mve_advance_vpt(env); \
+ return ra; \
+ }
+
+DO_VABAV(vabavsb, 1, int8_t)
+DO_VABAV(vabavsh, 2, int16_t)
+DO_VABAV(vabavsw, 4, int32_t)
+DO_VABAV(vabavub, 1, uint8_t)
+DO_VABAV(vabavuh, 2, uint16_t)
+DO_VABAV(vabavuw, 4, uint32_t)
+
#define DO_VADDLV(OP, TYPE, LTYPE) \
uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vm, \
uint64_t ra) \
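The mask handling in DO_VABAV follows the other MVE helpers:
mve_element_mask() yields one predicate bit per vector byte, so the loop
shifts the mask right by ESIZE per element and an element contributes only
when the low bit of its lane is set. Note also that both operands are
widened to int64_t before subtracting, so the signed difference cannot
overflow. A self-contained demonstration of that loop shape (plain C,
hypothetical mask value, no QEMU dependencies):

    #include <stdint.h>
    #include <stdio.h>

    /* DO_VABAV loop shape for 16-bit elements: one mask bit per byte,
     * shift by ESIZE (2) per element, accumulate only active elements.
     */
    int main(void)
    {
        int16_t n[8] = {10, -5, 7, 0, 100, -100, 3, 3};
        int16_t m[8] = { 4,  5, 9, 0, -28,   72, 3, 2};
        uint16_t mask = 0x00ff;   /* hypothetical: elements 0..3 active */
        uint32_t ra = 0;

        for (unsigned e = 0; e < 8; e++, mask >>= 2) {
            if (mask & 1) {
                int64_t n0 = n[e], m0 = m[e];
                ra += (uint32_t)(n0 >= m0 ? n0 - m0 : m0 - n0);
            }
        }
        printf("ra = %u\n", ra);  /* |10-4| + |-5-5| + |7-9| + |0-0| = 18 */
        return 0;
    }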
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 2fce74f86a..247f6719e6 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -45,6 +45,7 @@ typedef void MVEGenVIDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void MVEGenVIWDUPFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_i32, TCGv_i32, TCGv_i32);
typedef void MVEGenCmpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void MVEGenScalarCmpFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
+typedef void MVEGenVABAVFn(TCGv_i32, TCGv_ptr, TCGv_ptr, TCGv_ptr, TCGv_i32);
/* Return the offset of a Qn register (same semantics as aa32_vfp_qreg()) */
static inline long mve_qreg_offset(unsigned reg)
@@ -1369,3 +1370,45 @@ DO_VMAXV(VMAXAV, vmaxav)
DO_VMAXV(VMINV_S, vminvs)
DO_VMAXV(VMINV_U, vminvu)
DO_VMAXV(VMINAV, vminav)
+
+static bool do_vabav(DisasContext *s, arg_vabav *a, MVEGenVABAVFn *fn)
+{
+ /* Absolute difference accumulated across vector */
+ TCGv_ptr qn, qm;
+ TCGv_i32 rda;
+
+ if (!dc_isar_feature(aa32_mve, s) ||
+ !mve_check_qreg_bank(s, a->qm | a->qn) ||
+ !fn || a->rda == 13 || a->rda == 15) {
+ /* Rda cases are UNPREDICTABLE */
+ return false;
+ }
+ if (!mve_eci_check(s) || !vfp_access_check(s)) {
+ return true;
+ }
+
+ qm = mve_qreg_ptr(a->qm);
+ qn = mve_qreg_ptr(a->qn);
+ rda = load_reg(s, a->rda);
+ fn(rda, cpu_env, qn, qm, rda);
+ store_reg(s, a->rda, rda);
+ tcg_temp_free_ptr(qm);
+ tcg_temp_free_ptr(qn);
+ mve_update_eci(s);
+ return true;
+}
+
+#define DO_VABAV(INSN, FN) \
+ static bool trans_##INSN(DisasContext *s, arg_vabav *a) \
+ { \
+ static MVEGenVABAVFn * const fns[] = { \
+ gen_helper_mve_##FN##b, \
+ gen_helper_mve_##FN##h, \
+ gen_helper_mve_##FN##w, \
+ NULL, \
+ }; \
+ return do_vabav(s, a, fns[a->size]); \
+ }
+
+DO_VABAV(VABAV_S, vabavs)
+DO_VABAV(VABAV_U, vabavu)
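Each DO_VABAV instantiation builds a per-size helper table and a trans_
function; the NULL entry means do_vabav() returns false for size==3, so
that unallocated encoding is rejected. Expanding DO_VABAV(VABAV_S, vabavs)
by hand gives roughly:

    /* Approximate expansion of DO_VABAV(VABAV_S, vabavs). */
    static bool trans_VABAV_S(DisasContext *s, arg_vabav *a)
    {
        static MVEGenVABAVFn * const fns[] = {
            gen_helper_mve_vabavsb,   /* size 0: 8-bit elements  */
            gen_helper_mve_vabavsh,   /* size 1: 16-bit elements */
            gen_helper_mve_vabavsw,   /* size 2: 32-bit elements */
            NULL,                     /* size 3: unallocated     */
        };
        return do_vabav(s, a, fns[a->size]);
    }

Note that do_vabav() passes rda as both the helper's output and its final
input operand, so the helper sees the old accumulator value and the result
lands back in the same TCG temporary before store_reg() writes it to Rda.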