author     Peter Maydell <peter.maydell@linaro.org>   2021-09-01 09:02:35 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2021-09-01 11:08:16 +0100
commit     d3cd965c846bb350637090d2d11bc578b79f87cd
tree       08765434534c60ad15838154ff81c9678a21d145 /target
parent     3173c0dd933cbd80578bde6aa116f8f519174a2e
target/arm: Implement MVE VCMUL and VCMLA
Implement the MVE VCMUL and VCMLA insns.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
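
Editor's illustration (not part of the patch): the sketch below models the per-pair
arithmetic selected by the ROT parameter of the new DO_VCMLA helper macro, using plain
C floats and omitting the predication and softfloat-status handling the real helper
does. The function and variable names (vcmla_pair, n_re, ...) are invented for this
sketch and are not QEMU identifiers.

/*
 * Toy model of one (real, imaginary) element pair as processed by
 * DO_VCMLA.  rot is 0..3 for rotations of 0, 90, 180, 270 degrees,
 * mirroring the helper's ROT switch.
 */
#include <stdio.h>

static void vcmla_pair(float *d_re, float *d_im,
                       float n_re, float n_im,
                       float m_re, float m_im, int rot)
{
    float e1 = 0, e2 = 0, e3 = 0, e4 = 0;

    switch (rot) {
    case 0:
        e1 = m_re;  e2 = n_re;  e3 = m_im;  e4 = n_re;
        break;
    case 1:
        e1 = -m_im; e2 = n_im;  e3 = m_re;  e4 = n_im;
        break;
    case 2:
        e1 = -m_re; e2 = n_re;  e3 = -m_im; e4 = n_re;
        break;
    case 3:
        e1 = m_im;  e2 = n_im;  e3 = -m_re; e4 = n_im;
        break;
    }

    /*
     * VCMLA accumulates into the destination; VCMUL is the same
     * selection but without the "+ *d_xx" terms (cf. DO_VCMULH vs
     * DO_VCMLAH in the patch).
     */
    *d_re = e2 * e1 + *d_re;
    *d_im = e4 * e3 + *d_im;
}

int main(void)
{
    float re = 0.0f, im = 0.0f;

    /* One element pair, rot = 1 (90 degrees): n = 1 + 2i, m = 3 + 4i */
    vcmla_pair(&re, &im, 1.0f, 2.0f, 3.0f, 4.0f, 1);
    printf("%g %+gi\n", re, im);
    return 0;
}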
Diffstat (limited to 'target')
-rw-r--r--  target/arm/helper-mve.h     | 18
-rw-r--r--  target/arm/mve.decode       | 35
-rw-r--r--  target/arm/mve_helper.c     | 86
-rw-r--r--  target/arm/translate-mve.c  |  8
4 files changed, 139 insertions(+), 8 deletions(-)
diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index c230610d25..73950403bc 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -440,6 +440,24 @@ DEF_HELPER_FLAGS_4(mve_vfmas, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vfmsh, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 DEF_HELPER_FLAGS_4(mve_vfmss, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
 
+DEF_HELPER_FLAGS_4(mve_vcmul0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul180h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmul270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
+DEF_HELPER_FLAGS_4(mve_vcmla0h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla0s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla90h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla90s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla180h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla180s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla270h, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+DEF_HELPER_FLAGS_4(mve_vcmla270s, TCG_CALL_NO_WG, void, env, ptr, ptr, ptr)
+
 DEF_HELPER_FLAGS_4(mve_vadd_scalarb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vadd_scalarw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 3a2056f6b3..403381eef6 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -286,15 +286,29 @@ VQSHL_U          111 1 1111 0 . .. ... 0 ... 0 0100 . 1 . 1 ... 0 @2op_rev
 VQRSHL_S         111 0 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
 VQRSHL_U         111 1 1111 0 . .. ... 0 ... 0 0101 . 1 . 1 ... 0 @2op_rev
 
-VQDMLADH         1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
-VQDMLADHX        1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
-VQRDMLADH        1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
-VQRDMLADHX       1110 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+{
+  VCMUL0         111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 0 @2op_sz28
+  VQDMLADH       1110 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+  VQDMLSDH       1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
+}
 
-VQDMLSDH         1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 0 @2op
-VQDMLSDHX        1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
-VQRDMLSDH        1111 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
-VQRDMLSDHX       1111 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+{
+  VCMUL180       111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 0 @2op_sz28
+  VQDMLADHX      111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+  VQDMLSDHX      111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 0 @2op
+}
+
+{
+  VCMUL90        111 . 1110 0 . 11 ... 0 ... 0 1110 . 0 . 0 ... 1 @2op_sz28
+  VQRDMLADH      111 0 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+  VQRDMLSDH      111 1 1110 0 . .. ... 0 ... 0 1110 . 0 . 0 ... 1 @2op
+}
+
+{
+  VCMUL270       111 . 1110 0 . 11 ... 0 ... 1 1110 . 0 . 0 ... 1 @2op_sz28
+  VQRDMLADHX     111 0 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+  VQRDMLSDHX     111 1 1110 0 . .. ... 0 ... 1 1110 . 0 . 0 ... 1 @2op
+}
 
 VQDMULLB         111 . 1110 0 . 11 ... 0 ... 0 1111 . 0 . 0 ... 1 @2op_sz28
 VQDMULLT         111 . 1110 0 . 11 ... 0 ... 1 1111 . 0 . 0 ... 1 @2op_sz28
@@ -642,3 +656,8 @@ VCADD270_fp      1111 1101 1 . 0 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
 
 VFMA             1110 1111 0 . 0 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp
 VFMS             1110 1111 0 . 1 . ... 0 ... 0 1100 . 1 . 1 ... 0 @2op_fp
+
+VCMLA0           1111 110 00 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA90          1111 110 01 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA180         1111 110 10 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
+VCMLA270         1111 110 11 . 1 . ... 0 ... 0 1000 . 1 . 0 ... 0 @2op_fp_size_rev
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index d7f250a445..e478408fdd 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -2931,3 +2931,89 @@ DO_VFMA(vfmah, 2, float16, false)
 DO_VFMA(vfmas, 4, float32, false)
 DO_VFMA(vfmsh, 2, float16, true)
 DO_VFMA(vfmss, 4, float32, true)
+
+#define DO_VCMLA(OP, ESIZE, TYPE, ROT, FN)                              \
+    void HELPER(glue(mve_, OP))(CPUARMState *env,                       \
+                                void *vd, void *vn, void *vm)           \
+    {                                                                   \
+        TYPE *d = vd, *n = vn, *m = vm;                                 \
+        TYPE r0, r1, e1, e2, e3, e4;                                    \
+        uint16_t mask = mve_element_mask(env);                          \
+        unsigned e;                                                     \
+        float_status *fpst0, *fpst1;                                    \
+        float_status scratch_fpst;                                      \
+        /* We loop through pairs of elements at a time */               \
+        for (e = 0; e < 16 / ESIZE; e += 2, mask >>= ESIZE * 2) {       \
+            if ((mask & MAKE_64BIT_MASK(0, ESIZE * 2)) == 0) {          \
+                continue;                                               \
+            }                                                           \
+            fpst0 = (ESIZE == 2) ? &env->vfp.standard_fp_status_f16 :   \
+                &env->vfp.standard_fp_status;                           \
+            fpst1 = fpst0;                                              \
+            if (!(mask & 1)) {                                          \
+                scratch_fpst = *fpst0;                                  \
+                fpst0 = &scratch_fpst;                                  \
+            }                                                           \
+            if (!(mask & (1 << ESIZE))) {                               \
+                scratch_fpst = *fpst1;                                  \
+                fpst1 = &scratch_fpst;                                  \
+            }                                                           \
+            switch (ROT) {                                              \
+            case 0:                                                     \
+                e1 = m[H##ESIZE(e)];                                    \
+                e2 = n[H##ESIZE(e)];                                    \
+                e3 = m[H##ESIZE(e + 1)];                                \
+                e4 = n[H##ESIZE(e)];                                    \
+                break;                                                  \
+            case 1:                                                     \
+                e1 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
+                e2 = n[H##ESIZE(e + 1)];                                \
+                e3 = m[H##ESIZE(e)];                                    \
+                e4 = n[H##ESIZE(e + 1)];                                \
+                break;                                                  \
+            case 2:                                                     \
+                e1 = TYPE##_chs(m[H##ESIZE(e)]);                        \
+                e2 = n[H##ESIZE(e)];                                    \
+                e3 = TYPE##_chs(m[H##ESIZE(e + 1)]);                    \
+                e4 = n[H##ESIZE(e)];                                    \
+                break;                                                  \
+            case 3:                                                     \
+                e1 = m[H##ESIZE(e + 1)];                                \
+                e2 = n[H##ESIZE(e + 1)];                                \
+                e3 = TYPE##_chs(m[H##ESIZE(e)]);                        \
+                e4 = n[H##ESIZE(e + 1)];                                \
+                break;                                                  \
+            default:                                                    \
+                g_assert_not_reached();                                 \
+            }                                                           \
+            r0 = FN(e2, e1, d[H##ESIZE(e)], fpst0);                     \
+            r1 = FN(e4, e3, d[H##ESIZE(e + 1)], fpst1);                 \
+            mergemask(&d[H##ESIZE(e)], r0, mask);                       \
+            mergemask(&d[H##ESIZE(e + 1)], r1, mask >> ESIZE);          \
+        }                                                               \
+        mve_advance_vpt(env);                                           \
+    }
+
+#define DO_VCMULH(N, M, D, S) float16_mul(N, M, S)
+#define DO_VCMULS(N, M, D, S) float32_mul(N, M, S)
+
+#define DO_VCMLAH(N, M, D, S) float16_muladd(N, M, D, 0, S)
+#define DO_VCMLAS(N, M, D, S) float32_muladd(N, M, D, 0, S)
+
+DO_VCMLA(vcmul0h, 2, float16, 0, DO_VCMULH)
+DO_VCMLA(vcmul0s, 4, float32, 0, DO_VCMULS)
+DO_VCMLA(vcmul90h, 2, float16, 1, DO_VCMULH)
+DO_VCMLA(vcmul90s, 4, float32, 1, DO_VCMULS)
+DO_VCMLA(vcmul180h, 2, float16, 2, DO_VCMULH)
+DO_VCMLA(vcmul180s, 4, float32, 2, DO_VCMULS)
+DO_VCMLA(vcmul270h, 2, float16, 3, DO_VCMULH)
+DO_VCMLA(vcmul270s, 4, float32, 3, DO_VCMULS)
+
+DO_VCMLA(vcmla0h, 2, float16, 0, DO_VCMLAH)
+DO_VCMLA(vcmla0s, 4, float32, 0, DO_VCMLAS)
+DO_VCMLA(vcmla90h, 2, float16, 1, DO_VCMLAH)
+DO_VCMLA(vcmla90s, 4, float32, 1, DO_VCMLAS)
+DO_VCMLA(vcmla180h, 2, float16, 2, DO_VCMLAH)
+DO_VCMLA(vcmla180s, 4, float32, 2, DO_VCMLAS)
+DO_VCMLA(vcmla270h, 2, float16, 3, DO_VCMLAH)
+DO_VCMLA(vcmla270s, 4, float32, 3, DO_VCMLAS)
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index d61abc6d46..d62ed1fc29 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -856,6 +856,14 @@ DO_2OP_FP(VCADD90_fp, vfcadd90)
 DO_2OP_FP(VCADD270_fp, vfcadd270)
 DO_2OP_FP(VFMA, vfma)
 DO_2OP_FP(VFMS, vfms)
+DO_2OP_FP(VCMUL0, vcmul0)
+DO_2OP_FP(VCMUL90, vcmul90)
+DO_2OP_FP(VCMUL180, vcmul180)
+DO_2OP_FP(VCMUL270, vcmul270)
+DO_2OP_FP(VCMLA0, vcmla0)
+DO_2OP_FP(VCMLA90, vcmla90)
+DO_2OP_FP(VCMLA180, vcmla180)
+DO_2OP_FP(VCMLA270, vcmla270)
 
 static bool do_2op_scalar(DisasContext *s, arg_2scalar *a,
                           MVEGenTwoOpScalarFn fn)
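
Editor's note on the predication handling visible in DO_VCMLA above: lanes whose
VPT mask bits are clear may still have their value computed, but they must not
update the cumulative FP exception flags and must not be written back, which is
why the helper switches to a scratch copy of float_status and merges results with
mergemask(). The following is a rough standalone model of that pattern under a toy
status struct; none of the names here (toy_status, mul_with_flags, ...) are QEMU API.

/*
 * Toy model: inactive lanes use a scratch status so they cannot set
 * exception flags, and only active lanes are written back (whole-lane
 * granularity here; the real helper merges at byte granularity).
 */
#include <stdint.h>
#include <stdio.h>

struct toy_status { int inexact; };

static float mul_with_flags(float a, float b, struct toy_status *st)
{
    float r = a * b;
    st->inexact = 1;    /* pretend every multiply raises Inexact */
    return r;
}

int main(void)
{
    float d[4] = { 0 }, n[4] = { 1, 2, 3, 4 }, m[4] = { 5, 6, 7, 8 };
    uint8_t mask = 0x5;                 /* lanes 0 and 2 active */
    struct toy_status status = { 0 }, scratch;

    for (int e = 0; e < 4; e++) {
        struct toy_status *st = &status;
        if (!(mask & (1u << e))) {
            scratch = status;           /* compute, but discard flag updates */
            st = &scratch;
        }
        float r = mul_with_flags(n[e], m[e], st);
        if (mask & (1u << e)) {         /* write back active lanes only */
            d[e] = r;
        }
    }
    printf("d = { %g %g %g %g }, inexact = %d\n",
           d[0], d[1], d[2], d[3], status.inexact);
    return 0;
}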