diff options
author | Peter Maydell <peter.maydell@linaro.org> | 2021-06-28 14:58:19 +0100 |
---|---|---|
committer | Peter Maydell <peter.maydell@linaro.org> | 2021-07-02 11:48:36 +0100 |
commit | 303db86fc73c68d8774203d4796b9995cc122886 (patch) | |
tree | 8099b68b25de78ffe3de72533851a559caccb2f4 | |
parent | d59ccc30f64249d5727bc084e0f3cf4b2483117b (diff) |
target/arm: Fix bugs in MVE VRMLALDAVH, VRMLSLDAVH
The initial implementation of the MVE VRMLALDAVH and VRMLSLDAVH
insns had some bugs:
* the 32x32 multiply of elements was being done as 32x32->32,
not 32x32->64
* we were incorrectly maintaining the accumulator in its full
72-bit form across all 4 beats of the insn; in the pseudocode
it is squashed back into the 64 bits of the RdaHi:RdaLo
registers after each beat
In particular, fixing the second of these allows us to recast
the implementation to avoid 128-bit arithmetic entirely.
Since the element size here is always 4, we can also drop the
parameterization of ESIZE to make the code a little more readable.
Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-3-peter.maydell@linaro.org
-rw-r--r-- | target/arm/mve_helper.c | 38 |
1 file changed, 21 insertions, 17 deletions
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c index 05552ce7ee..85a552fe07 100644 --- a/target/arm/mve_helper.c +++ b/target/arm/mve_helper.c @@ -18,7 +18,6 @@ */ #include "qemu/osdep.h" -#include "qemu/int128.h" #include "cpu.h" #include "internals.h" #include "vec_internal.h" @@ -1100,40 +1099,45 @@ DO_LDAV(vmlsldavsw, 4, int32_t, false, +=, -=) DO_LDAV(vmlsldavxsw, 4, int32_t, true, +=, -=) /* - * Rounding multiply add long dual accumulate high: we must keep - * a 72-bit internal accumulator value and return the top 64 bits. + * Rounding multiply add long dual accumulate high. In the pseudocode + * this is implemented with a 72-bit internal accumulator value of which + * the top 64 bits are returned. We optimize this to avoid having to + * use 128-bit arithmetic -- we can do this because the 74-bit accumulator + * is squashed back into 64-bits after each beat. */ -#define DO_LDAVH(OP, ESIZE, TYPE, XCHG, EVENACC, ODDACC, TO128) \ +#define DO_LDAVH(OP, TYPE, LTYPE, XCHG, SUB) \ uint64_t HELPER(glue(mve_, OP))(CPUARMState *env, void *vn, \ void *vm, uint64_t a) \ { \ uint16_t mask = mve_element_mask(env); \ unsigned e; \ TYPE *n = vn, *m = vm; \ - Int128 acc = int128_lshift(TO128(a), 8); \ - for (e = 0; e < 16 / ESIZE; e++, mask >>= ESIZE) { \ + for (e = 0; e < 16 / 4; e++, mask >>= 4) { \ if (mask & 1) { \ + LTYPE mul; \ if (e & 1) { \ - acc = ODDACC(acc, TO128(n[H##ESIZE(e - 1 * XCHG)] * \ - m[H##ESIZE(e)])); \ + mul = (LTYPE)n[H4(e - 1 * XCHG)] * m[H4(e)]; \ + if (SUB) { \ + mul = -mul; \ + } \ } else { \ - acc = EVENACC(acc, TO128(n[H##ESIZE(e + 1 * XCHG)] * \ - m[H##ESIZE(e)])); \ + mul = (LTYPE)n[H4(e + 1 * XCHG)] * m[H4(e)]; \ } \ - acc = int128_add(acc, int128_make64(1 << 7)); \ + mul = (mul >> 8) + ((mul >> 7) & 1); \ + a += mul; \ } \ } \ mve_advance_vpt(env); \ - return int128_getlo(int128_rshift(acc, 8)); \ + return a; \ } -DO_LDAVH(vrmlaldavhsw, 4, int32_t, false, int128_add, int128_add, int128_makes64) -DO_LDAVH(vrmlaldavhxsw, 4, 
int32_t, true, int128_add, int128_add, int128_makes64) +DO_LDAVH(vrmlaldavhsw, int32_t, int64_t, false, false) +DO_LDAVH(vrmlaldavhxsw, int32_t, int64_t, true, false) -DO_LDAVH(vrmlaldavhuw, 4, uint32_t, false, int128_add, int128_add, int128_make64) +DO_LDAVH(vrmlaldavhuw, uint32_t, uint64_t, false, false) -DO_LDAVH(vrmlsldavhsw, 4, int32_t, false, int128_add, int128_sub, int128_makes64) -DO_LDAVH(vrmlsldavhxsw, 4, int32_t, true, int128_add, int128_sub, int128_makes64) +DO_LDAVH(vrmlsldavhsw, int32_t, int64_t, false, true) +DO_LDAVH(vrmlsldavhxsw, int32_t, int64_t, true, true) /* Vector add across vector */ #define DO_VADDV(OP, ESIZE, TYPE) \ |