author     Richard Henderson <richard.henderson@linaro.org>    2020-08-28 10:02:49 +0100
committer  Peter Maydell <peter.maydell@linaro.org>            2020-08-28 10:02:49 +0100
commit     d21798856b227a20a0a41640236af445f4f4aeb0
tree       b2a1a50658abc5e720485eb25f9570699429b1f0
parent     830d1a5a05619b0d407040ab8418018076765249
target/arm: Generalize inl_qrdmlah_* helper functions
Unify add/sub helpers and add a parameter for rounding. This will allow
saturating non-rounding to reuse this code.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
[PMM: fixed accidental use of '=' rather than '+=' in do_sqrdmlah_s]
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20200815013145.539409-15-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  target/arm/vec_helper.c | 80
1 file changed, 29 insertions(+), 51 deletions(-)
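
For orientation before reading the diff: the patch folds the four inl_qrdmlah_*/inl_qrdmlsh_* inline helpers into two, do_sqrdmlah_h (16-bit) and do_sqrdmlah_s (32-bit), each taking a neg flag that selects multiply-subtract instead of multiply-accumulate and a round flag that controls the rounding constant. The sketch below shows how the flag combinations are intended to be used; the example_sqdmulh_h wrapper is hypothetical and not part of this patch.

/*
 * Illustrative only -- how callers select behaviour via the new flags
 * of do_sqrdmlah_h() as introduced in the diff below:
 *
 *   neg=false, round=true   SQRDMLAH: rounding multiply-accumulate,
 *                           as used by HELPER(neon_qrdmlah_s16)
 *   neg=true,  round=true   SQRDMLSH: rounding multiply-subtract,
 *                           as used by HELPER(neon_qrdmlsh_s16)
 *   neg=false, round=false  non-rounding saturating doubling multiply,
 *                           the reuse the commit message anticipates
 */
static inline int16_t example_sqdmulh_h(int16_t n, int16_t m, uint32_t *sat)
{
    /* Hypothetical wrapper: saturating doubling multiply, high half,
     * with no accumulator (src3 = 0) and no rounding. */
    return do_sqrdmlah_h(n, m, 0, false, false, sat);
}
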
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 7d76412ee0..9f10be03ed 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -37,19 +37,24 @@
#endif
/* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
-static int16_t inl_qrdmlah_s16(int16_t src1, int16_t src2,
- int16_t src3, uint32_t *sat)
+static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
+ bool neg, bool round, uint32_t *sat)
{
- /* Simplify:
+ /*
+ * Simplify:
* = ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16
* = ((a3 << 15) + (e1 * e2) + (1 << 14)) >> 15
*/
int32_t ret = (int32_t)src1 * src2;
- ret = ((int32_t)src3 << 15) + ret + (1 << 14);
+ if (neg) {
+ ret = -ret;
+ }
+ ret += ((int32_t)src3 << 15) + (round << 14);
ret >>= 15;
+
if (ret != (int16_t)ret) {
*sat = 1;
- ret = (ret < 0 ? -0x8000 : 0x7fff);
+ ret = (ret < 0 ? INT16_MIN : INT16_MAX);
}
return ret;
}
@@ -58,8 +63,9 @@ uint32_t HELPER(neon_qrdmlah_s16)(CPUARMState *env, uint32_t src1,
uint32_t src2, uint32_t src3)
{
uint32_t *sat = &env->vfp.qc[0];
- uint16_t e1 = inl_qrdmlah_s16(src1, src2, src3, sat);
- uint16_t e2 = inl_qrdmlah_s16(src1 >> 16, src2 >> 16, src3 >> 16, sat);
+ uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, false, true, sat);
+ uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
+ false, true, sat);
return deposit32(e1, 16, 16, e2);
}
@@ -73,35 +79,18 @@ void HELPER(gvec_qrdmlah_s16)(void *vd, void *vn, void *vm,
uintptr_t i;
for (i = 0; i < opr_sz / 2; ++i) {
- d[i] = inl_qrdmlah_s16(n[i], m[i], d[i], vq);
+ d[i] = do_sqrdmlah_h(n[i], m[i], d[i], false, true, vq);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-/* Signed saturating rounding doubling multiply-subtract high half, 16-bit */
-static int16_t inl_qrdmlsh_s16(int16_t src1, int16_t src2,
- int16_t src3, uint32_t *sat)
-{
- /* Similarly, using subtraction:
- * = ((a3 << 16) - ((e1 * e2) << 1) + (1 << 15)) >> 16
- * = ((a3 << 15) - (e1 * e2) + (1 << 14)) >> 15
- */
- int32_t ret = (int32_t)src1 * src2;
- ret = ((int32_t)src3 << 15) - ret + (1 << 14);
- ret >>= 15;
- if (ret != (int16_t)ret) {
- *sat = 1;
- ret = (ret < 0 ? -0x8000 : 0x7fff);
- }
- return ret;
-}
-
uint32_t HELPER(neon_qrdmlsh_s16)(CPUARMState *env, uint32_t src1,
uint32_t src2, uint32_t src3)
{
uint32_t *sat = &env->vfp.qc[0];
- uint16_t e1 = inl_qrdmlsh_s16(src1, src2, src3, sat);
- uint16_t e2 = inl_qrdmlsh_s16(src1 >> 16, src2 >> 16, src3 >> 16, sat);
+ uint16_t e1 = do_sqrdmlah_h(src1, src2, src3, true, true, sat);
+ uint16_t e2 = do_sqrdmlah_h(src1 >> 16, src2 >> 16, src3 >> 16,
+ true, true, sat);
return deposit32(e1, 16, 16, e2);
}
@@ -115,19 +104,23 @@ void HELPER(gvec_qrdmlsh_s16)(void *vd, void *vn, void *vm,
uintptr_t i;
for (i = 0; i < opr_sz / 2; ++i) {
- d[i] = inl_qrdmlsh_s16(n[i], m[i], d[i], vq);
+ d[i] = do_sqrdmlah_h(n[i], m[i], d[i], true, true, vq);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
/* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
-static int32_t inl_qrdmlah_s32(int32_t src1, int32_t src2,
- int32_t src3, uint32_t *sat)
+static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
+ bool neg, bool round, uint32_t *sat)
{
/* Simplify similarly to int_qrdmlah_s16 above. */
int64_t ret = (int64_t)src1 * src2;
- ret = ((int64_t)src3 << 31) + ret + (1 << 30);
+ if (neg) {
+ ret = -ret;
+ }
+ ret += ((int64_t)src3 << 31) + (round << 30);
ret >>= 31;
+
if (ret != (int32_t)ret) {
*sat = 1;
ret = (ret < 0 ? INT32_MIN : INT32_MAX);
@@ -139,7 +132,7 @@ uint32_t HELPER(neon_qrdmlah_s32)(CPUARMState *env, int32_t src1,
int32_t src2, int32_t src3)
{
uint32_t *sat = &env->vfp.qc[0];
- return inl_qrdmlah_s32(src1, src2, src3, sat);
+ return do_sqrdmlah_s(src1, src2, src3, false, true, sat);
}
void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
@@ -152,31 +145,16 @@ void HELPER(gvec_qrdmlah_s32)(void *vd, void *vn, void *vm,
uintptr_t i;
for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = inl_qrdmlah_s32(n[i], m[i], d[i], vq);
+ d[i] = do_sqrdmlah_s(n[i], m[i], d[i], false, true, vq);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
-/* Signed saturating rounding doubling multiply-subtract high half, 32-bit */
-static int32_t inl_qrdmlsh_s32(int32_t src1, int32_t src2,
- int32_t src3, uint32_t *sat)
-{
- /* Simplify similarly to int_qrdmlsh_s16 above. */
- int64_t ret = (int64_t)src1 * src2;
- ret = ((int64_t)src3 << 31) - ret + (1 << 30);
- ret >>= 31;
- if (ret != (int32_t)ret) {
- *sat = 1;
- ret = (ret < 0 ? INT32_MIN : INT32_MAX);
- }
- return ret;
-}
-
uint32_t HELPER(neon_qrdmlsh_s32)(CPUARMState *env, int32_t src1,
int32_t src2, int32_t src3)
{
uint32_t *sat = &env->vfp.qc[0];
- return inl_qrdmlsh_s32(src1, src2, src3, sat);
+ return do_sqrdmlah_s(src1, src2, src3, true, true, sat);
}
void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
@@ -189,7 +167,7 @@ void HELPER(gvec_qrdmlsh_s32)(void *vd, void *vn, void *vm,
uintptr_t i;
for (i = 0; i < opr_sz / 4; ++i) {
- d[i] = inl_qrdmlsh_s32(n[i], m[i], d[i], vq);
+ d[i] = do_sqrdmlah_s(n[i], m[i], d[i], true, true, vq);
}
clear_tail(d, opr_sz, simd_maxsz(desc));
}
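
As a quick check of the "Simplify" comment carried over from the original helper, the sketch below compares the patched 16-bit helper against the unsimplified architectural expression ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16. It is a standalone test, not part of the patch; it duplicates do_sqrdmlah_h so it can be built outside the QEMU tree.

/* Standalone sanity check (not part of the patch): the simplified form
 * used by do_sqrdmlah_h matches the unsimplified expression, because the
 * unsimplified numerator is exactly twice the simplified one and the
 * extra factor of two cancels against the extra bit of right shift. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Copy of the patched helper, for an out-of-tree test. */
static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
                             bool neg, bool round, uint32_t *sat)
{
    int32_t ret = (int32_t)src1 * src2;
    if (neg) {
        ret = -ret;
    }
    ret += ((int32_t)src3 << 15) + (round << 14);
    ret >>= 15;
    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT16_MIN : INT16_MAX);
    }
    return ret;
}

/* Unsimplified form: ((a3 << 16) + ((e1 * e2) << 1) + (1 << 15)) >> 16,
 * with the same saturation rule. */
static int16_t reference_sqrdmlah_h(int16_t e1, int16_t e2, int16_t a3,
                                    uint32_t *sat)
{
    int64_t ret = ((int64_t)a3 << 16) + (((int64_t)e1 * e2) << 1) + (1 << 15);
    ret >>= 16;
    if (ret != (int16_t)ret) {
        *sat = 1;
        ret = (ret < 0 ? INT16_MIN : INT16_MAX);
    }
    return ret;
}

int main(void)
{
    uint32_t sat1 = 0, sat2 = 0;
    int16_t tests[][3] = {
        { 0x4000, 0x4000, 0 },      /* 0.5 * 0.5 */
        { -0x8000, -0x8000, 0 },    /* saturating case */
        { 0x1234, -0x2345, 0x0fff },
    };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        int16_t a = do_sqrdmlah_h(tests[i][0], tests[i][1], tests[i][2],
                                  false, true, &sat1);
        int16_t b = reference_sqrdmlah_h(tests[i][0], tests[i][1],
                                         tests[i][2], &sat2);
        assert(a == b);
    }
    printf("simplified and reference forms agree\n");
    return 0;
}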