author     LIU Zhiwei <zhiwei_liu@c-sky.com>  2020-07-01 23:25:13 +0800
committer  Alistair Francis <alistair.francis@wdc.com>  2020-07-02 09:19:33 -0700
commit     b7aee4819206cbb7adfdb624d4f2fa9918c25d43
tree       72222e29ba1bb837d524a1a0dcc8aa26a95ae2bf /target/riscv/vector_helper.c
parent     eb2650e35ec1ed60ff302ce3330bd6c770640833
target/riscv: vector single-width averaging add and subtract
Signed-off-by: LIU Zhiwei <zhiwei_liu@c-sky.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20200701152549.1218-26-zhiwei_liu@c-sky.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
Diffstat (limited to 'target/riscv/vector_helper.c')
-rw-r--r--  target/riscv/vector_helper.c  100
1 file changed, 100 insertions(+), 0 deletions(-)
diff --git a/target/riscv/vector_helper.c b/target/riscv/vector_helper.c
index 1277aa1c10..0b2119b6cc 100644
--- a/target/riscv/vector_helper.c
+++ b/target/riscv/vector_helper.c
@@ -2497,3 +2497,103 @@ GEN_VEXT_VX_RM(vssub_vx_b, 1, 1, clearb)
GEN_VEXT_VX_RM(vssub_vx_h, 2, 2, clearh)
GEN_VEXT_VX_RM(vssub_vx_w, 4, 4, clearl)
GEN_VEXT_VX_RM(vssub_vx_d, 8, 8, clearq)
+
+/* Vector Single-Width Averaging Add and Subtract */
+static inline uint8_t get_round(int vxrm, uint64_t v, uint8_t shift)
+{
+ uint8_t d = extract64(v, shift, 1);
+ uint8_t d1;
+ uint64_t D1, D2;
+
+ if (shift == 0 || shift > 64) {
+ return 0;
+ }
+
+ d1 = extract64(v, shift - 1, 1);
+ D1 = extract64(v, 0, shift);
+ if (vxrm == 0) { /* round-to-nearest-up (add +0.5 LSB) */
+ return d1;
+ } else if (vxrm == 1) { /* round-to-nearest-even */
+ if (shift > 1) {
+ D2 = extract64(v, 0, shift - 1);
+ return d1 & ((D2 != 0) | d);
+ } else {
+ return d1 & d;
+ }
+ } else if (vxrm == 3) { /* round-to-odd (OR bits into LSB, aka "jam") */
+ return !d & (D1 != 0);
+ }
+ return 0; /* round-down (truncate) */
+}
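
To see how the rounding increment behaves, here is a minimal standalone
sketch (not part of the patch; round_inc is an editorial re-implementation
of the shift == 1 case of get_round above), checking each vxrm mode on
v = 3, where the exact result of the halving is 1.5:

    #include <assert.h>
    #include <stdint.h>

    /* Specialization of get_round() to shift == 1: d is the future LSB of
     * the shifted result, d1 the bit that gets shifted out. */
    static uint8_t round_inc(int vxrm, uint64_t v)
    {
        uint8_t d  = (v >> 1) & 1;
        uint8_t d1 = v & 1;

        switch (vxrm) {
        case 0:  return d1;             /* rnu: round half up */
        case 1:  return d1 & d;         /* rne: round half to even */
        case 3:  return !d & (d1 != 0); /* rod: jam lost bits into the LSB */
        default: return 0;              /* rdn: truncate */
        }
    }

    int main(void)
    {
        assert((3 >> 1) + round_inc(0, 3) == 2); /* rnu: 1.5 -> 2 */
        assert((3 >> 1) + round_inc(1, 3) == 2); /* rne: 1.5 -> 2 (even) */
        assert((3 >> 1) + round_inc(2, 3) == 1); /* rdn: 1.5 -> 1 */
        assert((3 >> 1) + round_inc(3, 3) == 1); /* rod: 1.5 -> 1 (odd) */
        return 0;
    }
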
+
+static inline int32_t aadd32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int64_t res = (int64_t)a + b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline int64_t aadd64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = a + b;
+ uint8_t round = get_round(vxrm, res, 1);
+ int64_t over = (res ^ a) & (res ^ b) & INT64_MIN;
+
+ /* With signed overflow, bit 64 is inverse of bit 63. */
+ return ((res >> 1) ^ over) + round;
+}
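
The masked XOR is the overflow detector: (res ^ a) & (res ^ b) has bit 63
set exactly when a and b share a sign that res lost, i.e. when the 64-bit
addition wrapped, and in that case bit 64 of the true sum is the inverse
of bit 63 of the wrapped one. The 32-bit variant above sidesteps this by
widening to 64 bits first. A standalone sanity check (not part of the
patch; avg_add re-implements the correction, with the addition done on
uint64_t so the sketch stays well-defined C, whereas QEMU itself builds
with -fwrapv), using vxrm = 2 so the rounding increment is zero:

    #include <assert.h>
    #include <stdint.h>

    static int64_t avg_add(int64_t a, int64_t b)
    {
        int64_t res  = (int64_t)((uint64_t)a + (uint64_t)b); /* may wrap */
        int64_t over = (res ^ a) & (res ^ b) & INT64_MIN;

        /* Flip bit 63 back after the shift when the sum overflowed. */
        return (res >> 1) ^ over;
    }

    int main(void)
    {
        /* The sum wraps to -2, yet the true average is INT64_MAX. */
        assert(avg_add(INT64_MAX, INT64_MAX) == INT64_MAX);
        /* The sum wraps to 0, yet the true average is INT64_MIN. */
        assert(avg_add(INT64_MIN, INT64_MIN) == INT64_MIN);
        /* No overflow: a plain arithmetic halving. */
        assert(avg_add(6, 4) == 5);
        return 0;
    }
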
+
+RVVCALL(OPIVV2_RM, vaadd_vv_b, OP_SSS_B, H1, H1, H1, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_h, OP_SSS_H, H2, H2, H2, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_w, OP_SSS_W, H4, H4, H4, aadd32)
+RVVCALL(OPIVV2_RM, vaadd_vv_d, OP_SSS_D, H8, H8, H8, aadd64)
+GEN_VEXT_VV_RM(vaadd_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vaadd_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vaadd_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vaadd_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vaadd_vx_b, OP_SSS_B, H1, H1, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_h, OP_SSS_H, H2, H2, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_w, OP_SSS_W, H4, H4, aadd32)
+RVVCALL(OPIVX2_RM, vaadd_vx_d, OP_SSS_D, H8, H8, aadd64)
+GEN_VEXT_VX_RM(vaadd_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vaadd_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vaadd_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vaadd_vx_d, 8, 8, clearq)
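
The RVVCALL lines bind aadd32/aadd64 to each element width, and the
GEN_VEXT_*_RM lines emit the actual helper entry points. Both macro
families are defined earlier in vector_helper.c; reconstructed from their
uses here (and not shown in this hunk), the generated vector-vector and
vector-scalar helpers should look roughly like:

    void HELPER(vaadd_vv_w)(void *vd, void *v0, void *vs1, void *vs2,
                            CPURISCVState *env, uint32_t desc);
    void HELPER(vaadd_vx_w)(void *vd, void *v0, target_ulong s1, void *vs2,
                            CPURISCVState *env, uint32_t desc);
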
+
+static inline int32_t asub32(CPURISCVState *env, int vxrm, int32_t a, int32_t b)
+{
+ int64_t res = (int64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
+
+ return (res >> 1) + round;
+}
+
+static inline int64_t asub64(CPURISCVState *env, int vxrm, int64_t a, int64_t b)
+{
+ int64_t res = (int64_t)a - b;
+ uint8_t round = get_round(vxrm, res, 1);
+ int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;
+
+ /* With signed overflow, bit 64 is inverse of bit 63. */
+ return ((res >> 1) ^ over) + round;
+}
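
asub64 applies the same bit-63 correction, but the overflow predicate
changes: a subtraction can only wrap when the operands' signs differ and
the result's sign differs from a's, hence (res ^ a) & (a ^ b). Another
standalone check (not part of the patch; avg_sub is an editorial
re-implementation, again with vxrm = 2 so the rounding increment is zero):

    #include <assert.h>
    #include <stdint.h>

    static int64_t avg_sub(int64_t a, int64_t b)
    {
        int64_t res  = (int64_t)((uint64_t)a - (uint64_t)b); /* may wrap */
        int64_t over = (res ^ a) & (a ^ b) & INT64_MIN;

        return (res >> 1) ^ over;
    }

    int main(void)
    {
        /* The difference wraps to +1; its true value is -2^64 + 1, and
         * halving that with truncation gives INT64_MIN. */
        assert(avg_sub(INT64_MIN, INT64_MAX) == INT64_MIN);
        /* No overflow: (7 - 2) >> 1 == 2. */
        assert(avg_sub(7, 2) == 2);
        return 0;
    }
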
+
+RVVCALL(OPIVV2_RM, vasub_vv_b, OP_SSS_B, H1, H1, H1, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_h, OP_SSS_H, H2, H2, H2, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_w, OP_SSS_W, H4, H4, H4, asub32)
+RVVCALL(OPIVV2_RM, vasub_vv_d, OP_SSS_D, H8, H8, H8, asub64)
+GEN_VEXT_VV_RM(vasub_vv_b, 1, 1, clearb)
+GEN_VEXT_VV_RM(vasub_vv_h, 2, 2, clearh)
+GEN_VEXT_VV_RM(vasub_vv_w, 4, 4, clearl)
+GEN_VEXT_VV_RM(vasub_vv_d, 8, 8, clearq)
+
+RVVCALL(OPIVX2_RM, vasub_vx_b, OP_SSS_B, H1, H1, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_h, OP_SSS_H, H2, H2, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_w, OP_SSS_W, H4, H4, asub32)
+RVVCALL(OPIVX2_RM, vasub_vx_d, OP_SSS_D, H8, H8, asub64)
+GEN_VEXT_VX_RM(vasub_vx_b, 1, 1, clearb)
+GEN_VEXT_VX_RM(vasub_vx_h, 2, 2, clearh)
+GEN_VEXT_VX_RM(vasub_vx_w, 4, 4, clearl)
+GEN_VEXT_VX_RM(vasub_vx_d, 8, 8, clearq)
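
Putting it together, each generated helper applies its scalar function
element-wise under the vxrm rounding mode carried in the CPU state. An
end-to-end sketch (not part of the patch) of what an averaging add over
byte elements computes with vxrm = 0 (round-to-nearest-up); the input
values are made up, and an arithmetic right shift of negative values is
assumed, just as the helpers themselves assume it:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        int8_t vs2[4] = {1, -1, 127, -128};
        int8_t vs1[4] = {2, -2, 127, -128};
        int8_t vd[4];

        for (int i = 0; i < 4; i++) {
            int16_t sum = (int16_t)vs2[i] + vs1[i]; /* widen: cannot wrap */
            vd[i] = (sum >> 1) + (sum & 1);         /* rnu: add the lost bit */
        }
        assert(vd[0] == 2);    /* 1.5  -> 2 */
        assert(vd[1] == -1);   /* -1.5 -> -1 (ties round up) */
        assert(vd[2] == 127);  /* extremes stay exact thanks to widening */
        assert(vd[3] == -128);
        return 0;
    }
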