author    | Richard Henderson <richard.henderson@linaro.org> | 2018-06-29 15:11:15 +0100
committer | Peter Maydell <peter.maydell@linaro.org>         | 2018-06-29 15:11:15 +0100
commit    | 16fcfdc7325649b187ac489f3ae0b0d2a20b6230
tree      | 63e0b25b953fdb0e7ec1286eaca221b0395bbe7f /target/arm/vec_helper.c
parent    | d730ecaae77ac696515207a5ef99509240fc792b
target/arm: Implement SVE dot product (indexed)
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Message-id: 20180627043328.11531-34-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target/arm/vec_helper.c')
-rw-r--r-- | target/arm/vec_helper.c | 124
1 file changed, 124 insertions, 0 deletions
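
For readers unfamiliar with the instruction: within each 128-bit segment, the indexed dot product selects a single 32-bit group of four elements from the Zm operand by the immediate index, and every 32-bit lane of the destination accumulates the dot product of its own four Zn elements with that shared group. A minimal standalone sketch of this per-segment semantics for the byte case follows; the names sdot_idx_segment and SEG_BYTES and the test values are illustrative, not part of QEMU's API:

#include <stdint.h>
#include <stdio.h>

#define SEG_BYTES 16  /* one 128-bit segment */

/* Reference model for one segment of SDOT (indexed), byte elements:
 * every 32-bit lane i accumulates the dot product of its own four
 * bytes of n with the single four-byte group of m selected by index.
 */
static void sdot_idx_segment(int32_t d[4], const int8_t n[SEG_BYTES],
                             const int8_t m[SEG_BYTES], int index)
{
    const int8_t *mg = m + index * 4;  /* group shared by all four lanes */
    int i, j;

    for (i = 0; i < 4; i++) {
        for (j = 0; j < 4; j++) {
            d[i] += n[i * 4 + j] * mg[j];
        }
    }
}

int main(void)
{
    int32_t d[4] = { 0 };
    int8_t n[SEG_BYTES], m[SEG_BYTES];
    int k;

    for (k = 0; k < SEG_BYTES; k++) {
        n[k] = (int8_t)(k + 1);  /* 1..16 */
        m[k] = (int8_t)(k & 3);  /* 0,1,2,3 repeating */
    }
    sdot_idx_segment(d, n, m, 2);  /* index 2 selects m[8..11] = {0,1,2,3} */
    for (k = 0; k < 4; k++) {
        printf("d[%d] = %d\n", k, d[k]);  /* 20, 44, 68, 92 */
    }
    return 0;
}

The helpers in the diff below implement exactly this per-segment pattern, generalized to any operation size and, for the halfword variants, widened to 64-bit accumulators.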
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index c16a30c3b5..37f338732e 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -261,6 +261,130 @@ void HELPER(gvec_udot_h)(void *vd, void *vn, void *vm, uint32_t desc)
     clear_tail(d, opr_sz, simd_maxsz(desc));
 }
 
+void HELPER(gvec_sdot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+    intptr_t index = simd_data(desc);
+    uint32_t *d = vd;
+    int8_t *n = vn;
+    int8_t *m_indexed = (int8_t *)vm + index * 4;
+
+    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+     * Otherwise opr_sz is a multiple of 16.
+     */
+    segend = MIN(4, opr_sz_4);
+    i = 0;
+    do {
+        int8_t m0 = m_indexed[i * 4 + 0];
+        int8_t m1 = m_indexed[i * 4 + 1];
+        int8_t m2 = m_indexed[i * 4 + 2];
+        int8_t m3 = m_indexed[i * 4 + 3];
+
+        do {
+            d[i] += n[i * 4 + 0] * m0
+                  + n[i * 4 + 1] * m1
+                  + n[i * 4 + 2] * m2
+                  + n[i * 4 + 3] * m3;
+        } while (++i < segend);
+        segend = i + 4;
+    } while (i < opr_sz_4);
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_udot_idx_b)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, segend, opr_sz = simd_oprsz(desc), opr_sz_4 = opr_sz / 4;
+    intptr_t index = simd_data(desc);
+    uint32_t *d = vd;
+    uint8_t *n = vn;
+    uint8_t *m_indexed = (uint8_t *)vm + index * 4;
+
+    /* Notice the special case of opr_sz == 8, from aa64/aa32 advsimd.
+     * Otherwise opr_sz is a multiple of 16.
+     */
+    segend = MIN(4, opr_sz_4);
+    i = 0;
+    do {
+        uint8_t m0 = m_indexed[i * 4 + 0];
+        uint8_t m1 = m_indexed[i * 4 + 1];
+        uint8_t m2 = m_indexed[i * 4 + 2];
+        uint8_t m3 = m_indexed[i * 4 + 3];
+
+        do {
+            d[i] += n[i * 4 + 0] * m0
+                  + n[i * 4 + 1] * m1
+                  + n[i * 4 + 2] * m2
+                  + n[i * 4 + 3] * m3;
+        } while (++i < segend);
+        segend = i + 4;
+    } while (i < opr_sz_4);
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_sdot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
+    intptr_t index = simd_data(desc);
+    uint64_t *d = vd;
+    int16_t *n = vn;
+    int16_t *m_indexed = (int16_t *)vm + index * 4;
+
+    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
+     * Process the entire segment all at once, writing back the results
+     * only after we've consumed all of the inputs.
+     */
+    for (i = 0; i < opr_sz_8; i += 2) {
+        uint64_t d0, d1;
+
+        d0  = n[i * 4 + 0] * (int64_t)m_indexed[i * 4 + 0];
+        d0 += n[i * 4 + 1] * (int64_t)m_indexed[i * 4 + 1];
+        d0 += n[i * 4 + 2] * (int64_t)m_indexed[i * 4 + 2];
+        d0 += n[i * 4 + 3] * (int64_t)m_indexed[i * 4 + 3];
+        d1  = n[i * 4 + 4] * (int64_t)m_indexed[i * 4 + 0];
+        d1 += n[i * 4 + 5] * (int64_t)m_indexed[i * 4 + 1];
+        d1 += n[i * 4 + 6] * (int64_t)m_indexed[i * 4 + 2];
+        d1 += n[i * 4 + 7] * (int64_t)m_indexed[i * 4 + 3];
+
+        d[i + 0] += d0;
+        d[i + 1] += d1;
+    }
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
+void HELPER(gvec_udot_idx_h)(void *vd, void *vn, void *vm, uint32_t desc)
+{
+    intptr_t i, opr_sz = simd_oprsz(desc), opr_sz_8 = opr_sz / 8;
+    intptr_t index = simd_data(desc);
+    uint64_t *d = vd;
+    uint16_t *n = vn;
+    uint16_t *m_indexed = (uint16_t *)vm + index * 4;
+
+    /* This is supported by SVE only, so opr_sz is always a multiple of 16.
+     * Process the entire segment all at once, writing back the results
+     * only after we've consumed all of the inputs.
+     */
+    for (i = 0; i < opr_sz_8; i += 2) {
+        uint64_t d0, d1;
+
+        d0  = n[i * 4 + 0] * (uint64_t)m_indexed[i * 4 + 0];
+        d0 += n[i * 4 + 1] * (uint64_t)m_indexed[i * 4 + 1];
+        d0 += n[i * 4 + 2] * (uint64_t)m_indexed[i * 4 + 2];
+        d0 += n[i * 4 + 3] * (uint64_t)m_indexed[i * 4 + 3];
+        d1  = n[i * 4 + 4] * (uint64_t)m_indexed[i * 4 + 0];
+        d1 += n[i * 4 + 5] * (uint64_t)m_indexed[i * 4 + 1];
+        d1 += n[i * 4 + 6] * (uint64_t)m_indexed[i * 4 + 2];
+        d1 += n[i * 4 + 7] * (uint64_t)m_indexed[i * 4 + 3];
+
+        d[i + 0] += d0;
+        d[i + 1] += d1;
+    }
+
+    clear_tail(d, opr_sz, simd_maxsz(desc));
+}
+
 void HELPER(gvec_fcaddh)(void *vd, void *vn, void *vm,
                          void *vfpst, uint32_t desc)
 {
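
One design point worth calling out, per the comment in the halfword helpers: vd may be the same buffer as vn or vm, since the guest instruction's destination register can also be a source. Each segment's results are therefore accumulated into locals (d0, d1) and stored only after every input lane of that segment has been read, so an aliasing store cannot clobber inputs still to be consumed. A minimal sketch of the same pattern in isolation, assuming little-endian lane layout; the function name and the union-based aliasing demo are illustrative, not QEMU code:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* One 128-bit segment of a widening dot product where the destination
 * may alias the sources: accumulate into locals first, store last.
 */
static void deferred_writeback_segment(uint64_t *d, const uint16_t *n,
                                       const uint16_t *m_group)
{
    uint64_t d0 = 0, d1 = 0;
    int j;

    for (j = 0; j < 4; j++) {
        d0 += (uint64_t)n[j] * m_group[j];      /* low 64-bit lane */
        d1 += (uint64_t)n[4 + j] * m_group[j];  /* high lane reuses the group */
    }
    d[0] += d0;  /* safe: all reads above have already happened */
    d[1] += d1;
}

int main(void)
{
    /* Alias the destination onto the source storage, as when the guest
     * names the same vector register for Zd and Zm and index 0 selects
     * the group overlapping d[0].
     */
    union { uint64_t d[2]; uint16_t h[8]; } reg = { .d = { 1, 2 } };

    deferred_writeback_segment(reg.d, reg.h, reg.h);
    printf("%" PRIu64 " %" PRIu64 "\n", reg.d[0], reg.d[1]);
    return 0;
}

Had the store of d[0] happened before d1's products were formed, the second lane would have read the freshly written result instead of the original m group; deferring the write-back removes the hazard without needing a temporary copy of the whole segment.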