| author    | Richard Henderson <richard.henderson@linaro.org> | 2021-03-09 07:52:59 -0800 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Peter Maydell <peter.maydell@linaro.org>         | 2021-03-12 12:40:10 +0000 |
| commit    | 8e7fefed1bdcc0f7e722ccf2a2fc2b4f79fe725e         |                           |
| tree      | 7f2914eaa2ffc4604f0ddbf326e6246d7dd4e644 /target |                           |
| parent    | 226e6c046c0fce8da32575aad020ca56a5a8064d         |                           |
target/arm: Fix sve_zip_p vs odd vector lengths
Wrote too much output for the low-half zip (zip1) when vl % 512 != 0.
Adjust all of the x + (y << s) to x | (y << s) as a style fix.
We only ever have exact overlap between D, M, and N. Therefore
we only need a single temporary, and we do not need to check for
partial overlap.
Reported-by: Laurent Desnogues <laurent.desnogues@gmail.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210309155305.11301-3-richard.henderson@linaro.org
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
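The two code-level claims in the message can be checked in isolation. Below is a minimal, self-contained C sketch, not QEMU code: spread_even is a hypothetical stand-in for what expand_bits(x, 0) does for byte predicates. After expansion the N operand occupies even bit positions and the M operand, shifted by one element, odd positions, so the two never share a set bit and `nn + (mm << esize)` equals `nn | (mm << esize)`. It also works the bounds arithmetic for one odd vector length, on the assumption that the OPRSZ field holds the predicate size in bytes: for vl = 768 bits (768 % 512 != 0) the predicate is 96 bits = 12 bytes, so the old rounded-up loop bound wrote 2 x 8 = 16 bytes into a 12-byte predicate, while the fix routes any oprsz that is not a multiple of 8 to the 16-bit element path.

```c
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical model of expand_bits(x, 0): spread the low 32 bits of x
 * so that bit i lands at bit 2*i, with zeros in the odd positions. */
static uint64_t spread_even(uint64_t x)
{
    x &= 0xffffffffu;
    x = (x | (x << 16)) & 0x0000ffff0000ffffULL;
    x = (x | (x << 8))  & 0x00ff00ff00ff00ffULL;
    x = (x | (x << 4))  & 0x0f0f0f0f0f0f0f0fULL;
    x = (x | (x << 2))  & 0x3333333333333333ULL;
    x = (x | (x << 1))  & 0x5555555555555555ULL;
    return x;
}

int main(void)
{
    uint64_t nn = spread_even(0xdeadbeef);      /* even bit positions */
    uint64_t mm = spread_even(0x0badf00d) << 1; /* odd bit positions  */

    /* Disjoint bit sets: OR and ADD produce the same zipped word. */
    assert((nn & mm) == 0);
    assert((nn | mm) == (nn + mm));
    printf("zipped word: 0x%016" PRIx64 "\n", nn | mm);

    /* Worked example for vl = 768 bits: oprsz = 768 / 64 = 12 predicate
     * bytes.  The old loop bound rounded up to 2 full 64-bit words. */
    unsigned oprsz = 12;
    printf("predicate bytes: %u, old words written: %u (%u bytes)\n",
           oprsz, (oprsz + 7) / 8, 8 * ((oprsz + 7) / 8));
    return 0;
}
```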
Diffstat (limited to 'target')
-rw-r--r-- | target/arm/sve_helper.c | 25 |
1 file changed, 14 insertions(+), 11 deletions(-)
```diff
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index 11c9397dbb..2fb4b2c1ea 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -1871,6 +1871,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
     intptr_t oprsz = FIELD_EX32(pred_desc, PREDDESC, OPRSZ);
     int esz = FIELD_EX32(pred_desc, PREDDESC, ESZ);
     intptr_t high = FIELD_EX32(pred_desc, PREDDESC, DATA);
+    int esize = 1 << esz;
     uint64_t *d = vd;
     intptr_t i;
 
@@ -1883,33 +1884,35 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
         mm = extract64(mm, high * half, half);
         nn = expand_bits(nn, esz);
         mm = expand_bits(mm, esz);
-        d[0] = nn + (mm << (1 << esz));
+        d[0] = nn | (mm << esize);
     } else {
-        ARMPredicateReg tmp_n, tmp_m;
+        ARMPredicateReg tmp;
 
         /* We produce output faster than we consume input.
            Therefore we must be mindful of possible overlap.  */
-        if ((vn - vd) < (uintptr_t)oprsz) {
-            vn = memcpy(&tmp_n, vn, oprsz);
-        }
-        if ((vm - vd) < (uintptr_t)oprsz) {
-            vm = memcpy(&tmp_m, vm, oprsz);
+        if (vd == vn) {
+            vn = memcpy(&tmp, vn, oprsz);
+            if (vd == vm) {
+                vm = vn;
+            }
+        } else if (vd == vm) {
+            vm = memcpy(&tmp, vm, oprsz);
         }
         if (high) {
             high = oprsz >> 1;
         }
 
-        if ((high & 3) == 0) {
+        if ((oprsz & 7) == 0) {
             uint32_t *n = vn, *m = vm;
             high >>= 2;
 
-            for (i = 0; i < DIV_ROUND_UP(oprsz, 8); i++) {
+            for (i = 0; i < oprsz / 8; i++) {
                 uint64_t nn = n[H4(high + i)];
                 uint64_t mm = m[H4(high + i)];
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d[i] = nn + (mm << (1 << esz));
+                d[i] = nn | (mm << esize);
             }
         } else {
             uint8_t *n = vn, *m = vm;
@@ -1921,7 +1924,7 @@ void HELPER(sve_zip_p)(void *vd, void *vn, void *vm, uint32_t pred_desc)
 
                 nn = expand_bits(nn, esz);
                 mm = expand_bits(mm, esz);
-                d16[H2(i)] = nn + (mm << (1 << esz));
+                d16[H2(i)] = nn | (mm << esize);
             }
         }
     }
```
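For context on the simplified overlap handling in the hunk above: the helper receives pointers into the CPU register file, and, as the commit message notes, a source predicate either is the destination register or does not overlap it at all. The sketch below, using hypothetical names (PredReg, stash_overlapping_sources) rather than the real ARMPredicateReg helper code, shows how a single temporary covers all three aliasing cases.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical stand-in for ARMPredicateReg (size chosen arbitrarily). */
typedef struct { uint8_t b[32]; } PredReg;

/* Sketch of the new overlap handling: operands alias the destination
 * exactly (vd == vn and/or vd == vm) or not at all, never partially,
 * so one scratch copy is enough. */
static void stash_overlapping_sources(void **vn, void **vm, void *vd,
                                      PredReg *tmp, size_t oprsz)
{
    if (vd == *vn) {
        *vn = memcpy(tmp, *vn, oprsz);
        if (vd == *vm) {
            *vm = *vn;   /* N and M were the same register: share the copy */
        }
    } else if (vd == *vm) {
        *vm = memcpy(tmp, *vm, oprsz);
    }
}

int main(void)
{
    PredReg d = { .b = { 1, 2, 3 } }, tmp = { 0 };
    void *vn = &d, *vm = &d;    /* e.g. ZIP1 P0.B, P0.B, P0.B */

    stash_overlapping_sources(&vn, &vm, &d, &tmp, sizeof(d));
    printf("vn points at tmp: %d, vm shares vn's copy: %d\n",
           vn == (void *)&tmp, vm == vn);
    return 0;
}
```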