author     Richard Henderson <richard.henderson@linaro.org>  2020-06-17 14:13:43 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2020-09-03 13:13:58 -0700
commit     fe4b0b5bfa96c38ad1cad0689a86cca9f307e353 (patch)
tree       3ea004162ff888e9192a54cef606f1fec691f31e /tcg/tcg-op-gvec.c
parent     6a17646176e011ddc463a2870a64c7aaccfe9c50 (diff)
tcg: Implement 256-bit dup for tcg_gen_gvec_dup_mem
We already support duplication of 128-bit blocks. This extends
that support to 256-bit blocks. This will be needed by SVE2.

Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
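
For intuition, here is a minimal plain-C sketch of the fallback strategy the
patch uses when no suitable vector type is available: read the 32-byte source
block once, then store it repeatedly across the destination. The function name
dup_256_fallback and the raw char *env pointer are illustrative stand-ins, not
QEMU API.

    #include <stdint.h>
    #include <string.h>

    /* Sketch: load the 32-byte source block once, then replicate it
     * across the oprsz-byte destination region.  `env` stands in for
     * the CPU state block that cpu_env addresses in the real code.  */
    static void dup_256_fallback(char *env, uint32_t dofs, uint32_t aofs,
                                 uint32_t oprsz)
    {
        uint64_t in[4];

        memcpy(in, env + aofs, 32);              /* four 64-bit loads */
        /* Skip the first store when source and destination coincide,
         * mirroring the (aofs == dofs) * 32 loop start in the patch.  */
        for (uint32_t i = (aofs == dofs) * 32; i < oprsz; i += 32) {
            memcpy(env + dofs + i, in, 32);      /* four 64-bit stores */
        }
    }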
Diffstat (limited to 'tcg/tcg-op-gvec.c')
-rw-r--r--  tcg/tcg-op-gvec.c | 52 +++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 49 insertions(+), 3 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index fcc25b04e6..7ebd9e8298 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -1570,12 +1570,10 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
             do_dup(vece, dofs, oprsz, maxsz, NULL, in, 0);
             tcg_temp_free_i64(in);
         }
-    } else {
+    } else if (vece == 4) {
         /* 128-bit duplicate. */
-        /* ??? Dup to 256-bit vector. */
         int i;
 
-        tcg_debug_assert(vece == 4);
         tcg_debug_assert(oprsz >= 16);
         if (TCG_TARGET_HAS_v128) {
             TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V128);
@@ -1601,6 +1599,54 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
         if (oprsz < maxsz) {
             expand_clr(dofs + oprsz, maxsz - oprsz);
         }
+    } else if (vece == 5) {
+        /* 256-bit duplicate. */
+        int i;
+
+        tcg_debug_assert(oprsz >= 32);
+        tcg_debug_assert(oprsz % 32 == 0);
+        if (TCG_TARGET_HAS_v256) {
+            TCGv_vec in = tcg_temp_new_vec(TCG_TYPE_V256);
+
+            tcg_gen_ld_vec(in, cpu_env, aofs);
+            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+                tcg_gen_st_vec(in, cpu_env, dofs + i);
+            }
+            tcg_temp_free_vec(in);
+        } else if (TCG_TARGET_HAS_v128) {
+            TCGv_vec in0 = tcg_temp_new_vec(TCG_TYPE_V128);
+            TCGv_vec in1 = tcg_temp_new_vec(TCG_TYPE_V128);
+
+            tcg_gen_ld_vec(in0, cpu_env, aofs);
+            tcg_gen_ld_vec(in1, cpu_env, aofs + 16);
+            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+                tcg_gen_st_vec(in0, cpu_env, dofs + i);
+                tcg_gen_st_vec(in1, cpu_env, dofs + i + 16);
+            }
+            tcg_temp_free_vec(in0);
+            tcg_temp_free_vec(in1);
+        } else {
+            TCGv_i64 in[4];
+            int j;
+
+            for (j = 0; j < 4; ++j) {
+                in[j] = tcg_temp_new_i64();
+                tcg_gen_ld_i64(in[j], cpu_env, aofs + j * 8);
+            }
+            for (i = (aofs == dofs) * 32; i < oprsz; i += 32) {
+                for (j = 0; j < 4; ++j) {
+                    tcg_gen_st_i64(in[j], cpu_env, dofs + i + j * 8);
+                }
+            }
+            for (j = 0; j < 4; ++j) {
+                tcg_temp_free_i64(in[j]);
+            }
+        }
+        if (oprsz < maxsz) {
+            expand_clr(dofs + oprsz, maxsz - oprsz);
+        }
+    } else {
+        g_assert_not_reached();
     }
 }
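
A hypothetical call site, for illustration only: the offsets dofs and aofs are
assumed to have been computed by a target front end, and the 64-byte sizes are
made up; the signature itself is the one shown in the hunk headers above.

    /* Replicate the 256-bit block at env offset aofs across a 64-byte
     * destination region; vece == 5 selects the new 32-byte block size.  */
    tcg_gen_gvec_dup_mem(5, dofs, aofs, 64, 64);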