author     Peter Maydell <peter.maydell@linaro.org>   2020-05-07 09:45:54 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2020-05-07 09:45:54 +0100
commit     609dd53df540edd72faee705205aceca9c42fea5 (patch)
tree       a4c43eaa4dff9bbd228a1197863a8d25e69be04f /target
parent     298d893dd5006a38d0475fb584ae71b9e9f6e161 (diff)
parent     07dada0336a83002dfa8673a9220a88e13d9a45c (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20200506' into staging
Add tcg_gen_gvec_dup_imm
Misc tcg patches

# gpg: Signature made Wed 06 May 2020 19:23:43 BST
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20200506:
  tcg: Fix integral argument type to tcg_gen_rot[rl]i_i{32,64}
  tcg: Add load_dest parameter to GVecGen2
  tcg: Improve vector tail clearing
  tcg: Add tcg_gen_gvec_dup_tl
  tcg: Remove tcg_gen_gvec_dup{8,16,32,64}i
  tcg: Use tcg_gen_gvec_dup_imm in logical simplifications
  target/arm: Use tcg_gen_gvec_dup_imm
  target/ppc: Use tcg_gen_gvec_dup_imm
  target/s390x: Use tcg_gen_gvec_dup_imm
  tcg: Add tcg_gen_gvec_dup_imm

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--   target/arm/translate-a64.c            10
-rw-r--r--   target/arm/translate-sve.c            12
-rw-r--r--   target/arm/translate.c                 9
-rw-r--r--   target/ppc/translate/vmx-impl.inc.c   32
-rw-r--r--   target/ppc/translate/vsx-impl.inc.c    2
-rw-r--r--   target/s390x/translate_vx.inc.c       41
6 files changed, 43 insertions, 63 deletions
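
The series replaces the four size-specific immediate-splat helpers (tcg_gen_gvec_dup{8,16,32,64}i) with a single tcg_gen_gvec_dup_imm that takes the element size as an MO_* argument. The sketch below is not part of the patch; it only illustrates the call-shape change, assuming the usual QEMU translator environment ("tcg/tcg-op-gvec.h") and a hypothetical caller that already knows the destination offset and vector size.

/*
 * Sketch only, not from the patch: migrating a splat of a 32-bit
 * immediate from the removed helper to the new interface.
 * dofs, vsz and imm are assumed to come from the surrounding
 * translator code (e.g. vec_full_reg_offset()/vec_full_reg_size()).
 */
static void dup_imm_example(uint32_t dofs, uint32_t vsz, uint32_t imm)
{
    /* Before this series: one entry point per element size. */
    /* tcg_gen_gvec_dup32i(dofs, vsz, vsz, imm); */

    /* After: one entry point; the element size (MO_8..MO_64) is the
     * first argument and the immediate is widened internally. */
    tcg_gen_gvec_dup_imm(MO_32, dofs, vsz, vsz, imm);
}
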
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index a896f9c4b8..62e5729904 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -502,7 +502,7 @@ static void clear_vec_high(DisasContext *s, bool is_q, int rd)
tcg_temp_free_i64(tcg_zero);
}
if (vsz > 16) {
- tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
+ tcg_gen_gvec_dup_imm(MO_64, ofs + 16, vsz - 16, vsz - 16, 0);
}
}
@@ -7785,8 +7785,8 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
/* MOVI or MVNI, with MVNI negation handled above. */
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
- vec_full_reg_size(s), imm);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
+ vec_full_reg_size(s), imm);
} else {
/* ORR or BIC, with BIC negation to AND handled above. */
if (is_neg) {
@@ -10214,8 +10214,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
if (is_u) {
if (shift == 8 << size) {
/* Shift count the same size as element size produces zero. */
- tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s), 0);
+ tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s), 0);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index b35bad245e..6c8bda4e4c 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -177,7 +177,7 @@ static bool do_mov_z(DisasContext *s, int rd, int rn)
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
unsigned vsz = vec_full_reg_size(s);
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), vsz, vsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}
/* Invoke a vector expander on two Pregs. */
@@ -1453,7 +1453,7 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
unsigned oprsz = size_for_gvec(setsz / 8);
if (oprsz * 8 == setsz) {
- tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
goto done;
}
}
@@ -2044,7 +2044,7 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
} else {
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, 0);
+ tcg_gen_gvec_dup_imm(esz, dofs, vsz, vsz, 0);
}
}
return true;
@@ -3260,9 +3260,7 @@ static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
/* Decode the VFP immediate. */
imm = vfp_expand_imm(a->esz, a->imm);
- imm = dup_const(a->esz, imm);
-
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, imm);
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
}
return true;
}
@@ -3276,7 +3274,7 @@ static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
unsigned vsz = vec_full_reg_size(s);
int dofs = vec_full_reg_offset(s, a->rd);
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(a->esz, a->imm));
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
}
return true;
}
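
The translate-sve.c hunks above also show a second effect of the new interface: because tcg_gen_gvec_dup_imm receives the element size, callers such as trans_FDUP and trans_DUP_i no longer have to widen the immediate themselves with dup_const() before splatting it. A minimal sketch of the two styles, with hypothetical dofs/vsz parameters standing in for the real register offset and vector size:

/* Sketch only, not from the patch: splatting an 8-bit immediate. */
static void dup_byte_example(uint32_t dofs, uint32_t vsz, uint8_t imm)
{
    /* Old style: replicate the byte into a 64-bit pattern by hand,
     * then splat that pattern. */
    /* tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(MO_8, imm)); */

    /* New style: pass the element size and the narrow immediate;
     * the replication happens inside the helper. */
    tcg_gen_gvec_dup_imm(MO_8, dofs, vsz, vsz, imm);
}
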
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 025747c0bd..74fac1d09c 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5209,7 +5209,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
MIN(shift, (8 << size) - 1),
vec_size, vec_size);
} else if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
+ vec_size, 0);
} else {
tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5260,7 +5261,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* architecturally valid and results in zero.
*/
if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(size, rd_ofs,
+ vec_size, vec_size, 0);
} else {
tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5606,7 +5608,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i64(t64);
} else {
- tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
+ tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
+ vec_size, imm);
}
}
}
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 81d5a7a341..403ed3a01c 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -1035,21 +1035,25 @@ GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
-#define GEN_VXFORM_DUPI(name, tcg_op, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- int simm; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- simm = SIMM5(ctx->opcode); \
- tcg_op(avr_full_offset(rD(ctx->opcode)), 16, 16, simm); \
+static void gen_vsplti(DisasContext *ctx, int vece)
+{
+ int simm;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
}
-GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12);
-GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13);
-GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14);
+ simm = SIMM5(ctx->opcode);
+ tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
+}
+
+#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
+
+GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
+GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
+GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
#define GEN_VXFORM_NOA(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -1559,7 +1563,7 @@ GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
-#undef GEN_VXFORM_DUPI
+#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED
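
For the PowerPC vspltis{b,h,w} generators above, the per-size macro bodies collapse into one gen_vsplti() helper parameterized by the element size, and GEN_VXFORM_VSPLTI shrinks to a one-line wrapper. As implied by that macro definition (this is not additional code from the patch), an instantiation such as GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12) expands to roughly:

static void gen_vspltisb(DisasContext *ctx)
{
    gen_vsplti(ctx, MO_8);
}

Note that the opc2/opc3 arguments are not consumed by the macro body itself.
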
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index 8287e272f5..b518de46db 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -1579,7 +1579,7 @@ static void gen_xxspltib(DisasContext *ctx)
return;
}
}
- tcg_gen_gvec_dup8i(vsr_full_offset(rt), 16, 16, uim8);
+ tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8);
}
static void gen_xxsldwi(DisasContext *ctx)
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index 24558cce80..12347f8a03 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -231,8 +231,8 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
#define gen_gvec_mov(v1, v2) \
tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
16)
-#define gen_gvec_dup64i(v1, c) \
- tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
+#define gen_gvec_dup_imm(es, v1, c) \
+ tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c);
#define gen_gvec_fn_2(fn, es, v1, v2) \
tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
16, 16)
@@ -316,31 +316,6 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
tcg_temp_free_i64(cl);
}
-static void gen_gvec_dupi(uint8_t es, uint8_t reg, uint64_t c)
-{
- switch (es) {
- case ES_8:
- tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_16:
- tcg_gen_gvec_dup16i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_32:
- tcg_gen_gvec_dup32i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_64:
- gen_gvec_dup64i(reg, c);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static void zero_vec(uint8_t reg)
-{
- tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, 0);
-}
-
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
uint64_t b)
{
@@ -396,8 +371,8 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
* Masks for both 64 bit elements of the vector are the same.
* Trust tcg to produce a good constant loading.
*/
- gen_gvec_dup64i(get_field(s, v1),
- generate_byte_mask(i2 & 0xff));
+ gen_gvec_dup_imm(ES_64, get_field(s, v1),
+ generate_byte_mask(i2 & 0xff));
} else {
TCGv_i64 t = tcg_temp_new_i64();
@@ -432,7 +407,7 @@ static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
}
}
- gen_gvec_dupi(es, get_field(s, v1), mask);
+ gen_gvec_dup_imm(es, get_field(s, v1), mask);
return DISAS_NEXT;
}
@@ -585,7 +560,7 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
t = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
- zero_vec(get_field(s, v1));
+ gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
tcg_temp_free_i64(t);
return DISAS_NEXT;
@@ -892,7 +867,7 @@ static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- gen_gvec_dupi(es, get_field(s, v1), data);
+ gen_gvec_dup_imm(es, get_field(s, v1), data);
return DISAS_NEXT;
}
@@ -1372,7 +1347,7 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
}
- zero_vec(get_field(s, v1));
+ gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
tcg_temp_free_i32(tmp);