 include/tcg/tcg-op-gvec.h           |  13
 include/tcg/tcg-op.h                |   8
 target/arm/translate-a64.c          |  10
 target/arm/translate-sve.c          |  12
 target/arm/translate.c              |   9
 target/ppc/translate/vmx-impl.inc.c |  32
 target/ppc/translate/vsx-impl.inc.c |   2
 target/s390x/translate_vx.inc.c     |  41
 tcg/tcg-op-gvec.c                   | 164
 tcg/tcg-op.c                        |  16
 10 files changed, 167 insertions(+), 140 deletions(-)
diff --git a/include/tcg/tcg-op-gvec.h b/include/tcg/tcg-op-gvec.h
index 74534e2480..cea6497341 100644
--- a/include/tcg/tcg-op-gvec.h
+++ b/include/tcg/tcg-op-gvec.h
@@ -109,6 +109,8 @@ typedef struct {
uint8_t vece;
/* Prefer i64 to v64. */
bool prefer_i64;
+ /* Load dest as a 2nd source operand. */
+ bool load_dest;
} GVecGen2;
typedef struct {
@@ -313,15 +315,18 @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t s, uint32_t m);
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t s,
+ uint32_t m, uint64_t imm);
void tcg_gen_gvec_dup_i32(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i32);
void tcg_gen_gvec_dup_i64(unsigned vece, uint32_t dofs, uint32_t s,
uint32_t m, TCGv_i64);
-void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t s, uint32_t m, uint8_t x);
-void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t s, uint32_t m, uint16_t x);
-void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t s, uint32_t m, uint32_t x);
-void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t s, uint32_t m, uint64_t x);
+#if TARGET_LONG_BITS == 64
+# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i64
+#else
+# define tcg_gen_gvec_dup_tl tcg_gen_gvec_dup_i32
+#endif
void tcg_gen_gvec_shli(unsigned vece, uint32_t dofs, uint32_t aofs,
int64_t shift, uint32_t oprsz, uint32_t maxsz);
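
For orientation, a minimal sketch (not part of the patch) of how a caller migrates from the width-specific helpers to the unified interface; splat_byte_example, dofs and vsz are placeholder names for whatever a target computes with vec_full_reg_offset()/vec_full_reg_size():

    static void splat_byte_example(uint32_t dofs, uint32_t vsz, uint8_t imm)
    {
        /* Previously: tcg_gen_gvec_dup8i(dofs, vsz, vsz, imm); */
        /* Now the element size is an explicit argument. */
        tcg_gen_gvec_dup_imm(MO_8, dofs, vsz, vsz, imm);
    }
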
diff --git a/include/tcg/tcg-op.h b/include/tcg/tcg-op.h
index 230db6e022..e3399d6a5e 100644
--- a/include/tcg/tcg-op.h
+++ b/include/tcg/tcg-op.h
@@ -297,9 +297,9 @@ void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2);
void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg);
void tcg_gen_ctpop_i32(TCGv_i32 a1, TCGv_i32 a2);
void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2);
-void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2);
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2);
void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2,
unsigned int ofs, unsigned int len);
void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg,
@@ -493,9 +493,9 @@ void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2);
void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg);
void tcg_gen_ctpop_i64(TCGv_i64 a1, TCGv_i64 a2);
void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2);
-void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2);
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2);
void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2,
unsigned int ofs, unsigned int len);
void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg,
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index a896f9c4b8..62e5729904 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -502,7 +502,7 @@ static void clear_vec_high(DisasContext *s, bool is_q, int rd)
tcg_temp_free_i64(tcg_zero);
}
if (vsz > 16) {
- tcg_gen_gvec_dup8i(ofs + 16, vsz - 16, vsz - 16, 0);
+ tcg_gen_gvec_dup_imm(MO_64, ofs + 16, vsz - 16, vsz - 16, 0);
}
}
@@ -7785,8 +7785,8 @@ static void disas_simd_mod_imm(DisasContext *s, uint32_t insn)
if (!((cmode & 0x9) == 0x1 || (cmode & 0xd) == 0x9)) {
/* MOVI or MVNI, with MVNI negation handled above. */
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), is_q ? 16 : 8,
- vec_full_reg_size(s), imm);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), is_q ? 16 : 8,
+ vec_full_reg_size(s), imm);
} else {
/* ORR or BIC, with BIC negation to AND handled above. */
if (is_neg) {
@@ -10214,8 +10214,8 @@ static void handle_vec_simd_shri(DisasContext *s, bool is_q, bool is_u,
if (is_u) {
if (shift == 8 << size) {
/* Shift count the same size as element size produces zero. */
- tcg_gen_gvec_dup8i(vec_full_reg_offset(s, rd),
- is_q ? 16 : 8, vec_full_reg_size(s), 0);
+ tcg_gen_gvec_dup_imm(size, vec_full_reg_offset(s, rd),
+ is_q ? 16 : 8, vec_full_reg_size(s), 0);
} else {
gen_gvec_fn2i(s, is_q, rd, rn, shift, tcg_gen_gvec_shri, size);
}
diff --git a/target/arm/translate-sve.c b/target/arm/translate-sve.c
index b35bad245e..6c8bda4e4c 100644
--- a/target/arm/translate-sve.c
+++ b/target/arm/translate-sve.c
@@ -177,7 +177,7 @@ static bool do_mov_z(DisasContext *s, int rd, int rn)
static void do_dupi_z(DisasContext *s, int rd, uint64_t word)
{
unsigned vsz = vec_full_reg_size(s);
- tcg_gen_gvec_dup64i(vec_full_reg_offset(s, rd), vsz, vsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, vec_full_reg_offset(s, rd), vsz, vsz, word);
}
/* Invoke a vector expander on two Pregs. */
@@ -1453,7 +1453,7 @@ static bool do_predset(DisasContext *s, int esz, int rd, int pat, bool setflag)
unsigned oprsz = size_for_gvec(setsz / 8);
if (oprsz * 8 == setsz) {
- tcg_gen_gvec_dup64i(ofs, oprsz, maxsz, word);
+ tcg_gen_gvec_dup_imm(MO_64, ofs, oprsz, maxsz, word);
goto done;
}
}
@@ -2044,7 +2044,7 @@ static bool trans_DUP_x(DisasContext *s, arg_DUP_x *a)
unsigned nofs = vec_reg_offset(s, a->rn, index, esz);
tcg_gen_gvec_dup_mem(esz, dofs, nofs, vsz, vsz);
} else {
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, 0);
+ tcg_gen_gvec_dup_imm(esz, dofs, vsz, vsz, 0);
}
}
return true;
@@ -3260,9 +3260,7 @@ static bool trans_FDUP(DisasContext *s, arg_FDUP *a)
/* Decode the VFP immediate. */
imm = vfp_expand_imm(a->esz, a->imm);
- imm = dup_const(a->esz, imm);
-
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, imm);
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, imm);
}
return true;
}
@@ -3276,7 +3274,7 @@ static bool trans_DUP_i(DisasContext *s, arg_DUP_i *a)
unsigned vsz = vec_full_reg_size(s);
int dofs = vec_full_reg_offset(s, a->rd);
- tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(a->esz, a->imm));
+ tcg_gen_gvec_dup_imm(a->esz, dofs, vsz, vsz, a->imm);
}
return true;
}
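
The FDUP/DUP_i simplification above relies on do_dup() replicating the immediate according to vece, so the explicit dup_const() step is no longer needed. A hedged sketch of the equivalence, with fdup_example, dofs, vsz and imm as placeholder names:

    static void fdup_example(uint32_t dofs, uint32_t vsz, uint16_t imm)
    {
        /* Old: tcg_gen_gvec_dup64i(dofs, vsz, vsz, dup_const(MO_16, imm)); */
        tcg_gen_gvec_dup_imm(MO_16, dofs, vsz, vsz, imm);
    }
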
diff --git a/target/arm/translate.c b/target/arm/translate.c
index 025747c0bd..74fac1d09c 100644
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -5209,7 +5209,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
MIN(shift, (8 << size) - 1),
vec_size, vec_size);
} else if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(MO_8, rd_ofs, vec_size,
+ vec_size, 0);
} else {
tcg_gen_gvec_shri(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5260,7 +5261,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
* architecturally valid and results in zero.
*/
if (shift >= 8 << size) {
- tcg_gen_gvec_dup8i(rd_ofs, vec_size, vec_size, 0);
+ tcg_gen_gvec_dup_imm(size, rd_ofs,
+ vec_size, vec_size, 0);
} else {
tcg_gen_gvec_shli(size, rd_ofs, rm_ofs, shift,
vec_size, vec_size);
@@ -5606,7 +5608,8 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
}
tcg_temp_free_i64(t64);
} else {
- tcg_gen_gvec_dup32i(reg_ofs, vec_size, vec_size, imm);
+ tcg_gen_gvec_dup_imm(MO_32, reg_ofs, vec_size,
+ vec_size, imm);
}
}
}
diff --git a/target/ppc/translate/vmx-impl.inc.c b/target/ppc/translate/vmx-impl.inc.c
index 81d5a7a341..403ed3a01c 100644
--- a/target/ppc/translate/vmx-impl.inc.c
+++ b/target/ppc/translate/vmx-impl.inc.c
@@ -1035,21 +1035,25 @@ GEN_VXRFORM_DUAL(vcmpbfp, PPC_ALTIVEC, PPC_NONE, \
GEN_VXRFORM_DUAL(vcmpgtfp, PPC_ALTIVEC, PPC_NONE, \
vcmpgtud, PPC_NONE, PPC2_ALTIVEC_207)
-#define GEN_VXFORM_DUPI(name, tcg_op, opc2, opc3) \
-static void glue(gen_, name)(DisasContext *ctx) \
- { \
- int simm; \
- if (unlikely(!ctx->altivec_enabled)) { \
- gen_exception(ctx, POWERPC_EXCP_VPU); \
- return; \
- } \
- simm = SIMM5(ctx->opcode); \
- tcg_op(avr_full_offset(rD(ctx->opcode)), 16, 16, simm); \
+static void gen_vsplti(DisasContext *ctx, int vece)
+{
+ int simm;
+
+ if (unlikely(!ctx->altivec_enabled)) {
+ gen_exception(ctx, POWERPC_EXCP_VPU);
+ return;
}
-GEN_VXFORM_DUPI(vspltisb, tcg_gen_gvec_dup8i, 6, 12);
-GEN_VXFORM_DUPI(vspltish, tcg_gen_gvec_dup16i, 6, 13);
-GEN_VXFORM_DUPI(vspltisw, tcg_gen_gvec_dup32i, 6, 14);
+ simm = SIMM5(ctx->opcode);
+ tcg_gen_gvec_dup_imm(vece, avr_full_offset(rD(ctx->opcode)), 16, 16, simm);
+}
+
+#define GEN_VXFORM_VSPLTI(name, vece, opc2, opc3) \
+static void glue(gen_, name)(DisasContext *ctx) { gen_vsplti(ctx, vece); }
+
+GEN_VXFORM_VSPLTI(vspltisb, MO_8, 6, 12);
+GEN_VXFORM_VSPLTI(vspltish, MO_16, 6, 13);
+GEN_VXFORM_VSPLTI(vspltisw, MO_32, 6, 14);
#define GEN_VXFORM_NOA(name, opc2, opc3) \
static void glue(gen_, name)(DisasContext *ctx) \
@@ -1559,7 +1563,7 @@ GEN_VXFORM_DUAL(vsldoi, PPC_ALTIVEC, PPC_NONE,
#undef GEN_VXRFORM_DUAL
#undef GEN_VXRFORM1
#undef GEN_VXRFORM
-#undef GEN_VXFORM_DUPI
+#undef GEN_VXFORM_VSPLTI
#undef GEN_VXFORM_NOA
#undef GEN_VXFORM_UIMM
#undef GEN_VAFORM_PAIRED
diff --git a/target/ppc/translate/vsx-impl.inc.c b/target/ppc/translate/vsx-impl.inc.c
index 8287e272f5..b518de46db 100644
--- a/target/ppc/translate/vsx-impl.inc.c
+++ b/target/ppc/translate/vsx-impl.inc.c
@@ -1579,7 +1579,7 @@ static void gen_xxspltib(DisasContext *ctx)
return;
}
}
- tcg_gen_gvec_dup8i(vsr_full_offset(rt), 16, 16, uim8);
+ tcg_gen_gvec_dup_imm(MO_8, vsr_full_offset(rt), 16, 16, uim8);
}
static void gen_xxsldwi(DisasContext *ctx)
diff --git a/target/s390x/translate_vx.inc.c b/target/s390x/translate_vx.inc.c
index 24558cce80..12347f8a03 100644
--- a/target/s390x/translate_vx.inc.c
+++ b/target/s390x/translate_vx.inc.c
@@ -231,8 +231,8 @@ static void get_vec_element_ptr_i64(TCGv_ptr ptr, uint8_t reg, TCGv_i64 enr,
#define gen_gvec_mov(v1, v2) \
tcg_gen_gvec_mov(0, vec_full_reg_offset(v1), vec_full_reg_offset(v2), 16, \
16)
-#define gen_gvec_dup64i(v1, c) \
- tcg_gen_gvec_dup64i(vec_full_reg_offset(v1), 16, 16, c)
+#define gen_gvec_dup_imm(es, v1, c) \
+ tcg_gen_gvec_dup_imm(es, vec_full_reg_offset(v1), 16, 16, c);
#define gen_gvec_fn_2(fn, es, v1, v2) \
tcg_gen_gvec_##fn(es, vec_full_reg_offset(v1), vec_full_reg_offset(v2), \
16, 16)
@@ -316,31 +316,6 @@ static void gen_gvec128_4_i64(gen_gvec128_4_i64_fn fn, uint8_t d, uint8_t a,
tcg_temp_free_i64(cl);
}
-static void gen_gvec_dupi(uint8_t es, uint8_t reg, uint64_t c)
-{
- switch (es) {
- case ES_8:
- tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_16:
- tcg_gen_gvec_dup16i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_32:
- tcg_gen_gvec_dup32i(vec_full_reg_offset(reg), 16, 16, c);
- break;
- case ES_64:
- gen_gvec_dup64i(reg, c);
- break;
- default:
- g_assert_not_reached();
- }
-}
-
-static void zero_vec(uint8_t reg)
-{
- tcg_gen_gvec_dup8i(vec_full_reg_offset(reg), 16, 16, 0);
-}
-
static void gen_addi2_i64(TCGv_i64 dl, TCGv_i64 dh, TCGv_i64 al, TCGv_i64 ah,
uint64_t b)
{
@@ -396,8 +371,8 @@ static DisasJumpType op_vgbm(DisasContext *s, DisasOps *o)
* Masks for both 64 bit elements of the vector are the same.
* Trust tcg to produce a good constant loading.
*/
- gen_gvec_dup64i(get_field(s, v1),
- generate_byte_mask(i2 & 0xff));
+ gen_gvec_dup_imm(ES_64, get_field(s, v1),
+ generate_byte_mask(i2 & 0xff));
} else {
TCGv_i64 t = tcg_temp_new_i64();
@@ -432,7 +407,7 @@ static DisasJumpType op_vgm(DisasContext *s, DisasOps *o)
}
}
- gen_gvec_dupi(es, get_field(s, v1), mask);
+ gen_gvec_dup_imm(es, get_field(s, v1), mask);
return DISAS_NEXT;
}
@@ -585,7 +560,7 @@ static DisasJumpType op_vllez(DisasContext *s, DisasOps *o)
t = tcg_temp_new_i64();
tcg_gen_qemu_ld_i64(t, o->addr1, get_mem_index(s), MO_TE | es);
- zero_vec(get_field(s, v1));
+ gen_gvec_dup_imm(es, get_field(s, v1), 0);
write_vec_element_i64(t, get_field(s, v1), enr, es);
tcg_temp_free_i64(t);
return DISAS_NEXT;
@@ -892,7 +867,7 @@ static DisasJumpType op_vrepi(DisasContext *s, DisasOps *o)
return DISAS_NORETURN;
}
- gen_gvec_dupi(es, get_field(s, v1), data);
+ gen_gvec_dup_imm(es, get_field(s, v1), data);
return DISAS_NEXT;
}
@@ -1372,7 +1347,7 @@ static DisasJumpType op_vcksm(DisasContext *s, DisasOps *o)
read_vec_element_i32(tmp, get_field(s, v2), i, ES_32);
tcg_gen_add2_i32(tmp, sum, sum, sum, tmp, tmp);
}
- zero_vec(get_field(s, v1));
+ gen_gvec_dup_imm(ES_32, get_field(s, v1), 0);
write_vec_element_i32(sum, get_field(s, v1), 1, ES_32);
tcg_temp_free_i32(tmp);
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 327d9588e0..049a55e700 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -326,11 +326,34 @@ void tcg_gen_gvec_5_ptr(uint32_t dofs, uint32_t aofs, uint32_t bofs,
in units of LNSZ. This limits the expansion of inline code. */
static inline bool check_size_impl(uint32_t oprsz, uint32_t lnsz)
{
- if (oprsz % lnsz == 0) {
- uint32_t lnct = oprsz / lnsz;
- return lnct >= 1 && lnct <= MAX_UNROLL;
+ uint32_t q, r;
+
+ if (oprsz < lnsz) {
+ return false;
+ }
+
+ q = oprsz / lnsz;
+ r = oprsz % lnsz;
+ tcg_debug_assert((r & 7) == 0);
+
+ if (lnsz < 16) {
+ /* For sizes below 16, accept no remainder. */
+ if (r != 0) {
+ return false;
+ }
+ } else {
+ /*
+ * Recall that ARM SVE allows vector sizes that are not a
+ * power of 2, but always a multiple of 16. The intent is
+ * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+ * In addition, expand_clr needs to handle a multiple of 8.
+ * Thus we can handle the tail with one more operation per
+ * diminishing power of 2.
+ */
+ q += ctpop32(r);
}
- return false;
+
+ return q <= MAX_UNROLL;
}
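
To see the new accounting concretely, here is a standalone model of the quotient-plus-popcount logic (a sketch only: __builtin_popcount stands in for ctpop32, and MAX_UNROLL is assumed to be QEMU's value of 4). With an SVE-style oprsz of 80 and lnsz of 32 it gives q = 2, r = 16, i.e. 2x32 + 1x16 = 3 operations:

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_UNROLL 4

    static bool model_check_size_impl(uint32_t oprsz, uint32_t lnsz)
    {
        uint32_t q, r;

        if (oprsz < lnsz) {
            return false;
        }
        q = oprsz / lnsz;
        r = oprsz % lnsz;
        assert((r & 7) == 0);

        if (lnsz < 16) {
            /* For sizes below 16, accept no remainder. */
            if (r != 0) {
                return false;
            }
        } else {
            /* One extra operation per power-of-2 chunk left in the tail. */
            q += __builtin_popcount(r);
        }
        return q <= MAX_UNROLL;
    }

    int main(void)
    {
        assert(model_check_size_impl(80, 32));   /* 2 + popcount(16) = 3 ops */
        assert(!model_check_size_impl(80, 8));   /* 10 ops > MAX_UNROLL */
        return 0;
    }
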
static void expand_clr(uint32_t dofs, uint32_t maxsz);
@@ -402,22 +425,31 @@ static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
static TCGType choose_vector_type(const TCGOpcode *list, unsigned vece,
uint32_t size, bool prefer_i64)
{
- if (TCG_TARGET_HAS_v256 && check_size_impl(size, 32)) {
- /*
- * Recall that ARM SVE allows vector sizes that are not a
- * power of 2, but always a multiple of 16. The intent is
- * that e.g. size == 80 would be expanded with 2x32 + 1x16.
- * It is hard to imagine a case in which v256 is supported
- * but v128 is not, but check anyway.
- */
- if (tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece)
- && (size % 32 == 0
- || tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) {
- return TCG_TYPE_V256;
- }
- }
- if (TCG_TARGET_HAS_v128 && check_size_impl(size, 16)
- && tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece)) {
+ /*
+ * Recall that ARM SVE allows vector sizes that are not a
+ * power of 2, but always a multiple of 16. The intent is
+ * that e.g. size == 80 would be expanded with 2x32 + 1x16.
+ * It is hard to imagine a case in which v256 is supported
+ * but v128 is not, but check anyway.
+ * In addition, expand_clr needs to handle a multiple of 8.
+ */
+ if (TCG_TARGET_HAS_v256 &&
+ check_size_impl(size, 32) &&
+ tcg_can_emit_vecop_list(list, TCG_TYPE_V256, vece) &&
+ (!(size & 16) ||
+ (TCG_TARGET_HAS_v128 &&
+ tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece))) &&
+ (!(size & 8) ||
+ (TCG_TARGET_HAS_v64 &&
+ tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
+ return TCG_TYPE_V256;
+ }
+ if (TCG_TARGET_HAS_v128 &&
+ check_size_impl(size, 16) &&
+ tcg_can_emit_vecop_list(list, TCG_TYPE_V128, vece) &&
+ (!(size & 8) ||
+ (TCG_TARGET_HAS_v64 &&
+ tcg_can_emit_vecop_list(list, TCG_TYPE_V64, vece)))) {
return TCG_TYPE_V128;
}
if (TCG_TARGET_HAS_v64 && !prefer_i64 && check_size_impl(size, 8)
@@ -432,6 +464,18 @@ static void do_dup_store(TCGType type, uint32_t dofs, uint32_t oprsz,
{
uint32_t i = 0;
+ tcg_debug_assert(oprsz >= 8);
+
+ /*
+ * This may be expand_clr for the tail of an operation, e.g.
+ * oprsz == 8 && maxsz == 64. The first 8 bytes of this store
+ * are misaligned wrt the maximum vector size, so do that first.
+ */
+ if (dofs & 8) {
+ tcg_gen_stl_vec(t_vec, cpu_env, dofs + i, TCG_TYPE_V64);
+ i += 8;
+ }
+
switch (type) {
case TCG_TYPE_V256:
/*
@@ -619,17 +663,22 @@ static void expand_clr(uint32_t dofs, uint32_t maxsz)
/* Expand OPSZ bytes worth of two-operand operations using i32 elements. */
static void expand_2_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- void (*fni)(TCGv_i32, TCGv_i32))
+ bool load_dest, void (*fni)(TCGv_i32, TCGv_i32))
{
TCGv_i32 t0 = tcg_temp_new_i32();
+ TCGv_i32 t1 = tcg_temp_new_i32();
uint32_t i;
for (i = 0; i < oprsz; i += 4) {
tcg_gen_ld_i32(t0, cpu_env, aofs + i);
- fni(t0, t0);
- tcg_gen_st_i32(t0, cpu_env, dofs + i);
+ if (load_dest) {
+ tcg_gen_ld_i32(t1, cpu_env, dofs + i);
+ }
+ fni(t1, t0);
+ tcg_gen_st_i32(t1, cpu_env, dofs + i);
}
tcg_temp_free_i32(t0);
+ tcg_temp_free_i32(t1);
}
static void expand_2i_i32(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -749,17 +798,22 @@ static void expand_4_i32(uint32_t dofs, uint32_t aofs, uint32_t bofs,
/* Expand OPSZ bytes worth of two-operand operations using i64 elements. */
static void expand_2_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
- void (*fni)(TCGv_i64, TCGv_i64))
+ bool load_dest, void (*fni)(TCGv_i64, TCGv_i64))
{
TCGv_i64 t0 = tcg_temp_new_i64();
+ TCGv_i64 t1 = tcg_temp_new_i64();
uint32_t i;
for (i = 0; i < oprsz; i += 8) {
tcg_gen_ld_i64(t0, cpu_env, aofs + i);
- fni(t0, t0);
- tcg_gen_st_i64(t0, cpu_env, dofs + i);
+ if (load_dest) {
+ tcg_gen_ld_i64(t1, cpu_env, dofs + i);
+ }
+ fni(t1, t0);
+ tcg_gen_st_i64(t1, cpu_env, dofs + i);
}
tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
}
static void expand_2i_i64(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
@@ -880,17 +934,23 @@ static void expand_4_i64(uint32_t dofs, uint32_t aofs, uint32_t bofs,
/* Expand OPSZ bytes worth of two-operand operations using host vectors. */
static void expand_2_vec(unsigned vece, uint32_t dofs, uint32_t aofs,
uint32_t oprsz, uint32_t tysz, TCGType type,
+ bool load_dest,
void (*fni)(unsigned, TCGv_vec, TCGv_vec))
{
TCGv_vec t0 = tcg_temp_new_vec(type);
+ TCGv_vec t1 = tcg_temp_new_vec(type);
uint32_t i;
for (i = 0; i < oprsz; i += tysz) {
tcg_gen_ld_vec(t0, cpu_env, aofs + i);
- fni(vece, t0, t0);
- tcg_gen_st_vec(t0, cpu_env, dofs + i);
+ if (load_dest) {
+ tcg_gen_ld_vec(t1, cpu_env, dofs + i);
+ }
+ fni(vece, t1, t0);
+ tcg_gen_st_vec(t1, cpu_env, dofs + i);
}
tcg_temp_free_vec(t0);
+ tcg_temp_free_vec(t1);
}
/* Expand OPSZ bytes worth of two-vector operands and an immediate operand
@@ -1044,7 +1104,8 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
* that e.g. size == 80 would be expanded with 2x32 + 1x16.
*/
some = QEMU_ALIGN_DOWN(oprsz, 32);
- expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256, g->fniv);
+ expand_2_vec(g->vece, dofs, aofs, some, 32, TCG_TYPE_V256,
+ g->load_dest, g->fniv);
if (some == oprsz) {
break;
}
@@ -1054,17 +1115,19 @@ void tcg_gen_gvec_2(uint32_t dofs, uint32_t aofs,
maxsz -= some;
/* fallthru */
case TCG_TYPE_V128:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128, g->fniv);
+ expand_2_vec(g->vece, dofs, aofs, oprsz, 16, TCG_TYPE_V128,
+ g->load_dest, g->fniv);
break;
case TCG_TYPE_V64:
- expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64, g->fniv);
+ expand_2_vec(g->vece, dofs, aofs, oprsz, 8, TCG_TYPE_V64,
+ g->load_dest, g->fniv);
break;
case 0:
if (g->fni8 && check_size_impl(oprsz, 8)) {
- expand_2_i64(dofs, aofs, oprsz, g->fni8);
+ expand_2_i64(dofs, aofs, oprsz, g->load_dest, g->fni8);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
- expand_2_i32(dofs, aofs, oprsz, g->fni4);
+ expand_2_i32(dofs, aofs, oprsz, g->load_dest, g->fni4);
} else {
assert(g->fno != NULL);
tcg_gen_gvec_2_ool(dofs, aofs, oprsz, maxsz, g->data, g->fno);
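
A hedged sketch of what the load_dest plumbing above enables on the front-end side: a hypothetical byte-wise accumulate (dest += src) expressed as a GVecGen2, where the element function receives the old destination value because load_dest is set. gen_acc8_i64 and acc8_op are illustrative names only; a real operation would normally also supply .fniv/.fno fallbacks:

    static void gen_acc8_i64(TCGv_i64 d, TCGv_i64 a)
    {
        /* d already holds the previous destination (load_dest = true). */
        tcg_gen_vec_add8_i64(d, d, a);
    }

    static const GVecGen2 acc8_op = {
        .fni8 = gen_acc8_i64,
        .vece = MO_8,
        .load_dest = true,
    };

    /* Typical use: tcg_gen_gvec_2(dofs, aofs, oprsz, maxsz, &acc8_op); */
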
@@ -1541,32 +1604,11 @@ void tcg_gen_gvec_dup_mem(unsigned vece, uint32_t dofs, uint32_t aofs,
}
}
-void tcg_gen_gvec_dup64i(uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, uint64_t x)
-{
- check_size_align(oprsz, maxsz, dofs);
- do_dup(MO_64, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup32i(uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, uint32_t x)
-{
- check_size_align(oprsz, maxsz, dofs);
- do_dup(MO_32, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup16i(uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, uint16_t x)
-{
- check_size_align(oprsz, maxsz, dofs);
- do_dup(MO_16, dofs, oprsz, maxsz, NULL, NULL, x);
-}
-
-void tcg_gen_gvec_dup8i(uint32_t dofs, uint32_t oprsz,
- uint32_t maxsz, uint8_t x)
+void tcg_gen_gvec_dup_imm(unsigned vece, uint32_t dofs, uint32_t oprsz,
+ uint32_t maxsz, uint64_t x)
{
check_size_align(oprsz, maxsz, dofs);
- do_dup(MO_8, dofs, oprsz, maxsz, NULL, NULL, x);
+ do_dup(vece, dofs, oprsz, maxsz, NULL, NULL, x);
}
void tcg_gen_gvec_not(unsigned vece, uint32_t dofs, uint32_t aofs,
@@ -2319,7 +2361,7 @@ void tcg_gen_gvec_xor(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
- tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2336,7 +2378,7 @@ void tcg_gen_gvec_andc(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
- tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, 0);
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, 0);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2353,7 +2395,7 @@ void tcg_gen_gvec_orc(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
- tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
@@ -2404,7 +2446,7 @@ void tcg_gen_gvec_eqv(unsigned vece, uint32_t dofs, uint32_t aofs,
};
if (aofs == bofs) {
- tcg_gen_gvec_dup8i(dofs, oprsz, maxsz, -1);
+ tcg_gen_gvec_dup_imm(MO_64, dofs, oprsz, maxsz, -1);
} else {
tcg_gen_gvec_3(dofs, aofs, bofs, oprsz, maxsz, &g);
}
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index e2e25ebf7d..e60b74fb82 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -540,9 +540,9 @@ void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
-void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
@@ -580,9 +580,9 @@ void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2)
}
}
-void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, unsigned arg2)
+void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2)
{
- tcg_debug_assert(arg2 < 32);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 32);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i32(ret, arg1);
@@ -1962,9 +1962,9 @@ void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
-void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
@@ -2001,9 +2001,9 @@ void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
}
}
-void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned arg2)
+void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2)
{
- tcg_debug_assert(arg2 < 64);
+ tcg_debug_assert(arg2 >= 0 && arg2 < 64);
/* some cases can be optimized here */
if (arg2 == 0) {
tcg_gen_mov_i64(ret, arg1);
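
As a standalone illustration of the identity the rotate-immediate helpers rely on (a right rotate by n equals a left rotate by width - n), independent of TCG; the assert mirrors the new signed-range tcg_debug_assert:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t rotl32(uint32_t x, int32_t n)
    {
        assert(n >= 0 && n < 32);
        return n ? (x << n) | (x >> (32 - n)) : x;
    }

    static uint32_t rotr32(uint32_t x, int32_t n)
    {
        assert(n >= 0 && n < 32);
        return n ? (x >> n) | (x << (32 - n)) : x;
    }

    int main(void)
    {
        assert(rotr32(0x80000001u, 1) == rotl32(0x80000001u, 31));
        assert(rotr32(0x12345678u, 0) == 0x12345678u);
        return 0;
    }
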