about summary refs log tree commit diff
path: root/tcg
diff options
context:
space:
mode:
author    Peter Maydell <peter.maydell@linaro.org>  2021-06-17 13:15:53 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2021-06-21 17:12:50 +0100
commit  614dd4f3ba2a025eae5235c3466ef6da191879f6 (patch)
tree    ebace9429994ecd47c5e5c5dd8a9897ba2b75d89 /tcg
parent  399a8c766c0526b51cd180e1b1c776d6dc95bad8 (diff)
tcg: Make gen_dup_i32/i64() public as tcg_gen_dup_i32/i64
The Arm MVE VDUP implementation would like to be able to emit code to
duplicate a byte or halfword value into an i32. We have code to do this
already in tcg-op-gvec.c, so all we need to do is make the functions
global. For consistency with other functions made available to the
frontends:
 * we rename to tcg_gen_dup_*
 * we expose both the _i32 and _i64 forms
 * we provide the #define for a _tl form

Suggested-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20210617121628.20116-10-peter.maydell@linaro.org
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/tcg-op-gvec.c | 20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c
index 498a959839..515db120cc 100644
--- a/tcg/tcg-op-gvec.c
+++ b/tcg/tcg-op-gvec.c
@@ -386,7 +386,7 @@ uint64_t (dup_const)(unsigned vece, uint64_t c)
}
/* Duplicate IN into OUT as per VECE. */
-static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
+void tcg_gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
{
switch (vece) {
case MO_8:
@@ -404,7 +404,7 @@ static void gen_dup_i32(unsigned vece, TCGv_i32 out, TCGv_i32 in)
}
}
-static void gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
+void tcg_gen_dup_i64(unsigned vece, TCGv_i64 out, TCGv_i64 in)
{
switch (vece) {
case MO_8:
@@ -578,15 +578,15 @@ static void do_dup(unsigned vece, uint32_t dofs, uint32_t oprsz,
&& (vece != MO_32 || !check_size_impl(oprsz, 4))) {
t_64 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(t_64, in_32);
- gen_dup_i64(vece, t_64, t_64);
+ tcg_gen_dup_i64(vece, t_64, t_64);
} else {
t_32 = tcg_temp_new_i32();
- gen_dup_i32(vece, t_32, in_32);
+ tcg_gen_dup_i32(vece, t_32, in_32);
}
} else if (in_64) {
/* We are given a 64-bit variable input. */
t_64 = tcg_temp_new_i64();
- gen_dup_i64(vece, t_64, in_64);
+ tcg_gen_dup_i64(vece, t_64, in_64);
} else {
/* We are given a constant input. */
/* For 64-bit hosts, use 64-bit constants for "simple" constants
@@ -1311,14 +1311,14 @@ void tcg_gen_gvec_2s(uint32_t dofs, uint32_t aofs, uint32_t oprsz,
} else if (g->fni8 && check_size_impl(oprsz, 8)) {
TCGv_i64 t64 = tcg_temp_new_i64();
- gen_dup_i64(g->vece, t64, c);
+ tcg_gen_dup_i64(g->vece, t64, c);
expand_2s_i64(dofs, aofs, oprsz, t64, g->scalar_first, g->fni8);
tcg_temp_free_i64(t64);
} else if (g->fni4 && check_size_impl(oprsz, 4)) {
TCGv_i32 t32 = tcg_temp_new_i32();
tcg_gen_extrl_i64_i32(t32, c);
- gen_dup_i32(g->vece, t32, t32);
+ tcg_gen_dup_i32(g->vece, t32, t32);
expand_2s_i32(dofs, aofs, oprsz, t32, g->scalar_first, g->fni4);
tcg_temp_free_i32(t32);
} else {
@@ -2538,7 +2538,7 @@ void tcg_gen_gvec_ands(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ands);
tcg_temp_free_i64(tmp);
}
@@ -2562,7 +2562,7 @@ void tcg_gen_gvec_xors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_xors);
tcg_temp_free_i64(tmp);
}
@@ -2586,7 +2586,7 @@ void tcg_gen_gvec_ors(unsigned vece, uint32_t dofs, uint32_t aofs,
TCGv_i64 c, uint32_t oprsz, uint32_t maxsz)
{
TCGv_i64 tmp = tcg_temp_new_i64();
- gen_dup_i64(vece, tmp, c);
+ tcg_gen_dup_i64(vece, tmp, c);
tcg_gen_gvec_2s(dofs, aofs, oprsz, maxsz, tmp, &gop_ors);
tcg_temp_free_i64(tmp);
}