path: root/target/ppc
author    Matheus Ferst <matheus.ferst@eldorado.org.br>    2021-12-17 17:57:13 +0100
committer Cédric Le Goater <clg@kaod.org>                  2021-12-17 17:57:13 +0100
commit    9193eaa901c54dbff4a91ea0b12a99e0135dbca1 (patch)
tree      b4482db90ab74476c9d83ec2177f5853f26baafe /target/ppc
parent    17868d81e0074905b2c1e414af6618570e8059eb (diff)
target/ppc: Implement Vector Mask Move insns
Implement the following PowerISA v3.1 instructions:
mtvsrbm: Move to VSR Byte Mask
mtvsrhm: Move to VSR Halfword Mask
mtvsrwm: Move to VSR Word Mask
mtvsrdm: Move to VSR Doubleword Mask
mtvsrqm: Move to VSR Quadword Mask
mtvsrbmi: Move to VSR Byte Mask Immediate

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Matheus Ferst <matheus.ferst@eldorado.org.br>
Message-Id: <20211203194229.746275-4-matheus.ferst@eldorado.org.br>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
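Side note (not part of the patch): the bit-spreading trick used by do_mtvsrm() and trans_MTVSRBMI() in the diff below can be checked with a small host-side C sketch. The helper name mtvsrbm_ref and its standalone form are illustrative assumptions; it only mirrors the byte-mask case (vece = MO_8), where each of the 16 low-order source bits selects 0xFF or 0x00 for the corresponding byte of the 128-bit result.

    #include <stdint.h>

    /* Illustrative reference for the byte-mask expansion; not part of the patch. */
    static void mtvsrbm_ref(uint64_t src, uint64_t *hi_dw, uint64_t *lo_dw)
    {
        uint64_t hi = (src >> 8) & 0xff;  /* mask bits for the high doubleword */
        uint64_t lo = src & 0xff;         /* mask bits for the low doubleword  */

        /* Spread the eight mask bits so that bit i lands at bit 8 * i. */
        for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
            hi |= hi << (j - i);
            lo |= lo << (j - i);
        }

        /* Keep one bit per byte, then widen it to fill the whole byte. */
        hi = (hi & 0x0101010101010101ULL) * 0xFF;
        lo = (lo & 0x0101010101010101ULL) * 0xFF;

        *hi_dw = hi;
        *lo_dw = lo;
    }

This is the same sequence of shifts, AND with dup_const(MO_8, 1), and multiply by 0xFF that do_mtvsrm() emits as TCG ops at runtime; trans_MTVSRBMI performs the equivalent computation at translate time on the immediate.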
Diffstat (limited to 'target/ppc')
-rw-r--r--    target/ppc/insn32.decode               11
-rw-r--r--    target/ppc/translate/vmx-impl.c.inc    115
2 files changed, 126 insertions, 0 deletions
diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode
index 639ac22bf0..f68931f4f3 100644
--- a/target/ppc/insn32.decode
+++ b/target/ppc/insn32.decode
@@ -40,6 +40,10 @@
%ds_rtp 22:4 !function=times_2
@DS_rtp ...... ....0 ra:5 .............. .. &D rt=%ds_rtp si=%ds_si
+&DX_b vrt b
+%dx_b 6:10 16:5 0:1
+@DX_b ...... vrt:5 ..... .......... ..... . &DX_b b=%dx_b
+
&DX rt d
%dx_d 6:s10 16:5 0:1
@DX ...... rt:5 ..... .......... ..... . &DX d=%dx_d
@@ -413,6 +417,13 @@ VSRDBI 000100 ..... ..... ..... 01 ... 010110 @VN
## Vector Mask Manipulation Instructions
+MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb
+MTVSRHM 000100 ..... 10001 ..... 11001000010 @VX_tb
+MTVSRWM 000100 ..... 10010 ..... 11001000010 @VX_tb
+MTVSRDM 000100 ..... 10011 ..... 11001000010 @VX_tb
+MTVSRQM 000100 ..... 10100 ..... 11001000010 @VX_tb
+MTVSRBMI 000100 ..... ..... .......... 01010 . @DX_b
+
VEXPANDBM 000100 ..... 00000 ..... 11001000010 @VX_tb
VEXPANDHM 000100 ..... 00001 ..... 11001000010 @VX_tb
VEXPANDWM 000100 ..... 00010 ..... 11001000010 @VX_tb
diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc
index 96c97bf6e7..d5e02fd7f2 100644
--- a/target/ppc/translate/vmx-impl.c.inc
+++ b/target/ppc/translate/vmx-impl.c.inc
@@ -1607,6 +1607,121 @@ static bool trans_VEXTRACTQM(DisasContext *ctx, arg_VX_tb *a)
return true;
}
+static bool do_mtvsrm(DisasContext *ctx, arg_VX_tb *a, unsigned vece)
+{
+ const uint64_t elem_width = 8 << vece, elem_count_half = 8 >> vece;
+ uint64_t c;
+ int i, j;
+ TCGv_i64 hi, lo, t0, t1;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ hi = tcg_temp_new_i64();
+ lo = tcg_temp_new_i64();
+ t0 = tcg_temp_new_i64();
+ t1 = tcg_temp_new_i64();
+
+ tcg_gen_extu_tl_i64(t0, cpu_gpr[a->vrb]);
+ tcg_gen_extract_i64(hi, t0, elem_count_half, elem_count_half);
+ tcg_gen_extract_i64(lo, t0, 0, elem_count_half);
+
+ /*
+ * Spread the bits into their respective elements.
+ * E.g. for bytes:
+ * 00000000000000000000000000000000000000000000000000000000abcdefgh
+ * << 32 - 4
+ * 0000000000000000000000000000abcdefgh0000000000000000000000000000
+ * |
+ * 0000000000000000000000000000abcdefgh00000000000000000000abcdefgh
+ * << 16 - 2
+ * 00000000000000abcdefgh00000000000000000000abcdefgh00000000000000
+ * |
+ * 00000000000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh
+ * << 8 - 1
+ * 0000000abcdefgh000000abcdefgh000000abcdefgh000000abcdefgh0000000
+ * |
+ * 0000000abcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgXbcdefgh
+ * & dup(1)
+ * 0000000a0000000b0000000c0000000d0000000e0000000f0000000g0000000h
+ * * 0xff
+ * aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh
+ */
+ for (i = elem_count_half / 2, j = 32; i > 0; i >>= 1, j >>= 1) {
+ tcg_gen_shli_i64(t0, hi, j - i);
+ tcg_gen_shli_i64(t1, lo, j - i);
+ tcg_gen_or_i64(hi, hi, t0);
+ tcg_gen_or_i64(lo, lo, t1);
+ }
+
+ c = dup_const(vece, 1);
+ tcg_gen_andi_i64(hi, hi, c);
+ tcg_gen_andi_i64(lo, lo, c);
+
+ c = MAKE_64BIT_MASK(0, elem_width);
+ tcg_gen_muli_i64(hi, hi, c);
+ tcg_gen_muli_i64(lo, lo, c);
+
+ set_avr64(a->vrt, lo, false);
+ set_avr64(a->vrt, hi, true);
+
+ tcg_temp_free_i64(hi);
+ tcg_temp_free_i64(lo);
+ tcg_temp_free_i64(t0);
+ tcg_temp_free_i64(t1);
+
+ return true;
+}
+
+TRANS(MTVSRBM, do_mtvsrm, MO_8)
+TRANS(MTVSRHM, do_mtvsrm, MO_16)
+TRANS(MTVSRWM, do_mtvsrm, MO_32)
+TRANS(MTVSRDM, do_mtvsrm, MO_64)
+
+static bool trans_MTVSRQM(DisasContext *ctx, arg_VX_tb *a)
+{
+ TCGv_i64 tmp;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ tmp = tcg_temp_new_i64();
+
+ tcg_gen_ext_tl_i64(tmp, cpu_gpr[a->vrb]);
+ tcg_gen_sextract_i64(tmp, tmp, 0, 1);
+ set_avr64(a->vrt, tmp, false);
+ set_avr64(a->vrt, tmp, true);
+
+ tcg_temp_free_i64(tmp);
+
+ return true;
+}
+
+static bool trans_MTVSRBMI(DisasContext *ctx, arg_DX_b *a)
+{
+ const uint64_t mask = dup_const(MO_8, 1);
+ uint64_t hi, lo;
+
+ REQUIRE_INSNS_FLAGS2(ctx, ISA310);
+ REQUIRE_VECTOR(ctx);
+
+ hi = extract16(a->b, 8, 8);
+ lo = extract16(a->b, 0, 8);
+
+ for (int i = 4, j = 32; i > 0; i >>= 1, j >>= 1) {
+ hi |= hi << (j - i);
+ lo |= lo << (j - i);
+ }
+
+ hi = (hi & mask) * 0xFF;
+ lo = (lo & mask) * 0xFF;
+
+ set_avr64(a->vrt, tcg_constant_i64(hi), true);
+ set_avr64(a->vrt, tcg_constant_i64(lo), false);
+
+ return true;
+}
+
#define GEN_VAFORM_PAIRED(name0, name1, opc2) \
static void glue(gen_, name0##_##name1)(DisasContext *ctx) \
{ \