path: root/tcg
author     Richard Henderson <richard.henderson@linaro.org>  2023-04-17 10:16:28 +0200
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-30 09:51:11 -0700
commit     098d0fc10d26e5a7d73cce93d145136c595399d6 (patch)
tree       a8ba48420eb62030a6d1f15c41d673a4066629c3 /tcg
parent     480dfba2c9fe4e67f0d1f01a20c9a0fd09587ece (diff)
tcg/i386: Support 128-bit load/store
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/i386/tcg-target.c.inc  191
-rw-r--r--  tcg/i386/tcg-target.h        4
2 files changed, 190 insertions, 5 deletions
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index bfe9d98b7e..ae54e5fbf3 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -91,6 +91,8 @@ static const int tcg_target_reg_alloc_order[] = {
#endif
};
+#define TCG_TMP_VEC TCG_REG_XMM5
+
static const int tcg_target_call_iarg_regs[] = {
#if TCG_TARGET_REG_BITS == 64
#if defined(_WIN64)
@@ -319,6 +321,8 @@ static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
#define OPC_PCMPGTW (0x65 | P_EXT | P_DATA16)
#define OPC_PCMPGTD (0x66 | P_EXT | P_DATA16)
#define OPC_PCMPGTQ (0x37 | P_EXT38 | P_DATA16)
+#define OPC_PEXTRD (0x16 | P_EXT3A | P_DATA16)
+#define OPC_PINSRD (0x22 | P_EXT3A | P_DATA16)
#define OPC_PMAXSB (0x3c | P_EXT38 | P_DATA16)
#define OPC_PMAXSW (0xee | P_EXT | P_DATA16)
#define OPC_PMAXSD (0x3d | P_EXT38 | P_DATA16)
@@ -1753,7 +1757,21 @@ typedef struct {
bool tcg_target_has_memory_bswap(MemOp memop)
{
- return have_movbe;
+ TCGAtomAlign aa;
+
+ if (!have_movbe) {
+ return false;
+ }
+ if ((memop & MO_SIZE) < MO_128) {
+ return true;
+ }
+
+ /*
+ * Reject 16-byte memop with 16-byte atomicity, i.e. VMOVDQA,
+ * but do allow a pair of 64-bit operations, i.e. MOVBEQ.
+ */
+ aa = atom_and_align_for_opc(tcg_ctx, memop, MO_ATOM_IFALIGN, true);
+ return aa.atom < MO_128;
}
/*
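As an aside on the hunk above: a byte-swapped 16-byte access that does not require 16-byte atomicity can be split into two byte-swapped 8-byte loads, which is exactly the MOVBEQ pair the comment allows, while the single-instruction 16-byte access (VMOVDQA) has no byte-swapping form. A minimal host-side C sketch of that split, assuming GCC/Clang builtins on an x86-64 host (editorial illustration, not code from the patch):

#include <stdint.h>
#include <string.h>

/* Split a big-endian 16-byte guest load into two byte-swapped 8-byte
 * loads (the MOVBEQ-pair shape).  Each half is individually atomic
 * when naturally aligned, but the 16-byte value as a whole is not. */
static inline void ld16_be_pair(const void *p, uint64_t *lo, uint64_t *hi)
{
    uint64_t a, b;

    memcpy(&a, p, 8);
    memcpy(&b, (const char *)p + 8, 8);
    *hi = __builtin_bswap64(a);   /* big-endian: high half comes first */
    *lo = __builtin_bswap64(b);
}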
@@ -1781,6 +1799,30 @@ static const TCGLdstHelperParam ldst_helper_param = {
static const TCGLdstHelperParam ldst_helper_param = { };
#endif
+static void tcg_out_vec_to_pair(TCGContext *s, TCGType type,
+ TCGReg l, TCGReg h, TCGReg v)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ /* vpmov{d,q} %v, %l */
+ tcg_out_vex_modrm(s, OPC_MOVD_EyVy + rexw, v, 0, l);
+ /* vpextr{d,q} $1, %v, %h */
+ tcg_out_vex_modrm(s, OPC_PEXTRD + rexw, v, 0, h);
+ tcg_out8(s, 1);
+}
+
+static void tcg_out_pair_to_vec(TCGContext *s, TCGType type,
+ TCGReg v, TCGReg l, TCGReg h)
+{
+ int rexw = type == TCG_TYPE_I32 ? 0 : P_REXW;
+
+ /* vmov{d,q} %l, %v */
+ tcg_out_vex_modrm(s, OPC_MOVD_VyEy + rexw, v, 0, l);
+ /* vpinsr{d,q} $1, %h, %v, %v */
+ tcg_out_vex_modrm(s, OPC_PINSRD + rexw, v, v, h);
+ tcg_out8(s, 1);
+}
+
/*
* Generate code for the slow path for a load at the end of block
*/
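The two helpers added above move a 128-bit value between an xmm register and a pair of integer registers using vmov{d,q} plus vpextr{d,q} and vpinsr{d,q}. A minimal equivalent of the 64-bit case in host C with SSE2/SSE4.1 intrinsics (editorial sketch; the vec_to_pair/pair_to_vec names here are illustrative, not QEMU API):

#include <stdint.h>
#include <smmintrin.h>   /* SSE4.1: _mm_extract_epi64, _mm_insert_epi64 */

/* xmm -> GPR pair: movq %xmm,%lo ; pextrq $1,%xmm,%hi */
static inline void vec_to_pair(__m128i v, uint64_t *lo, uint64_t *hi)
{
    *lo = (uint64_t)_mm_cvtsi128_si64(v);
    *hi = (uint64_t)_mm_extract_epi64(v, 1);
}

/* GPR pair -> xmm: movq %lo,%xmm ; pinsrq $1,%hi,%xmm */
static inline __m128i pair_to_vec(uint64_t lo, uint64_t hi)
{
    __m128i v = _mm_cvtsi64_si128((int64_t)lo);
    return _mm_insert_epi64(v, (int64_t)hi, 1);
}

Compile with -msse4.1 (or -mavx for the VEX-encoded forms the backend emits).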
@@ -1870,6 +1912,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
{
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
+ MemOp s_bits = opc & MO_SIZE;
unsigned a_mask;
#ifdef CONFIG_SOFTMMU
@@ -1880,7 +1923,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
*h = x86_guest_base;
#endif
h->base = addrlo;
- h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+ h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
#ifdef CONFIG_SOFTMMU
@@ -1890,7 +1933,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TCGType tlbtype = TCG_TYPE_I32;
int trexw = 0, hrexw = 0, tlbrexw = 0;
unsigned mem_index = get_mmuidx(oi);
- unsigned s_bits = opc & MO_SIZE;
unsigned s_mask = (1 << s_bits) - 1;
int tlb_mask;
@@ -2070,6 +2112,72 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
h.base, h.index, 0, h.ofs + 4);
}
break;
+
+ case MO_128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+
+ /*
+ * Without 16-byte atomicity, use integer regs.
+ * That is where we want the data, and it allows bswaps.
+ */
+ if (h.aa.atom < MO_128) {
+ if (use_movbe) {
+ TCGReg t = datalo;
+ datalo = datahi;
+ datahi = t;
+ }
+ if (h.base == datalo || h.index == datalo) {
+ tcg_out_modrm_sib_offset(s, OPC_LEA + P_REXW, datahi,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_modrm_offset(s, movop + P_REXW + h.seg,
+ datalo, datahi, 0);
+ tcg_out_modrm_offset(s, movop + P_REXW + h.seg,
+ datahi, datahi, 8);
+ } else {
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datahi,
+ h.base, h.index, 0, h.ofs + 8);
+ }
+ break;
+ }
+
+ /*
+ * With 16-byte atomicity, a vector load is required.
+ * If we already have 16-byte alignment, then VMOVDQA always works.
+ * Else if VMOVDQU has atomicity with dynamic alignment, use that.
+ * Else we require a runtime test for alignment for VMOVDQA;
+ * use VMOVDQU on the unaligned nonatomic path for simplicity.
+ */
+ if (h.aa.align >= MO_128) {
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ } else if (cpuinfo & CPUINFO_ATOMIC_VMOVDQU) {
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_VxWx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ } else {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ tcg_out_testi(s, h.base, 15);
+ tcg_out_jxx(s, JCC_JNE, l1, true);
+
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_VxWx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_jxx(s, JCC_JMP, l2, true);
+
+ tcg_out_label(s, l1);
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_VxWx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_label(s, l2);
+ }
+ tcg_out_vec_to_pair(s, TCG_TYPE_I64, datalo, datahi, TCG_TMP_VEC);
+ break;
+
default:
g_assert_not_reached();
}
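The MO_128 load case above chooses between three ways to get the value into TCG_TMP_VEC: VMOVDQA when 16-byte alignment is statically known, VMOVDQU when the CPU advertises CPUINFO_ATOMIC_VMOVDQU, and otherwise a runtime alignment test. A host-side C sketch of that selection with SSE2 intrinsics (editorial illustration, not the generated code; vmovdqu_is_atomic stands in for the cpuinfo test):

#include <stdbool.h>
#include <stdint.h>
#include <emmintrin.h>

static inline __m128i ld16_atomic(const void *p, bool vmovdqu_is_atomic)
{
    if (((uintptr_t)p & 15) == 0) {
        /* vmovdqa: aligned 16-byte access, atomic on CPUs with
         * CPUINFO_ATOMIC_VMOVDQA, which gates this feature */
        return _mm_load_si128((const __m128i *)p);
    }
    if (vmovdqu_is_atomic) {
        /* vmovdqu: atomic regardless of alignment on this CPU */
        return _mm_loadu_si128((const __m128i *)p);
    }
    /* unaligned with no guarantee: the nonatomic fallback path */
    return _mm_loadu_si128((const __m128i *)p);
}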
@@ -2140,6 +2248,63 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg datalo, TCGReg datahi,
h.base, h.index, 0, h.ofs + 4);
}
break;
+
+ case MO_128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+
+ /*
+ * Without 16-byte atomicity, use integer regs.
+ * That is where we have the data, and it allows bswaps.
+ */
+ if (h.aa.atom < MO_128) {
+ if (use_movbe) {
+ TCGReg t = datalo;
+ datalo = datahi;
+ datahi = t;
+ }
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datalo,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_modrm_sib_offset(s, movop + P_REXW + h.seg, datahi,
+ h.base, h.index, 0, h.ofs + 8);
+ break;
+ }
+
+ /*
+ * With 16-byte atomicity, a vector store is required.
+ * If we already have 16-byte alignment, then VMOVDQA always works.
+ * Else if VMOVDQU has atomicity with dynamic alignment, use that.
+ * Else we require a runtime test for alignment for VMOVDQA;
+ * use VMOVDQU on the unaligned nonatomic path for simplicity.
+ */
+ tcg_out_pair_to_vec(s, TCG_TYPE_I64, TCG_TMP_VEC, datalo, datahi);
+ if (h.aa.align >= MO_128) {
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ } else if (cpuinfo & CPUINFO_ATOMIC_VMOVDQU) {
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_WxVx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ } else {
+ TCGLabel *l1 = gen_new_label();
+ TCGLabel *l2 = gen_new_label();
+
+ tcg_out_testi(s, h.base, 15);
+ tcg_out_jxx(s, JCC_JNE, l1, true);
+
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQA_WxVx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_jxx(s, JCC_JMP, l2, true);
+
+ tcg_out_label(s, l1);
+ tcg_out_vex_modrm_sib_offset(s, OPC_MOVDQU_WxVx + h.seg,
+ TCG_TMP_VEC, 0,
+ h.base, h.index, 0, h.ofs);
+ tcg_out_label(s, l2);
+ }
+ break;
+
default:
g_assert_not_reached();
}
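The MO_128 store case is the mirror image: the integer pair is packed into TCG_TMP_VEC with tcg_out_pair_to_vec() before the store instruction is chosen, and the same VMOVDQA/VMOVDQU selection applies. A compact host-side sketch under the same assumptions as the load example above:

#include <stdint.h>
#include <emmintrin.h>

static inline void st16_atomic(void *p, uint64_t lo, uint64_t hi)
{
    /* pack the pair first, as tcg_out_pair_to_vec() does */
    __m128i v = _mm_set_epi64x((int64_t)hi, (int64_t)lo);

    if (((uintptr_t)p & 15) == 0) {
        _mm_store_si128((__m128i *)p, v);    /* vmovdqa: aligned, atomic */
    } else {
        _mm_storeu_si128((__m128i *)p, v);   /* vmovdqu: atomic only with
                                                CPUINFO_ATOMIC_VMOVDQU */
    }
}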
@@ -2470,6 +2635,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_qemu_ld(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
}
break;
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ tcg_out_qemu_ld(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
+ break;
case INDEX_op_qemu_st_a64_i32:
case INDEX_op_qemu_st8_a64_i32:
@@ -2496,6 +2666,11 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
tcg_out_qemu_st(s, a0, a1, a2, args[3], args[4], TCG_TYPE_I64);
}
break;
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ tcg_out_qemu_st(s, a0, a1, a2, -1, args[3], TCG_TYPE_I128);
+ break;
OP_32_64(mulu2):
tcg_out_modrm(s, OPC_GRP3_Ev + rexw, EXT3_MUL, args[3]);
@@ -3193,6 +3368,15 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_qemu_st_a64_i64:
return TCG_TARGET_REG_BITS == 64 ? C_O0_I2(L, L) : C_O0_I4(L, L, L, L);
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ return C_O2_I1(r, r, L);
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ tcg_debug_assert(TCG_TARGET_REG_BITS == 64);
+ return C_O0_I3(L, L, L);
+
case INDEX_op_brcond2_i32:
return C_O0_I4(r, r, ri, ri);
@@ -3962,6 +4146,7 @@ static void tcg_target_init(TCGContext *s)
s->reserved_regs = 0;
tcg_regset_set_reg(s->reserved_regs, TCG_REG_CALL_STACK);
+ tcg_regset_set_reg(s->reserved_regs, TCG_TMP_VEC);
#ifdef _WIN64
/* These are call saved, and we don't save them, so don't use them. */
tcg_regset_set_reg(s->reserved_regs, TCG_REG_XMM6);
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 0106946996..b167f1e8d6 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -118,7 +118,6 @@ typedef enum {
#define have_avx1 (cpuinfo & CPUINFO_AVX1)
#define have_avx2 (cpuinfo & CPUINFO_AVX2)
#define have_movbe (cpuinfo & CPUINFO_MOVBE)
-#define have_atomic16 (cpuinfo & CPUINFO_ATOMIC_VMOVDQA)
/*
* There are interesting instructions in AVX512, so long as we have AVX512VL,
@@ -202,7 +201,8 @@ typedef enum {
#define TCG_TARGET_HAS_qemu_st8_i32 1
#endif
-#define TCG_TARGET_HAS_qemu_ldst_i128 0
+#define TCG_TARGET_HAS_qemu_ldst_i128 \
+ (TCG_TARGET_REG_BITS == 64 && (cpuinfo & CPUINFO_ATOMIC_VMOVDQA))
/* We do not support older SSE systems, only beginning with AVX1. */
#define TCG_TARGET_HAS_v64 have_avx1