Diffstat (limited to 'tcg/aarch64/tcg-target.c.inc')
-rw-r--r--  tcg/aarch64/tcg-target.c.inc | 141
1 file changed, 139 insertions(+), 2 deletions(-)
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 00d9033e2f..261ad25210 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -385,6 +385,10 @@ typedef enum {
I3305_LDR_v64 = 0x5c000000,
I3305_LDR_v128 = 0x9c000000,
+ /* Load/store exclusive. */
+ I3306_LDXP = 0xc8600000,
+ I3306_STXP = 0xc8200000,
+
/* Load/store register. Described here as 3.3.12, but the helper
that emits them can transform to 3.3.10 or 3.3.13. */
I3312_STRB = 0x38000000 | LDST_ST << 22 | MO_8 << 30,
@@ -449,6 +453,9 @@ typedef enum {
I3406_ADR = 0x10000000,
I3406_ADRP = 0x90000000,
+ /* Add/subtract extended register instructions. */
+ I3501_ADD = 0x0b200000,
+
/* Add/subtract shifted register instructions (without a shift). */
I3502_ADD = 0x0b000000,
I3502_ADDS = 0x2b000000,
@@ -619,6 +626,12 @@ static void tcg_out_insn_3305(TCGContext *s, AArch64Insn insn,
tcg_out32(s, insn | (imm19 & 0x7ffff) << 5 | rt);
}
+static void tcg_out_insn_3306(TCGContext *s, AArch64Insn insn, TCGReg rs,
+ TCGReg rt, TCGReg rt2, TCGReg rn)
+{
+ tcg_out32(s, insn | rs << 16 | rt2 << 10 | rn << 5 | rt);
+}
+
static void tcg_out_insn_3201(TCGContext *s, AArch64Insn insn, TCGType ext,
TCGReg rt, int imm19)
{
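The two new I3306 opcodes are the 64-bit load/store exclusive pair instructions (LDXP and STXP); tcg_out_insn_3306 packs the status register Rs into bits [20:16], Rt2 into [14:10], Rn into [9:5] and Rt into [4:0]. As a cross-check, here is a minimal standalone sketch of the same packing, outside the patch and with a hypothetical helper name:

    #include <stdint.h>

    /* Pack a 64-bit LDXP/STXP exactly as tcg_out_insn_3306 does. */
    static uint32_t pack_ldstxp(uint32_t opc, unsigned rs, unsigned rt,
                                unsigned rt2, unsigned rn)
    {
        return opc | rs << 16 | rt2 << 10 | rn << 5 | rt;
    }

    /* pack_ldstxp(0xc8600000, 31, 0, 1, 2) == 0xc87f0440, i.e.
     * "ldxp x0, x1, [x2]" with the ignored Rs field set to xzr (31),
     * matching the LDXP emitted by the i128 path later in this patch. */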
@@ -701,6 +714,14 @@ static void tcg_out_insn_3406(TCGContext *s, AArch64Insn insn,
tcg_out32(s, insn | (disp & 3) << 29 | (disp & 0x1ffffc) << (5 - 2) | rd);
}
+static inline void tcg_out_insn_3501(TCGContext *s, AArch64Insn insn,
+ TCGType sf, TCGReg rd, TCGReg rn,
+ TCGReg rm, int opt, int imm3)
+{
+ tcg_out32(s, insn | sf << 31 | rm << 16 | opt << 13 |
+ imm3 << 10 | rn << 5 | rd);
+}
+
/* This function is for both 3.5.2 (Add/Subtract shifted register), for
the rare occasion when we actually want to supply a shift amount. */
static inline void tcg_out_insn_3502S(TCGContext *s, AArch64Insn insn,
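The new I3501 form is add/subtract with an extended register: the option field at bits [15:13] selects how Rm is extended and imm3 at [12:10] gives an optional left shift. The i128 path later in the patch uses option MO_32 (UXTW) to add a 32-bit guest-address index onto the 64-bit base, since LDP/STP and LDXP/STXP have no register-offset addressing mode. A standalone sketch of the packing, again with a hypothetical helper name:

    #include <stdint.h>

    /* Pack "add Rd, Rn, Rm, <extend> #imm3" as tcg_out_insn_3501 does.
     * option 0b010 (UXTW) zero-extends a 32-bit Wm before the add. */
    static uint32_t pack_add_ext(unsigned sf, unsigned rd, unsigned rn,
                                 unsigned rm, unsigned option, unsigned imm3)
    {
        return 0x0b200000u | sf << 31 | rm << 16 | option << 13 |
               imm3 << 10 | rn << 5 | rd;
    }

    /* pack_add_ext(1, 2, 0, 1, 2, 0) == 0x8b214002, i.e.
     * "add x2, x0, w1, uxtw" - a 64-bit base plus a zero-extended
     * 32-bit index, as emitted for TCG_TYPE_I32 guest addresses. */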
@@ -1628,16 +1649,16 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
TCGType addr_type = s->addr_type;
TCGLabelQemuLdst *ldst = NULL;
MemOp opc = get_memop(oi);
+ MemOp s_bits = opc & MO_SIZE;
unsigned a_mask;
h->aa = atom_and_align_for_opc(s, opc,
have_lse2 ? MO_ATOM_WITHIN16
: MO_ATOM_IFALIGN,
- false);
+ s_bits == MO_128);
a_mask = (1 << h->aa.align) - 1;
#ifdef CONFIG_SOFTMMU
- unsigned s_bits = opc & MO_SIZE;
unsigned s_mask = (1u << s_bits) - 1;
unsigned mem_index = get_mmuidx(oi);
TCGReg addr_adj;
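Passing s_bits == MO_128 as the final argument to atom_and_align_for_opc allows a 16-byte access's atomicity requirement to be satisfied by a pair of 8-byte operations when the guest memop permits it; the resulting h.aa is what the new i128 path below consults. Roughly, the decision it feeds, sketched with plain C types rather than QEMU's (MO_128 is log2(16) = 4):

    #include <stdbool.h>

    /* Sketch of the use_pair check made in tcg_out_qemu_ldst_i128 below:
     * a plain register pair suffices if the required atom is smaller than
     * 16 bytes, or if FEAT_LSE2 makes aligned LDP/STP single-copy atomic. */
    static bool can_use_ldp_stp(unsigned required_atom_log2, bool have_lse2)
    {
        return required_atom_log2 < 4 /* MO_128 */ || have_lse2;
    }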
@@ -1818,6 +1839,108 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
}
}
+static void tcg_out_qemu_ldst_i128(TCGContext *s, TCGReg datalo, TCGReg datahi,
+ TCGReg addr_reg, MemOpIdx oi, bool is_ld)
+{
+ TCGLabelQemuLdst *ldst;
+ HostAddress h;
+ TCGReg base;
+ bool use_pair;
+
+ ldst = prepare_host_addr(s, &h, addr_reg, oi, is_ld);
+
+ /* Compose the final address, as LDP/STP have no indexing. */
+ if (h.index == TCG_REG_XZR) {
+ base = h.base;
+ } else {
+ base = TCG_REG_TMP2;
+ if (h.index_ext == TCG_TYPE_I32) {
+ /* add base, base, index, uxtw */
+ tcg_out_insn(s, 3501, ADD, TCG_TYPE_I64, base,
+ h.base, h.index, MO_32, 0);
+ } else {
+ /* add base, base, index */
+ tcg_out_insn(s, 3502, ADD, 1, base, h.base, h.index);
+ }
+ }
+
+ use_pair = h.aa.atom < MO_128 || have_lse2;
+
+ if (!use_pair) {
+ tcg_insn_unit *branch = NULL;
+ TCGReg ll, lh, sl, sh;
+
+ /*
+ * If we have already checked for 16-byte alignment, that's all
+ * we need. Otherwise we have determined that misaligned atomicity
+ * may be handled with two 8-byte loads.
+ */
+ if (h.aa.align < MO_128) {
+ /*
+ * TODO: align should be MO_64, so we only need test bit 3,
+ * which means we could use TBNZ instead of ANDS+B_C.
+ */
+ tcg_out_logicali(s, I3404_ANDSI, 0, TCG_REG_XZR, addr_reg, 15);
+ branch = s->code_ptr;
+ tcg_out_insn(s, 3202, B_C, TCG_COND_NE, 0);
+ use_pair = true;
+ }
+
+ if (is_ld) {
+ /*
+ * 16-byte atomicity without LSE2 requires LDXP+STXP loop:
+ * ldxp lo, hi, [base]
+ * stxp t0, lo, hi, [base]
+ * cbnz t0, .-8
+ * Require no overlap between data{lo,hi} and base.
+ */
+ if (datalo == base || datahi == base) {
+ tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_TMP2, base);
+ base = TCG_REG_TMP2;
+ }
+ ll = sl = datalo;
+ lh = sh = datahi;
+ } else {
+ /*
+ * 16-byte atomicity without LSE2 requires LDXP+STXP loop:
+ * 1: ldxp t0, t1, [base]
+ * stxp t0, lo, hi, [base]
+ * cbnz t0, 1b
+ */
+ tcg_debug_assert(base != TCG_REG_TMP0 && base != TCG_REG_TMP1);
+ ll = TCG_REG_TMP0;
+ lh = TCG_REG_TMP1;
+ sl = datalo;
+ sh = datahi;
+ }
+
+ tcg_out_insn(s, 3306, LDXP, TCG_REG_XZR, ll, lh, base);
+ tcg_out_insn(s, 3306, STXP, TCG_REG_TMP0, sl, sh, base);
+ tcg_out_insn(s, 3201, CBNZ, 0, TCG_REG_TMP0, -2);
+
+ if (use_pair) {
+ /* "b .+8", branching across the one insn of use_pair. */
+ tcg_out_insn(s, 3206, B, 2);
+ reloc_pc19(branch, tcg_splitwx_to_rx(s->code_ptr));
+ }
+ }
+
+ if (use_pair) {
+ if (is_ld) {
+ tcg_out_insn(s, 3314, LDP, datalo, datahi, base, 0, 1, 0);
+ } else {
+ tcg_out_insn(s, 3314, STP, datalo, datahi, base, 0, 1, 0);
+ }
+ }
+
+ if (ldst) {
+ ldst->type = TCG_TYPE_I128;
+ ldst->datalo_reg = datalo;
+ ldst->datahi_reg = datahi;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
+ }
+}
+
static const tcg_insn_unit *tb_ret_addr;
static void tcg_out_exit_tb(TCGContext *s, uintptr_t a0)
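Putting the pieces together: when the guest requires a full 16-byte atom and FEAT_LSE2 is not available, the function above emits an alignment test, an exclusive-pair retry loop, and a register-pair fallback. The host code produced for a 128-bit load is roughly the following (register names are illustrative; the store path has the same shape, with LDXP loading into scratch registers and STXP writing the new data):

    ands  xzr, x_addr, #15        /* 16-byte aligned?                */
    b.ne  1f                      /* no: fall back to ldp            */
    0:
    ldxp  x_lo, x_hi, [x_base]    /* exclusive 16-byte read          */
    stxp  w_tmp0, x_lo, x_hi, [x_base]
    cbnz  w_tmp0, 0b              /* retry until the pair was atomic */
    b     2f
    1:
    ldp   x_lo, x_hi, [x_base]    /* two 8-byte-atomic halves        */
    2:

When FEAT_LSE2 is present, or the guest never demanded a single 16-byte atom, only the final ldp (or stp) is emitted.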
@@ -2157,6 +2280,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
case INDEX_op_qemu_st_a64_i64:
tcg_out_qemu_st(s, REG0(0), a1, a2, ext);
break;
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ tcg_out_qemu_ldst_i128(s, a0, a1, a2, args[3], true);
+ break;
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ tcg_out_qemu_ldst_i128(s, REG0(0), REG0(1), a2, args[3], false);
+ break;
case INDEX_op_bswap64_i64:
tcg_out_rev(s, TCG_TYPE_I64, MO_64, a0, a1);
@@ -2796,11 +2927,17 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
case INDEX_op_qemu_ld_a32_i64:
case INDEX_op_qemu_ld_a64_i64:
return C_O1_I1(r, r);
+ case INDEX_op_qemu_ld_a32_i128:
+ case INDEX_op_qemu_ld_a64_i128:
+ return C_O2_I1(r, r, r);
case INDEX_op_qemu_st_a32_i32:
case INDEX_op_qemu_st_a64_i32:
case INDEX_op_qemu_st_a32_i64:
case INDEX_op_qemu_st_a64_i64:
return C_O0_I2(rZ, r);
+ case INDEX_op_qemu_st_a32_i128:
+ case INDEX_op_qemu_st_a64_i128:
+ return C_O0_I3(rZ, rZ, r);
case INDEX_op_deposit_i32:
case INDEX_op_deposit_i64:
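With the constraints in place (two outputs plus an address register for the 128-bit loads, and two data registers that may be the zero register plus an address for the stores), the aarch64 backend now accepts the 128-bit memory opcodes produced by the common TCG code. A hypothetical frontend-side use, assuming a translator that already has addr and mem_idx in hand, might look like:

    /* Hypothetical: emit a little-endian 16-byte guest load whose
     * atomicity requirement follows the access alignment; on aarch64
     * hosts it is lowered by tcg_out_qemu_ldst_i128 above. */
    TCGv_i128 val = tcg_temp_new_i128();
    tcg_gen_qemu_ld_i128(val, addr, mem_idx, MO_LE | MO_128 | MO_ATOM_IFALIGN);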