about summary refs log tree commit diff
path: root/tcg/mips
diff options
context:
space:
mode:
author    Richard Henderson <richard.henderson@linaro.org>  2021-08-06 05:49:16 -1000
committer Richard Henderson <richard.henderson@linaro.org>  2022-02-09 08:55:02 +1100
commit    23a79c113ed2ae693d882d109862f4a759fbf10e (patch)
tree49f53b9a023cfc76b63e833ab8725b6db5e209cc /tcg/mips
parent0c90fa5dce29243c06841d7b07ff2bd97c27c1f4 (diff)
tcg/mips: Support unaligned access for user-only
This is kinda sorta the opposite of the other tcg hosts, where we get (normal) alignment checks for free with host SIGBUS and need to add code to support unaligned accesses. Fortunately, the ISA contains pairs of instructions that are used to implement unaligned memory accesses. Use them.

Tested-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'tcg/mips')
-rw-r--r--  tcg/mips/tcg-target.c.inc  334
-rw-r--r--  tcg/mips/tcg-target.h        2
2 files changed, 328 insertions, 8 deletions
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index 27b020e66c..2c94ac2ed6 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -24,6 +24,8 @@
* THE SOFTWARE.
*/
+#include "../tcg-ldst.c.inc"
+
#ifdef HOST_WORDS_BIGENDIAN
# define MIPS_BE 1
#else
@@ -230,16 +232,26 @@ typedef enum {
OPC_ORI = 015 << 26,
OPC_XORI = 016 << 26,
OPC_LUI = 017 << 26,
+ OPC_BNEL = 025 << 26,
+ OPC_BNEZALC_R6 = 030 << 26,
OPC_DADDIU = 031 << 26,
+ OPC_LDL = 032 << 26,
+ OPC_LDR = 033 << 26,
OPC_LB = 040 << 26,
OPC_LH = 041 << 26,
+ OPC_LWL = 042 << 26,
OPC_LW = 043 << 26,
OPC_LBU = 044 << 26,
OPC_LHU = 045 << 26,
+ OPC_LWR = 046 << 26,
OPC_LWU = 047 << 26,
OPC_SB = 050 << 26,
OPC_SH = 051 << 26,
+ OPC_SWL = 052 << 26,
OPC_SW = 053 << 26,
+ OPC_SDL = 054 << 26,
+ OPC_SDR = 055 << 26,
+ OPC_SWR = 056 << 26,
OPC_LD = 067 << 26,
OPC_SD = 077 << 26,
@@ -1015,8 +1027,6 @@ static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg)
}
#if defined(CONFIG_SOFTMMU)
-#include "../tcg-ldst.c.inc"
-
static void * const qemu_ld_helpers[(MO_SSIZE | MO_BSWAP) + 1] = {
[MO_UB] = helper_ret_ldub_mmu,
[MO_SB] = helper_ret_ldsb_mmu,
@@ -1324,7 +1334,82 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
return true;
}
-#endif
+
+#else
+
+static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addrlo,
+ TCGReg addrhi, unsigned a_bits)
+{
+ unsigned a_mask = (1 << a_bits) - 1;
+ TCGLabelQemuLdst *l = new_ldst_label(s);
+
+ l->is_ld = is_ld;
+ l->addrlo_reg = addrlo;
+ l->addrhi_reg = addrhi;
+
+ /* We are expecting a_bits to max out at 7, much lower than ANDI. */
+ tcg_debug_assert(a_bits < 16);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_TMP0, addrlo, a_mask);
+
+ l->label_ptr[0] = s->code_ptr;
+ if (use_mips32r6_instructions) {
+ tcg_out_opc_br(s, OPC_BNEZALC_R6, TCG_REG_ZERO, TCG_TMP0);
+ } else {
+ tcg_out_opc_br(s, OPC_BNEL, TCG_TMP0, TCG_REG_ZERO);
+ tcg_out_nop(s);
+ }
+
+ l->raddr = tcg_splitwx_to_rx(s->code_ptr);
+}
+
+static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ void *target;
+
+ if (!reloc_pc16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
+ return false;
+ }
+
+ if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
+ /* A0 is env, A1 is skipped, A2:A3 is the uint64_t address. */
+ TCGReg a2 = MIPS_BE ? l->addrhi_reg : l->addrlo_reg;
+ TCGReg a3 = MIPS_BE ? l->addrlo_reg : l->addrhi_reg;
+
+ if (a3 != TCG_REG_A2) {
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
+ } else if (a2 != TCG_REG_A3) {
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, a3);
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, a2);
+ } else {
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_TMP0, TCG_REG_A2);
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A2, TCG_REG_A3);
+ tcg_out_mov(s, TCG_TYPE_I32, TCG_REG_A3, TCG_TMP0);
+ }
+ } else {
+ tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
+ }
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
+
+ /*
+ * Tail call to the helper, with the return address back inline.
+ * We have arrived here via BNEL, so $31 is already set.
+ */
+ target = (l->is_ld ? helper_unaligned_ld : helper_unaligned_st);
+ tcg_out_call_int(s, target, true);
+ return true;
+}
+
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
+{
+ return tcg_out_fail_alignment(s, l);
+}
+#endif /* SOFTMMU */
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
TCGReg base, MemOp opc, bool is_64)
@@ -1430,6 +1515,127 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
+static void __attribute__((unused))
+tcg_out_qemu_ld_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
+ TCGReg base, MemOp opc, bool is_64)
+{
+ const MIPSInsn lw1 = MIPS_BE ? OPC_LWL : OPC_LWR;
+ const MIPSInsn lw2 = MIPS_BE ? OPC_LWR : OPC_LWL;
+ const MIPSInsn ld1 = MIPS_BE ? OPC_LDL : OPC_LDR;
+ const MIPSInsn ld2 = MIPS_BE ? OPC_LDR : OPC_LDL;
+
+ bool sgn = (opc & MO_SIGN);
+
+ switch (opc & (MO_SSIZE | MO_BSWAP)) {
+ case MO_SW | MO_BE:
+ case MO_UW | MO_BE:
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 1);
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
+ } else {
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP0, TCG_TMP0, 8);
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
+ }
+ break;
+
+ case MO_SW | MO_LE:
+ case MO_UW | MO_LE:
+ if (use_mips32r2_instructions && lo != base) {
+ tcg_out_opc_imm(s, OPC_LBU, lo, base, 0);
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP0, base, 1);
+ tcg_out_opc_bf(s, OPC_INS, lo, TCG_TMP0, 31, 8);
+ } else {
+ tcg_out_opc_imm(s, OPC_LBU, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, sgn ? OPC_LB : OPC_LBU, TCG_TMP1, base, 1);
+ tcg_out_opc_sa(s, OPC_SLL, TCG_TMP1, TCG_TMP1, 8);
+ tcg_out_opc_reg(s, OPC_OR, lo, TCG_TMP0, TCG_TMP1);
+ }
+ break;
+
+ case MO_SL:
+ case MO_UL:
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
+ if (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn) {
+ tcg_out_ext32u(s, lo, lo);
+ }
+ break;
+
+ case MO_UL | MO_BSWAP:
+ case MO_SL | MO_BSWAP:
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, lw1, lo, base, 0);
+ tcg_out_opc_imm(s, lw2, lo, base, 3);
+ tcg_out_bswap32(s, lo, lo,
+ TCG_TARGET_REG_BITS == 64 && is_64
+ ? (sgn ? TCG_BSWAP_OS : TCG_BSWAP_OZ) : 0);
+ } else {
+ const tcg_insn_unit *subr =
+ (TCG_TARGET_REG_BITS == 64 && is_64 && !sgn
+ ? bswap32u_addr : bswap32_addr);
+
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0);
+ tcg_out_bswap_subr(s, subr);
+ /* delay slot */
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 3);
+ tcg_out_mov(s, is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32, lo, TCG_TMP3);
+ }
+ break;
+
+ case MO_UQ:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
+ } else {
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? hi : lo, base, 0 + 0);
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? hi : lo, base, 0 + 3);
+ tcg_out_opc_imm(s, lw1, MIPS_BE ? lo : hi, base, 4 + 0);
+ tcg_out_opc_imm(s, lw2, MIPS_BE ? lo : hi, base, 4 + 3);
+ }
+ break;
+
+ case MO_UQ | MO_BSWAP:
+ if (TCG_TARGET_REG_BITS == 64) {
+ if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, ld1, lo, base, 0);
+ tcg_out_opc_imm(s, ld2, lo, base, 7);
+ tcg_out_bswap64(s, lo, lo);
+ } else {
+ tcg_out_opc_imm(s, ld1, TCG_TMP0, base, 0);
+ tcg_out_bswap_subr(s, bswap64_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, ld2, TCG_TMP0, base, 7);
+ tcg_out_mov(s, TCG_TYPE_I64, lo, TCG_TMP3);
+ }
+ } else if (use_mips32r2_instructions) {
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
+ tcg_out_opc_imm(s, lw1, TCG_TMP1, base, 4 + 0);
+ tcg_out_opc_imm(s, lw2, TCG_TMP1, base, 4 + 3);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, TCG_TMP0);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, TCG_TMP1);
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? lo : hi, TCG_TMP0, 16);
+ tcg_out_opc_sa(s, OPC_ROTR, MIPS_BE ? hi : lo, TCG_TMP1, 16);
+ } else {
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 0 + 0);
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 0 + 3);
+ tcg_out_opc_imm(s, lw1, TCG_TMP0, base, 4 + 0);
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? lo : hi, TCG_TMP3);
+ tcg_out_bswap_subr(s, bswap32_addr);
+ /* delay slot */
+ tcg_out_opc_imm(s, lw2, TCG_TMP0, base, 4 + 3);
+ tcg_out_mov(s, TCG_TYPE_I32, MIPS_BE ? hi : lo, TCG_TMP3);
+ }
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
TCGReg addr_regl, addr_regh __attribute__((unused));
@@ -1438,6 +1644,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
+#else
+ unsigned a_bits, s_bits;
#endif
TCGReg base = TCG_REG_A0;
@@ -1467,7 +1675,27 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
} else {
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
- tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+ a_bits = get_alignment_bits(opc);
+ s_bits = opc & MO_SIZE;
+ /*
+ * R6 removes the left/right instructions but requires the
+ * system to support misaligned memory accesses.
+ */
+ if (use_mips32r6_instructions) {
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+ }
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+ } else {
+ if (a_bits && a_bits != s_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+ }
+ if (a_bits >= s_bits) {
+ tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
+ } else {
+ tcg_out_qemu_ld_unalign(s, data_regl, data_regh, base, opc, is_64);
+ }
+ }
#endif
}
@@ -1532,6 +1760,79 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
}
}
+static void __attribute__((unused))
+tcg_out_qemu_st_unalign(TCGContext *s, TCGReg lo, TCGReg hi,
+ TCGReg base, MemOp opc)
+{
+ const MIPSInsn sw1 = MIPS_BE ? OPC_SWL : OPC_SWR;
+ const MIPSInsn sw2 = MIPS_BE ? OPC_SWR : OPC_SWL;
+ const MIPSInsn sd1 = MIPS_BE ? OPC_SDL : OPC_SDR;
+ const MIPSInsn sd2 = MIPS_BE ? OPC_SDR : OPC_SDL;
+
+ /* Don't clutter the code below with checks to avoid bswapping ZERO. */
+ if ((lo | hi) == 0) {
+ opc &= ~MO_BSWAP;
+ }
+
+ switch (opc & (MO_SIZE | MO_BSWAP)) {
+ case MO_16 | MO_BE:
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 0);
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 1);
+ break;
+
+ case MO_16 | MO_LE:
+ tcg_out_opc_sa(s, OPC_SRL, TCG_TMP0, lo, 8);
+ tcg_out_opc_imm(s, OPC_SB, lo, base, 0);
+ tcg_out_opc_imm(s, OPC_SB, TCG_TMP0, base, 1);
+ break;
+
+ case MO_32 | MO_BSWAP:
+ tcg_out_bswap32(s, TCG_TMP3, lo, 0);
+ lo = TCG_TMP3;
+ /* fall through */
+ case MO_32:
+ tcg_out_opc_imm(s, sw1, lo, base, 0);
+ tcg_out_opc_imm(s, sw2, lo, base, 3);
+ break;
+
+ case MO_64 | MO_BSWAP:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_bswap64(s, TCG_TMP3, lo);
+ lo = TCG_TMP3;
+ } else if (use_mips32r2_instructions) {
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP0, 0, MIPS_BE ? hi : lo);
+ tcg_out_opc_reg(s, OPC_WSBH, TCG_TMP1, 0, MIPS_BE ? lo : hi);
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP0, TCG_TMP0, 16);
+ tcg_out_opc_sa(s, OPC_ROTR, TCG_TMP1, TCG_TMP1, 16);
+ hi = MIPS_BE ? TCG_TMP0 : TCG_TMP1;
+ lo = MIPS_BE ? TCG_TMP1 : TCG_TMP0;
+ } else {
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? lo : hi, 0);
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 0 + 0);
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 0 + 3);
+ tcg_out_bswap32(s, TCG_TMP3, MIPS_BE ? hi : lo, 0);
+ tcg_out_opc_imm(s, sw1, TCG_TMP3, base, 4 + 0);
+ tcg_out_opc_imm(s, sw2, TCG_TMP3, base, 4 + 3);
+ break;
+ }
+ /* fall through */
+ case MO_64:
+ if (TCG_TARGET_REG_BITS == 64) {
+ tcg_out_opc_imm(s, sd1, lo, base, 0);
+ tcg_out_opc_imm(s, sd2, lo, base, 7);
+ } else {
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? hi : lo, base, 0 + 0);
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? hi : lo, base, 0 + 3);
+ tcg_out_opc_imm(s, sw1, MIPS_BE ? lo : hi, base, 4 + 0);
+ tcg_out_opc_imm(s, sw2, MIPS_BE ? lo : hi, base, 4 + 3);
+ }
+ break;
+
+ default:
+ tcg_abort();
+ }
+}
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
TCGReg addr_regl, addr_regh __attribute__((unused));
@@ -1540,6 +1841,8 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
MemOp opc;
#if defined(CONFIG_SOFTMMU)
tcg_insn_unit *label_ptr[2];
+#else
+ unsigned a_bits, s_bits;
#endif
TCGReg base = TCG_REG_A0;
@@ -1558,7 +1861,6 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
data_regl, data_regh, addr_regl, addr_regh,
s->code_ptr, label_ptr);
#else
- base = TCG_REG_A0;
if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
tcg_out_ext32u(s, base, addr_regl);
addr_regl = base;
@@ -1570,7 +1872,27 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
} else {
tcg_out_opc_reg(s, ALIAS_PADD, base, TCG_GUEST_BASE_REG, addr_regl);
}
- tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+ a_bits = get_alignment_bits(opc);
+ s_bits = opc & MO_SIZE;
+ /*
+ * R6 removes the left/right instructions but requires the
+ * system to support misaligned memory accesses.
+ */
+ if (use_mips32r6_instructions) {
+ if (a_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+ }
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+ } else {
+ if (a_bits && a_bits != s_bits) {
+ tcg_out_test_alignment(s, true, addr_regl, addr_regh, a_bits);
+ }
+ if (a_bits >= s_bits) {
+ tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
+ } else {
+ tcg_out_qemu_st_unalign(s, data_regl, data_regh, base, opc);
+ }
+ }
#endif
}
diff --git a/tcg/mips/tcg-target.h b/tcg/mips/tcg-target.h
index c366fdf74b..7669213175 100644
--- a/tcg/mips/tcg-target.h
+++ b/tcg/mips/tcg-target.h
@@ -207,8 +207,6 @@ extern bool use_mips32r2_instructions;
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t)
QEMU_ERROR("code path is reachable");
-#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
-#endif
#endif