Diffstat (limited to 'tcg/sparc/tcg-target.c')
-rw-r--r-- | tcg/sparc/tcg-target.c | 129
1 file changed, 115 insertions, 14 deletions
diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c
index 40f2ec1027..0c4b028580 100644
--- a/tcg/sparc/tcg-target.c
+++ b/tcg/sparc/tcg-target.c
@@ -197,8 +197,8 @@ static const int tcg_target_call_oarg_regs[] = {
 #define ARITH_XOR (INSN_OP(2) | INSN_OP3(0x03))
 #define ARITH_SUB (INSN_OP(2) | INSN_OP3(0x04))
 #define ARITH_SUBCC (INSN_OP(2) | INSN_OP3(0x14))
-#define ARITH_ADDX (INSN_OP(2) | INSN_OP3(0x08))
-#define ARITH_SUBX (INSN_OP(2) | INSN_OP3(0x0c))
+#define ARITH_ADDC (INSN_OP(2) | INSN_OP3(0x08))
+#define ARITH_SUBC (INSN_OP(2) | INSN_OP3(0x0c))
 #define ARITH_UMUL (INSN_OP(2) | INSN_OP3(0x0a))
 #define ARITH_SMUL (INSN_OP(2) | INSN_OP3(0x0b))
 #define ARITH_UDIV (INSN_OP(2) | INSN_OP3(0x0e))
@@ -209,6 +209,9 @@ static const int tcg_target_call_oarg_regs[] = {
 #define ARITH_MOVCC (INSN_OP(2) | INSN_OP3(0x2c))
 #define ARITH_MOVR (INSN_OP(2) | INSN_OP3(0x2f))
 
+#define ARITH_ADDXC (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x11))
+#define ARITH_UMULXHI (INSN_OP(2) | INSN_OP3(0x36) | INSN_OPF(0x16))
+
 #define SHIFT_SLL (INSN_OP(2) | INSN_OP3(0x25))
 #define SHIFT_SRL (INSN_OP(2) | INSN_OP3(0x26))
 #define SHIFT_SRA (INSN_OP(2) | INSN_OP3(0x27))
@@ -262,6 +265,10 @@ static const int tcg_target_call_oarg_regs[] = {
 #define STW_LE (STWA | INSN_ASI(ASI_PRIMARY_LITTLE))
 #define STX_LE (STXA | INSN_ASI(ASI_PRIMARY_LITTLE))
 
+#ifndef use_vis3_instructions
+bool use_vis3_instructions;
+#endif
+
 static inline int check_fit_i64(int64_t val, unsigned int bits)
 {
     return val == sextract64(val, 0, bits);
@@ -657,7 +664,7 @@ static void tcg_out_movcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
 static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
                                 TCGReg c1, int32_t c2, int c2const)
 {
-    /* For 32-bit comparisons, we can play games with ADDX/SUBX.  */
+    /* For 32-bit comparisons, we can play games with ADDC/SUBC.  */
     switch (cond) {
     case TCG_COND_LTU:
     case TCG_COND_GEU:
@@ -668,9 +675,12 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
     case TCG_COND_NE:
         /* For equality, we can transform to inequality vs zero.  */
         if (c2 != 0) {
-            tcg_out_arithc(s, ret, c1, c2, c2const, ARITH_XOR);
+            tcg_out_arithc(s, TCG_REG_T1, c1, c2, c2const, ARITH_XOR);
+            c2 = TCG_REG_T1;
+        } else {
+            c2 = c1;
         }
-        c1 = TCG_REG_G0, c2 = ret, c2const = 0;
+        c1 = TCG_REG_G0, c2const = 0;
         cond = (cond == TCG_COND_EQ ? TCG_COND_GEU : TCG_COND_LTU);
         break;
 
@@ -698,15 +708,32 @@ static void tcg_out_setcond_i32(TCGContext *s, TCGCond cond, TCGReg ret,
 
     tcg_out_cmp(s, c1, c2, c2const);
     if (cond == TCG_COND_LTU) {
-        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDX);
+        tcg_out_arithi(s, ret, TCG_REG_G0, 0, ARITH_ADDC);
     } else {
-        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBX);
+        tcg_out_arithi(s, ret, TCG_REG_G0, -1, ARITH_SUBC);
     }
 }
 
 static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
                                 TCGReg c1, int32_t c2, int c2const)
 {
+    if (use_vis3_instructions) {
+        switch (cond) {
+        case TCG_COND_NE:
+            if (c2 != 0) {
+                break;
+            }
+            c2 = c1, c2const = 0, c1 = TCG_REG_G0;
+            /* FALLTHRU */
+        case TCG_COND_LTU:
+            tcg_out_cmp(s, c1, c2, c2const);
+            tcg_out_arith(s, ret, TCG_REG_G0, TCG_REG_G0, ARITH_ADDXC);
+            return;
+        default:
+            break;
+        }
+    }
+
     /* For 64-bit signed comparisons vs zero, we can avoid the compare
        if the input does not overlap the output.  */
     if (c2 == 0 && !is_unsigned_cond(cond) && c1 != ret) {
@@ -719,9 +746,9 @@ static void tcg_out_setcond_i64(TCGContext *s, TCGCond cond, TCGReg ret,
     }
 }
 
-static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
-                            TCGReg al, TCGReg ah, int32_t bl, int blconst,
-                            int32_t bh, int bhconst, int opl, int oph)
+static void tcg_out_addsub2_i32(TCGContext *s, TCGReg rl, TCGReg rh,
+                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
+                                int32_t bh, int bhconst, int opl, int oph)
 {
     TCGReg tmp = TCG_REG_T1;
 
@@ -735,6 +762,54 @@ static void tcg_out_addsub2(TCGContext *s, TCGReg rl, TCGReg rh,
     tcg_out_mov(s, TCG_TYPE_I32, rl, tmp);
 }
 
+static void tcg_out_addsub2_i64(TCGContext *s, TCGReg rl, TCGReg rh,
+                                TCGReg al, TCGReg ah, int32_t bl, int blconst,
+                                int32_t bh, int bhconst, bool is_sub)
+{
+    TCGReg tmp = TCG_REG_T1;
+
+    /* Note that the low parts are fully consumed before tmp is set.  */
+    if (rl != ah && (bhconst || rl != bh)) {
+        tmp = rl;
+    }
+
+    tcg_out_arithc(s, tmp, al, bl, blconst, is_sub ? ARITH_SUBCC : ARITH_ADDCC);
+
+    if (use_vis3_instructions && !is_sub) {
+        /* Note that ADDXC doesn't accept immediates.  */
+        if (bhconst && bh != 0) {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh);
+            bh = TCG_REG_T2;
+        }
+        tcg_out_arith(s, rh, ah, bh, ARITH_ADDXC);
+    } else if (bh == TCG_REG_G0) {
+        /* If we have a zero, we can perform the operation in two insns,
+           with the arithmetic first, and a conditional move into place.  */
+        if (rh == ah) {
+            tcg_out_arithi(s, TCG_REG_T2, ah, 1,
+                           is_sub ? ARITH_SUB : ARITH_ADD);
+            tcg_out_movcc(s, TCG_COND_LTU, MOVCC_XCC, rh, TCG_REG_T2, 0);
+        } else {
+            tcg_out_arithi(s, rh, ah, 1, is_sub ? ARITH_SUB : ARITH_ADD);
+            tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, rh, ah, 0);
+        }
+    } else {
+        /* Otherwise adjust BH as if there is carry into T2 ... */
+        if (bhconst) {
+            tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_T2, bh + (is_sub ? -1 : 1));
+        } else {
+            tcg_out_arithi(s, TCG_REG_T2, bh, 1,
+                           is_sub ? ARITH_SUB : ARITH_ADD);
+        }
+        /* ... smoosh T2 back to original BH if carry is clear ... */
+        tcg_out_movcc(s, TCG_COND_GEU, MOVCC_XCC, TCG_REG_T2, bh, bhconst);
+        /* ... and finally perform the arithmetic with the new operand.  */
+        tcg_out_arith(s, rh, ah, TCG_REG_T2, is_sub ? ARITH_SUB : ARITH_ADD);
+    }
+
+    tcg_out_mov(s, TCG_TYPE_I64, rl, tmp);
+}
+
 static void tcg_out_call_nodelay(TCGContext *s, tcg_insn_unit *dest)
 {
     ptrdiff_t disp = tcg_pcrel_diff(s, dest);
@@ -1264,12 +1339,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_add2_i32:
-        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
-                        args[5], const_args[5], ARITH_ADDCC, ARITH_ADDX);
+        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
+                            args[4], const_args[4], args[5], const_args[5],
+                            ARITH_ADDCC, ARITH_ADDC);
         break;
     case INDEX_op_sub2_i32:
-        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], const_args[4],
-                        args[5], const_args[5], ARITH_SUBCC, ARITH_SUBX);
+        tcg_out_addsub2_i32(s, args[0], args[1], args[2], args[3],
+                            args[4], const_args[4], args[5], const_args[5],
+                            ARITH_SUBCC, ARITH_SUBC);
         break;
     case INDEX_op_mulu2_i32:
         c = ARITH_UMUL;
@@ -1351,6 +1428,17 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     case INDEX_op_movcond_i64:
         tcg_out_movcond_i64(s, args[5], a0, a1, a2, c2, args[3], const_args[3]);
         break;
+    case INDEX_op_add2_i64:
+        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
+                            const_args[4], args[5], const_args[5], false);
+        break;
+    case INDEX_op_sub2_i64:
+        tcg_out_addsub2_i64(s, args[0], args[1], args[2], args[3], args[4],
+                            const_args[4], args[5], const_args[5], true);
+        break;
+    case INDEX_op_muluh_i64:
+        tcg_out_arith(s, args[0], args[1], args[2], ARITH_UMULXHI);
+        break;
 
     gen_arith:
         tcg_out_arithc(s, a0, a1, a2, c2, c);
@@ -1449,6 +1537,10 @@ static const TCGTargetOpDef sparc_op_defs[] = {
     { INDEX_op_setcond_i64, { "R", "RZ", "RJ" } },
     { INDEX_op_movcond_i64, { "R", "RZ", "RJ", "RI", "0" } },
 
+    { INDEX_op_add2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
+    { INDEX_op_sub2_i64, { "R", "R", "RZ", "RZ", "RJ", "RI" } },
+    { INDEX_op_muluh_i64, { "R", "RZ", "RZ" } },
+
     { INDEX_op_qemu_ld_i32, { "r", "A" } },
     { INDEX_op_qemu_ld_i64, { "R", "A" } },
     { INDEX_op_qemu_st_i32, { "sZ", "A" } },
@@ -1459,6 +1551,15 @@ static const TCGTargetOpDef sparc_op_defs[] = {
 
 static void tcg_target_init(TCGContext *s)
 {
+    /* Only probe for the platform and capabilities if we haven't already
+       determined maximum values at compile time.  */
+#ifndef use_vis3_instructions
+    {
+        unsigned long hwcap = qemu_getauxval(AT_HWCAP);
+        use_vis3_instructions = (hwcap & HWCAP_SPARC_VIS3) != 0;
+    }
+#endif
+
     tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
     tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I64], 0, ALL_64);
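Why the ADDC/SUBC trick in tcg_out_setcond_i32 works: SUBCC sets the carry
(borrow) bit exactly when c1 < c2 as unsigned integers, so the flag itself is
the LTU result. A minimal C model of the idea (not code from the patch; the
helper names are invented for illustration):

    #include <stdint.h>

    /* icc.C after "subcc c1, c2, %g0" is set iff c1 < c2 (unsigned). */
    static unsigned carry_after_cmp(uint32_t c1, uint32_t c2)
    {
        return c1 < c2;
    }

    /* LTU: "addc %g0, 0, ret" computes 0 + 0 + C, i.e. the carry itself. */
    static uint32_t setcond_ltu(uint32_t c1, uint32_t c2)
    {
        return 0 + 0 + carry_after_cmp(c1, c2);
    }

    /* GEU: "subc %g0, -1, ret" computes 0 - (-1) - C, i.e. 1 - C. */
    static uint32_t setcond_geu(uint32_t c1, uint32_t c2)
    {
        return 1 - carry_after_cmp(c1, c2);
    }

Equality reduces to the same trick: the operands are XORed into T1 and the
result is tested against zero with LTU/GEU (NE becomes "0 < x", EQ becomes
"0 >= x"). Routing the XOR through T1 instead of ret also appears to fix the
c2 == 0 case, where the old code compared against ret without ever having
written it. The new VIS3 path in tcg_out_setcond_i64 is the 64-bit analogue:
after the compare, "addxc %g0, %g0, ret" adds the xcc carry into zero,
producing the 0/1 result in a single instruction.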
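The interesting part of tcg_out_addsub2_i64 is the high half. The low halves
are combined with ADDCC/SUBCC, which latches the 64-bit carry in xcc, but the
pre-VIS3 ADDC/SUBC instructions consume only the 32-bit icc carry, so the
fallback paths fold the carry in with a conditional move on xcc instead. A
rough C model of what the generated code computes for the add case, under
that reading (not the patch's code):

    #include <stdint.h>

    static void add2_i64_model(uint64_t *rl, uint64_t *rh,
                               uint64_t al, uint64_t ah,
                               uint64_t bl, uint64_t bh)
    {
        uint64_t lo = al + bl;   /* ADDCC: sets xcc.C on unsigned overflow */
        int carry = lo < al;     /* the carry the flags now hold */

        /* Fallback path: T2 = bh + 1, then MOVCC on xcc puts bh back if
           the carry is clear; the plain ADD then uses the survivor. */
        uint64_t t2 = carry ? bh + 1 : bh;
        *rh = ah + t2;
        *rl = lo;
    }

With VIS3, ADDXC is a true 64-bit add-with-carry and the conditional-move
dance collapses to one instruction; as the patch's comment notes, ADDXC takes
no immediate, so a constant high part must first be materialized in T2.
UMULXHI, the other VIS3 instruction used here, returns the high 64 bits of an
unsigned 64x64-bit product, which maps directly onto INDEX_op_muluh_i64.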
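The probe in tcg_target_init runs only when use_vis3_instructions was not
pinned down at compile time (the #ifndef guards both the variable definition
and the probe). A standalone sketch of the same check, using glibc's
getauxval in place of QEMU's qemu_getauxval wrapper; the HWCAP_SPARC_VIS3
value here is assumed from the Linux SPARC uapi headers:

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef HWCAP_SPARC_VIS3
    #define HWCAP_SPARC_VIS3 0x00020000  /* assumed value, per <asm/hwcap.h> */
    #endif

    int main(void)
    {
        /* AT_HWCAP carries the kernel's view of the CPU's capabilities. */
        unsigned long hwcap = getauxval(AT_HWCAP);
        bool vis3 = (hwcap & HWCAP_SPARC_VIS3) != 0;

        printf("VIS3 instructions %savailable\n", vis3 ? "" : "not ");
        return 0;
    }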