author     Richard Henderson <richard.henderson@linaro.org>    2022-02-24 00:24:05 +0000
committer  Richard Henderson <richard.henderson@linaro.org>    2023-01-06 23:07:07 +0000
commit     23d1394a6d1962bbbc8b5180e1a696c2895d89c8 (patch)
tree       376e0b62dd63834544b674093d130f470bd853b6 /tcg/s390x
parent     5c837bbca66805416f94cfc7acc9ad99aa3c4396 (diff)
tcg/s390x: Generalize movcond implementation
Generalize movcond to support pre-computed conditions, and the same
set of arguments at all times. This will be assumed by a following
patch, which needs to reuse tgen_movcond_int.
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
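
The generalization hinges on condition inversion: when the destination register does not already hold the false-value operand v4, the backend can load v3 unconditionally and then conditionally overwrite it with v4 under the inverted condition. The standalone C sketch below is illustrative only; the function names are invented, and the real backend emits LGHI/LGR followed by LOCGHI/LOCGR via tcg_out_insn. It only demonstrates that the two emission strategies compute the same select.

#include <stdio.h>
#include <stdbool.h>

/*
 * Two ways to compute "dest = cond ? v3 : v4" with a single conditional
 * move primitive.  Both function names are invented for illustration.
 */

/* dest already aliases v4: conditionally replace it with v3 (LOCGR/LOCGHI). */
static long select_dest_is_v4(bool cond, long v3, long v4)
{
    long dest = v4;
    if (cond) {
        dest = v3;
    }
    return dest;
}

/* dest is a distinct register: load v3 unconditionally (LGHI/LGR), then
 * conditionally overwrite it with v4 under the *inverted* condition. */
static long select_dest_is_free(bool cond, long v3, long v4)
{
    long dest = v3;
    if (!cond) {
        dest = v4;
    }
    return dest;
}

int main(void)
{
    for (int cond = 0; cond <= 1; cond++) {
        printf("cond=%d -> %ld / %ld\n", cond,
               select_dest_is_v4(cond, 7, 9),
               select_dest_is_free(cond, 7, 9));
    }
    return 0;
}

This equivalence is also why the operand constraint for v4 can relax from "0" (must alias the output register) to a plain "r" in the hunks below.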
Diffstat (limited to 'tcg/s390x')
-rw-r--r--  tcg/s390x/tcg-target-con-set.h |  3
-rw-r--r--  tcg/s390x/tcg-target.c.inc     | 52
2 files changed, 41 insertions, 14 deletions
diff --git a/tcg/s390x/tcg-target-con-set.h b/tcg/s390x/tcg-target-con-set.h
index b194ad7f03..8cf8ed4dff 100644
--- a/tcg/s390x/tcg-target-con-set.h
+++ b/tcg/s390x/tcg-target-con-set.h
@@ -33,8 +33,7 @@ C_O1_I2(r, rZ, r)
 C_O1_I2(v, v, r)
 C_O1_I2(v, v, v)
 C_O1_I3(v, v, v, v)
-C_O1_I4(r, r, ri, r, 0)
-C_O1_I4(r, r, ri, rI, 0)
+C_O1_I4(r, r, ri, rI, r)
 C_O2_I2(o, m, 0, r)
 C_O2_I2(o, m, r, r)
 C_O2_I3(o, m, 0, 1, r)
diff --git a/tcg/s390x/tcg-target.c.inc b/tcg/s390x/tcg-target.c.inc
index a9e3b4a9b9..30c12052f0 100644
--- a/tcg/s390x/tcg-target.c.inc
+++ b/tcg/s390x/tcg-target.c.inc
@@ -1354,19 +1354,49 @@ static void tgen_setcond(TCGContext *s, TCGType type, TCGCond cond,
     tcg_out_insn(s, RRFc, LOCGR, dest, TCG_TMP0, cc);
 }
 
+static void tgen_movcond_int(TCGContext *s, TCGType type, TCGReg dest,
+                             TCGArg v3, int v3const, TCGReg v4,
+                             int cc, int inv_cc)
+{
+    TCGReg src;
+
+    if (v3const) {
+        if (dest == v4) {
+            if (HAVE_FACILITY(LOAD_ON_COND2)) {
+                /* Emit: if (cc) dest = v3. */
+                tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
+                return;
+            }
+            tcg_out_insn(s, RI, LGHI, TCG_TMP0, v3);
+            src = TCG_TMP0;
+        } else {
+            /* LGR+LOCGHI is larger than LGHI+LOCGR. */
+            tcg_out_insn(s, RI, LGHI, dest, v3);
+            cc = inv_cc;
+            src = v4;
+        }
+    } else {
+        if (dest == v4) {
+            src = v3;
+        } else {
+            tcg_out_mov(s, type, dest, v3);
+            cc = inv_cc;
+            src = v4;
+        }
+    }
+
+    /* Emit: if (cc) dest = src. */
+    tcg_out_insn(s, RRFc, LOCGR, dest, src, cc);
+}
+
 static void tgen_movcond(TCGContext *s, TCGType type, TCGCond c, TCGReg dest,
                          TCGReg c1, TCGArg c2, int c2const,
-                         TCGArg v3, int v3const)
+                         TCGArg v3, int v3const, TCGReg v4)
 {
     int cc, inv_cc;
 
     cc = tgen_cmp2(s, type, c, c1, c2, c2const, false, &inv_cc);
-
-    if (v3const) {
-        tcg_out_insn(s, RIEg, LOCGHI, dest, v3, cc);
-    } else {
-        tcg_out_insn(s, RRFc, LOCGR, dest, v3, cc);
-    }
+    tgen_movcond_int(s, type, dest, v3, v3const, v4, cc, inv_cc);
 }
 
 static void tgen_clz(TCGContext *s, TCGReg dest, TCGReg a1,
@@ -2225,7 +2255,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
     case INDEX_op_movcond_i32:
         tgen_movcond(s, TCG_TYPE_I32, args[5], args[0], args[1],
-                     args[2], const_args[2], args[3], const_args[3]);
+                     args[2], const_args[2], args[3], const_args[3], args[4]);
         break;
 
     case INDEX_op_qemu_ld_i32:
@@ -2509,7 +2539,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
     case INDEX_op_movcond_i64:
         tgen_movcond(s, TCG_TYPE_I64, args[5], args[0], args[1],
-                     args[2], const_args[2], args[3], const_args[3]);
+                     args[2], const_args[2], args[3], const_args[3], args[4]);
        break;
 
     OP_32_64(deposit):
@@ -3114,9 +3144,7 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 
     case INDEX_op_movcond_i32:
     case INDEX_op_movcond_i64:
-        return (HAVE_FACILITY(LOAD_ON_COND2)
-                ? C_O1_I4(r, r, ri, rI, 0)
-                : C_O1_I4(r, r, ri, r, 0));
+        return C_O1_I4(r, r, ri, rI, r);
 
     case INDEX_op_div2_i32:
     case INDEX_op_div2_i64:
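
As a reading aid for the new tgen_movcond_int above, here is a small self-contained C model of its sequence selection. It is not QEMU code: the function name, the boolean flags, and the returned strings are invented, and the real function emits the instructions through tcg_out_insn rather than describing them.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical model of tgen_movcond_int's choices, one string per case. */
static const char *movcond_sequence(bool v3const, bool dest_is_v4, bool have_loc2)
{
    if (v3const) {
        if (dest_is_v4) {
            if (have_loc2) {
                return "LOCGHI dest,v3,cc";               /* single insn */
            }
            return "LGHI tmp,v3; LOCGR dest,tmp,cc";      /* materialize, then select */
        }
        return "LGHI dest,v3; LOCGR dest,v4,inv_cc";      /* inverted condition */
    }
    if (dest_is_v4) {
        return "LOCGR dest,v3,cc";
    }
    return "(mov dest,v3); LOCGR dest,v4,inv_cc";         /* inverted condition */
}

int main(void)
{
    for (int v3const = 0; v3const <= 1; v3const++) {
        for (int alias = 0; alias <= 1; alias++) {
            for (int loc2 = 0; loc2 <= 1; loc2++) {
                printf("v3const=%d dest==v4=%d loc2=%d -> %s\n",
                       v3const, alias, loc2,
                       movcond_sequence(v3const, alias, loc2));
            }
        }
    }
    return 0;
}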