| author | Richard Henderson <rth@twiddle.net> | 2013-02-19 23:52:01 -0800 |
|---|---|---|
| committer | Blue Swirl <blauwirbel@gmail.com> | 2013-02-23 17:25:29 +0000 |
| commit | f402f38f439f17d4361b28248f948a6170d30133 (patch) | |
| tree | d1a4534df3916acb29bb05939c39820224b9902c /tcg/tcg-op.h | |
| parent | 76f131332310d8317880ef73a2bc90725d857ed3 (diff) | |
tcg: Implement muls2 with mulu2
Signed-off-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Blue Swirl <blauwirbel@gmail.com>
Diffstat (limited to 'tcg/tcg-op.h')
| -rw-r--r-- | tcg/tcg-op.h | 40 |
1 file changed, 40 insertions, 0 deletions
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index dac3b4ed9f..d70b2eba33 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -2396,6 +2396,26 @@ static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
         tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace muls2 with two moves.  */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_mulu2_i32) {
+        TCGv_i32 t0 = tcg_temp_new_i32();
+        TCGv_i32 t1 = tcg_temp_new_i32();
+        TCGv_i32 t2 = tcg_temp_new_i32();
+        TCGv_i32 t3 = tcg_temp_new_i32();
+        tcg_gen_op4_i32(INDEX_op_mulu2_i32, t0, t1, arg1, arg2);
+        /* Allow the optimizer room to replace mulu2 with two moves.  */
+        tcg_gen_op0(INDEX_op_nop);
+        /* Adjust for negative inputs.  */
+        tcg_gen_sari_i32(t2, arg1, 31);
+        tcg_gen_sari_i32(t3, arg2, 31);
+        tcg_gen_and_i32(t2, t2, arg2);
+        tcg_gen_and_i32(t3, t3, arg1);
+        tcg_gen_sub_i32(rh, t1, t2);
+        tcg_gen_sub_i32(rh, rh, t3);
+        tcg_gen_mov_i32(rl, t0);
+        tcg_temp_free_i32(t0);
+        tcg_temp_free_i32(t1);
+        tcg_temp_free_i32(t2);
+        tcg_temp_free_i32(t3);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         TCGv_i64 t1 = tcg_temp_new_i64();
@@ -2455,6 +2475,26 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
         tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace muls2 with two moves.  */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_HAS_mulu2_i64) {
+        TCGv_i64 t0 = tcg_temp_new_i64();
+        TCGv_i64 t1 = tcg_temp_new_i64();
+        TCGv_i64 t2 = tcg_temp_new_i64();
+        TCGv_i64 t3 = tcg_temp_new_i64();
+        tcg_gen_op4_i64(INDEX_op_mulu2_i64, t0, t1, arg1, arg2);
+        /* Allow the optimizer room to replace mulu2 with two moves.  */
+        tcg_gen_op0(INDEX_op_nop);
+        /* Adjust for negative inputs.  */
+        tcg_gen_sari_i64(t2, arg1, 63);
+        tcg_gen_sari_i64(t3, arg2, 63);
+        tcg_gen_and_i64(t2, t2, arg2);
+        tcg_gen_and_i64(t3, t3, arg1);
+        tcg_gen_sub_i64(rh, t1, t2);
+        tcg_gen_sub_i64(rh, rh, t3);
+        tcg_gen_mov_i64(rl, t0);
+        tcg_temp_free_i64(t0);
+        tcg_temp_free_i64(t1);
+        tcg_temp_free_i64(t2);
+        tcg_temp_free_i64(t3);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         int sizemask = 0;
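The adjustment generated above relies on a two's-complement identity: the low half of the signed and unsigned double-width products is identical, and the high half of the signed product equals the high half of the unsigned product minus arg2 when arg1 is negative and minus arg1 when arg2 is negative. Below is a minimal standalone sketch of the 32-bit case in plain host C (not TCG generator calls; the function name muls2_via_mulu2 and the test values are illustrative only, not part of the patch):

```c
#include <stdint.h>
#include <stdio.h>

/* Derive the signed 32x32->64 product from an unsigned 32x32->64 multiply,
 * mirroring the code generated by tcg_gen_muls2_i32 above.  Only the high
 * half needs fixing: subtract arg2 when arg1 is negative and arg1 when
 * arg2 is negative, with all arithmetic taken modulo 2^32. */
static int64_t muls2_via_mulu2(int32_t arg1, int32_t arg2)
{
    uint64_t u  = (uint64_t)(uint32_t)arg1 * (uint32_t)arg2;
    uint32_t lo = (uint32_t)u;
    uint32_t hi = (uint32_t)(u >> 32);

    /* Counterpart of tcg_gen_sari_i32(t, arg, 31) followed by the AND:
     * an all-ones mask selects the other operand when arg is negative. */
    uint32_t t2 = (arg1 < 0 ? ~0u : 0u) & (uint32_t)arg2;
    uint32_t t3 = (arg2 < 0 ? ~0u : 0u) & (uint32_t)arg1;

    hi = hi - t2 - t3;                   /* rh = t1 - t2 - t3 */

    /* Reassemble (two's-complement reinterpretation of the high half). */
    return (int64_t)(((uint64_t)hi << 32) | lo);
}

int main(void)
{
    int32_t tests[][2] = { { -3, 7 }, { -40000, -50000 }, { INT32_MIN, 3 } };
    for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        int32_t a = tests[i][0], b = tests[i][1];
        printf("%d * %d = %lld (expected %lld)\n", a, b,
               (long long)muls2_via_mulu2(a, b), (long long)a * b);
    }
    return 0;
}
```

The sari-then-and sequence in the patch builds the same conditional masks without branches, which is why no compare or brcond ops are needed; the 64-bit hunk is structurally identical with the shift count changed from 31 to 63.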