author     aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-11-02 08:22:54 +0000
committer  aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>  2008-11-02 08:22:54 +0000
commit     6359706f9384d9d50fbd0ba92df18d3da5d7ed96 (patch)
tree       7a07813eee864b7e8da9d719e9c7715cd6e6b1b3
parent     0cfe58cd4470cfbbd3650ec9451cdd0b0e3058c0 (diff)
tcg-op.h: _i64 TCG immediate instructions cleanup
Move addi_i64, muli_i64 and subi_i64 out of #if TCG_TARGET_REG_BITS, as both implementations are strictly identical. Use the same optimisation (i.e. when imm == 0) for addi_i64 and subi_i64 as in the 32-bit versions.

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5598 c046a42c-6fe2-441c-8c8c-71466251a162
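For readers unfamiliar with the TCG helpers, here is a minimal self-contained C sketch of the pattern this patch unifies: an immediate variant that folds the imm == 0 case into a plain move instead of materialising a constant temporary. emit_add() and emit_addi() are hypothetical stand-ins, not the TCG API; plain int64_t values stand in for TCG temporaries.

/*
 * Sketch only: skip emitting an add when the immediate is zero.
 * emit_add() / emit_addi() are hypothetical stand-ins, NOT TCG API.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t emit_add(int64_t arg1, int64_t arg2)
{
    return arg1 + arg2;               /* stand-in for the real add op */
}

static int64_t emit_addi(int64_t arg1, int64_t arg2)
{
    if (arg2 == 0) {
        return arg1;                  /* imm == 0: plain move, no add emitted */
    }
    return emit_add(arg1, arg2);      /* otherwise materialise the constant */
}

int main(void)
{
    printf("%lld\n", (long long)emit_addi(41, 1));  /* 42 */
    printf("%lld\n", (long long)emit_addi(42, 0));  /* 42, add elided */
    return 0;
}

The same shape is used for the new tcg_gen_subi_i64 near the end of the patch, while tcg_gen_muli_i64 keeps the unconditional constant-temporary form.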
-rw-r--r--  tcg/tcg-op.h | 73
1 file changed, 31 insertions(+), 42 deletions(-)
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 18be6412d8..9c4f9270c1 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -673,26 +673,12 @@ static inline void tcg_gen_add_i64(TCGv ret, TCGv arg1, TCGv arg2)
arg1, TCGV_HIGH(arg1), arg2, TCGV_HIGH(arg2));
}
-static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_add_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
static inline void tcg_gen_sub_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
tcg_gen_op6(INDEX_op_sub2_i32, ret, TCGV_HIGH(ret),
arg1, TCGV_HIGH(arg1), arg2, TCGV_HIGH(arg2));
}
-static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_sub_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
static inline void tcg_gen_and_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
tcg_gen_and_i32(ret, arg1, arg2);
@@ -788,13 +774,6 @@ static inline void tcg_gen_mul_i64(TCGv ret, TCGv arg1, TCGv arg2)
tcg_temp_free(t1);
}
-static inline void tcg_gen_muli_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
static inline void tcg_gen_div_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
tcg_gen_helper_1_2(tcg_helper_div_i64, ret, arg1, arg2);
@@ -897,25 +876,11 @@ static inline void tcg_gen_add_i64(TCGv ret, TCGv arg1, TCGv arg2)
tcg_gen_op3(INDEX_op_add_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_add_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
static inline void tcg_gen_sub_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
tcg_gen_op3(INDEX_op_sub_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_sub_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
static inline void tcg_gen_and_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
tcg_gen_op3(INDEX_op_and_i64, ret, arg1, arg2);
@@ -1011,13 +976,6 @@ static inline void tcg_gen_mul_i64(TCGv ret, TCGv arg1, TCGv arg2)
tcg_gen_op3(INDEX_op_mul_i64, ret, arg1, arg2);
}
-static inline void tcg_gen_muli_i64(TCGv ret, TCGv arg1, int64_t arg2)
-{
- TCGv t0 = tcg_const_i64(arg2);
- tcg_gen_mul_i64(ret, arg1, t0);
- tcg_temp_free(t0);
-}
-
#ifdef TCG_TARGET_HAS_div_i64
static inline void tcg_gen_div_i64(TCGv ret, TCGv arg1, TCGv arg2)
{
@@ -1078,6 +1036,18 @@ static inline void tcg_gen_remu_i64(TCGv ret, TCGv arg1, TCGv arg2)
#endif
+static inline void tcg_gen_addi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_add_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
static inline void tcg_gen_brcondi_i64(int cond, TCGv arg1, int64_t arg2,
int label_index)
{
@@ -1086,6 +1056,25 @@ static inline void tcg_gen_brcondi_i64(int cond, TCGv arg1, int64_t arg2,
tcg_temp_free(t0);
}
+static inline void tcg_gen_muli_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_mul_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+}
+
+static inline void tcg_gen_subi_i64(TCGv ret, TCGv arg1, int64_t arg2)
+{
+ /* some cases can be optimized here */
+ if (arg2 == 0) {
+ tcg_gen_mov_i64(ret, arg1);
+ } else {
+ TCGv t0 = tcg_const_i64(arg2);
+ tcg_gen_sub_i64(ret, arg1, t0);
+ tcg_temp_free(t0);
+ }
+}
+
/***************************************/
/* optional operations */