| author | Aurelien Jarno <aurelien@aurel32.net> | 2013-09-03 01:35:43 +0200 |
|---|---|---|
| committer | Aurelien Jarno <aurelien@aurel32.net> | 2013-09-03 01:35:43 +0200 |
| commit | 545825d4cda03ea292b7788b3401b99860efe8bc (patch) | |
| tree | 22983d4bcd8f48fb38561d241ac5e2d7e51e5a22 /tcg/tcg-op.h | |
| parent | 32f3bd6d4d6d6f835cbc2b9241fe8c32d2898d73 (diff) | |
| parent | 6fb5874590589585cdcad4ca2431d9d8d4d491b1 (diff) | |
Merge branch 'tcg-next' of git://github.com/rth7680/qemu
* 'tcg-next' of git://github.com/rth7680/qemu: (29 commits)
tcg-i386: Make use of zero-extended memory helper routines
tcg: Introduce zero and sign-extended versions of load helpers
exec: Split softmmu_defs.h
target: Include softmmu_exec.h where forgotten
exec: Rename USUFFIX to LSUFFIX
tcg-i386: Don't perform GETPC adjustment in TCG code
exec: Reorganize the GETRA/GETPC macros
configure: Allow x32 as a host
tcg-i386: Adjust tcg_out_tlb_load for x32
tcg-i386: Use intptr_t appropriately
tcg: Fix jit debug for x32
tcg: Use appropriate types in tcg_reg_alloc_call
tcg: Change tcg_out_ld/st offset to intptr_t
tcg: Change tcg_gen_exit_tb argument to uintptr_t
tcg: Use uintptr_t in TCGHelperInfo
tcg: Change relocation offsets to intptr_t
tcg: Change memory offsets to intptr_t
tcg: Change frame pointer offsets to intptr_t
tcg: Define TCG_ptr properly
tcg: Define TCG_TYPE_PTR properly
...
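A recurring theme in this series is replacing `tcg_target_long` with `intptr_t`/`uintptr_t` wherever the value is really a host address. The sketch below is illustrative plain C, not QEMU code: on an ABI such as x32, host registers are 64-bit while pointers are 32-bit, so a register-sized integer is not a safe carrier for a pointer, whereas `uintptr_t` is pointer-sized by definition.

```c
/* Illustrative only, not QEMU code: uintptr_t is defined to hold any
 * void * and convert back losslessly, while a register-sized integer
 * (like the old tcg_target_long) may be wider than a pointer on x32. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int x = 42;

    uintptr_t addr = (uintptr_t)&x;   /* always exactly pointer-sized */
    int *p = (int *)addr;             /* guaranteed round trip */

    printf("*p = %d, sizeof(void *) = %zu, sizeof(uintptr_t) = %zu\n",
           *p, sizeof(void *), sizeof(uintptr_t));
    return 0;
}
```

On x32 both `sizeof` values print 4 even though host registers are 8 bytes wide; that mismatch is what the `tcg_gen_exit_tb` hunk at the bottom of the diff below addresses.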
Diffstat (limited to 'tcg/tcg-op.h')
| -rw-r--r-- | tcg/tcg-op.h | 42 |
|---|---|---|

1 file changed, 37 insertions, 5 deletions
```diff
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index 364964d8d4..bb30a7cf39 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -1039,10 +1039,18 @@ static inline void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2)
     t0 = tcg_temp_new_i64();
     t1 = tcg_temp_new_i32();
 
-    tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
-                    TCGV_LOW(arg1), TCGV_LOW(arg2));
-    /* Allow the optimizer room to replace mulu2 with two moves. */
-    tcg_gen_op0(INDEX_op_nop);
+    if (TCG_TARGET_HAS_mulu2_i32) {
+        tcg_gen_op4_i32(INDEX_op_mulu2_i32, TCGV_LOW(t0), TCGV_HIGH(t0),
+                        TCGV_LOW(arg1), TCGV_LOW(arg2));
+        /* Allow the optimizer room to replace mulu2 with two moves. */
+        tcg_gen_op0(INDEX_op_nop);
+    } else {
+        tcg_debug_assert(TCG_TARGET_HAS_muluh_i32);
+        tcg_gen_op3_i32(INDEX_op_mul_i32, TCGV_LOW(t0),
+                        TCGV_LOW(arg1), TCGV_LOW(arg2));
+        tcg_gen_op3_i32(INDEX_op_muluh_i32, TCGV_HIGH(t0),
+                        TCGV_LOW(arg1), TCGV_LOW(arg2));
+    }
 
     tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2));
     tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1);
@@ -2401,6 +2409,12 @@ static inline void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh,
         tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace mulu2 with two moves. */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_HAS_muluh_i32) {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2);
+        tcg_gen_mov_i32(rl, t);
+        tcg_temp_free_i32(t);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         TCGv_i64 t1 = tcg_temp_new_i64();
@@ -2420,6 +2434,12 @@ static inline void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh,
         tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace muls2 with two moves. */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_HAS_mulsh_i32) {
+        TCGv_i32 t = tcg_temp_new_i32();
+        tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2);
+        tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2);
+        tcg_gen_mov_i32(rl, t);
+        tcg_temp_free_i32(t);
     } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_mulu2_i32) {
         TCGv_i32 t0 = tcg_temp_new_i32();
         TCGv_i32 t1 = tcg_temp_new_i32();
@@ -2499,6 +2519,12 @@ static inline void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh,
         tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace mulu2 with two moves. */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_HAS_muluh_i64) {
+        TCGv_i64 t = tcg_temp_new_i64();
+        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2);
+        tcg_gen_mov_i64(rl, t);
+        tcg_temp_free_i64(t);
     } else if (TCG_TARGET_HAS_mulu2_i64) {
         TCGv_i64 t0 = tcg_temp_new_i64();
         TCGv_i64 t1 = tcg_temp_new_i64();
@@ -2540,6 +2566,12 @@ static inline void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 rh,
         tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2);
         /* Allow the optimizer room to replace muls2 with two moves. */
         tcg_gen_op0(INDEX_op_nop);
+    } else if (TCG_TARGET_HAS_mulsh_i64) {
+        TCGv_i64 t = tcg_temp_new_i64();
+        tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2);
+        tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2);
+        tcg_gen_mov_i64(rl, t);
+        tcg_temp_free_i64(t);
     } else {
         TCGv_i64 t0 = tcg_temp_new_i64();
         int sizemask = 0;
@@ -2599,7 +2631,7 @@ static inline void tcg_gen_debug_insn_start(uint64_t pc)
 #endif
 }
 
-static inline void tcg_gen_exit_tb(tcg_target_long val)
+static inline void tcg_gen_exit_tb(uintptr_t val)
 {
     tcg_gen_op1i(INDEX_op_exit_tb, val);
 }
```
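The fallback paths added above all follow one pattern: when the backend lacks a fused `mulu2`/`muls2` opcode but provides a high-part multiply (`muluh`/`mulsh`), the full double-width product is synthesized from two independent ops, `mul` for the low half and `muluh`/`mulsh` for the high half. A minimal sketch of the unsigned case in plain C (the helper names `mul_lo`/`mul_hi` are made up for illustration; only the arithmetic mirrors the generated ops):

```c
/* Sketch of the mulu2 -> mul + muluh decomposition; not QEMU code.
 * mul_lo/mul_hi are hypothetical names standing in for what
 * INDEX_op_mul_i32 and INDEX_op_muluh_i32 compute. */
#include <stdint.h>
#include <stdio.h>

static uint32_t mul_lo(uint32_t a, uint32_t b)
{
    return a * b;                                /* low 32 bits of a*b */
}

static uint32_t mul_hi(uint32_t a, uint32_t b)
{
    return (uint32_t)(((uint64_t)a * b) >> 32);  /* high 32 bits of a*b */
}

int main(void)
{
    uint32_t a = 0xdeadbeef, b = 0xcafef00d;

    uint64_t full  = (uint64_t)a * b;                       /* reference */
    uint64_t split = ((uint64_t)mul_hi(a, b) << 32) | mul_lo(a, b);

    /* Both lines print the same 64-bit product. */
    printf("full  = %016llx\nsplit = %016llx\n",
           (unsigned long long)full, (unsigned long long)split);
    return 0;
}
```

Note also why the new branches compute the low half into a temporary and `tcg_gen_mov` it into `rl` only at the end: `rl` may alias `arg1` or `arg2`, and writing it before the `muluh`/`mulsh` has consumed its inputs would corrupt the high half.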