/* * Tiny Code Generator for QEMU * * Copyright (c) 2008 Fabrice Bellard * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "qemu/osdep.h" #include "exec/exec-all.h" #include "tcg/tcg.h" #include "tcg/tcg-temp-internal.h" #include "tcg/tcg-op.h" #include "tcg/tcg-mo.h" #include "exec/plugin-gen.h" #include "tcg-internal.h" void tcg_gen_op1(TCGOpcode opc, TCGArg a1) { TCGOp *op = tcg_emit_op(opc, 1); op->args[0] = a1; } void tcg_gen_op2(TCGOpcode opc, TCGArg a1, TCGArg a2) { TCGOp *op = tcg_emit_op(opc, 2); op->args[0] = a1; op->args[1] = a2; } void tcg_gen_op3(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3) { TCGOp *op = tcg_emit_op(opc, 3); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; } void tcg_gen_op4(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4) { TCGOp *op = tcg_emit_op(opc, 4); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; } void tcg_gen_op5(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5) { TCGOp *op = tcg_emit_op(opc, 5); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; } void tcg_gen_op6(TCGOpcode opc, TCGArg a1, TCGArg a2, TCGArg a3, TCGArg a4, TCGArg a5, TCGArg a6) { TCGOp *op = tcg_emit_op(opc, 6); op->args[0] = a1; op->args[1] = a2; op->args[2] = a3; op->args[3] = a4; op->args[4] = a5; op->args[5] = a6; } /* Generic ops. */ static void add_last_as_label_use(TCGLabel *l) { TCGLabelUse *u = tcg_malloc(sizeof(TCGLabelUse)); u->op = tcg_last_op(); QSIMPLEQ_INSERT_TAIL(&l->branches, u, next); } void tcg_gen_br(TCGLabel *l) { tcg_gen_op1(INDEX_op_br, label_arg(l)); add_last_as_label_use(l); } void tcg_gen_mb(TCGBar mb_type) { if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { tcg_gen_op1(INDEX_op_mb, mb_type); } } /* 32 bit ops */ void tcg_gen_movi_i32(TCGv_i32 ret, int32_t arg) { tcg_gen_mov_i32(ret, tcg_constant_i32(arg)); } void tcg_gen_addi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_add_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_subfi_i32(TCGv_i32 ret, int32_t arg1, TCGv_i32 arg2) { if (arg1 == 0 && TCG_TARGET_HAS_neg_i32) { /* Don't recurse with tcg_gen_neg_i32. 
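Without a native neg opcode, tcg_gen_neg_i32 itself falls back to tcg_gen_subfi_i32(ret, 0, arg), so emitting the opcode directly here avoids infinite mutual recursion.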
*/ tcg_gen_op2_i32(INDEX_op_neg_i32, ret, arg2); } else { tcg_gen_sub_i32(ret, tcg_constant_i32(arg1), arg2); } } void tcg_gen_subi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_sub_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_andi_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* Some cases can be optimized here. */ switch (arg2) { case 0: tcg_gen_movi_i32(ret, 0); return; case -1: tcg_gen_mov_i32(ret, arg1); return; case 0xff: /* Don't recurse with tcg_gen_ext8u_i32. */ if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg1); return; } break; case 0xffff: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg1); return; } break; } tcg_gen_and_i32(ret, arg1, tcg_constant_i32(arg2)); } void tcg_gen_ori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* Some cases can be optimized here. */ if (arg2 == -1) { tcg_gen_movi_i32(ret, -1); } else if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_or_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_xori_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { /* Some cases can be optimized here. */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else if (arg2 == -1 && TCG_TARGET_HAS_not_i32) { /* Don't recurse with tcg_gen_not_i32. */ tcg_gen_op2_i32(INDEX_op_not_i32, ret, arg1); } else { tcg_gen_xor_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_shli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_shl_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_shri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_shr_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_sari_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_sar_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_brcond_i32(TCGCond cond, TCGv_i32 arg1, TCGv_i32 arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { tcg_gen_op4ii_i32(INDEX_op_brcond_i32, arg1, arg2, cond, label_arg(l)); add_last_as_label_use(l); } } void tcg_gen_brcondi_i32(TCGCond cond, TCGv_i32 arg1, int32_t arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { tcg_gen_brcond_i32(cond, arg1, tcg_constant_i32(arg2), l); } } void tcg_gen_setcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_movi_i32(ret, 1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i32(ret, 0); } else { tcg_gen_op4i_i32(INDEX_op_setcond_i32, ret, arg1, arg2, cond); } } void tcg_gen_setcondi_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_gen_setcond_i32(cond, ret, arg1, tcg_constant_i32(arg2)); } void tcg_gen_muli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { if (arg2 == 0) { tcg_gen_movi_i32(ret, 0); } else if (is_power_of_2(arg2)) { tcg_gen_shli_i32(ret, arg1, ctz32(arg2)); } else { tcg_gen_mul_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_div_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(INDEX_op_div_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); 
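/* The div2 opcode divides a double-word dividend by arg2 in a single op, producing both quotient and remainder; the sign bits of arg1 supply the dividend's high word. */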
tcg_gen_sari_i32(t0, arg1, 31); tcg_gen_op5_i32(INDEX_op_div2_i32, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_div_i32(ret, arg1, arg2); } } void tcg_gen_rem_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(INDEX_op_rem_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_div_i32, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t0, arg1, 31); tcg_gen_op5_i32(INDEX_op_div2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_rem_i32(ret, arg1, arg2); } } void tcg_gen_divu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_div_i32) { tcg_gen_op3_i32(INDEX_op_divu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_movi_i32(t0, 0); tcg_gen_op5_i32(INDEX_op_divu2_i32, ret, t0, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_divu_i32(ret, arg1, arg2); } } void tcg_gen_remu_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rem_i32) { tcg_gen_op3_i32(INDEX_op_remu_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_divu_i32, t0, arg1, arg2); tcg_gen_mul_i32(t0, t0, arg2); tcg_gen_sub_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } else if (TCG_TARGET_HAS_div2_i32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_movi_i32(t0, 0); tcg_gen_op5_i32(INDEX_op_divu2_i32, t0, ret, arg1, t0, arg2); tcg_temp_free_i32(t0); } else { gen_helper_remu_i32(ret, arg1, arg2); } } void tcg_gen_andc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_andc_i32) { tcg_gen_op3_i32(INDEX_op_andc_i32, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); tcg_gen_and_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } } void tcg_gen_eqv_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_eqv_i32) { tcg_gen_op3_i32(INDEX_op_eqv_i32, ret, arg1, arg2); } else { tcg_gen_xor_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); } } void tcg_gen_nand_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_nand_i32) { tcg_gen_op3_i32(INDEX_op_nand_i32, ret, arg1, arg2); } else { tcg_gen_and_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); } } void tcg_gen_nor_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_nor_i32) { tcg_gen_op3_i32(INDEX_op_nor_i32, ret, arg1, arg2); } else { tcg_gen_or_i32(ret, arg1, arg2); tcg_gen_not_i32(ret, ret); } } void tcg_gen_orc_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_orc_i32) { tcg_gen_op3_i32(INDEX_op_orc_i32, ret, arg1, arg2); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_not_i32(t0, arg2); tcg_gen_or_i32(ret, arg1, t0); tcg_temp_free_i32(t0); } } void tcg_gen_clz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_clz_i32) { tcg_gen_op3_i32(INDEX_op_clz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_clz_i64) { TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); tcg_gen_extu_i32_i64(t2, arg2); tcg_gen_addi_i64(t2, t2, 32); tcg_gen_clz_i64(t1, t1, t2); tcg_gen_extrl_i64_i32(ret, t1); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); tcg_gen_subi_i32(ret, ret, 32); } else { gen_helper_clz_i32(ret, arg1, arg2); } } void 
tcg_gen_clzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { tcg_gen_clz_i32(ret, arg1, tcg_constant_i32(arg2)); } void tcg_gen_ctz_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_ctz_i32) { tcg_gen_op3_i32(INDEX_op_ctz_i32, ret, arg1, arg2); } else if (TCG_TARGET_HAS_ctz_i64) { TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t1, arg1); tcg_gen_extu_i32_i64(t2, arg2); tcg_gen_ctz_i64(t1, t1, t2); tcg_gen_extrl_i64_i32(ret, t1); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } else if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i32 || TCG_TARGET_HAS_clz_i64) { TCGv_i32 z, t = tcg_temp_ebb_new_i32(); if (TCG_TARGET_HAS_ctpop_i32 || TCG_TARGET_HAS_ctpop_i64) { tcg_gen_subi_i32(t, arg1, 1); tcg_gen_andc_i32(t, t, arg1); tcg_gen_ctpop_i32(t, t); } else { /* Since all non-x86 hosts have clz(0) == 32, don't fight it. */ tcg_gen_neg_i32(t, arg1); tcg_gen_and_i32(t, t, arg1); tcg_gen_clzi_i32(t, t, 32); tcg_gen_xori_i32(t, t, 31); } z = tcg_constant_i32(0); tcg_gen_movcond_i32(TCG_COND_EQ, ret, arg1, z, arg2, t); tcg_temp_free_i32(t); } else { gen_helper_ctz_i32(ret, arg1, arg2); } } void tcg_gen_ctzi_i32(TCGv_i32 ret, TCGv_i32 arg1, uint32_t arg2) { if (!TCG_TARGET_HAS_ctz_i32 && TCG_TARGET_HAS_ctpop_i32 && arg2 == 32) { /* This equivalence has the advantage of not requiring a fixup. */ TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_subi_i32(t, arg1, 1); tcg_gen_andc_i32(t, t, arg1); tcg_gen_ctpop_i32(ret, t); tcg_temp_free_i32(t); } else { tcg_gen_ctz_i32(ret, arg1, tcg_constant_i32(arg2)); } } void tcg_gen_clrsb_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_clz_i32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t, arg, 31); tcg_gen_xor_i32(t, t, arg); tcg_gen_clzi_i32(t, t, 32); tcg_gen_subi_i32(ret, t, 1); tcg_temp_free_i32(t); } else { gen_helper_clrsb_i32(ret, arg); } } void tcg_gen_ctpop_i32(TCGv_i32 ret, TCGv_i32 arg1) { if (TCG_TARGET_HAS_ctpop_i32) { tcg_gen_op2_i32(INDEX_op_ctpop_i32, ret, arg1); } else if (TCG_TARGET_HAS_ctpop_i64) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t, arg1); tcg_gen_ctpop_i64(t, t); tcg_gen_extrl_i64_i32(ret, t); tcg_temp_free_i64(t); } else { gen_helper_ctpop_i32(ret, arg1); } } void tcg_gen_rotl_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rot_i32) { tcg_gen_op3_i32(INDEX_op_rotl_i32, ret, arg1, arg2); } else { TCGv_i32 t0, t1; t0 = tcg_temp_ebb_new_i32(); t1 = tcg_temp_ebb_new_i32(); tcg_gen_shl_i32(t0, arg1, arg2); tcg_gen_subfi_i32(t1, 32, arg2); tcg_gen_shr_i32(t1, arg1, t1); tcg_gen_or_i32(ret, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_rotli_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else if (TCG_TARGET_HAS_rot_i32) { tcg_gen_rotl_i32(ret, arg1, tcg_constant_i32(arg2)); } else { TCGv_i32 t0, t1; t0 = tcg_temp_ebb_new_i32(); t1 = tcg_temp_ebb_new_i32(); tcg_gen_shli_i32(t0, arg1, arg2); tcg_gen_shri_i32(t1, arg1, 32 - arg2); tcg_gen_or_i32(ret, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_rotr_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_rot_i32) { tcg_gen_op3_i32(INDEX_op_rotr_i32, ret, arg1, arg2); } else { TCGv_i32 t0, t1; t0 = tcg_temp_ebb_new_i32(); t1 = tcg_temp_ebb_new_i32(); tcg_gen_shr_i32(t0, arg1, arg2); tcg_gen_subfi_i32(t1, 32, arg2); tcg_gen_shl_i32(t1, arg1, t1); 
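/* Combine the halves: ret = (arg1 >> c) | (arg1 << (32 - c)), i.e. a right rotation by c. */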
tcg_gen_or_i32(ret, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_rotri_i32(TCGv_i32 ret, TCGv_i32 arg1, int32_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 32); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i32(ret, arg1); } else { tcg_gen_rotli_i32(ret, arg1, 32 - arg2); } } void tcg_gen_deposit_i32(TCGv_i32 ret, TCGv_i32 arg1, TCGv_i32 arg2, unsigned int ofs, unsigned int len) { uint32_t mask; TCGv_i32 t1; tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); if (len == 32) { tcg_gen_mov_i32(ret, arg2); return; } if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, arg1, arg2, ofs, len); return; } t1 = tcg_temp_ebb_new_i32(); if (TCG_TARGET_HAS_extract2_i32) { if (ofs + len == 32) { tcg_gen_shli_i32(t1, arg1, len); tcg_gen_extract2_i32(ret, t1, arg2, len); goto done; } if (ofs == 0) { tcg_gen_extract2_i32(ret, arg1, arg2, len); tcg_gen_rotli_i32(ret, ret, len); goto done; } } mask = (1u << len) - 1; if (ofs + len < 32) { tcg_gen_andi_i32(t1, arg2, mask); tcg_gen_shli_i32(t1, t1, ofs); } else { tcg_gen_shli_i32(t1, arg2, ofs); } tcg_gen_andi_i32(ret, arg1, ~(mask << ofs)); tcg_gen_or_i32(ret, ret, t1); done: tcg_temp_free_i32(t1); } void tcg_gen_deposit_z_i32(TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); if (ofs + len == 32) { tcg_gen_shli_i32(ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i32(ret, arg, (1u << len) - 1); } else if (TCG_TARGET_HAS_deposit_i32 && TCG_TARGET_deposit_i32_valid(ofs, len)) { TCGv_i32 zero = tcg_constant_i32(0); tcg_gen_op5ii_i32(INDEX_op_deposit_i32, ret, zero, arg, ofs, len); } else { /* To help two-operand hosts we prefer to zero-extend first, which allows ARG to stay live. */ switch (len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_ext16u_i32(ret, arg); tcg_gen_shli_i32(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_ext8u_i32(ret, arg); tcg_gen_shli_i32(ret, ret, ofs); return; } break; } /* Otherwise prefer zero-extension over AND for code size. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_shli_i32(ret, arg, ofs); tcg_gen_ext16u_i32(ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_shli_i32(ret, arg, ofs); tcg_gen_ext8u_i32(ret, ret); return; } break; } tcg_gen_andi_i32(ret, arg, (1u << len) - 1); tcg_gen_shli_i32(ret, ret, ofs); } } void tcg_gen_extract_i32(TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 32) { tcg_gen_shri_i32(ret, arg, 32 - len); return; } if (ofs == 0) { tcg_gen_andi_i32(ret, arg, (1u << len) - 1); return; } if (TCG_TARGET_HAS_extract_i32 && TCG_TARGET_extract_i32_valid(ofs, len)) { tcg_gen_op4ii_i32(INDEX_op_extract_i32, ret, arg, ofs, len); return; } /* Assume that zero-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_ext16u_i32(ret, arg); tcg_gen_shri_i32(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_ext8u_i32(ret, arg); tcg_gen_shri_i32(ret, ret, ofs); return; } break; } /* ??? 
Ideally we'd know what values are available for immediate AND. Assume that 8 bits are available, plus the special case of 16, so that we get ext8u, ext16u. */ switch (len) { case 1 ... 8: case 16: tcg_gen_shri_i32(ret, arg, ofs); tcg_gen_andi_i32(ret, ret, (1u << len) - 1); break; default: tcg_gen_shli_i32(ret, arg, 32 - len - ofs); tcg_gen_shri_i32(ret, ret, 32 - len); break; } } void tcg_gen_sextract_i32(TCGv_i32 ret, TCGv_i32 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 32); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 32); tcg_debug_assert(ofs + len <= 32); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 32) { tcg_gen_sari_i32(ret, arg, 32 - len); return; } if (ofs == 0) { switch (len) { case 16: tcg_gen_ext16s_i32(ret, arg); return; case 8: tcg_gen_ext8s_i32(ret, arg); return; } } if (TCG_TARGET_HAS_sextract_i32 && TCG_TARGET_extract_i32_valid(ofs, len)) { tcg_gen_op4ii_i32(INDEX_op_sextract_i32, ret, arg, ofs, len); return; } /* Assume that sign-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 16: if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_ext16s_i32(ret, arg); tcg_gen_sari_i32(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_ext8s_i32(ret, arg); tcg_gen_sari_i32(ret, ret, ofs); return; } break; } switch (len) { case 16: if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_shri_i32(ret, arg, ofs); tcg_gen_ext16s_i32(ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_shri_i32(ret, arg, ofs); tcg_gen_ext8s_i32(ret, ret); return; } break; } tcg_gen_shli_i32(ret, arg, 32 - len - ofs); tcg_gen_sari_i32(ret, ret, 32 - len); } /* * Extract 32-bits from a 64-bit input, ah:al, starting from ofs. * Unlike tcg_gen_extract_i32 above, len is fixed at 32. 
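* That is, ret = (uint32_t)((ah:al) >> ofs); e.g. ofs == 8 selects bits [8, 40) of the concatenated pair.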
*/ void tcg_gen_extract2_i32(TCGv_i32 ret, TCGv_i32 al, TCGv_i32 ah, unsigned int ofs) { tcg_debug_assert(ofs <= 32); if (ofs == 0) { tcg_gen_mov_i32(ret, al); } else if (ofs == 32) { tcg_gen_mov_i32(ret, ah); } else if (al == ah) { tcg_gen_rotri_i32(ret, al, ofs); } else if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_op4i_i32(INDEX_op_extract2_i32, ret, al, ah, ofs); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, al, ofs); tcg_gen_deposit_i32(ret, t0, ah, 32 - ofs, ofs); tcg_temp_free_i32(t0); } } void tcg_gen_movcond_i32(TCGCond cond, TCGv_i32 ret, TCGv_i32 c1, TCGv_i32 c2, TCGv_i32 v1, TCGv_i32 v2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_mov_i32(ret, v1); } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i32(ret, v2); } else if (TCG_TARGET_HAS_movcond_i32) { tcg_gen_op6i_i32(INDEX_op_movcond_i32, ret, c1, c2, v1, v2, cond); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_setcond_i32(cond, t0, c1, c2); tcg_gen_neg_i32(t0, t0); tcg_gen_and_i32(t1, v1, t0); tcg_gen_andc_i32(ret, v2, t0); tcg_gen_or_i32(ret, ret, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_add2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { if (TCG_TARGET_HAS_add2_i32) { tcg_gen_op6_i32(INDEX_op_add2_i32, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_concat_i32_i64(t0, al, ah); tcg_gen_concat_i32_i64(t1, bl, bh); tcg_gen_add_i64(t0, t0, t1); tcg_gen_extr_i64_i32(rl, rh, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_sub2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 al, TCGv_i32 ah, TCGv_i32 bl, TCGv_i32 bh) { if (TCG_TARGET_HAS_sub2_i32) { tcg_gen_op6_i32(INDEX_op_sub2_i32, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_concat_i32_i64(t0, al, ah); tcg_gen_concat_i32_i64(t1, bl, bh); tcg_gen_sub_i64(t0, t0, t1); tcg_gen_extr_i64_i32(rl, rh, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_mulu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_mulu2_i32) { tcg_gen_op4_i32(INDEX_op_mulu2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_muluh_i32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(INDEX_op_muluh_i32, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_extu_i32_i64(t0, arg1); tcg_gen_extu_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_extr_i64_i32(rl, rh, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } else { qemu_build_not_reached(); } } void tcg_gen_muls2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_HAS_muls2_i32) { tcg_gen_op4_i32(INDEX_op_muls2_i32, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_mulsh_i32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_op3_i32(INDEX_op_mul_i32, t, arg1, arg2); tcg_gen_op3_i32(INDEX_op_mulsh_i32, rh, arg1, arg2); tcg_gen_mov_i32(rl, t); tcg_temp_free_i32(t); } else if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_temp_ebb_new_i32(); TCGv_i32 t3 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(t0, t1, arg1, arg2); /* Adjust for negative inputs. 
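Reading a negative arg1 as unsigned overstates it by 2**32, which inflates the high word of the product by arg2; likewise for a negative arg2. Mask and subtract both corrections.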
*/ tcg_gen_sari_i32(t2, arg1, 31); tcg_gen_sari_i32(t3, arg2, 31); tcg_gen_and_i32(t2, t2, arg2); tcg_gen_and_i32(t3, t3, arg1); tcg_gen_sub_i32(rh, t1, t2); tcg_gen_sub_i32(rh, rh, t3); tcg_gen_mov_i32(rl, t0); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); tcg_temp_free_i32(t3); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_ext_i32_i64(t0, arg1); tcg_gen_ext_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_extr_i64_i32(rl, rh, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_mulsu2_i32(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, TCGv_i32 arg2) { if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. */ tcg_gen_sari_i32(t2, arg1, 31); tcg_gen_and_i32(t2, t2, arg2); tcg_gen_sub_i32(rh, t1, t2); tcg_gen_mov_i32(rl, t0); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_ext_i32_i64(t0, arg1); tcg_gen_extu_i32_i64(t1, arg2); tcg_gen_mul_i64(t0, t0, t1); tcg_gen_extr_i64_i32(rl, rh, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_ext8s_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext8s_i32) { tcg_gen_op2_i32(INDEX_op_ext8s_i32, ret, arg); } else { tcg_gen_shli_i32(ret, arg, 24); tcg_gen_sari_i32(ret, ret, 24); } } void tcg_gen_ext16s_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext16s_i32) { tcg_gen_op2_i32(INDEX_op_ext16s_i32, ret, arg); } else { tcg_gen_shli_i32(ret, arg, 16); tcg_gen_sari_i32(ret, ret, 16); } } void tcg_gen_ext8u_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext8u_i32) { tcg_gen_op2_i32(INDEX_op_ext8u_i32, ret, arg); } else { tcg_gen_andi_i32(ret, arg, 0xffu); } } void tcg_gen_ext16u_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_ext16u_i32) { tcg_gen_op2_i32(INDEX_op_ext16u_i32, ret, arg); } else { tcg_gen_andi_i32(ret, arg, 0xffffu); } } void tcg_gen_bswap16_i32(TCGv_i32 ret, TCGv_i32 arg, int flags) { /* Only one extension flag may be present. */ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); if (TCG_TARGET_HAS_bswap16_i32) { tcg_gen_op3i_i32(INDEX_op_bswap16_i32, ret, arg, flags); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, arg, 8); if (!(flags & TCG_BSWAP_IZ)) { tcg_gen_ext8u_i32(t0, t0); } if (flags & TCG_BSWAP_OS) { tcg_gen_shli_i32(t1, arg, 24); tcg_gen_sari_i32(t1, t1, 16); } else if (flags & TCG_BSWAP_OZ) { tcg_gen_ext8u_i32(t1, arg); tcg_gen_shli_i32(t1, t1, 8); } else { tcg_gen_shli_i32(t1, arg, 8); } tcg_gen_or_i32(ret, t0, t1); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_bswap32_i32(TCGv_i32 ret, TCGv_i32 arg) { if (TCG_TARGET_HAS_bswap32_i32) { tcg_gen_op3i_i32(INDEX_op_bswap32_i32, ret, arg, 0); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_constant_i32(0x00ff00ff); /* arg = abcd */ tcg_gen_shri_i32(t0, arg, 8); /* t0 = .abc */ tcg_gen_and_i32(t1, arg, t2); /* t1 = .b.d */ tcg_gen_and_i32(t0, t0, t2); /* t0 = .a.c */ tcg_gen_shli_i32(t1, t1, 8); /* t1 = b.d. */ tcg_gen_or_i32(ret, t0, t1); /* ret = badc */ tcg_gen_shri_i32(t0, ret, 16); /* t0 = ..ba */ tcg_gen_shli_i32(t1, ret, 16); /* t1 = dc.. 
*/ tcg_gen_or_i32(ret, t0, t1); /* ret = dcba */ tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } } void tcg_gen_hswap_i32(TCGv_i32 ret, TCGv_i32 arg) { /* Swapping 2 16-bit elements is a rotate. */ tcg_gen_rotli_i32(ret, arg, 16); } void tcg_gen_smin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, a, b); } void tcg_gen_umin_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, a, b); } void tcg_gen_smax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(TCG_COND_LT, ret, a, b, b, a); } void tcg_gen_umax_i32(TCGv_i32 ret, TCGv_i32 a, TCGv_i32 b) { tcg_gen_movcond_i32(TCG_COND_LTU, ret, a, b, b, a); } void tcg_gen_abs_i32(TCGv_i32 ret, TCGv_i32 a) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_sari_i32(t, a, 31); tcg_gen_xor_i32(ret, a, t); tcg_gen_sub_i32(ret, ret, t); tcg_temp_free_i32(t); } /* 64-bit ops */ #if TCG_TARGET_REG_BITS == 32 /* These are all inline for TCG_TARGET_REG_BITS == 64. */ void tcg_gen_discard_i64(TCGv_i64 arg) { tcg_gen_discard_i32(TCGV_LOW(arg)); tcg_gen_discard_i32(TCGV_HIGH(arg)); } void tcg_gen_mov_i64(TCGv_i64 ret, TCGv_i64 arg) { TCGTemp *ts = tcgv_i64_temp(arg); /* Canonicalize TCGv_i64 TEMP_CONST into TCGv_i32 TEMP_CONST. */ if (ts->kind == TEMP_CONST) { tcg_gen_movi_i64(ret, ts->val); } else { tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); } } void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) { tcg_gen_movi_i32(TCGV_LOW(ret), arg); tcg_gen_movi_i32(TCGV_HIGH(ret), arg >> 32); } void tcg_gen_ld8u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld8u_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } void tcg_gen_ld8s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld8s_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } void tcg_gen_ld16u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld16u_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } void tcg_gen_ld16s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld16s_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } void tcg_gen_ld32u_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } void tcg_gen_ld32s_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } void tcg_gen_ld_i64(TCGv_i64 ret, TCGv_ptr arg2, tcg_target_long offset) { /* Since arg2 and ret have different types, they cannot be the same temporary */ #if HOST_BIG_ENDIAN tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset); tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset + 4); #else tcg_gen_ld_i32(TCGV_LOW(ret), arg2, offset); tcg_gen_ld_i32(TCGV_HIGH(ret), arg2, offset + 4); #endif } void tcg_gen_st8_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st8_i32(TCGV_LOW(arg1), arg2, offset); } void tcg_gen_st16_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st16_i32(TCGV_LOW(arg1), arg2, offset); } void tcg_gen_st32_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset); } void tcg_gen_st_i64(TCGv_i64 arg1, TCGv_ptr arg2, tcg_target_long offset) { #if HOST_BIG_ENDIAN tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset); tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset + 4); 
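/* Big-endian host: the high half lives at the lower address, the low half at offset + 4. */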
#else tcg_gen_st_i32(TCGV_LOW(arg1), arg2, offset); tcg_gen_st_i32(TCGV_HIGH(arg1), arg2, offset + 4); #endif } void tcg_gen_add_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2)); } void tcg_gen_sub_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2)); } void tcg_gen_and_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_and_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_and_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } void tcg_gen_or_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } void tcg_gen_xor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { tcg_gen_xor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_xor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } void tcg_gen_shl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { gen_helper_shl_i64(ret, arg1, arg2); } void tcg_gen_shr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { gen_helper_shr_i64(ret, arg1, arg2); } void tcg_gen_sar_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { gen_helper_sar_i64(ret, arg1, arg2); } void tcg_gen_mul_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { TCGv_i64 t0; TCGv_i32 t1; t0 = tcg_temp_ebb_new_i64(); t1 = tcg_temp_ebb_new_i32(); tcg_gen_mulu2_i32(TCGV_LOW(t0), TCGV_HIGH(t0), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_mul_i32(t1, TCGV_LOW(arg1), TCGV_HIGH(arg2)); tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1); tcg_gen_mul_i32(t1, TCGV_HIGH(arg1), TCGV_LOW(arg2)); tcg_gen_add_i32(TCGV_HIGH(t0), TCGV_HIGH(t0), t1); tcg_gen_mov_i64(ret, t0); tcg_temp_free_i64(t0); tcg_temp_free_i32(t1); } #else void tcg_gen_movi_i64(TCGv_i64 ret, int64_t arg) { tcg_gen_mov_i64(ret, tcg_constant_i64(arg)); } #endif /* TCG_TARGET_REG_BITS == 32 */ void tcg_gen_addi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else if (TCG_TARGET_REG_BITS == 64) { tcg_gen_add_i64(ret, arg1, tcg_constant_i64(arg2)); } else { tcg_gen_add2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); } } void tcg_gen_subfi_i64(TCGv_i64 ret, int64_t arg1, TCGv_i64 arg2) { if (arg1 == 0 && TCG_TARGET_HAS_neg_i64) { /* Don't recurse with tcg_gen_neg_i64. */ tcg_gen_op2_i64(INDEX_op_neg_i64, ret, arg2); } else if (TCG_TARGET_REG_BITS == 64) { tcg_gen_sub_i64(ret, tcg_constant_i64(arg1), arg2); } else { tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), tcg_constant_i32(arg1), tcg_constant_i32(arg1 >> 32), TCGV_LOW(arg2), TCGV_HIGH(arg2)); } } void tcg_gen_subi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else if (TCG_TARGET_REG_BITS == 64) { tcg_gen_sub_i64(ret, arg1, tcg_constant_i64(arg2)); } else { tcg_gen_sub2_i32(TCGV_LOW(ret), TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32)); } } void tcg_gen_andi_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_andi_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); tcg_gen_andi_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); return; } /* Some cases can be optimized here. 
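Masks of 0 and -1 fold to a constant or a plain move, while 0xff, 0xffff and 0xffffffff map onto single zero-extension opcodes when the backend provides them.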
*/ switch (arg2) { case 0: tcg_gen_movi_i64(ret, 0); return; case -1: tcg_gen_mov_i64(ret, arg1); return; case 0xff: /* Don't recurse with tcg_gen_ext8u_i64. */ if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg1); return; } break; case 0xffff: if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg1); return; } break; case 0xffffffffu: if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg1); return; } break; } tcg_gen_and_i64(ret, arg1, tcg_constant_i64(arg2)); } void tcg_gen_ori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_ori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); tcg_gen_ori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); return; } /* Some cases can be optimized here. */ if (arg2 == -1) { tcg_gen_movi_i64(ret, -1); } else if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else { tcg_gen_or_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_xori_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_xori_i32(TCGV_LOW(ret), TCGV_LOW(arg1), arg2); tcg_gen_xori_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), arg2 >> 32); return; } /* Some cases can be optimized here. */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else if (arg2 == -1 && TCG_TARGET_HAS_not_i64) { /* Don't recurse with tcg_gen_not_i64. */ tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg1); } else { tcg_gen_xor_i64(ret, arg1, tcg_constant_i64(arg2)); } } static inline void tcg_gen_shifti_i64(TCGv_i64 ret, TCGv_i64 arg1, unsigned c, bool right, bool arith) { tcg_debug_assert(c < 64); if (c == 0) { tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); } else if (c >= 32) { c -= 32; if (right) { if (arith) { tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), 31); } else { tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), c); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } } else { tcg_gen_shli_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), c); tcg_gen_movi_i32(TCGV_LOW(ret), 0); } } else if (right) { if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_extract2_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), c); } else { tcg_gen_shri_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c); tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(arg1), 32 - c, c); } if (arith) { tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); } else { tcg_gen_shri_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), c); } } else { if (TCG_TARGET_HAS_extract2_i32) { tcg_gen_extract2_i32(TCGV_HIGH(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), 32 - c); } else { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); tcg_gen_shri_i32(t0, TCGV_LOW(arg1), 32 - c); tcg_gen_deposit_i32(TCGV_HIGH(ret), t0, TCGV_HIGH(arg1), c, 32 - c); tcg_temp_free_i32(t0); } tcg_gen_shli_i32(TCGV_LOW(ret), TCGV_LOW(arg1), c); } } void tcg_gen_shli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); if (TCG_TARGET_REG_BITS == 32) { tcg_gen_shifti_i64(ret, arg1, arg2, 0, 0); } else if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else { tcg_gen_shl_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_shri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); if (TCG_TARGET_REG_BITS == 32) { tcg_gen_shifti_i64(ret, arg1, arg2, 1, 0); } else if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else { tcg_gen_shr_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_sari_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { 
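/* Constant arithmetic right shift; on 32-bit hosts this lowers to the double-word shifti expansion above with right = 1, arith = 1. */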
tcg_debug_assert(arg2 >= 0 && arg2 < 64); if (TCG_TARGET_REG_BITS == 32) { tcg_gen_shifti_i64(ret, arg1, arg2, 1, 1); } else if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else { tcg_gen_sar_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_brcond_i64(TCGCond cond, TCGv_i64 arg1, TCGv_i64 arg2, TCGLabel *l) { if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2), cond, label_arg(l)); } else { tcg_gen_op4ii_i64(INDEX_op_brcond_i64, arg1, arg2, cond, label_arg(l)); } add_last_as_label_use(l); } } void tcg_gen_brcondi_i64(TCGCond cond, TCGv_i64 arg1, int64_t arg2, TCGLabel *l) { if (TCG_TARGET_REG_BITS == 64) { tcg_gen_brcond_i64(cond, arg1, tcg_constant_i64(arg2), l); } else if (cond == TCG_COND_ALWAYS) { tcg_gen_br(l); } else if (cond != TCG_COND_NEVER) { tcg_gen_op6ii_i32(INDEX_op_brcond2_i32, TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32), cond, label_arg(l)); add_last_as_label_use(l); } } void tcg_gen_setcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_movi_i64(ret, 1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i64(ret, 0); } else { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), TCGV_LOW(arg2), TCGV_HIGH(arg2), cond); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else { tcg_gen_op4i_i64(INDEX_op_setcond_i64, ret, arg1, arg2, cond); } } } void tcg_gen_setcondi_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (TCG_TARGET_REG_BITS == 64) { tcg_gen_setcond_i64(cond, ret, arg1, tcg_constant_i64(arg2)); } else if (cond == TCG_COND_ALWAYS) { tcg_gen_movi_i64(ret, 1); } else if (cond == TCG_COND_NEVER) { tcg_gen_movi_i64(ret, 0); } else { tcg_gen_op6i_i32(INDEX_op_setcond2_i32, TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_HIGH(arg1), tcg_constant_i32(arg2), tcg_constant_i32(arg2 >> 32), cond); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } } void tcg_gen_muli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { if (arg2 == 0) { tcg_gen_movi_i64(ret, 0); } else if (is_power_of_2(arg2)) { tcg_gen_shli_i64(ret, arg1, ctz64(arg2)); } else { tcg_gen_mul_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_div_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_div_i64) { tcg_gen_op3_i64(INDEX_op_div_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); tcg_gen_op5_i64(INDEX_op_div2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_div_i64(ret, arg1, arg2); } } void tcg_gen_rem_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_rem_i64) { tcg_gen_op3_i64(INDEX_op_rem_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_div_i64, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } else if (TCG_TARGET_HAS_div2_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t0, arg1, 63); tcg_gen_op5_i64(INDEX_op_div2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_rem_i64(ret, arg1, arg2); } } void tcg_gen_divu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_div_i64) { tcg_gen_op3_i64(INDEX_op_divu_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div2_i64) { 
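/* divu2 likewise takes a double-word dividend; for unsigned division its high word is simply zero. */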
TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_movi_i64(t0, 0); tcg_gen_op5_i64(INDEX_op_divu2_i64, ret, t0, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_divu_i64(ret, arg1, arg2); } } void tcg_gen_remu_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_rem_i64) { tcg_gen_op3_i64(INDEX_op_remu_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_div_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_divu_i64, t0, arg1, arg2); tcg_gen_mul_i64(t0, t0, arg2); tcg_gen_sub_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } else if (TCG_TARGET_HAS_div2_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_movi_i64(t0, 0); tcg_gen_op5_i64(INDEX_op_divu2_i64, t0, ret, arg1, t0, arg2); tcg_temp_free_i64(t0); } else { gen_helper_remu_i64(ret, arg1, arg2); } } void tcg_gen_ext8s_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_ext8s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else if (TCG_TARGET_HAS_ext8s_i64) { tcg_gen_op2_i64(INDEX_op_ext8s_i64, ret, arg); } else { tcg_gen_shli_i64(ret, arg, 56); tcg_gen_sari_i64(ret, ret, 56); } } void tcg_gen_ext16s_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_ext16s_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else if (TCG_TARGET_HAS_ext16s_i64) { tcg_gen_op2_i64(INDEX_op_ext16s_i64, ret, arg); } else { tcg_gen_shli_i64(ret, arg, 48); tcg_gen_sari_i64(ret, ret, 48); } } void tcg_gen_ext32s_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else if (TCG_TARGET_HAS_ext32s_i64) { tcg_gen_op2_i64(INDEX_op_ext32s_i64, ret, arg); } else { tcg_gen_shli_i64(ret, arg, 32); tcg_gen_sari_i64(ret, ret, 32); } } void tcg_gen_ext8u_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_ext8u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_op2_i64(INDEX_op_ext8u_i64, ret, arg); } else { tcg_gen_andi_i64(ret, arg, 0xffu); } } void tcg_gen_ext16u_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_ext16u_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_op2_i64(INDEX_op_ext16u_i64, ret, arg); } else { tcg_gen_andi_i64(ret, arg, 0xffffu); } } void tcg_gen_ext32u_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_op2_i64(INDEX_op_ext32u_i64, ret, arg); } else { tcg_gen_andi_i64(ret, arg, 0xffffffffu); } } void tcg_gen_bswap16_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) { /* Only one extension flag may be present. 
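TCG_BSWAP_OZ zero-extends the swapped halfword, TCG_BSWAP_OS sign-extends it, and with neither flag the bits above the halfword are unspecified.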
*/ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); if (TCG_TARGET_REG_BITS == 32) { tcg_gen_bswap16_i32(TCGV_LOW(ret), TCGV_LOW(arg), flags); if (flags & TCG_BSWAP_OS) { tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else { tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } } else if (TCG_TARGET_HAS_bswap16_i64) { tcg_gen_op3i_i64(INDEX_op_bswap16_i64, ret, arg, flags); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t0, arg, 8); if (!(flags & TCG_BSWAP_IZ)) { tcg_gen_ext8u_i64(t0, t0); } if (flags & TCG_BSWAP_OS) { tcg_gen_shli_i64(t1, arg, 56); tcg_gen_sari_i64(t1, t1, 48); } else if (flags & TCG_BSWAP_OZ) { tcg_gen_ext8u_i64(t1, arg); tcg_gen_shli_i64(t1, t1, 8); } else { tcg_gen_shli_i64(t1, arg, 8); } tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_bswap32_i64(TCGv_i64 ret, TCGv_i64 arg, int flags) { /* Only one extension flag may be present. */ tcg_debug_assert(!(flags & TCG_BSWAP_OS) || !(flags & TCG_BSWAP_OZ)); if (TCG_TARGET_REG_BITS == 32) { tcg_gen_bswap32_i32(TCGV_LOW(ret), TCGV_LOW(arg)); if (flags & TCG_BSWAP_OS) { tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else { tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } } else if (TCG_TARGET_HAS_bswap32_i64) { tcg_gen_op3i_i64(INDEX_op_bswap32_i64, ret, arg, flags); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_constant_i64(0x00ff00ff); /* arg = xxxxabcd */ tcg_gen_shri_i64(t0, arg, 8); /* t0 = .xxxxabc */ tcg_gen_and_i64(t1, arg, t2); /* t1 = .....b.d */ tcg_gen_and_i64(t0, t0, t2); /* t0 = .....a.c */ tcg_gen_shli_i64(t1, t1, 8); /* t1 = ....b.d. */ tcg_gen_or_i64(ret, t0, t1); /* ret = ....badc */ tcg_gen_shli_i64(t1, ret, 48); /* t1 = dc...... */ tcg_gen_shri_i64(t0, ret, 16); /* t0 = ......ba */ if (flags & TCG_BSWAP_OS) { tcg_gen_sari_i64(t1, t1, 32); /* t1 = ssssdc.. */ } else { tcg_gen_shri_i64(t1, t1, 32); /* t1 = ....dc.. */ } tcg_gen_or_i64(ret, t0, t1); /* ret = ssssdcba */ tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_bswap64_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0, t1; t0 = tcg_temp_ebb_new_i32(); t1 = tcg_temp_ebb_new_i32(); tcg_gen_bswap32_i32(t0, TCGV_LOW(arg)); tcg_gen_bswap32_i32(t1, TCGV_HIGH(arg)); tcg_gen_mov_i32(TCGV_LOW(ret), t1); tcg_gen_mov_i32(TCGV_HIGH(ret), t0); tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } else if (TCG_TARGET_HAS_bswap64_i64) { tcg_gen_op3i_i64(INDEX_op_bswap64_i64, ret, arg, 0); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); /* arg = abcdefgh */ tcg_gen_movi_i64(t2, 0x00ff00ff00ff00ffull); tcg_gen_shri_i64(t0, arg, 8); /* t0 = .abcdefg */ tcg_gen_and_i64(t1, arg, t2); /* t1 = .b.d.f.h */ tcg_gen_and_i64(t0, t0, t2); /* t0 = .a.c.e.g */ tcg_gen_shli_i64(t1, t1, 8); /* t1 = b.d.f.h. */ tcg_gen_or_i64(ret, t0, t1); /* ret = badcfehg */ tcg_gen_movi_i64(t2, 0x0000ffff0000ffffull); tcg_gen_shri_i64(t0, ret, 16); /* t0 = ..badcfe */ tcg_gen_and_i64(t1, ret, t2); /* t1 = ..dc..hg */ tcg_gen_and_i64(t0, t0, t2); /* t0 = ..ba..fe */ tcg_gen_shli_i64(t1, t1, 16); /* t1 = dc..hg.. */ tcg_gen_or_i64(ret, t0, t1); /* ret = dcbahgfe */ tcg_gen_shri_i64(t0, ret, 32); /* t0 = ....dcba */ tcg_gen_shli_i64(t1, ret, 32); /* t1 = hgfe.... 
*/ tcg_gen_or_i64(ret, t0, t1); /* ret = hgfedcba */ tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } } void tcg_gen_hswap_i64(TCGv_i64 ret, TCGv_i64 arg) { uint64_t m = 0x0000ffff0000ffffull; TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); /* See include/qemu/bitops.h, hswap64. */ tcg_gen_rotli_i64(t1, arg, 32); tcg_gen_andi_i64(t0, t1, m); tcg_gen_shli_i64(t0, t0, 16); tcg_gen_shri_i64(t1, t1, 16); tcg_gen_andi_i64(t1, t1, m); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } void tcg_gen_wswap_i64(TCGv_i64 ret, TCGv_i64 arg) { /* Swapping 2 32-bit elements is a rotate. */ tcg_gen_rotli_i64(ret, arg, 32); } void tcg_gen_not_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_not_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_not_i32(TCGV_HIGH(ret), TCGV_HIGH(arg)); } else if (TCG_TARGET_HAS_not_i64) { tcg_gen_op2_i64(INDEX_op_not_i64, ret, arg); } else { tcg_gen_xori_i64(ret, arg, -1); } } void tcg_gen_andc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } else if (TCG_TARGET_HAS_andc_i64) { tcg_gen_op3_i64(INDEX_op_andc_i64, ret, arg1, arg2); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); tcg_gen_and_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } } void tcg_gen_eqv_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_eqv_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_eqv_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } else if (TCG_TARGET_HAS_eqv_i64) { tcg_gen_op3_i64(INDEX_op_eqv_i64, ret, arg1, arg2); } else { tcg_gen_xor_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); } } void tcg_gen_nand_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_nand_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_nand_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } else if (TCG_TARGET_HAS_nand_i64) { tcg_gen_op3_i64(INDEX_op_nand_i64, ret, arg1, arg2); } else { tcg_gen_and_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); } } void tcg_gen_nor_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_nor_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_nor_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } else if (TCG_TARGET_HAS_nor_i64) { tcg_gen_op3_i64(INDEX_op_nor_i64, ret, arg1, arg2); } else { tcg_gen_or_i64(ret, arg1, arg2); tcg_gen_not_i64(ret, ret); } } void tcg_gen_orc_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_orc_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2)); tcg_gen_orc_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_HIGH(arg2)); } else if (TCG_TARGET_HAS_orc_i64) { tcg_gen_op3_i64(INDEX_op_orc_i64, ret, arg1, arg2); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_not_i64(t0, arg2); tcg_gen_or_i64(ret, arg1, t0); tcg_temp_free_i64(t0); } } void tcg_gen_clz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_clz_i64) { tcg_gen_op3_i64(INDEX_op_clz_i64, ret, arg1, arg2); } else { gen_helper_clz_i64(ret, arg1, arg2); } } void tcg_gen_clzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) { if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_clz_i32 && arg2 <= 0xffffffffu) { TCGv_i32 t = tcg_temp_ebb_new_i32(); tcg_gen_clzi_i32(t, TCGV_LOW(arg1), arg2 - 32); tcg_gen_addi_i32(t, t, 
32); tcg_gen_clz_i32(TCGV_LOW(ret), TCGV_HIGH(arg1), t); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); tcg_temp_free_i32(t); } else { tcg_gen_clz_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_ctz_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_ctz_i64) { tcg_gen_op3_i64(INDEX_op_ctz_i64, ret, arg1, arg2); } else if (TCG_TARGET_HAS_ctpop_i64 || TCG_TARGET_HAS_clz_i64) { TCGv_i64 z, t = tcg_temp_ebb_new_i64(); if (TCG_TARGET_HAS_ctpop_i64) { tcg_gen_subi_i64(t, arg1, 1); tcg_gen_andc_i64(t, t, arg1); tcg_gen_ctpop_i64(t, t); } else { /* Since all non-x86 hosts have clz(0) == 64, don't fight it. */ tcg_gen_neg_i64(t, arg1); tcg_gen_and_i64(t, t, arg1); tcg_gen_clzi_i64(t, t, 64); tcg_gen_xori_i64(t, t, 63); } z = tcg_constant_i64(0); tcg_gen_movcond_i64(TCG_COND_EQ, ret, arg1, z, arg2, t); tcg_temp_free_i64(t); } else { gen_helper_ctz_i64(ret, arg1, arg2); } } void tcg_gen_ctzi_i64(TCGv_i64 ret, TCGv_i64 arg1, uint64_t arg2) { if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctz_i32 && arg2 <= 0xffffffffu) { TCGv_i32 t32 = tcg_temp_ebb_new_i32(); tcg_gen_ctzi_i32(t32, TCGV_HIGH(arg1), arg2 - 32); tcg_gen_addi_i32(t32, t32, 32); tcg_gen_ctz_i32(TCGV_LOW(ret), TCGV_LOW(arg1), t32); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); tcg_temp_free_i32(t32); } else if (!TCG_TARGET_HAS_ctz_i64 && TCG_TARGET_HAS_ctpop_i64 && arg2 == 64) { /* This equivalence has the advantage of not requiring a fixup. */ TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_subi_i64(t, arg1, 1); tcg_gen_andc_i64(t, t, arg1); tcg_gen_ctpop_i64(ret, t); tcg_temp_free_i64(t); } else { tcg_gen_ctz_i64(ret, arg1, tcg_constant_i64(arg2)); } } void tcg_gen_clrsb_i64(TCGv_i64 ret, TCGv_i64 arg) { if (TCG_TARGET_HAS_clz_i64 || TCG_TARGET_HAS_clz_i32) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t, arg, 63); tcg_gen_xor_i64(t, t, arg); tcg_gen_clzi_i64(t, t, 64); tcg_gen_subi_i64(ret, t, 1); tcg_temp_free_i64(t); } else { gen_helper_clrsb_i64(ret, arg); } } void tcg_gen_ctpop_i64(TCGv_i64 ret, TCGv_i64 arg1) { if (TCG_TARGET_HAS_ctpop_i64) { tcg_gen_op2_i64(INDEX_op_ctpop_i64, ret, arg1); } else if (TCG_TARGET_REG_BITS == 32 && TCG_TARGET_HAS_ctpop_i32) { tcg_gen_ctpop_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); tcg_gen_ctpop_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); tcg_gen_add_i32(TCGV_LOW(ret), TCGV_LOW(ret), TCGV_HIGH(ret)); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else { gen_helper_ctpop_i64(ret, arg1); } } void tcg_gen_rotl_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_rot_i64) { tcg_gen_op3_i64(INDEX_op_rotl_i64, ret, arg1, arg2); } else { TCGv_i64 t0, t1; t0 = tcg_temp_ebb_new_i64(); t1 = tcg_temp_ebb_new_i64(); tcg_gen_shl_i64(t0, arg1, arg2); tcg_gen_subfi_i64(t1, 64, arg2); tcg_gen_shr_i64(t1, arg1, t1); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_rotli_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else if (TCG_TARGET_HAS_rot_i64) { tcg_gen_rotl_i64(ret, arg1, tcg_constant_i64(arg2)); } else { TCGv_i64 t0, t1; t0 = tcg_temp_ebb_new_i64(); t1 = tcg_temp_ebb_new_i64(); tcg_gen_shli_i64(t0, arg1, arg2); tcg_gen_shri_i64(t1, arg1, 64 - arg2); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_rotr_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_rot_i64) { tcg_gen_op3_i64(INDEX_op_rotr_i64, ret, arg1, arg2); } else { TCGv_i64 t0, t1; t0 = 
tcg_temp_ebb_new_i64(); t1 = tcg_temp_ebb_new_i64(); tcg_gen_shr_i64(t0, arg1, arg2); tcg_gen_subfi_i64(t1, 64, arg2); tcg_gen_shl_i64(t1, arg1, t1); tcg_gen_or_i64(ret, t0, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_rotri_i64(TCGv_i64 ret, TCGv_i64 arg1, int64_t arg2) { tcg_debug_assert(arg2 >= 0 && arg2 < 64); /* some cases can be optimized here */ if (arg2 == 0) { tcg_gen_mov_i64(ret, arg1); } else { tcg_gen_rotli_i64(ret, arg1, 64 - arg2); } } void tcg_gen_deposit_i64(TCGv_i64 ret, TCGv_i64 arg1, TCGv_i64 arg2, unsigned int ofs, unsigned int len) { uint64_t mask; TCGv_i64 t1; tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); if (len == 64) { tcg_gen_mov_i64(ret, arg2); return; } if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, arg1, arg2, ofs, len); return; } if (TCG_TARGET_REG_BITS == 32) { if (ofs >= 32) { tcg_gen_deposit_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1), TCGV_LOW(arg2), ofs - 32, len); tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg1)); return; } if (ofs + len <= 32) { tcg_gen_deposit_i32(TCGV_LOW(ret), TCGV_LOW(arg1), TCGV_LOW(arg2), ofs, len); tcg_gen_mov_i32(TCGV_HIGH(ret), TCGV_HIGH(arg1)); return; } } t1 = tcg_temp_ebb_new_i64(); if (TCG_TARGET_HAS_extract2_i64) { if (ofs + len == 64) { tcg_gen_shli_i64(t1, arg1, len); tcg_gen_extract2_i64(ret, t1, arg2, len); goto done; } if (ofs == 0) { tcg_gen_extract2_i64(ret, arg1, arg2, len); tcg_gen_rotli_i64(ret, ret, len); goto done; } } mask = (1ull << len) - 1; if (ofs + len < 64) { tcg_gen_andi_i64(t1, arg2, mask); tcg_gen_shli_i64(t1, t1, ofs); } else { tcg_gen_shli_i64(t1, arg2, ofs); } tcg_gen_andi_i64(ret, arg1, ~(mask << ofs)); tcg_gen_or_i64(ret, ret, t1); done: tcg_temp_free_i64(t1); } void tcg_gen_deposit_z_i64(TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); if (ofs + len == 64) { tcg_gen_shli_i64(ret, arg, ofs); } else if (ofs == 0) { tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); } else if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(ofs, len)) { TCGv_i64 zero = tcg_constant_i64(0); tcg_gen_op5ii_i64(INDEX_op_deposit_i64, ret, zero, arg, ofs, len); } else { if (TCG_TARGET_REG_BITS == 32) { if (ofs >= 32) { tcg_gen_deposit_z_i32(TCGV_HIGH(ret), TCGV_LOW(arg), ofs - 32, len); tcg_gen_movi_i32(TCGV_LOW(ret), 0); return; } if (ofs + len <= 32) { tcg_gen_deposit_z_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); return; } } /* To help two-operand hosts we prefer to zero-extend first, which allows ARG to stay live. */ switch (len) { case 32: if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_ext32u_i64(ret, arg); tcg_gen_shli_i64(ret, ret, ofs); return; } break; case 16: if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_ext16u_i64(ret, arg); tcg_gen_shli_i64(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_ext8u_i64(ret, arg); tcg_gen_shli_i64(ret, ret, ofs); return; } break; } /* Otherwise prefer zero-extension over AND for code size. 
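A 64-bit AND mask may require a multi-instruction constant load, whereas ext8u/ext16u/ext32u are single ops on hosts that provide them.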
*/ switch (ofs + len) { case 32: if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_shli_i64(ret, arg, ofs); tcg_gen_ext32u_i64(ret, ret); return; } break; case 16: if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_shli_i64(ret, arg, ofs); tcg_gen_ext16u_i64(ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_shli_i64(ret, arg, ofs); tcg_gen_ext8u_i64(ret, ret); return; } break; } tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); tcg_gen_shli_i64(ret, ret, ofs); } } void tcg_gen_extract_i64(TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); /* Canonicalize certain special cases, even if extract is supported. */ if (ofs + len == 64) { tcg_gen_shri_i64(ret, arg, 64 - len); return; } if (ofs == 0) { tcg_gen_andi_i64(ret, arg, (1ull << len) - 1); return; } if (TCG_TARGET_REG_BITS == 32) { /* Look for a 32-bit extract within one of the two words. */ if (ofs >= 32) { tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); return; } if (ofs + len <= 32) { tcg_gen_extract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); return; } /* The field is split across two words. One double-word shift is better than two double-word shifts. */ goto do_shift_and; } if (TCG_TARGET_HAS_extract_i64 && TCG_TARGET_extract_i64_valid(ofs, len)) { tcg_gen_op4ii_i64(INDEX_op_extract_i64, ret, arg, ofs, len); return; } /* Assume that zero-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 32: if (TCG_TARGET_HAS_ext32u_i64) { tcg_gen_ext32u_i64(ret, arg); tcg_gen_shri_i64(ret, ret, ofs); return; } break; case 16: if (TCG_TARGET_HAS_ext16u_i64) { tcg_gen_ext16u_i64(ret, arg); tcg_gen_shri_i64(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8u_i64) { tcg_gen_ext8u_i64(ret, arg); tcg_gen_shri_i64(ret, ret, ofs); return; } break; } /* ??? Ideally we'd know what values are available for immediate AND. Assume that 8 bits are available, plus the special cases of 16 and 32, so that we get ext8u, ext16u, and ext32u. */ switch (len) { case 1 ... 8: case 16: case 32: do_shift_and: tcg_gen_shri_i64(ret, arg, ofs); tcg_gen_andi_i64(ret, ret, (1ull << len) - 1); break; default: tcg_gen_shli_i64(ret, arg, 64 - len - ofs); tcg_gen_shri_i64(ret, ret, 64 - len); break; } } void tcg_gen_sextract_i64(TCGv_i64 ret, TCGv_i64 arg, unsigned int ofs, unsigned int len) { tcg_debug_assert(ofs < 64); tcg_debug_assert(len > 0); tcg_debug_assert(len <= 64); tcg_debug_assert(ofs + len <= 64); /* Canonicalize certain special cases, even if sextract is supported. */ if (ofs + len == 64) { tcg_gen_sari_i64(ret, arg, 64 - len); return; } if (ofs == 0) { switch (len) { case 32: tcg_gen_ext32s_i64(ret, arg); return; case 16: tcg_gen_ext16s_i64(ret, arg); return; case 8: tcg_gen_ext8s_i64(ret, arg); return; } } if (TCG_TARGET_REG_BITS == 32) { /* Look for a 32-bit extract within one of the two words. */ if (ofs >= 32) { tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_HIGH(arg), ofs - 32, len); } else if (ofs + len <= 32) { tcg_gen_sextract_i32(TCGV_LOW(ret), TCGV_LOW(arg), ofs, len); } else if (ofs == 0) { tcg_gen_mov_i32(TCGV_LOW(ret), TCGV_LOW(arg)); tcg_gen_sextract_i32(TCGV_HIGH(ret), TCGV_HIGH(arg), 0, len - 32); return; } else if (len > 32) { TCGv_i32 t = tcg_temp_ebb_new_i32(); /* Extract the bits for the high word normally. 
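Here 0 < ofs < 32 < len, so the field spans both words: the high half of the result is sextract32(high, ofs, len - 32), computed into a temporary first because ret may alias arg and the 64-bit shift below clobbers the high word.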
*/ tcg_gen_sextract_i32(t, TCGV_HIGH(arg), ofs, len - 32); /* Shift the field down for the low part. */ tcg_gen_shri_i64(ret, arg, ofs); /* Overwrite the shift into the high part. */ tcg_gen_mov_i32(TCGV_HIGH(ret), t); tcg_temp_free_i32(t); return; } else { /* Shift the field down for the low part, such that the field sits at the MSB. */ tcg_gen_shri_i64(ret, arg, ofs + len - 32); /* Shift the field down from the MSB, sign extending. */ tcg_gen_sari_i32(TCGV_LOW(ret), TCGV_LOW(ret), 32 - len); } /* Sign-extend the field from 32 bits. */ tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); return; } if (TCG_TARGET_HAS_sextract_i64 && TCG_TARGET_extract_i64_valid(ofs, len)) { tcg_gen_op4ii_i64(INDEX_op_sextract_i64, ret, arg, ofs, len); return; } /* Assume that sign-extension, if available, is cheaper than a shift. */ switch (ofs + len) { case 32: if (TCG_TARGET_HAS_ext32s_i64) { tcg_gen_ext32s_i64(ret, arg); tcg_gen_sari_i64(ret, ret, ofs); return; } break; case 16: if (TCG_TARGET_HAS_ext16s_i64) { tcg_gen_ext16s_i64(ret, arg); tcg_gen_sari_i64(ret, ret, ofs); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i64) { tcg_gen_ext8s_i64(ret, arg); tcg_gen_sari_i64(ret, ret, ofs); return; } break; } switch (len) { case 32: if (TCG_TARGET_HAS_ext32s_i64) { tcg_gen_shri_i64(ret, arg, ofs); tcg_gen_ext32s_i64(ret, ret); return; } break; case 16: if (TCG_TARGET_HAS_ext16s_i64) { tcg_gen_shri_i64(ret, arg, ofs); tcg_gen_ext16s_i64(ret, ret); return; } break; case 8: if (TCG_TARGET_HAS_ext8s_i64) { tcg_gen_shri_i64(ret, arg, ofs); tcg_gen_ext8s_i64(ret, ret); return; } break; } tcg_gen_shli_i64(ret, arg, 64 - len - ofs); tcg_gen_sari_i64(ret, ret, 64 - len); } /* * Extract 64 bits from a 128-bit input, ah:al, starting from ofs. * Unlike tcg_gen_extract_i64 above, len is fixed at 64. 
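* I.e. ret = (al >> ofs) | (ah << (64 - ofs)) for 0 < ofs < 64; * the boundary cases degenerate to a move from al or ah. 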
*/ void tcg_gen_extract2_i64(TCGv_i64 ret, TCGv_i64 al, TCGv_i64 ah, unsigned int ofs) { tcg_debug_assert(ofs <= 64); if (ofs == 0) { tcg_gen_mov_i64(ret, al); } else if (ofs == 64) { tcg_gen_mov_i64(ret, ah); } else if (al == ah) { tcg_gen_rotri_i64(ret, al, ofs); } else if (TCG_TARGET_HAS_extract2_i64) { tcg_gen_op4i_i64(INDEX_op_extract2_i64, ret, al, ah, ofs); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t0, al, ofs); tcg_gen_deposit_i64(ret, t0, ah, 64 - ofs, ofs); tcg_temp_free_i64(t0); } } void tcg_gen_movcond_i64(TCGCond cond, TCGv_i64 ret, TCGv_i64 c1, TCGv_i64 c2, TCGv_i64 v1, TCGv_i64 v2) { if (cond == TCG_COND_ALWAYS) { tcg_gen_mov_i64(ret, v1); } else if (cond == TCG_COND_NEVER) { tcg_gen_mov_i64(ret, v2); } else if (TCG_TARGET_REG_BITS == 32) { TCGv_i32 t0 = tcg_temp_ebb_new_i32(); TCGv_i32 t1 = tcg_temp_ebb_new_i32(); tcg_gen_op6i_i32(INDEX_op_setcond2_i32, t0, TCGV_LOW(c1), TCGV_HIGH(c1), TCGV_LOW(c2), TCGV_HIGH(c2), cond); if (TCG_TARGET_HAS_movcond_i32) { tcg_gen_movi_i32(t1, 0); tcg_gen_movcond_i32(TCG_COND_NE, TCGV_LOW(ret), t0, t1, TCGV_LOW(v1), TCGV_LOW(v2)); tcg_gen_movcond_i32(TCG_COND_NE, TCGV_HIGH(ret), t0, t1, TCGV_HIGH(v1), TCGV_HIGH(v2)); } else { tcg_gen_neg_i32(t0, t0); tcg_gen_and_i32(t1, TCGV_LOW(v1), t0); tcg_gen_andc_i32(TCGV_LOW(ret), TCGV_LOW(v2), t0); tcg_gen_or_i32(TCGV_LOW(ret), TCGV_LOW(ret), t1); tcg_gen_and_i32(t1, TCGV_HIGH(v1), t0); tcg_gen_andc_i32(TCGV_HIGH(ret), TCGV_HIGH(v2), t0); tcg_gen_or_i32(TCGV_HIGH(ret), TCGV_HIGH(ret), t1); } tcg_temp_free_i32(t0); tcg_temp_free_i32(t1); } else if (TCG_TARGET_HAS_movcond_i64) { tcg_gen_op6i_i64(INDEX_op_movcond_i64, ret, c1, c2, v1, v2, cond); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_setcond_i64(cond, t0, c1, c2); tcg_gen_neg_i64(t0, t0); tcg_gen_and_i64(t1, v1, t0); tcg_gen_andc_i64(ret, v2, t0); tcg_gen_or_i64(ret, ret, t1); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_add2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { if (TCG_TARGET_HAS_add2_i64) { tcg_gen_op6_i64(INDEX_op_add2_i64, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_add_i64(t0, al, bl); tcg_gen_setcond_i64(TCG_COND_LTU, t1, t0, al); tcg_gen_add_i64(rh, ah, bh); tcg_gen_add_i64(rh, rh, t1); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_sub2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 al, TCGv_i64 ah, TCGv_i64 bl, TCGv_i64 bh) { if (TCG_TARGET_HAS_sub2_i64) { tcg_gen_op6_i64(INDEX_op_sub2_i64, rl, rh, al, ah, bl, bh); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); tcg_gen_sub_i64(t0, al, bl); tcg_gen_setcond_i64(TCG_COND_LTU, t1, al, bl); tcg_gen_sub_i64(rh, ah, bh); tcg_gen_sub_i64(rh, rh, t1); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); } } void tcg_gen_mulu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_mulu2_i64) { tcg_gen_op4_i64(INDEX_op_mulu2_i64, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_muluh_i64) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(INDEX_op_muluh_i64, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_mul_i64(t0, arg1, arg2); gen_helper_muluh_i64(rh, arg1, arg2); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); } } void tcg_gen_muls2_i64(TCGv_i64 rl, TCGv_i64 
rh, TCGv_i64 arg1, TCGv_i64 arg2) { if (TCG_TARGET_HAS_muls2_i64) { tcg_gen_op4_i64(INDEX_op_muls2_i64, rl, rh, arg1, arg2); } else if (TCG_TARGET_HAS_mulsh_i64) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_op3_i64(INDEX_op_mul_i64, t, arg1, arg2); tcg_gen_op3_i64(INDEX_op_mulsh_i64, rh, arg1, arg2); tcg_gen_mov_i64(rl, t); tcg_temp_free_i64(t); } else if (TCG_TARGET_HAS_mulu2_i64 || TCG_TARGET_HAS_muluh_i64) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); TCGv_i64 t3 = tcg_temp_ebb_new_i64(); tcg_gen_mulu2_i64(t0, t1, arg1, arg2); /* Adjust for negative inputs. */ tcg_gen_sari_i64(t2, arg1, 63); tcg_gen_sari_i64(t3, arg2, 63); tcg_gen_and_i64(t2, t2, arg2); tcg_gen_and_i64(t3, t3, arg1); tcg_gen_sub_i64(rh, t1, t2); tcg_gen_sub_i64(rh, rh, t3); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); tcg_temp_free_i64(t3); } else { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); tcg_gen_mul_i64(t0, arg1, arg2); gen_helper_mulsh_i64(rh, arg1, arg2); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); } } void tcg_gen_mulsu2_i64(TCGv_i64 rl, TCGv_i64 rh, TCGv_i64 arg1, TCGv_i64 arg2) { TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); tcg_gen_mulu2_i64(t0, t1, arg1, arg2); /* Adjust for negative input for the signed arg1. */ tcg_gen_sari_i64(t2, arg1, 63); tcg_gen_and_i64(t2, t2, arg2); tcg_gen_sub_i64(rh, t1, t2); tcg_gen_mov_i64(rl, t0); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } void tcg_gen_smin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, a, b); } void tcg_gen_umin_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, a, b); } void tcg_gen_smax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(TCG_COND_LT, ret, a, b, b, a); } void tcg_gen_umax_i64(TCGv_i64 ret, TCGv_i64 a, TCGv_i64 b) { tcg_gen_movcond_i64(TCG_COND_LTU, ret, a, b, b, a); } void tcg_gen_abs_i64(TCGv_i64 ret, TCGv_i64 a) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_sari_i64(t, a, 63); tcg_gen_xor_i64(ret, a, t); tcg_gen_sub_i64(ret, ret, t); tcg_temp_free_i64(t); } /* Size changing operations. 
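On 32-bit hosts a TCGv_i64 is backed by a pair of TCGv_i32 (TCGV_LOW/TCGV_HIGH), so most of these reduce to plain 32-bit moves; on 64-bit hosts they emit real extension/extraction opcodes.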
*/ void tcg_gen_extrl_i64_i32(TCGv_i32 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(ret, TCGV_LOW(arg)); } else if (TCG_TARGET_HAS_extrl_i64_i32) { tcg_gen_op2(INDEX_op_extrl_i64_i32, tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } else { tcg_gen_mov_i32(ret, (TCGv_i32)arg); } } void tcg_gen_extrh_i64_i32(TCGv_i32 ret, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(ret, TCGV_HIGH(arg)); } else if (TCG_TARGET_HAS_extrh_i64_i32) { tcg_gen_op2(INDEX_op_extrh_i64_i32, tcgv_i32_arg(ret), tcgv_i64_arg(arg)); } else { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_shri_i64(t, arg, 32); tcg_gen_mov_i32(ret, (TCGv_i32)t); tcg_temp_free_i64(t); } } void tcg_gen_extu_i32_i64(TCGv_i64 ret, TCGv_i32 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_movi_i32(TCGV_HIGH(ret), 0); } else { tcg_gen_op2(INDEX_op_extu_i32_i64, tcgv_i64_arg(ret), tcgv_i32_arg(arg)); } } void tcg_gen_ext_i32_i64(TCGv_i64 ret, TCGv_i32 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(TCGV_LOW(ret), arg); tcg_gen_sari_i32(TCGV_HIGH(ret), TCGV_LOW(ret), 31); } else { tcg_gen_op2(INDEX_op_ext_i32_i64, tcgv_i64_arg(ret), tcgv_i32_arg(arg)); } } void tcg_gen_concat_i32_i64(TCGv_i64 dest, TCGv_i32 low, TCGv_i32 high) { TCGv_i64 tmp; if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(TCGV_LOW(dest), low); tcg_gen_mov_i32(TCGV_HIGH(dest), high); return; } tmp = tcg_temp_ebb_new_i64(); /* These extensions are only needed for type correctness. We may be able to do better given target specific information. */ tcg_gen_extu_i32_i64(tmp, high); tcg_gen_extu_i32_i64(dest, low); /* If deposit is available, use it. Otherwise use the extra knowledge that we have of the zero-extensions above. */ if (TCG_TARGET_HAS_deposit_i64 && TCG_TARGET_deposit_i64_valid(32, 32)) { tcg_gen_deposit_i64(dest, dest, tmp, 32, 32); } else { tcg_gen_shli_i64(tmp, tmp, 32); tcg_gen_or_i64(dest, dest, tmp); } tcg_temp_free_i64(tmp); } void tcg_gen_extr_i64_i32(TCGv_i32 lo, TCGv_i32 hi, TCGv_i64 arg) { if (TCG_TARGET_REG_BITS == 32) { tcg_gen_mov_i32(lo, TCGV_LOW(arg)); tcg_gen_mov_i32(hi, TCGV_HIGH(arg)); } else { tcg_gen_extrl_i64_i32(lo, arg); tcg_gen_extrh_i64_i32(hi, arg); } } void tcg_gen_extr32_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i64 arg) { tcg_gen_ext32u_i64(lo, arg); tcg_gen_shri_i64(hi, arg, 32); } void tcg_gen_extr_i128_i64(TCGv_i64 lo, TCGv_i64 hi, TCGv_i128 arg) { tcg_gen_mov_i64(lo, TCGV128_LOW(arg)); tcg_gen_mov_i64(hi, TCGV128_HIGH(arg)); } void tcg_gen_concat_i64_i128(TCGv_i128 ret, TCGv_i64 lo, TCGv_i64 hi) { tcg_gen_mov_i64(TCGV128_LOW(ret), lo); tcg_gen_mov_i64(TCGV128_HIGH(ret), hi); } void tcg_gen_mov_i128(TCGv_i128 dst, TCGv_i128 src) { if (dst != src) { tcg_gen_mov_i64(TCGV128_LOW(dst), TCGV128_LOW(src)); tcg_gen_mov_i64(TCGV128_HIGH(dst), TCGV128_HIGH(src)); } } /* QEMU specific operations. */ void tcg_gen_exit_tb(const TranslationBlock *tb, unsigned idx) { /* * Let the jit code return the read-only version of the * TranslationBlock, so that we minimize the pc-relative * distance of the address of the exit_tb code to TB. * This will improve utilization of pc-relative address loads. * * TODO: Move this to translator_loop, so that all const * TranslationBlock pointers refer to read-only memory. * This requires coordination with targets that do not use * the translator_loop. 
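* The value returned to the main loop encodes both the TB and the exit * taken: tb + idx for an exit through goto_tb slot idx, tb + * TB_EXIT_REQUESTED for the exitreq label, and 0 when tb is NULL. 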
*/ uintptr_t val = (uintptr_t)tcg_splitwx_to_rx((void *)tb) + idx; if (tb == NULL) { tcg_debug_assert(idx == 0); } else if (idx <= TB_EXIT_IDXMAX) { #ifdef CONFIG_DEBUG_TCG /* This is an exit following a goto_tb. Verify that we have seen this numbered exit before, via tcg_gen_goto_tb. */ tcg_debug_assert(tcg_ctx->goto_tb_issue_mask & (1 << idx)); #endif } else { /* This is an exit via the exitreq label. */ tcg_debug_assert(idx == TB_EXIT_REQUESTED); } tcg_gen_op1i(INDEX_op_exit_tb, val); } void tcg_gen_goto_tb(unsigned idx) { /* We tested CF_NO_GOTO_TB in translator_use_goto_tb. */ tcg_debug_assert(!(tcg_ctx->gen_tb->cflags & CF_NO_GOTO_TB)); /* We only support two chained exits. */ tcg_debug_assert(idx <= TB_EXIT_IDXMAX); #ifdef CONFIG_DEBUG_TCG /* Verify that we haven't seen this numbered exit before. */ tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif plugin_gen_disable_mem_helpers(); tcg_gen_op1i(INDEX_op_goto_tb, idx); } void tcg_gen_lookup_and_goto_ptr(void) { TCGv_ptr ptr; if (tcg_ctx->gen_tb->cflags & CF_NO_GOTO_PTR) { tcg_gen_exit_tb(NULL, 0); return; } plugin_gen_disable_mem_helpers(); ptr = tcg_temp_ebb_new_ptr(); gen_helper_lookup_tb_ptr(ptr, cpu_env); tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); } static inline MemOp tcg_canonicalize_memop(MemOp op, bool is64, bool st) { /* Trigger the asserts within as early as possible. */ unsigned a_bits = get_alignment_bits(op); /* Prefer MO_ALIGN+MO_XX over MO_ALIGN_XX+MO_XX */ if (a_bits == (op & MO_SIZE)) { op = (op & ~MO_AMASK) | MO_ALIGN; } switch (op & MO_SIZE) { case MO_8: op &= ~MO_BSWAP; break; case MO_16: break; case MO_32: if (!is64) { op &= ~MO_SIGN; } break; case MO_64: if (is64) { op &= ~MO_SIGN; break; } /* fall through */ default: g_assert_not_reached(); } if (st) { op &= ~MO_SIGN; } return op; } static void gen_ldst_i32(TCGOpcode opc, TCGv_i32 val, TCGv addr, MemOp memop, TCGArg idx) { MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 tcg_gen_op3i_i32(opc, val, addr, oi); #else if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op4i_i32(opc, val, TCGV_LOW(addr), TCGV_HIGH(addr), oi); } else { tcg_gen_op3(opc, tcgv_i32_arg(val), tcgv_i64_arg(addr), oi); } #endif } static void gen_ldst_i64(TCGOpcode opc, TCGv_i64 val, TCGv addr, MemOp memop, TCGArg idx) { MemOpIdx oi = make_memop_idx(memop, idx); #if TARGET_LONG_BITS == 32 if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op4i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), addr, oi); } else { tcg_gen_op3(opc, tcgv_i64_arg(val), tcgv_i32_arg(addr), oi); } #else if (TCG_TARGET_REG_BITS == 32) { tcg_gen_op5i_i32(opc, TCGV_LOW(val), TCGV_HIGH(val), TCGV_LOW(addr), TCGV_HIGH(addr), oi); } else { tcg_gen_op3i_i64(opc, val, addr, oi); } #endif } static void tcg_gen_req_mo(TCGBar type) { #ifdef TCG_GUEST_DEFAULT_MO type &= TCG_GUEST_DEFAULT_MO; #endif type &= ~TCG_TARGET_DEFAULT_MO; if (type) { tcg_gen_mb(type | TCG_BAR_SC); } } static inline TCGv plugin_prep_mem_callbacks(TCGv vaddr) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { /* Save a copy of the vaddr for use after a load. 
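The plugin callback is emitted after the access, by which point the address may have been overwritten (e.g. by a load into its own address register); the copy is freed again in plugin_gen_mem_callbacks().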
*/ TCGv temp = tcg_temp_new(); tcg_gen_mov_tl(temp, vaddr); return temp; } #endif return vaddr; } static void plugin_gen_mem_callbacks(TCGv vaddr, MemOpIdx oi, enum qemu_plugin_mem_rw rw) { #ifdef CONFIG_PLUGIN if (tcg_ctx->plugin_insn != NULL) { qemu_plugin_meminfo_t info = make_plugin_meminfo(oi, rw); plugin_gen_empty_mem_callback(vaddr, info); tcg_temp_free(vaddr); } #endif } void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); oi = make_memop_idx(memop, idx); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. */ if ((memop & MO_SSIZE) == MO_SW) { memop &= ~MO_SIGN; } } addr = plugin_prep_mem_callbacks(addr); gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i32(val, val, (orig_memop & MO_SIGN ? TCG_BSWAP_IZ | TCG_BSWAP_OS : TCG_BSWAP_IZ | TCG_BSWAP_OZ)); break; case MO_32: tcg_gen_bswap32_i32(val, val); break; default: g_assert_not_reached(); } } } void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; MemOpIdx oi; tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); oi = make_memop_idx(memop, idx); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_ebb_new_i32(); switch (memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i32(swap, val, 0); break; case MO_32: tcg_gen_bswap32_i32(swap, val); break; default: g_assert_not_reached(); } val = swap; memop &= ~MO_BSWAP; } addr = plugin_prep_mem_callbacks(addr); if (TCG_TARGET_HAS_qemu_st8_i32 && (memop & MO_SIZE) == MO_8) { gen_ldst_i32(INDEX_op_qemu_st8_i32, val, addr, memop, idx); } else { gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); } plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i32(swap); } } void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31); } else { tcg_gen_movi_i32(TCGV_HIGH(val), 0); } return; } tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); oi = make_memop_idx(memop, idx); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { memop &= ~MO_BSWAP; /* The bswap primitive benefits from zero-extended input. */ if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) { memop &= ~MO_SIGN; } } addr = plugin_prep_mem_callbacks(addr); gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); if ((orig_memop ^ memop) & MO_BSWAP) { int flags = (orig_memop & MO_SIGN ? 
TCG_BSWAP_IZ | TCG_BSWAP_OS : TCG_BSWAP_IZ | TCG_BSWAP_OZ); switch (orig_memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i64(val, val, flags); break; case MO_32: tcg_gen_bswap32_i64(val, val, flags); break; case MO_64: tcg_gen_bswap64_i64(val, val); break; default: g_assert_not_reached(); } } } void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; MemOpIdx oi; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); return; } tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); oi = make_memop_idx(memop, idx); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_ebb_new_i64(); switch (memop & MO_SIZE) { case MO_16: tcg_gen_bswap16_i64(swap, val, 0); break; case MO_32: tcg_gen_bswap32_i64(swap, val, 0); break; case MO_64: tcg_gen_bswap64_i64(swap, val); break; default: g_assert_not_reached(); } val = swap; memop &= ~MO_BSWAP; } addr = plugin_prep_mem_callbacks(addr); gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); if (swap) { tcg_temp_free_i64(swap); } } /* * Return true if @mop, without knowledge of the pointer alignment, * does not require 16-byte atomicity, and it would be advantageous * to avoid a call to a helper function. */ static bool use_two_i64_for_i128(MemOp mop) { #ifdef CONFIG_SOFTMMU /* Two softmmu tlb lookups are larger than one function call. */ return false; #else /* * For user-only, two 64-bit operations may well be smaller than a call. * Determine if that would be legal for the requested atomicity. */ switch (mop & MO_ATOM_MASK) { case MO_ATOM_NONE: case MO_ATOM_IFALIGN_PAIR: return true; case MO_ATOM_IFALIGN: case MO_ATOM_SUBALIGN: case MO_ATOM_WITHIN16: case MO_ATOM_WITHIN16_PAIR: /* In a serialized context, no atomicity is required. */ return !(tcg_ctx->gen_tb->cflags & CF_PARALLEL); default: g_assert_not_reached(); } #endif } static void canonicalize_memop_i128_as_i64(MemOp ret[2], MemOp orig) { MemOp mop_1 = orig, mop_2; tcg_debug_assert((orig & MO_SIZE) == MO_128); tcg_debug_assert((orig & MO_SIGN) == 0); /* Use a memory ordering implemented by the host. */ if (!TCG_TARGET_HAS_MEMORY_BSWAP && (orig & MO_BSWAP)) { mop_1 &= ~MO_BSWAP; } /* Reduce the size to 64-bit. */ mop_1 = (mop_1 & ~MO_SIZE) | MO_64; /* Retain the alignment constraints of the original. */ switch (orig & MO_AMASK) { case MO_UNALN: case MO_ALIGN_2: case MO_ALIGN_4: mop_2 = mop_1; break; case MO_ALIGN_8: /* Prefer MO_ALIGN+MO_64 to MO_ALIGN_8+MO_64. */ mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN; mop_2 = mop_1; break; case MO_ALIGN: /* Second has 8-byte alignment; first has 16-byte alignment. */ mop_2 = mop_1; mop_1 = (mop_1 & ~MO_AMASK) | MO_ALIGN_16; break; case MO_ALIGN_16: case MO_ALIGN_32: case MO_ALIGN_64: /* Second has 8-byte alignment; first retains original. */ mop_2 = (mop_1 & ~MO_AMASK) | MO_ALIGN; break; default: g_assert_not_reached(); } ret[0] = mop_1; ret[1] = mop_2; } #if TARGET_LONG_BITS == 64 #define tcg_temp_ebb_new tcg_temp_ebb_new_i64 #else #define tcg_temp_ebb_new tcg_temp_ebb_new_i32 #endif void tcg_gen_qemu_ld_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { MemOpIdx oi = make_memop_idx(memop, idx); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); addr = plugin_prep_mem_callbacks(addr); /* TODO: allow the tcg backend to see the whole operation. 
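Until then, expand to two 8-byte accesses when use_two_i64_for_i128() says the atomicity constraints permit it, else call the out-of-line i128 helper.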
*/ if (use_two_i64_for_i128(memop)) { MemOp mop[2]; TCGv addr_p8; TCGv_i64 x, y; canonicalize_memop_i128_as_i64(mop, memop); /* * Since there are no global TCGv_i128, there is no visible state * changed if the second load faults. Load directly into the two * subwords. */ if ((memop & MO_BSWAP) == MO_LE) { x = TCGV128_LOW(val); y = TCGV128_HIGH(val); } else { x = TCGV128_HIGH(val); y = TCGV128_LOW(val); } gen_ldst_i64(INDEX_op_qemu_ld_i64, x, addr, mop[0], idx); if ((mop[0] ^ memop) & MO_BSWAP) { tcg_gen_bswap64_i64(x, x); } addr_p8 = tcg_temp_ebb_new(); tcg_gen_addi_tl(addr_p8, addr, 8); gen_ldst_i64(INDEX_op_qemu_ld_i64, y, addr_p8, mop[1], idx); tcg_temp_free(addr_p8); if ((mop[0] ^ memop) & MO_BSWAP) { tcg_gen_bswap64_i64(y, y); } } else { gen_helper_ld_i128(val, cpu_env, addr, tcg_constant_i32(oi)); } plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_R); } void tcg_gen_qemu_st_i128(TCGv_i128 val, TCGv addr, TCGArg idx, MemOp memop) { MemOpIdx oi = make_memop_idx(memop, idx); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); addr = plugin_prep_mem_callbacks(addr); /* TODO: allow the tcg backend to see the whole operation. */ if (use_two_i64_for_i128(memop)) { MemOp mop[2]; TCGv addr_p8; TCGv_i64 x, y; canonicalize_memop_i128_as_i64(mop, memop); if ((memop & MO_BSWAP) == MO_LE) { x = TCGV128_LOW(val); y = TCGV128_HIGH(val); } else { x = TCGV128_HIGH(val); y = TCGV128_LOW(val); } addr_p8 = tcg_temp_ebb_new(); if ((mop[0] ^ memop) & MO_BSWAP) { TCGv_i64 t = tcg_temp_ebb_new_i64(); tcg_gen_bswap64_i64(t, x); gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr, mop[0], idx); tcg_gen_bswap64_i64(t, y); tcg_gen_addi_tl(addr_p8, addr, 8); gen_ldst_i64(INDEX_op_qemu_st_i64, t, addr_p8, mop[1], idx); tcg_temp_free_i64(t); } else { gen_ldst_i64(INDEX_op_qemu_st_i64, x, addr, mop[0], idx); tcg_gen_addi_tl(addr_p8, addr, 8); gen_ldst_i64(INDEX_op_qemu_st_i64, y, addr_p8, mop[1], idx); } tcg_temp_free(addr_p8); } else { gen_helper_st_i128(cpu_env, addr, val, tcg_constant_i32(oi)); } plugin_gen_mem_callbacks(addr, oi, QEMU_PLUGIN_MEM_W); } static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: tcg_gen_ext8s_i32(ret, val); break; case MO_UB: tcg_gen_ext8u_i32(ret, val); break; case MO_SW: tcg_gen_ext16s_i32(ret, val); break; case MO_UW: tcg_gen_ext16u_i32(ret, val); break; default: tcg_gen_mov_i32(ret, val); break; } } static void tcg_gen_ext_i64(TCGv_i64 ret, TCGv_i64 val, MemOp opc) { switch (opc & MO_SSIZE) { case MO_SB: tcg_gen_ext8s_i64(ret, val); break; case MO_UB: tcg_gen_ext8u_i64(ret, val); break; case MO_SW: tcg_gen_ext16s_i64(ret, val); break; case MO_UW: tcg_gen_ext16u_i64(ret, val); break; case MO_SL: tcg_gen_ext32s_i64(ret, val); break; case MO_UL: tcg_gen_ext32u_i64(ret, val); break; default: tcg_gen_mov_i64(ret, val); break; } } typedef void (*gen_atomic_cx_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32, TCGv_i32); typedef void (*gen_atomic_cx_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i64, TCGv_i32); typedef void (*gen_atomic_cx_i128)(TCGv_i128, TCGv_env, TCGv, TCGv_i128, TCGv_i128, TCGv_i32); typedef void (*gen_atomic_op_i32)(TCGv_i32, TCGv_env, TCGv, TCGv_i32, TCGv_i32); typedef void (*gen_atomic_op_i64)(TCGv_i64, TCGv_env, TCGv, TCGv_i64, TCGv_i32); #ifdef CONFIG_ATOMIC64 # define WITH_ATOMIC64(X) X, #else # define WITH_ATOMIC64(X) #endif #ifdef CONFIG_CMPXCHG128 # define WITH_ATOMIC128(X) X, #else # define WITH_ATOMIC128(X) #endif static void * const 
table_cmpxchg[(MO_SIZE | MO_BSWAP) + 1] = { [MO_8] = gen_helper_atomic_cmpxchgb, [MO_16 | MO_LE] = gen_helper_atomic_cmpxchgw_le, [MO_16 | MO_BE] = gen_helper_atomic_cmpxchgw_be, [MO_32 | MO_LE] = gen_helper_atomic_cmpxchgl_le, [MO_32 | MO_BE] = gen_helper_atomic_cmpxchgl_be, WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_cmpxchgq_le) WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_cmpxchgq_be) WITH_ATOMIC128([MO_128 | MO_LE] = gen_helper_atomic_cmpxchgo_le) WITH_ATOMIC128([MO_128 | MO_BE] = gen_helper_atomic_cmpxchgo_be) }; void tcg_gen_nonatomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, TCGv_i32 newv, TCGArg idx, MemOp memop) { TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_temp_ebb_new_i32(); tcg_gen_ext_i32(t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i32(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i32(TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i32(t2, addr, idx, memop); tcg_temp_free_i32(t2); if (memop & MO_SIGN) { tcg_gen_ext_i32(retv, t1, memop); } else { tcg_gen_mov_i32(retv, t1); } tcg_temp_free_i32(t1); } void tcg_gen_atomic_cmpxchg_i32(TCGv_i32 retv, TCGv addr, TCGv_i32 cmpv, TCGv_i32 newv, TCGArg idx, MemOp memop) { gen_atomic_cx_i32 gen; MemOpIdx oi; if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { tcg_gen_nonatomic_cmpxchg_i32(retv, addr, cmpv, newv, idx, memop); return; } memop = tcg_canonicalize_memop(memop, 0, 0); gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); if (memop & MO_SIGN) { tcg_gen_ext_i32(retv, retv, memop); } } void tcg_gen_nonatomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, TCGv_i64 newv, TCGArg idx, MemOp memop) { TCGv_i64 t1, t2; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_nonatomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), TCGV_LOW(newv), idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); } else { tcg_gen_movi_i32(TCGV_HIGH(retv), 0); } return; } t1 = tcg_temp_ebb_new_i64(); t2 = tcg_temp_ebb_new_i64(); tcg_gen_ext_i64(t2, cmpv, memop & MO_SIZE); tcg_gen_qemu_ld_i64(t1, addr, idx, memop & ~MO_SIGN); tcg_gen_movcond_i64(TCG_COND_EQ, t2, t1, t2, newv, t1); tcg_gen_qemu_st_i64(t2, addr, idx, memop); tcg_temp_free_i64(t2); if (memop & MO_SIGN) { tcg_gen_ext_i64(retv, t1, memop); } else { tcg_gen_mov_i64(retv, t1); } tcg_temp_free_i64(t1); } void tcg_gen_atomic_cmpxchg_i64(TCGv_i64 retv, TCGv addr, TCGv_i64 cmpv, TCGv_i64 newv, TCGArg idx, MemOp memop) { if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { tcg_gen_nonatomic_cmpxchg_i64(retv, addr, cmpv, newv, idx, memop); return; } if ((memop & MO_SIZE) == MO_64) { gen_atomic_cx_i64 gen; memop = tcg_canonicalize_memop(memop, 1, 0); gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; if (gen) { MemOpIdx oi = make_memop_idx(memop, idx); gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); return; } gen_helper_exit_atomic(cpu_env); /* * Produce a result for a well-formed opcode stream. This satisfies * liveness for set before used, which happens before this dead code * is removed. 
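* (The exit_atomic helper raises EXCP_ATOMIC, causing this one guest * instruction to be re-executed in an exclusive, serialized context.) 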
*/ tcg_gen_movi_i64(retv, 0); return; } if (TCG_TARGET_REG_BITS == 32) { tcg_gen_atomic_cmpxchg_i32(TCGV_LOW(retv), addr, TCGV_LOW(cmpv), TCGV_LOW(newv), idx, memop); if (memop & MO_SIGN) { tcg_gen_sari_i32(TCGV_HIGH(retv), TCGV_LOW(retv), 31); } else { tcg_gen_movi_i32(TCGV_HIGH(retv), 0); } } else { TCGv_i32 c32 = tcg_temp_ebb_new_i32(); TCGv_i32 n32 = tcg_temp_ebb_new_i32(); TCGv_i32 r32 = tcg_temp_ebb_new_i32(); tcg_gen_extrl_i64_i32(c32, cmpv); tcg_gen_extrl_i64_i32(n32, newv); tcg_gen_atomic_cmpxchg_i32(r32, addr, c32, n32, idx, memop & ~MO_SIGN); tcg_temp_free_i32(c32); tcg_temp_free_i32(n32); tcg_gen_extu_i32_i64(retv, r32); tcg_temp_free_i32(r32); if (memop & MO_SIGN) { tcg_gen_ext_i64(retv, retv, memop); } } } void tcg_gen_nonatomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, TCGv_i128 newv, TCGArg idx, MemOp memop) { if (TCG_TARGET_REG_BITS == 32) { /* Inline expansion below is simply too large for 32-bit hosts. */ gen_atomic_cx_i128 gen = ((memop & MO_BSWAP) == MO_LE ? gen_helper_nonatomic_cmpxchgo_le : gen_helper_nonatomic_cmpxchgo_be); MemOpIdx oi = make_memop_idx(memop, idx); tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); } else { TCGv_i128 oldv = tcg_temp_ebb_new_i128(); TCGv_i128 tmpv = tcg_temp_ebb_new_i128(); TCGv_i64 t0 = tcg_temp_ebb_new_i64(); TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 z = tcg_constant_i64(0); tcg_gen_qemu_ld_i128(oldv, addr, idx, memop); /* Compare i128 */ tcg_gen_xor_i64(t0, TCGV128_LOW(oldv), TCGV128_LOW(cmpv)); tcg_gen_xor_i64(t1, TCGV128_HIGH(oldv), TCGV128_HIGH(cmpv)); tcg_gen_or_i64(t0, t0, t1); /* tmpv = equal ? newv : oldv */ tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_LOW(tmpv), t0, z, TCGV128_LOW(newv), TCGV128_LOW(oldv)); tcg_gen_movcond_i64(TCG_COND_EQ, TCGV128_HIGH(tmpv), t0, z, TCGV128_HIGH(newv), TCGV128_HIGH(oldv)); /* Unconditional writeback. */ tcg_gen_qemu_st_i128(tmpv, addr, idx, memop); tcg_gen_mov_i128(retv, oldv); tcg_temp_free_i64(t0); tcg_temp_free_i64(t1); tcg_temp_free_i128(tmpv); tcg_temp_free_i128(oldv); } } void tcg_gen_atomic_cmpxchg_i128(TCGv_i128 retv, TCGv addr, TCGv_i128 cmpv, TCGv_i128 newv, TCGArg idx, MemOp memop) { gen_atomic_cx_i128 gen; if (!(tcg_ctx->gen_tb->cflags & CF_PARALLEL)) { tcg_gen_nonatomic_cmpxchg_i128(retv, addr, cmpv, newv, idx, memop); return; } tcg_debug_assert((memop & MO_SIZE) == MO_128); tcg_debug_assert((memop & MO_SIGN) == 0); gen = table_cmpxchg[memop & (MO_SIZE | MO_BSWAP)]; if (gen) { MemOpIdx oi = make_memop_idx(memop, idx); gen(retv, cpu_env, addr, cmpv, newv, tcg_constant_i32(oi)); return; } gen_helper_exit_atomic(cpu_env); /* * Produce a result for a well-formed opcode stream. This satisfies * liveness for set before used, which happens before this dead code * is removed. */ tcg_gen_movi_i64(TCGV128_LOW(retv), 0); tcg_gen_movi_i64(TCGV128_HIGH(retv), 0); } static void do_nonatomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i32, TCGv_i32, TCGv_i32)) { TCGv_i32 t1 = tcg_temp_ebb_new_i32(); TCGv_i32 t2 = tcg_temp_ebb_new_i32(); memop = tcg_canonicalize_memop(memop, 0, 0); tcg_gen_qemu_ld_i32(t1, addr, idx, memop); tcg_gen_ext_i32(t2, val, memop); gen(t2, t1, t2); tcg_gen_qemu_st_i32(t2, addr, idx, memop); tcg_gen_ext_i32(ret, (new_val ? 
t2 : t1), memop); tcg_temp_free_i32(t1); tcg_temp_free_i32(t2); } static void do_atomic_op_i32(TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop, void * const table[]) { gen_atomic_op_i32 gen; MemOpIdx oi; memop = tcg_canonicalize_memop(memop, 0, 0); gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); if (memop & MO_SIGN) { tcg_gen_ext_i32(ret, ret, memop); } } static void do_nonatomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop, bool new_val, void (*gen)(TCGv_i64, TCGv_i64, TCGv_i64)) { TCGv_i64 t1 = tcg_temp_ebb_new_i64(); TCGv_i64 t2 = tcg_temp_ebb_new_i64(); memop = tcg_canonicalize_memop(memop, 1, 0); tcg_gen_qemu_ld_i64(t1, addr, idx, memop); tcg_gen_ext_i64(t2, val, memop); gen(t2, t1, t2); tcg_gen_qemu_st_i64(t2, addr, idx, memop); tcg_gen_ext_i64(ret, (new_val ? t2 : t1), memop); tcg_temp_free_i64(t1); tcg_temp_free_i64(t2); } static void do_atomic_op_i64(TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop, void * const table[]) { memop = tcg_canonicalize_memop(memop, 1, 0); if ((memop & MO_SIZE) == MO_64) { #ifdef CONFIG_ATOMIC64 gen_atomic_op_i64 gen; MemOpIdx oi; gen = table[memop & (MO_SIZE | MO_BSWAP)]; tcg_debug_assert(gen != NULL); oi = make_memop_idx(memop & ~MO_SIGN, idx); gen(ret, cpu_env, addr, val, tcg_constant_i32(oi)); #else gen_helper_exit_atomic(cpu_env); /* Produce a result, so that we have a well-formed opcode stream with respect to uses of the result in the (dead) code following. */ tcg_gen_movi_i64(ret, 0); #endif /* CONFIG_ATOMIC64 */ } else { TCGv_i32 v32 = tcg_temp_ebb_new_i32(); TCGv_i32 r32 = tcg_temp_ebb_new_i32(); tcg_gen_extrl_i64_i32(v32, val); do_atomic_op_i32(r32, addr, v32, idx, memop & ~MO_SIGN, table); tcg_temp_free_i32(v32); tcg_gen_extu_i32_i64(ret, r32); tcg_temp_free_i32(r32); if (memop & MO_SIGN) { tcg_gen_ext_i64(ret, ret, memop); } } } #define GEN_ATOMIC_HELPER(NAME, OP, NEW) \ static void * const table_##NAME[(MO_SIZE | MO_BSWAP) + 1] = { \ [MO_8] = gen_helper_atomic_##NAME##b, \ [MO_16 | MO_LE] = gen_helper_atomic_##NAME##w_le, \ [MO_16 | MO_BE] = gen_helper_atomic_##NAME##w_be, \ [MO_32 | MO_LE] = gen_helper_atomic_##NAME##l_le, \ [MO_32 | MO_BE] = gen_helper_atomic_##NAME##l_be, \ WITH_ATOMIC64([MO_64 | MO_LE] = gen_helper_atomic_##NAME##q_le) \ WITH_ATOMIC64([MO_64 | MO_BE] = gen_helper_atomic_##NAME##q_be) \ }; \ void tcg_gen_atomic_##NAME##_i32 \ (TCGv_i32 ret, TCGv addr, TCGv_i32 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ do_atomic_op_i32(ret, addr, val, idx, memop, table_##NAME); \ } else { \ do_nonatomic_op_i32(ret, addr, val, idx, memop, NEW, \ tcg_gen_##OP##_i32); \ } \ } \ void tcg_gen_atomic_##NAME##_i64 \ (TCGv_i64 ret, TCGv addr, TCGv_i64 val, TCGArg idx, MemOp memop) \ { \ if (tcg_ctx->gen_tb->cflags & CF_PARALLEL) { \ do_atomic_op_i64(ret, addr, val, idx, memop, table_##NAME); \ } else { \ do_nonatomic_op_i64(ret, addr, val, idx, memop, NEW, \ tcg_gen_##OP##_i64); \ } \ } GEN_ATOMIC_HELPER(fetch_add, add, 0) GEN_ATOMIC_HELPER(fetch_and, and, 0) GEN_ATOMIC_HELPER(fetch_or, or, 0) GEN_ATOMIC_HELPER(fetch_xor, xor, 0) GEN_ATOMIC_HELPER(fetch_smin, smin, 0) GEN_ATOMIC_HELPER(fetch_umin, umin, 0) GEN_ATOMIC_HELPER(fetch_smax, smax, 0) GEN_ATOMIC_HELPER(fetch_umax, umax, 0) GEN_ATOMIC_HELPER(add_fetch, add, 1) GEN_ATOMIC_HELPER(and_fetch, and, 1) GEN_ATOMIC_HELPER(or_fetch, or, 1) GEN_ATOMIC_HELPER(xor_fetch, xor, 1) 
GEN_ATOMIC_HELPER(smin_fetch, smin, 1) GEN_ATOMIC_HELPER(umin_fetch, umin, 1) GEN_ATOMIC_HELPER(smax_fetch, smax, 1) GEN_ATOMIC_HELPER(umax_fetch, umax, 1) static void tcg_gen_mov2_i32(TCGv_i32 r, TCGv_i32 a, TCGv_i32 b) { tcg_gen_mov_i32(r, b); } static void tcg_gen_mov2_i64(TCGv_i64 r, TCGv_i64 a, TCGv_i64 b) { tcg_gen_mov_i64(r, b); } GEN_ATOMIC_HELPER(xchg, mov2, 0) #undef GEN_ATOMIC_HELPER
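/* * Note on the NEW argument above: the fetch_<op> helpers (NEW == 0) return * the memory value before the operation, while the <op>_fetch helpers * (NEW == 1) return the value after it. Exchange reuses the same machinery * via mov2, which ignores the loaded value when computing the value to * store but still returns the old value. For example (hypothetical front * end usage): tcg_gen_atomic_fetch_add_i32(oldv, addr, incr, mmu_idx, * MO_TEUL) uses the helper table under CF_PARALLEL and the inline * load/add/store expansion otherwise. */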