author     Richard Henderson <richard.henderson@linaro.org>  2023-04-09 23:03:55 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-11 09:53:41 +0100
commit     da8ab70ad17762e1ad1c4de01f981adebd26f851
tree       00df9f7946640bcfedab116ab062efb81b0a904b  /tcg/i386/tcg-target.c.inc
parent     8429a1ca8f315394b2a68b96812b1dc9c60070fd
tcg/i386: Convert tcg_out_qemu_ld_slow_path
Use tcg_out_ld_helper_args and tcg_out_ld_helper_ret.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
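
For readers landing on this patch without the rest of the series: tcg_out_ld_helper_args() and tcg_out_ld_helper_ret() are common-code helpers that take over the per-backend boilerplate removed below, namely marshalling env, the guest address, the MemOpIdx and the return address into the helper call, and moving/extending the helper's result into the data register. The toy program below only illustrates that division of labour; all types and functions in it are simplified stand-ins, not QEMU's actual implementation.

/* Toy model of the labour split between the common-code ld helpers and the
 * backend slow path.  Everything here is a simplified stand-in. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    unsigned oi;        /* MemOpIdx stand-in: memory op + mmu index   */
    int addr_reg;       /* register holding the guest address         */
    int data_reg;       /* register that must receive the loaded data */
    uintptr_t raddr;    /* return address into the translated block   */
} LdstLabel;

/* Stand-in for tcg_out_ld_helper_args(): put env, addr, oi and raddr where
 * the C helper expects them (stack slots on i686, argument registers on
 * x86_64). */
static void ld_helper_args(const LdstLabel *l)
{
    printf("args: env, r%d (addr), oi=%#x, raddr=%#lx\n",
           l->addr_reg, l->oi, (unsigned long)l->raddr);
}

/* Stand-in for tcg_out_ld_helper_ret(): move/extend the helper's return
 * value (EAX, or EDX:EAX for a 64-bit load on i686) into the data reg. */
static void ld_helper_ret(const LdstLabel *l)
{
    printf("ret:  r%d <- extend(EAX)\n", l->data_reg);
}

/* What the converted backend slow path reduces to. */
static void ld_slow_path(const LdstLabel *l)
{
    ld_helper_args(l);
    printf("call qemu_ld helper\n");
    ld_helper_ret(l);
    printf("jmp  raddr %#lx\n", (unsigned long)l->raddr);
}

int main(void)
{
    LdstLabel l = { .oi = 0x32, .addr_reg = 5, .data_reg = 3, .raddr = 0x1000 };
    ld_slow_path(&l);
    return 0;
}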
Diffstat (limited to 'tcg/i386/tcg-target.c.inc')
-rw-r--r--   tcg/i386/tcg-target.c.inc  |  71
1 file changed, 28 insertions(+), 43 deletions(-)
diff --git a/tcg/i386/tcg-target.c.inc b/tcg/i386/tcg-target.c.inc
index 18b0e7997d..3508b9cc6c 100644
--- a/tcg/i386/tcg-target.c.inc
+++ b/tcg/i386/tcg-target.c.inc
@@ -1803,12 +1803,36 @@ static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
 };
 
 /*
+ * Because i686 has no register parameters and because x86_64 has xchg
+ * to handle addr/data register overlap, we have placed all input arguments
+ * before we need might need a scratch reg.
+ *
+ * Even then, a scratch is only needed for l->raddr.  Rather than expose
+ * a general-purpose scratch when we don't actually know it's available,
+ * use the ra_gen hook to load into RAX if needed.
+ */
+#if TCG_TARGET_REG_BITS == 64
+static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
+{
+    if (arg < 0) {
+        arg = TCG_REG_RAX;
+    }
+    tcg_out_movi(s, TCG_TYPE_PTR, arg, (uintptr_t)l->raddr);
+    return arg;
+}
+static const TCGLdstHelperParam ldst_helper_param = {
+    .ra_gen = ldst_ra_gen
+};
+#else
+static const TCGLdstHelperParam ldst_helper_param = { };
+#endif
+
+/*
  * Generate code for the slow path for a load at the end of block
  */
 static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    MemOpIdx oi = l->oi;
-    MemOp opc = get_memop(oi);
+    MemOp opc = get_memop(l->oi);
     tcg_insn_unit **label_ptr = &l->label_ptr[0];
 
     /* resolve label address */
@@ -1817,49 +1841,10 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
         tcg_patch32(label_ptr[1], s->code_ptr - label_ptr[1] - 4);
     }
 
-    if (TCG_TARGET_REG_BITS == 32) {
-        int ofs = 0;
-
-        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        if (TARGET_LONG_BITS == 64) {
-            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
-            ofs += 4;
-        }
-
-        tcg_out_sti(s, TCG_TYPE_I32, oi, TCG_REG_ESP, ofs);
-        ofs += 4;
-
-        tcg_out_sti(s, TCG_TYPE_PTR, (uintptr_t)l->raddr, TCG_REG_ESP, ofs);
-    } else {
-        tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0);
-        tcg_out_mov(s, TCG_TYPE_TL, tcg_target_call_iarg_regs[1],
-                    l->addrlo_reg);
-        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], oi);
-        tcg_out_movi(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[3],
-                     (uintptr_t)l->raddr);
-    }
-
+    tcg_out_ld_helper_args(s, l, &ldst_helper_param);
     tcg_out_branch(s, 1, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
+    tcg_out_ld_helper_ret(s, l, false, &ldst_helper_param);
 
-    if (TCG_TARGET_REG_BITS == 32 && (opc & MO_SIZE) == MO_64) {
-        TCGMovExtend ext[2] = {
-            { .dst = l->datalo_reg, .dst_type = TCG_TYPE_I32,
-              .src = TCG_REG_EAX, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
-            { .dst = l->datahi_reg, .dst_type = TCG_TYPE_I32,
-              .src = TCG_REG_EDX, .src_type = TCG_TYPE_I32, .src_ext = MO_UL },
-        };
-        tcg_out_movext2(s, &ext[0], &ext[1], -1);
-    } else {
-        tcg_out_movext(s, l->type, l->datalo_reg,
-                       TCG_TYPE_REG, opc & MO_SSIZE, TCG_REG_EAX);
-    }
-
-    /* Jump to the code corresponding to next IR of qemu_st */
     tcg_out_jmp(s, l->raddr);
     return true;
 }
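
A note on the ra_gen hook in the hunk above: the common code needs l->raddr materialized in some register so it can be passed as the final helper argument. The hook receives a candidate register, or a negative value when no call-argument register is free; the i386 backend then falls back to RAX, which is safe to clobber here because it is call-clobbered and will be overwritten by the helper's return value anyway. The stand-alone sketch below illustrates that contract; apart from ldst_ra_gen itself and the arg < 0 convention, the caller-side logic and names are assumptions, not QEMU's tcg.c code.

/* Stand-alone sketch of the ra_gen contract.  The structs are reduced to the
 * fields this example needs; pass_return_address() is a hypothetical caller. */
#include <stdint.h>
#include <stdio.h>

typedef int TCGReg;
enum { TCG_REG_RAX = 0 };
typedef struct { int dummy; } TCGContext;
typedef struct { uintptr_t raddr; } TCGLabelQemuLdst;

/* Backend hook, mirroring ldst_ra_gen in the patch: if the caller offers no
 * free register (arg < 0), fall back to RAX, then "emit" a move of the
 * immediate return address into the chosen register. */
static TCGReg ldst_ra_gen(TCGContext *s, const TCGLabelQemuLdst *l, int arg)
{
    (void)s;
    if (arg < 0) {
        arg = TCG_REG_RAX;
    }
    printf("movi r%d, %#lx   /* l->raddr */\n", arg, (unsigned long)l->raddr);
    return arg;
}

/* Hypothetical common-code caller: offers a free argument register when it
 * has one, otherwise asks the backend to pick its own scratch. */
static void pass_return_address(TCGContext *s, const TCGLabelQemuLdst *l,
                                int free_arg_reg /* -1 if none */)
{
    TCGReg r = ldst_ra_gen(s, l, free_arg_reg);
    printf("helper arg <- r%d\n", r);
}

int main(void)
{
    TCGContext ctx = { 0 };
    TCGLabelQemuLdst l = { .raddr = 0x1234beef };
    pass_return_address(&ctx, &l, 6);    /* a call-argument register is free */
    pass_return_address(&ctx, &l, -1);   /* none free: backend uses RAX      */
    return 0;
}

The #else branch of the patch leaves the parameter block empty, presumably because on i686 the helper arguments are passed on the stack and the constant return address can be stored there directly, so no register scratch is needed.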