Diffstat (limited to 'tcg/arm/tcg-target.inc.c')
 -rw-r--r--  tcg/arm/tcg-target.inc.c | 54
 1 file changed, 36 insertions(+), 18 deletions(-)
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index e75a6d4943..9f5cb66718 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -329,11 +329,6 @@ static const uint8_t tcg_cond_to_arm_cond[] = {
    [TCG_COND_GTU] = COND_HI,
};

-static inline void tcg_out_bx(TCGContext *s, int cond, int rn)
-{
-    tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
-}
-
static inline void tcg_out_b(TCGContext *s, int cond, int32_t offset)
{
    tcg_out32(s, (cond << 28) | 0x0a000000 |
@@ -402,6 +397,18 @@ static inline void tcg_out_mov_reg(TCGContext *s, int cond, int rd, int rm)
    }
}

+static inline void tcg_out_bx(TCGContext *s, int cond, TCGReg rn)
+{
+    /* Unless the C portion of QEMU is compiled as thumb, we don't
+       actually need true BX semantics; merely a branch to an address
+       held in a register. */
+    if (use_armv5t_instructions) {
+        tcg_out32(s, (cond << 28) | 0x012fff10 | rn);
+    } else {
+        tcg_out_mov_reg(s, cond, TCG_REG_PC, rn);
+    }
+}
+
static inline void tcg_out_dat_imm(TCGContext *s,
                                   int cond, int opc, int rd, int rn, int im)
{
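
(For reference, a minimal standalone C sketch, with names invented here, of the
two instruction words tcg_out_bx chooses between: the ARMv5T BX, and the
data-processing "mov pc, rm" fallback for older cores.)

/* Sketch only: reproduce the words emitted by tcg_out_bx above.
   COND_AL is 0xe.  BX rn encodes as cond<<28 | 0x012fff10 | rn; the
   pre-v5T fallback is MOV (opcode 0xd) with rd = pc (r15). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t bx_insn(uint32_t cond, uint32_t rn)
{
    return (cond << 28) | 0x012fff10u | rn;
}

static uint32_t mov_pc_insn(uint32_t cond, uint32_t rm)
{
    return (cond << 28) | (0xdu << 21) | (15u << 12) | rm;
}

int main(void)
{
    printf("bx r0 (AL): %08" PRIx32 "\n", bx_insn(0xeu, 0));      /* e12fff10 */
    printf("mov pc, r0: %08" PRIx32 "\n", mov_pc_insn(0xeu, 0));  /* e1a0f000 */
    return 0;
}

The mov-to-pc form branches correctly but cannot switch to Thumb state, which
is why it is only safe while all of QEMU's C code runs in ARM mode; the comment
in the function records exactly that assumption.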
@@ -977,7 +984,7 @@ static inline void tcg_out_st8(TCGContext *s, int cond,
 * with the code buffer limited to 16MB we wouldn't need the long case.
 * But we also use it for the tail-call to the qemu_ld/st helpers, which does.
 */
-static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
+static void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
{
    intptr_t addri = (intptr_t)addr;
    ptrdiff_t disp = tcg_pcrel_diff(s, addr);
@@ -987,15 +994,9 @@ static inline void tcg_out_goto(TCGContext *s, int cond, tcg_insn_unit *addr)
        return;
    }

+    assert(use_armv5t_instructions || (addri & 1) == 0);
    tcg_out_movi32(s, cond, TCG_REG_TMP, addri);
-    if (use_armv5t_instructions) {
-        tcg_out_bx(s, cond, TCG_REG_TMP);
-    } else {
-        if (addri & 1) {
-            tcg_abort();
-        }
-        tcg_out_mov_reg(s, cond, TCG_REG_PC, TCG_REG_TMP);
-    }
+    tcg_out_bx(s, cond, TCG_REG_TMP);
}
/* The call case is mostly used for helpers - so it's not unreasonable
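
(The fast path above emits a direct B when the displacement fits; the new
assert captures the one case the long path cannot handle: on a pre-ARMv5T
host a Thumb target, marked by a set low address bit, is unreachable because
mov-to-pc does not switch instruction sets.  A sketch of the range check,
helper name invented, assuming the usual encoding: the B immediate is a
signed 24-bit word offset relative to pc, which reads 8 bytes ahead of the
instruction, matching tcg_out_b above.)

/* Sketch only: does a pc-relative byte displacement fit in the ARM
   B instruction?  The encoded field is ((disp - 8) >> 2), truncated
   to 24 signed bits. */
#include <stdbool.h>
#include <stddef.h>

static bool b_disp_fits(ptrdiff_t disp)
{
    ptrdiff_t imm = disp - 8;       /* pc reads 8 bytes ahead */
    return (imm & 3) == 0           /* must be word aligned */
        && imm >= -0x02000000       /* -2^25 bytes */
        && imm <=  0x01fffffc;      /*  2^25 - 4 bytes */
}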
@@ -1654,8 +1655,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
    switch (opc) {
    case INDEX_op_exit_tb:
-        tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
-        tcg_out_goto(s, COND_AL, tb_ret_addr);
+        /* Reuse the zeroing that exists for goto_ptr. */
+        a0 = args[0];
+        if (a0 == 0) {
+            tcg_out_goto(s, COND_AL, s->code_gen_epilogue);
+        } else {
+            tcg_out_movi32(s, COND_AL, TCG_REG_R0, args[0]);
+            tcg_out_goto(s, COND_AL, tb_ret_addr);
+        }
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_insn_offset) {
@@ -1670,6 +1677,9 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
        }
        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
        break;
+    case INDEX_op_goto_ptr:
+        tcg_out_bx(s, COND_AL, args[0]);
+        break;
    case INDEX_op_br:
        tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
        break;
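
(Why exit_tb(0) may branch to s->code_gen_epilogue: generated code hands a
value back to its caller in r0, and the goto_ptr return path laid down in the
prologue hunk below zeroes r0 before falling into the normal return sequence,
so a zero exit value comes out either way.  A simplified, hypothetical C
sketch of the calling side's contract; this is not QEMU's actual main loop.)

/* Hypothetical sketch: the contract that makes exit_tb(0) and the
   goto_ptr return path interchangeable.  A TB returns a uintptr_t;
   zero means "no next-TB hint, fall back to a lookup". */
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t (*tb_fn)(void);

static uintptr_t tb_exit_zero(void)    { return 0; }          /* via epilogue */
static uintptr_t tb_exit_nonzero(void) { return 0xdeadbeef; } /* exit_tb(x) */

static void dispatch_once(tb_fn tb)
{
    uintptr_t ret = tb();
    if (ret == 0) {
        puts("no hint: look up the next TB");
    } else {
        printf("TB exit value %#lx\n", (unsigned long)ret);
    }
}

int main(void)
{
    dispatch_once(tb_exit_zero);
    dispatch_once(tb_exit_nonzero);
    return 0;
}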
@@ -1960,6 +1970,7 @@ static const TCGTargetOpDef arm_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },
    { INDEX_op_br, { } },
+    { INDEX_op_goto_ptr, { "r" } },

    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
@@ -2135,9 +2146,16 @@ static void tcg_target_qemu_prologue(TCGContext *s)
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_bx(s, COND_AL, tcg_target_call_iarg_regs[1]);

-    tb_ret_addr = s->code_ptr;
-    /* Epilogue.  We branch here via tb_ret_addr.  */
+    /*
+     * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
+     * and fall through to the rest of the epilogue.
+     */
+    s->code_gen_epilogue = s->code_ptr;
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R0, 0);
+
+    /* TB epilogue */
+    tb_ret_addr = s->code_ptr;
    tcg_out_dat_rI(s, COND_AL, ARITH_ADD, TCG_REG_CALL_STACK,
                   TCG_REG_CALL_STACK, stack_addend, 1);
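
(The layout this produces: s->code_gen_epilogue labels a single "mov r0, #0"
that falls through to tb_ret_addr, so a goto_ptr landing there behaves exactly
like exit_tb(0).  A hypothetical C analogue of that fall-through, with names
invented here:)

/* Hypothetical analogue: entering at EPILOGUE_ENTRY zeroes the return
   value and falls through into the common return path, as
   code_gen_epilogue falls through to tb_ret_addr in the emitted code. */
#include <stdint.h>

enum entry { EPILOGUE_ENTRY, TB_RET_ADDR };

static uintptr_t leave_tb(enum entry e, uintptr_t r0)
{
    switch (e) {
    case EPILOGUE_ENTRY:    /* s->code_gen_epilogue: mov r0, #0 */
        r0 = 0;
        /* fall through */
    case TB_RET_ADDR:       /* tb_ret_addr: unwind stack and return */
        return r0;
    }
    return r0;
}

int main(void)
{
    /* Entering via the epilogue entry always yields 0, a-la exit_tb(0). */
    return leave_tb(EPILOGUE_ENTRY, 0xdeadbeef) == 0 ? 0 : 1;
}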