author     Sergey Fedorov <serge.fdrv@gmail.com>    2016-04-10 23:35:45 +0300
committer  Richard Henderson <rth@twiddle.net>      2016-05-12 14:06:41 -1000
commit     f309101c26b59641fc1aa8fb2a98a5441cdaea03 (patch)
tree       9c0509fddf84c84027cc9bf8292219c3dbe62c12 /tcg
parent     7ba6a512ae439c98c0c1f0f4348c079d90f9dd9d (diff)
tcg: Clean up direct block chaining data fields
Briefly describe in a comment how direct block chaining is done. It
should help in understanding the data fields that follow.
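As an aside for readers of this page, the mechanism such a comment documents
can be paraphrased in a short sketch (an illustration, not the literal
comment text added by the patch):

    /*
     * Direct block chaining in outline: a TB may end in up to two jumps
     * to other TBs, numbered n = 0 and n = 1.  For each jump slot:
     *
     *  - jmp_insn_offset[n] (USE_DIRECT_JUMP only) records where the
     *    branch instruction was emitted, so its target can later be
     *    patched in place to point at another TB;
     *  - jmp_target_addr[n] (!USE_DIRECT_JUMP) holds the address that
     *    the generated code loads and jumps through indirectly;
     *  - jmp_reset_offset[n] records where execution resumes within
     *    this TB when the jump is reset (unchained), so the TB falls
     *    through to its exit instead of jumping elsewhere.
     */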
Rename some fields in the TranslationBlock and TCGContext structures to
better reflect their purpose (dropping the excessive 'tb_' prefix in
TranslationBlock but keeping it in TCGContext):
tb_next_offset => jmp_reset_offset
tb_jmp_offset => jmp_insn_offset
tb_next => jmp_target_addr
jmp_next => jmp_list_next
jmp_first => jmp_list_first
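The TranslationBlock half of this renaming lands outside the tcg/ directory
and so does not appear in the diffstat below; under the new names the fields
would look roughly like this (a sketch, not the verbatim patch):

    struct TranslationBlock {
        /* ... other fields elided ... */
        uint16_t jmp_reset_offset[2]; /* offset of original jump target */
    #ifdef USE_DIRECT_JUMP
        uint16_t jmp_insn_offset[2];  /* offset of the jump instruction */
    #else
        uintptr_t jmp_target_addr[2]; /* target address for indirect jump */
    #endif
        /* chaining list, for unchaining TBs when code is invalidated */
        uintptr_t jmp_list_next[2];
        uintptr_t jmp_list_first;
    };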
Also avoid using a bare magic constant as the invalid offset used to
indicate that the n-th jump was not generated.
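A plausible shape for that change (the named constant is defined outside the
tcg/ diffstat shown here; the name and value below are an assumption based on
the commit message, not quoted from this page):

    /* Assumed definition: a jmp_reset_offset[] entry with this value means
     * the n-th jump of the TB was never generated, so there is nothing to
     * reset or patch for that slot. */
    #define TB_JMP_RESET_OFFSET_INVALID 0xffff

    /* At translation time, before any goto_tb opcode is emitted: */
    tb->jmp_reset_offset[0] = TB_JMP_RESET_OFFSET_INVALID;
    tb->jmp_reset_offset[1] = TB_JMP_RESET_OFFSET_INVALID;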
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Diffstat (limited to 'tcg')
 tcg/aarch64/tcg-target.inc.c |  7
 tcg/arm/tcg-target.inc.c     |  8
 tcg/i386/tcg-target.inc.c    |  8
 tcg/ia64/tcg-target.inc.c    |  6
 tcg/mips/tcg-target.inc.c    |  8
 tcg/ppc/tcg-target.inc.c     |  6
 tcg/s390/tcg-target.inc.c    | 11
 tcg/sparc/tcg-target.inc.c   |  9
 tcg/tcg.h                    |  6
 tcg/tci/tcg-target.inc.c     | 10
 10 files changed, 41 insertions(+), 38 deletions(-)
diff --git a/tcg/aarch64/tcg-target.inc.c b/tcg/aarch64/tcg-target.inc.c
index 88183c830f..1447f7c216 100644
--- a/tcg/aarch64/tcg-target.inc.c
+++ b/tcg/aarch64/tcg-target.inc.c
@@ -1306,12 +1306,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
 #ifndef USE_DIRECT_JUMP
 #error "USE_DIRECT_JUMP required for aarch64"
 #endif
-        tcg_debug_assert(s->tb_jmp_offset != NULL); /* consistency for USE_DIRECT_JUMP */
-        s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+        /* consistency for USE_DIRECT_JUMP */
+        tcg_debug_assert(s->tb_jmp_insn_offset != NULL);
+        s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
         /* actual branch destination will be patched by
            aarch64_tb_set_jmp_target later, beware retranslation. */
         tcg_out_goto_noaddr(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
diff --git a/tcg/arm/tcg-target.inc.c b/tcg/arm/tcg-target.inc.c
index 977baa05c5..f9f54c64c6 100644
--- a/tcg/arm/tcg-target.inc.c
+++ b/tcg/arm/tcg-target.inc.c
@@ -1665,17 +1665,17 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_goto(s, COND_AL, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* Direct jump method */
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out_b_noaddr(s, COND_AL);
         } else {
             /* Indirect jump method */
-            intptr_t ptr = (intptr_t)(s->tb_next + args[0]);
+            intptr_t ptr = (intptr_t)(s->tb_jmp_target_addr + args[0]);
             tcg_out_movi32(s, COND_AL, TCG_REG_R0, ptr & ~0xfff);
             tcg_out_ld32_12(s, COND_AL, TCG_REG_PC, TCG_REG_R0, ptr & 0xfff);
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_goto_label(s, COND_AL, arg_label(args[0]));
diff --git a/tcg/i386/tcg-target.inc.c b/tcg/i386/tcg-target.inc.c
index 8d242a6665..317484cb5d 100644
--- a/tcg/i386/tcg-target.inc.c
+++ b/tcg/i386/tcg-target.inc.c
@@ -1790,7 +1790,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         tcg_out_jmp(s, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
             int gap;
             /* jump displacement must be aligned for atomic patching;
@@ -1801,14 +1801,14 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out_nopn(s, gap - 1);
             }
             tcg_out8(s, OPC_JMP_long); /* jmp im */
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
         } else {
             /* indirect jump method */
             tcg_out_modrm_offset(s, OPC_GRP5, EXT5_JMPN_Ev, -1,
-                                 (intptr_t)(s->tb_next + args[0]));
+                                 (intptr_t)(s->tb_jmp_target_addr + args[0]));
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_jxx(s, JCC_JMP, arg_label(args[0]), 0);
diff --git a/tcg/ia64/tcg-target.inc.c b/tcg/ia64/tcg-target.inc.c
index 7557e6a9d4..395223e340 100644
--- a/tcg/ia64/tcg-target.inc.c
+++ b/tcg/ia64/tcg-target.inc.c
@@ -881,13 +881,13 @@ static void tcg_out_exit_tb(TCGContext *s, tcg_target_long arg)
 
 static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
 {
-    if (s->tb_jmp_offset) {
+    if (s->tb_jmp_insn_offset) {
         /* direct jump method */
         tcg_abort();
     } else {
         /* indirect jump method */
         tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R2,
-                     (tcg_target_long)(s->tb_next + arg));
+                     (tcg_target_long)(s->tb_jmp_target_addr + arg));
         tcg_out_bundle(s, MmI,
                        tcg_opc_m1 (TCG_REG_P0, OPC_LD8_M1,
                                    TCG_REG_R2, TCG_REG_R2),
@@ -900,7 +900,7 @@ static inline void tcg_out_goto_tb(TCGContext *s, TCGArg arg)
                        tcg_opc_b4 (TCG_REG_P0, OPC_BR_SPTK_MANY_B4,
                                    TCG_REG_B6));
     }
-    s->tb_next_offset[arg] = tcg_current_code_size(s);
+    s->tb_jmp_reset_offset[arg] = tcg_current_code_size(s);
 }
 
 static inline void tcg_out_jmp(TCGContext *s, TCGArg addr)
diff --git a/tcg/mips/tcg-target.inc.c b/tcg/mips/tcg-target.inc.c
index 1e5a6b41fa..50e98ea63a 100644
--- a/tcg/mips/tcg-target.inc.c
+++ b/tcg/mips/tcg-target.inc.c
@@ -1397,19 +1397,19 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
-            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             /* Avoid clobbering the address during retranslation.  */
             tcg_out32(s, OPC_J | (*(uint32_t *)s->code_ptr & 0x3ffffff));
         } else {
             /* indirect jump method */
             tcg_out_ld(s, TCG_TYPE_PTR, TCG_TMP0, TCG_REG_ZERO,
-                       (uintptr_t)(s->tb_next + a0));
+                       (uintptr_t)(s->tb_jmp_target_addr + a0));
             tcg_out_opc_reg(s, OPC_JR, 0, TCG_TMP0, 0);
         }
         tcg_out_nop(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_brcond(s, TCG_COND_EQ, TCG_REG_ZERO, TCG_REG_ZERO,
diff --git a/tcg/ppc/tcg-target.inc.c b/tcg/ppc/tcg-target.inc.c
index 039fa77748..da100528ab 100644
--- a/tcg/ppc/tcg-target.inc.c
+++ b/tcg/ppc/tcg-target.inc.c
@@ -1902,14 +1902,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out_b(s, 0, tb_ret_addr);
         break;
     case INDEX_op_goto_tb:
-        tcg_debug_assert(s->tb_jmp_offset);
+        tcg_debug_assert(s->tb_jmp_insn_offset);
         /* Direct jump. */
 #ifdef __powerpc64__
         /* Ensure the next insns are 8-byte aligned. */
         if ((uintptr_t)s->code_ptr & 7) {
             tcg_out32(s, NOP);
         }
-        s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
         /* To be replaced by either a branch+nop or a load into TMP1.  */
         s->code_ptr += 2;
         tcg_out32(s, MTSPR | RS(TCG_REG_TMP1) | CTR);
@@ -1918,7 +1918,7 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         /* To be replaced by a branch.  */
         s->code_ptr++;
 #endif
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         {
diff --git a/tcg/s390/tcg-target.inc.c b/tcg/s390/tcg-target.inc.c
index 8e8064ce73..e0a60e618c 100644
--- a/tcg/s390/tcg-target.inc.c
+++ b/tcg/s390/tcg-target.inc.c
@@ -1717,7 +1717,7 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
         break;
 
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* branch displacement must be aligned for atomic patching;
              * see if we need to add extra nop before branch
              */
@@ -1725,15 +1725,16 @@ static inline void tcg_out_op(TCGContext *s, TCGOpcode opc,
                 tcg_out16(s, NOP);
             }
             tcg_out16(s, RIL_BRCL | (S390_CC_ALWAYS << 4));
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             s->code_ptr += 2;
         } else {
-            /* load address stored at s->tb_next + args[0] */
-            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0, s->tb_next + args[0]);
+            /* load address stored at s->tb_jmp_target_addr + args[0] */
+            tcg_out_ld_abs(s, TCG_TYPE_PTR, TCG_TMP0,
+                           s->tb_jmp_target_addr + args[0]);
             /* and go there */
             tcg_out_insn(s, RR, BCR, S390_CC_ALWAYS, TCG_TMP0);
         }
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
 
     OP_32_64(ld8u):
diff --git a/tcg/sparc/tcg-target.inc.c b/tcg/sparc/tcg-target.inc.c
index c6479e2507..9938a5085e 100644
--- a/tcg/sparc/tcg-target.inc.c
+++ b/tcg/sparc/tcg-target.inc.c
@@ -1229,18 +1229,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
         }
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* direct jump method */
-            s->tb_jmp_offset[a0] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
             /* Make sure to preserve links during retranslation. */
             tcg_out32(s, CALL | (*s->code_ptr & ~INSN_OP(-1)));
         } else {
             /* indirect jump method */
-            tcg_out_ld_ptr(s, TCG_REG_T1, (uintptr_t)(s->tb_next + a0));
+            tcg_out_ld_ptr(s, TCG_REG_T1,
+                           (uintptr_t)(s->tb_jmp_target_addr + a0));
             tcg_out_arithi(s, TCG_REG_G0, TCG_REG_T1, 0, JMPL);
         }
         tcg_out_nop(s);
-        s->tb_next_offset[a0] = tcg_current_code_size(s);
+        s->tb_jmp_reset_offset[a0] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tcg_out_bpcc(s, COND_A, BPCC_PT, arg_label(a0));
diff --git a/tcg/tcg.h b/tcg/tcg.h
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -510,9 +510,9 @@ struct TCGContext {
 
     /* goto_tb support */
     tcg_insn_unit *code_buf;
-    uintptr_t *tb_next;
-    uint16_t *tb_next_offset;
-    uint16_t *tb_jmp_offset; /* != NULL if USE_DIRECT_JUMP */
+    uint16_t *tb_jmp_reset_offset; /* tb->jmp_reset_offset */
+    uint16_t *tb_jmp_insn_offset; /* tb->jmp_insn_offset if USE_DIRECT_JUMP */
+    uintptr_t *tb_jmp_target_addr; /* tb->jmp_target_addr if !USE_DIRECT_JUMP */
 
     /* liveness analysis */
     uint16_t *op_dead_args; /* for each operation, each bit tells if the
diff --git a/tcg/tci/tcg-target.inc.c b/tcg/tci/tcg-target.inc.c
index 85eeb5de24..fa74d5278e 100644
--- a/tcg/tci/tcg-target.inc.c
+++ b/tcg/tci/tcg-target.inc.c
@@ -553,19 +553,19 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg *args,
         tcg_out64(s, args[0]);
         break;
     case INDEX_op_goto_tb:
-        if (s->tb_jmp_offset) {
+        if (s->tb_jmp_insn_offset) {
             /* Direct jump method. */
-            tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_offset));
+            tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_insn_offset));
             /* Align for atomic patching and thread safety */
             s->code_ptr = QEMU_ALIGN_PTR_UP(s->code_ptr, 4);
-            s->tb_jmp_offset[args[0]] = tcg_current_code_size(s);
+            s->tb_jmp_insn_offset[args[0]] = tcg_current_code_size(s);
             tcg_out32(s, 0);
         } else {
             /* Indirect jump method. */
             TODO();
         }
-        tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_next_offset));
-        s->tb_next_offset[args[0]] = tcg_current_code_size(s);
+        tcg_debug_assert(args[0] < ARRAY_SIZE(s->tb_jmp_reset_offset));
+        s->tb_jmp_reset_offset[args[0]] = tcg_current_code_size(s);
         break;
     case INDEX_op_br:
         tci_out_label(s, arg_label(args[0]));
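For context on how the recorded offsets are consumed after translation, here
is a simplified sketch of aiming one TB's jump slot at another TB.  It is
illustrative only: chain_tb_to(), use_direct_jump() and patch_branch_at() are
made-up names standing in for the per-target patching code; only the field
names come from this patch.

    /* Hypothetical helper: make jump slot n of 'tb' branch to 'addr'. */
    static void chain_tb_to(TranslationBlock *tb, int n, uintptr_t addr)
    {
        if (use_direct_jump()) {
            /* Direct method: patch the branch instruction that was
             * emitted at offset jmp_insn_offset[n] into the TB's code. */
            patch_branch_at((uintptr_t)tb->tc_ptr + tb->jmp_insn_offset[n],
                            addr);
        } else {
            /* Indirect method: the generated code reloads this address
             * every time it runs, so a plain store is enough. */
            tb->jmp_target_addr[n] = addr;
        }
    }

Resetting (unchaining) a slot is the same operation aimed back into the TB
itself, at tb->tc_ptr + tb->jmp_reset_offset[n]; that is exactly the offset
recorded by the s->tb_jmp_reset_offset[...] stores in the hunks above.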