Diffstat (limited to 'tcg')
-rw-r--r--   tcg/ia64/tcg-target.c   11
-rw-r--r--   tcg/tcg-be-ldst.h       11
-rw-r--r--   tcg/tcg-be-null.h        3
-rw-r--r--   tcg/tcg.c               12
4 files changed, 27 insertions, 10 deletions
diff --git a/tcg/ia64/tcg-target.c b/tcg/ia64/tcg-target.c
index 647e9a6f29..62d654943c 100644
--- a/tcg/ia64/tcg-target.c
+++ b/tcg/ia64/tcg-target.c
@@ -1572,7 +1572,7 @@ static void add_qemu_ldst_label(TCGContext *s, bool is_ld, TCGMemOp opc,
     be->labels = l;
 }
 
-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     static const void * const helpers[8] = {
         helper_ret_stb_mmu,
@@ -1620,7 +1620,16 @@ static void tcg_out_tb_finalize(TCGContext *s)
         }
 
         reloc_pcrel21b_slot2(l->label_ptr, dest);
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }
 
 static inline void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args)
diff --git a/tcg/tcg-be-ldst.h b/tcg/tcg-be-ldst.h
index 40a2369b7c..17777aec5a 100644
--- a/tcg/tcg-be-ldst.h
+++ b/tcg/tcg-be-ldst.h
@@ -56,7 +56,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l);
 
-static void tcg_out_tb_finalize(TCGContext *s)
+static bool tcg_out_tb_finalize(TCGContext *s)
 {
     TCGLabelQemuLdst *lb;
 
@@ -67,7 +67,16 @@ static void tcg_out_tb_finalize(TCGContext *s)
         } else {
             tcg_out_qemu_st_slow_path(s, lb);
         }
+
+        /* Test for (pending) buffer overflow.  The assumption is that any
+           one operation beginning below the high water mark cannot overrun
+           the buffer completely.  Thus we can test for overflow after
+           generating code without having to check during generation.  */
+        if (unlikely((void *)s->code_ptr > s->code_gen_highwater)) {
+            return false;
+        }
     }
+    return true;
 }
 
 /*
diff --git a/tcg/tcg-be-null.h b/tcg/tcg-be-null.h
index 74c57d5a6c..5222fe29e2 100644
--- a/tcg/tcg-be-null.h
+++ b/tcg/tcg-be-null.h
@@ -38,6 +38,7 @@ static inline void tcg_out_tb_init(TCGContext *s)
  * Generate TB finalization at the end of block
  */
-static inline void tcg_out_tb_finalize(TCGContext *s)
+static inline bool tcg_out_tb_finalize(TCGContext *s)
 {
+    return true;
 }
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -111,7 +111,7 @@ static void tcg_out_call(TCGContext *s, tcg_insn_unit *target);
 static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                   const TCGArgConstraint *arg_ct);
 static void tcg_out_tb_init(TCGContext *s);
-static void tcg_out_tb_finalize(TCGContext *s);
+static bool tcg_out_tb_finalize(TCGContext *s);
@@ -389,11 +389,7 @@ void tcg_prologue_init(TCGContext *s)
     /* Compute a high-water mark, at which we voluntarily flush the buffer
        and start over.  The size here is arbitrary, significantly larger
        than we expect the code generation for any one opcode to require.  */
-    /* ??? We currently have no good estimate for, or checks in,
-       tcg_out_tb_finalize.  If there are quite a lot of guest memory ops,
-       the number of out-of-line fragments could be quite high.  In the
-       short-term, increase the highwater buffer.  */
-    s->code_gen_highwater = s->code_gen_buffer + (total_size - 64*1024);
+    s->code_gen_highwater = s->code_gen_buffer + (total_size - 1024);
 
     tcg_register_jit(s->code_gen_buffer, total_size);
@@ -2456,7 +2452,9 @@ int tcg_gen_code(TCGContext *s, tcg_insn_unit *gen_code_buf)
     s->gen_insn_end_off[num_insns] = tcg_current_code_size(s);
 
     /* Generate TB finalization at the end of block */
-    tcg_out_tb_finalize(s);
+    if (!tcg_out_tb_finalize(s)) {
+        return -1;
+    }
 
     /* flush instruction cache */
     flush_icache_range((uintptr_t)s->code_buf, (uintptr_t)s->code_ptr);
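
The comments added in the ia64 backend and in tcg-be-ldst.h spell out the invariant behind the new check: any operation that begins at or below the high-water mark cannot overrun the buffer, so overflow only needs to be tested between operations, never during one. A minimal, self-contained sketch of that pattern follows; this is not QEMU code, and emit_ctx, emit_one_op, emit_block and OP_MAX_SIZE are invented names, with the 1 KiB slack matching the value the patch restores in tcg_prologue_init.

/*
 * Minimal sketch of the high-water-mark idea used by this patch.  Not QEMU
 * code: emit_ctx, emit_one_op, emit_block and OP_MAX_SIZE are invented for
 * illustration.  The buffer reserves a slack region at its end at least as
 * large as any single operation can emit, so overflow only has to be tested
 * between operations.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BUF_SIZE    (64 * 1024)
#define OP_MAX_SIZE 1024            /* assumed bound on a single operation */

struct emit_ctx {
    uint8_t *ptr;                   /* next free byte */
    uint8_t *highwater;             /* buffer end minus the slack region */
    uint8_t  buf[BUF_SIZE];
};

static void emit_init(struct emit_ctx *s)
{
    s->ptr = s->buf;
    s->highwater = s->buf + (BUF_SIZE - OP_MAX_SIZE);
}

/* Emit one (dummy) operation of len <= OP_MAX_SIZE bytes. */
static void emit_one_op(struct emit_ctx *s, const void *data, size_t len)
{
    memcpy(s->ptr, data, len);
    s->ptr += len;
}

/*
 * Emit a block of one-byte operations.  Returns false once the high-water
 * mark has been crossed and the caller must flush the buffer and start
 * over, mirroring the bool now returned by tcg_out_tb_finalize.
 */
static bool emit_block(struct emit_ctx *s, const uint8_t *ops, size_t nops)
{
    for (size_t i = 0; i < nops; i++) {
        emit_one_op(s, &ops[i], 1);
        /* Check only after the op: because it started at or below the
           high-water mark, it cannot have run past the real buffer end. */
        if (s->ptr > s->highwater) {
            return false;
        }
    }
    return true;
}

int main(void)
{
    static struct emit_ctx ctx;
    static const uint8_t block[40 * 1024];  /* two of these exceed BUF_SIZE */

    emit_init(&ctx);
    (void)emit_block(&ctx, block, sizeof(block));     /* fits comfortably */
    if (!emit_block(&ctx, block, sizeof(block))) {    /* crosses the mark */
        emit_init(&ctx);                              /* "flush" the buffer */
        (void)emit_block(&ctx, block, sizeof(block)); /* and regenerate */
    }
    return 0;
}

In the patch itself the retry happens one level up: tcg_gen_code now returns -1 when finalization reports overflow, and the pre-existing comment in tcg_prologue_init already states the policy, namely that crossing the high-water mark makes the caller voluntarily flush the buffer and start over.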