| author | Sergey Fedorov <serge.fdrv@gmail.com> | 2016-04-25 18:17:30 +0300 |
|---|---|---|
| committer | Richard Henderson <rth@twiddle.net> | 2016-05-12 14:06:42 -1000 |
| commit | a0522c7a55cc8ac76d82884cf8e52f76daa664cc (patch) | |
| tree | dd7ea25b77fb21a5d3b80e7dde3e09fb59226718 /cpu-exec.c | |
| parent | 6f789be56d3f38e9214dafcfab3bf9be7191f370 (diff) | |
cpu-exec: Move TB chaining into tb_find_fast()
Move the tb_add_jump() call and the surrounding code from cpu_exec() into
tb_find_fast(). This simplifies cpu_exec() a little by hiding the details
of the direct chaining optimization inside tb_find_fast(). It also allows
moving the tb_lock()/tb_unlock() pair into tb_find_fast(), putting it
closer to tb_find_slow(), which also manipulates the lock.
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
[rth: Fixed rebase typo in nochain test.]
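For orientation, the sketch below condenses how the caller threads `last_tb` and `tb_exit` through the execution loop after this patch: cpu_tb_exec() returns the TB it actually exited from with the jump-slot index encoded in the low bits, and the next tb_find_fast() call uses that pair to chain the previous TB to the new one. This is a simplified illustration rather than code from the patch; the real cpu_exec() loop also handles exceptions, interrupts, icount, and exit requests.

```c
/* Condensed sketch of the post-patch execution loop in cpu-exec.c.
 * Identifiers (cpu_tb_exec, TB_EXIT_MASK) are QEMU's; the control
 * flow here is heavily simplified.
 */
TranslationBlock *last_tb = NULL;   /* no previous TB to chain from yet */
int tb_exit = 0;                    /* jump slot we left the last TB by */

for (;;) {
    /* Look up (or translate) the next TB; with this patch the lookup
     * also patches last_tb's jump slot `tb_exit` to point to the new
     * TB, all inside one tb_lock/tb_unlock critical section.
     */
    TranslationBlock *tb = tb_find_fast(cpu, &last_tb, tb_exit);

    /* Execute. The return value packs the TB actually exited from
     * (upper bits) with the exit slot index (low TB_EXIT_MASK bits).
     */
    uintptr_t ret = cpu_tb_exec(cpu, tb);
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
}
```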
Diffstat (limited to 'cpu-exec.c')
| -rw-r--r-- | cpu-exec.c | 35 |
1 file changed, 19 insertions, 16 deletions
```diff
diff --git a/cpu-exec.c b/cpu-exec.c
index f49a436e1a..7380b1eb4d 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -320,7 +320,9 @@ found:
     return tb;
 }
 
-static inline TranslationBlock *tb_find_fast(CPUState *cpu)
+static inline TranslationBlock *tb_find_fast(CPUState *cpu,
+                                             TranslationBlock **last_tb,
+                                             int tb_exit)
 {
     CPUArchState *env = (CPUArchState *)cpu->env_ptr;
     TranslationBlock *tb;
@@ -331,11 +333,24 @@ static inline TranslationBlock *tb_find_fast(CPUState *cpu)
        always be the same before a given translated block
        is executed. */
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
+    tb_lock();
     tb = cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)];
     if (unlikely(!tb || tb->pc != pc || tb->cs_base != cs_base ||
                  tb->flags != flags)) {
         tb = tb_find_slow(cpu, pc, cs_base, flags);
     }
+    if (cpu->tb_flushed) {
+        /* Ensure that no TB jump will be modified as the
+         * translation buffer has been flushed.
+         */
+        *last_tb = NULL;
+        cpu->tb_flushed = false;
+    }
+    /* See if we can patch the calling TB. */
+    if (*last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
+        tb_add_jump(*last_tb, tb_exit, tb);
+    }
+    tb_unlock();
     return tb;
 }
 
@@ -441,7 +456,8 @@ int cpu_exec(CPUState *cpu)
             } else if (replay_has_exception()
                        && cpu->icount_decr.u16.low + cpu->icount_extra == 0) {
                 /* try to cause an exception pending in the log */
-                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu), true);
+                last_tb = NULL; /* Avoid chaining TBs */
+                cpu_exec_nocache(cpu, 1, tb_find_fast(cpu, &last_tb, 0), true);
                 ret = -1;
                 break;
 #endif
@@ -511,20 +527,7 @@ int cpu_exec(CPUState *cpu)
                     cpu->exception_index = EXCP_INTERRUPT;
                     cpu_loop_exit(cpu);
                 }
-                tb_lock();
-                tb = tb_find_fast(cpu);
-                if (cpu->tb_flushed) {
-                    /* Ensure that no TB jump will be modified as the
-                     * translation buffer has been flushed.
-                     */
-                    last_tb = NULL;
-                    cpu->tb_flushed = false;
-                }
-                /* See if we can patch the calling TB. */
-                if (last_tb && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-                    tb_add_jump(last_tb, tb_exit, tb);
-                }
-                tb_unlock();
+                tb = tb_find_fast(cpu, &last_tb, tb_exit);
                 if (likely(!cpu->exit_request)) {
                     uintptr_t ret;
                     trace_exec_tb(tb, tb->pc);
```
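With the lock moved inside, the cache lookup, the tb_find_slow() fallback, the cpu->tb_flushed check, and the tb_add_jump() patching all execute under a single tb_lock() critical section, so a translation-buffer flush cannot slip in between finding a TB and chaining to it. The CPU_LOG_TB_NOCHAIN test that the code keeps corresponds to QEMU's `-d nochain` debug switch, which suppresses direct chaining so that control returns to cpu_exec() after every TB, which is useful when tracing execution (e.g. `qemu-system-x86_64 -d nochain,exec ...`; invocation shown only for illustration).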