-rw-r--r--  cpu-exec.c               20
-rw-r--r--  include/exec/exec-all.h   3
2 files changed, 14 insertions, 9 deletions
diff --git a/cpu-exec.c b/cpu-exec.c
index fd92452f16..6206cdf385 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -133,10 +133,14 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 #endif /* CONFIG_USER_ONLY */
 
 /* Execute a TB, and fix up the CPU state afterwards if necessary */
-static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
+static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
 {
     CPUArchState *env = cpu->env_ptr;
     uintptr_t next_tb;
+    uint8_t *tb_ptr = itb->tc_ptr;
+
+    qemu_log_mask(CPU_LOG_EXEC, "Trace %p [" TARGET_FMT_lx "] %s\n",
+                  itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
 
 #if defined(DEBUG_DISAS)
     if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
@@ -167,6 +171,10 @@ static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, uint8_t *tb_ptr)
          */
         CPUClass *cc = CPU_GET_CLASS(cpu);
         TranslationBlock *tb = (TranslationBlock *)(next_tb & ~TB_EXIT_MASK);
+        qemu_log_mask(CPU_LOG_EXEC,
+                      "Stopped execution of TB chain before %p ["
+                      TARGET_FMT_lx "] %s\n",
+                      itb->tc_ptr, itb->pc, lookup_symbol(itb->pc));
         if (cc->synchronize_from_tb) {
             cc->synchronize_from_tb(cpu, tb);
         } else {
@@ -202,7 +210,7 @@ static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
     cpu->current_tb = tb;
     /* execute the generated code */
     trace_exec_tb_nocache(tb, tb->pc);
-    cpu_tb_exec(cpu, tb->tc_ptr);
+    cpu_tb_exec(cpu, tb);
     cpu->current_tb = NULL;
     tb_phys_invalidate(tb, -1);
     tb_free(tb);
@@ -344,7 +352,6 @@ int cpu_exec(CPUState *cpu)
 #endif
     int ret, interrupt_request;
     TranslationBlock *tb;
-    uint8_t *tc_ptr;
     uintptr_t next_tb;
     SyncClocks sc;
 
@@ -500,10 +507,6 @@ int cpu_exec(CPUState *cpu)
                     next_tb = 0;
                     tcg_ctx.tb_ctx.tb_invalidated_flag = 0;
                 }
-                if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
-                    qemu_log("Trace %p [" TARGET_FMT_lx "] %s\n",
-                             tb->tc_ptr, tb->pc, lookup_symbol(tb->pc));
-                }
                 /* see if we can patch the calling TB. When the TB
                    spans two pages, we cannot safely do a direct
                    jump. */
@@ -515,10 +518,9 @@ int cpu_exec(CPUState *cpu)
                 tb_unlock();
                 if (likely(!cpu->exit_request)) {
                     trace_exec_tb(tb, tb->pc);
-                    tc_ptr = tb->tc_ptr;
                     /* execute the generated code */
                     cpu->current_tb = tb;
-                    next_tb = cpu_tb_exec(cpu, tc_ptr);
+                    next_tb = cpu_tb_exec(cpu, tb);
                     cpu->current_tb = NULL;
                     switch (next_tb & TB_EXIT_MASK) {
                     case TB_EXIT_REQUESTED:
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 05a151da4a..1823ee3c78 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -379,6 +379,9 @@ static inline void tb_add_jump(TranslationBlock *tb, int n,
 {
     /* NOTE: this test is only needed for thread safety */
     if (!tb->jmp_next[n]) {
+        qemu_log_mask(CPU_LOG_EXEC, "Linking TBs %p [" TARGET_FMT_lx
+                      "] index %d -> %p [" TARGET_FMT_lx "]\n",
+                      tb->tc_ptr, tb->pc, n, tb_next->tc_ptr, tb_next->pc);
         /* patch the native jump address */
         tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);
 
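
Usage note (not part of the patch): CPU_LOG_EXEC is the mask selected by the "exec" item of QEMU's -d logging option, so the Trace/Linking/Stopped-execution lines added above can be observed with an invocation along these lines, where the machine type, kernel image and log file name are placeholders chosen only for illustration:

    qemu-system-arm -M virt -kernel guest.bin -d exec -D exec.log

With -D omitted the messages go to stderr instead of a file.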