author    | Richard Henderson <richard.henderson@linaro.org> | 2022-12-24 04:35:22 -0800
committer | Richard Henderson <richard.henderson@linaro.org> | 2023-01-05 11:41:29 -0800
commit    | 1b660f42efd721e4bcb5548907ba8d1370053318
tree      | 623e24fe630a7674f75f03da675286aa2133e198 /accel
parent    | cee44b037b7e40c74401f7a87bef32f6680483c7
accel/tcg: Fix tb_invalidate_phys_page_unwind
When called from syscall(), we are not within a TB and pc == 0.
We can skip the check for invalidating the current TB.
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r-- | accel/tcg/tb-maint.c | 72
1 file changed, 40 insertions(+), 32 deletions(-)
diff --git a/accel/tcg/tb-maint.c b/accel/tcg/tb-maint.c
index 1b8e860647..b3d6529ae2 100644
--- a/accel/tcg/tb-maint.c
+++ b/accel/tcg/tb-maint.c
@@ -1024,43 +1024,51 @@ void tb_invalidate_phys_page(tb_page_addr_t addr)
  */
 bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
 {
-    assert(pc != 0);
-#ifdef TARGET_HAS_PRECISE_SMC
-    assert_memory_lock();
-    {
-        TranslationBlock *current_tb = tcg_tb_lookup(pc);
-        bool current_tb_modified = false;
-        TranslationBlock *tb;
-        PageForEachNext n;
+    TranslationBlock *current_tb;
+    bool current_tb_modified;
+    TranslationBlock *tb;
+    PageForEachNext n;
 
-        addr &= TARGET_PAGE_MASK;
+    /*
+     * Without precise smc semantics, or when outside of a TB,
+     * we can skip to invalidate.
+     */
+#ifndef TARGET_HAS_PRECISE_SMC
+    pc = 0;
+#endif
+    if (!pc) {
+        tb_invalidate_phys_page(addr);
+        return false;
+    }
 
-        PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
-            if (current_tb == tb &&
-                (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
-                /*
-                 * If we are modifying the current TB, we must stop its
-                 * execution. We could be more precise by checking that
-                 * the modification is after the current PC, but it would
-                 * require a specialized function to partially restore
-                 * the CPU state.
-                 */
-                current_tb_modified = true;
-                cpu_restore_state_from_tb(current_cpu, current_tb, pc);
-            }
-            tb_phys_invalidate__locked(tb);
+    assert_memory_lock();
+    current_tb = tcg_tb_lookup(pc);
+
+    addr &= TARGET_PAGE_MASK;
+    current_tb_modified = false;
+
+    PAGE_FOR_EACH_TB(addr, addr + TARGET_PAGE_SIZE, unused, tb, n) {
+        if (current_tb == tb &&
+            (tb_cflags(current_tb) & CF_COUNT_MASK) != 1) {
+            /*
+             * If we are modifying the current TB, we must stop its
+             * execution. We could be more precise by checking that
+             * the modification is after the current PC, but it would
+             * require a specialized function to partially restore
+             * the CPU state.
+             */
+            current_tb_modified = true;
+            cpu_restore_state_from_tb(current_cpu, current_tb, pc);
         }
+        tb_phys_invalidate__locked(tb);
+    }
 
-        if (current_tb_modified) {
-            /* Force execution of one insn next time. */
-            CPUState *cpu = current_cpu;
-            cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
-            return true;
-        }
+    if (current_tb_modified) {
+        /* Force execution of one insn next time. */
+        CPUState *cpu = current_cpu;
+        cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(current_cpu);
+        return true;
     }
-#else
-    tb_invalidate_phys_page(addr);
-#endif /* TARGET_HAS_PRECISE_SMC */
     return false;
 }
 #else
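
As a note on the reasoning in the commit message: with this change, a write performed from syscall emulation (no current TB, so pc is 0) simply invalidates the page and returns false, while a write coming from translated code still goes through the precise-SMC lookup and may restart the current TB. The standalone sketch below models that control flow only; it is illustrative, and the helper names and types are invented stand-ins, not QEMU's actual API.

```c
/*
 * Minimal standalone model of the new control flow (illustrative only,
 * not QEMU code).  The point of the fix: pc == 0 (a write done from
 * syscall emulation, outside any TB) must not trip an assertion and can
 * take the simple "invalidate the page" path with no TB lookup at all.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uintptr_t tb_page_addr_t;

/* Stub standing in for the plain page invalidation. */
static void invalidate_whole_page(tb_page_addr_t addr)
{
    printf("invalidate page containing 0x%lx (no unwind needed)\n",
           (unsigned long)addr);
}

/* Stub standing in for the precise-SMC path that may unwind. */
static bool invalidate_with_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    printf("look up TB at pc=0x%lx, maybe restart the current TB\n",
           (unsigned long)pc);
    invalidate_whole_page(addr);
    return false;   /* true only if the currently executing TB was hit */
}

static bool invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc)
{
    /* After the fix: pc == 0 skips the current-TB check entirely. */
    if (pc == 0) {
        invalidate_whole_page(addr);
        return false;
    }
    return invalidate_with_unwind(addr, pc);
}

int main(void)
{
    invalidate_phys_page_unwind(0x1000, 0);       /* syscall case: pc == 0 */
    invalidate_phys_page_unwind(0x1000, 0x4321);  /* self-modifying code case */
    return 0;
}
```

Running the sketch shows the two paths: the pc == 0 call never reaches the TB lookup, which is why the removed assert(pc != 0) was wrong for the syscall case.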