author     Sergey Fedorov <serge.fdrv@gmail.com>       2016-08-02 18:27:43 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>         2016-09-27 11:57:30 +0200
commit     3359baad36889b83df40b637ed993a4b816c4906
tree       534ac1be2e5bd5466404bb34ac2b7d52e89a0215    /translate-all.c
parent     53f5ed95064fe6807890cd5535445a05d3361bd2
tcg: Make tb_flush() thread safe
Use async_safe_run_on_cpu() to make tb_flush() thread safe. This is
possible now that code generation does not happen in the middle of
execution.
It can happen that multiple threads schedule safe work to flush the
translation buffer at the same time. To keep statistics and debugging output
sane, always check whether the translation buffer has already been flushed
before flushing it again.
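In outline, tb_flush() now snapshots the current flush count and defers the real work
through async_safe_run_on_cpu(); the deferred do_tb_flush() re-checks that count under
tb_lock() and bails out if another request has already flushed. Below is a minimal,
standalone sketch of that re-check-a-generation-count pattern. The names (do_flush,
flush_count) and the plain pthread mutex are illustrative stand-ins for the patch's
do_tb_flush(), tb_flush_count and tb_lock(), not QEMU APIs, and direct calls stand in
for the deferred execution that async_safe_run_on_cpu() provides.

/* Sketch: deduplicate concurrent flush requests by passing along the
 * generation count observed when the request was made, and re-checking
 * it under a lock before doing the work. */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned flush_count;            /* bumped once per real flush */

/* Deferred worker: only flush if nobody flushed since we were queued. */
static void do_flush(void *data)
{
    unsigned flush_req = (unsigned)(uintptr_t)data;

    pthread_mutex_lock(&lock);
    if (flush_count == flush_req) {     /* still the generation we saw */
        printf("flushing, generation %u\n", flush_count);
        /* ... the actual flush work would go here ... */
        flush_count++;
    }                                   /* else: another request already did it */
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    /* Two requests queued against the same observed generation: the first
     * one flushes, the second sees the bumped count and becomes a no-op. */
    unsigned req = flush_count;

    do_flush((void *)(uintptr_t)req);
    do_flush((void *)(uintptr_t)req);
    return 0;
}

Carrying the snapshot inside the work item means a request that loses the race degrades
into a no-op instead of flushing (and counting) a second time, which is what keeps the
flush statistics and debug output sane when several vCPUs ask for a flush at once.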
Signed-off-by: Sergey Fedorov <serge.fdrv@gmail.com>
Signed-off-by: Sergey Fedorov <sergey.fedorov@linaro.org>
[AJB: minor re-base fixes]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Message-Id: <1470158864-17651-13-git-send-email-alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <rth@twiddle.net>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'translate-all.c')
-rw-r--r--   translate-all.c   38
1 file changed, 28 insertions, 10 deletions
diff --git a/translate-all.c b/translate-all.c
index e9bc90c654..8ca393c9d0 100644
--- a/translate-all.c
+++ b/translate-all.c
@@ -834,12 +834,19 @@ static void page_flush_tb(void)
 }
 
 /* flush all the translation blocks */
-/* XXX: tb_flush is currently not thread safe */
-void tb_flush(CPUState *cpu)
+static void do_tb_flush(CPUState *cpu, void *data)
 {
-    if (!tcg_enabled()) {
-        return;
+    unsigned tb_flush_req = (unsigned) (uintptr_t) data;
+
+    tb_lock();
+
+    /* If it's already been done on request of another CPU,
+     * just retry.
+     */
+    if (tcg_ctx.tb_ctx.tb_flush_count != tb_flush_req) {
+        goto done;
     }
+
 #if defined(DEBUG_FLUSH)
     printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
            (unsigned long)(tcg_ctx.code_gen_ptr - tcg_ctx.code_gen_buffer),
@@ -858,7 +865,6 @@ void tb_flush(CPUState *cpu)
         for (i = 0; i < TB_JMP_CACHE_SIZE; ++i) {
             atomic_set(&cpu->tb_jmp_cache[i], NULL);
         }
-        atomic_mb_set(&cpu->tb_flushed, true);
     }
 
     tcg_ctx.tb_ctx.nb_tbs = 0;
@@ -868,7 +874,19 @@ void tb_flush(CPUState *cpu)
     tcg_ctx.code_gen_ptr = tcg_ctx.code_gen_buffer;
     /* XXX: flush processor icache at this point if cache flush is
        expensive */
-    tcg_ctx.tb_ctx.tb_flush_count++;
+    atomic_mb_set(&tcg_ctx.tb_ctx.tb_flush_count,
+                  tcg_ctx.tb_ctx.tb_flush_count + 1);
+
+done:
+    tb_unlock();
+}
+
+void tb_flush(CPUState *cpu)
+{
+    if (tcg_enabled()) {
+        uintptr_t tb_flush_req = atomic_mb_read(&tcg_ctx.tb_ctx.tb_flush_count);
+        async_safe_run_on_cpu(cpu, do_tb_flush, (void *) tb_flush_req);
+    }
 }
 
 #ifdef DEBUG_TB_CHECK
@@ -1175,9 +1193,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
  buffer_overflow:
         /* flush must be done */
         tb_flush(cpu);
-        /* cannot fail at this point */
-        tb = tb_alloc(pc);
-        assert(tb != NULL);
+        mmap_unlock();
+        cpu_loop_exit(cpu);
     }
 
     gen_code_buf = tcg_ctx.code_gen_ptr;
@@ -1775,7 +1792,8 @@ void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
     qht_statistics_destroy(&hst);
 
     cpu_fprintf(f, "\nStatistics:\n");
-    cpu_fprintf(f, "TB flush count      %d\n", tcg_ctx.tb_ctx.tb_flush_count);
+    cpu_fprintf(f, "TB flush count      %u\n",
+                atomic_read(&tcg_ctx.tb_ctx.tb_flush_count));
     cpu_fprintf(f, "TB invalidate count %d\n",
                 tcg_ctx.tb_ctx.tb_phys_invalidate_count);
     cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);