author | Richard Henderson <richard.henderson@linaro.org> | 2021-03-01 19:21:08 -0800
---|---|---
committer | Richard Henderson <richard.henderson@linaro.org> | 2021-03-06 11:53:57 -0800
commit | 6cc9d67c6f682cf04eea2d6e64a252b63a7eccdf (patch) |
tree | f4a70ec6c66bcb37242891c9b544508b78f5a726 /accel |
parent | 872ebd884dd68ecef4c6f9f86c5da519f18bd31e (diff) |
accel/tcg: Precompute curr_cflags into cpu->tcg_cflags
The primary motivation is to remove a dozen insns along
the fast-path in tb_lookup. As a byproduct, this allows
us to completely remove parallel_cpus.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
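Editor's note (not part of the commit message): the change to `curr_cflags()` itself lives in a header outside `accel/` and is therefore not visible in the accel-limited diffstat below. As a rough sketch of the before/after shape implied by the commit title and by the new `tcg_cpu_init_cflags()` helper (assuming QEMU's internal headers are in scope; names with `_before`/`_after` suffixes are illustrative, not upstream identifiers):

```c
/* Sketch only -- the real curr_cflags() is defined outside accel/
 * and is not part of this (accel-limited) diff. */

/* Before this series: flags recomputed from globals on every TB lookup. */
static inline uint32_t curr_cflags_before(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (icount_enabled() ? CF_USE_ICOUNT : 0);
}

/* After this patch: the value is computed once per vCPU at thread start
 * (see tcg_cpu_init_cflags() in the diff below) and simply read here. */
static inline uint32_t curr_cflags_after(CPUState *cpu)
{
    return cpu->tcg_cflags;
}
```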
Diffstat (limited to 'accel')
-rw-r--r-- | accel/tcg/cpu-exec.c | 3
-rw-r--r-- | accel/tcg/tcg-accel-ops-mttcg.c | 3
-rw-r--r-- | accel/tcg/tcg-accel-ops-rr.c | 2
-rw-r--r-- | accel/tcg/tcg-accel-ops.c | 8
-rw-r--r-- | accel/tcg/tcg-accel-ops.h | 1
-rw-r--r-- | accel/tcg/translate-all.c | 4
6 files changed, 11 insertions, 10 deletions
```diff
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 931da96c2b..bdfa036ac8 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -267,8 +267,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
             mmap_unlock();
         }
 
-        /* Since we got here, we know that parallel_cpus must be true. */
-        parallel_cpus = false;
         cpu_exec_enter(cpu);
         /* execute the generated code */
         trace_exec_tb(tb, pc);
@@ -296,7 +294,6 @@ void cpu_exec_step_atomic(CPUState *cpu)
      * the execution.
      */
     g_assert(cpu_in_exclusive_context(cpu));
-    parallel_cpus = true;
     cpu->running = false;
     end_exclusive();
 }
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index 42973fb062..847d2079d2 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -114,8 +114,7 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
     char thread_name[VCPU_THREAD_NAME_SIZE];
 
     g_assert(tcg_enabled());
-
-    parallel_cpus = (current_machine->smp.max_cpus > 1);
+    tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);
 
     cpu->thread = g_malloc0(sizeof(QemuThread));
     cpu->halt_cond = g_malloc0(sizeof(QemuCond));
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 4a66055e0d..018b54c508 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -269,7 +269,7 @@ void rr_start_vcpu_thread(CPUState *cpu)
     static QemuThread *single_tcg_cpu_thread;
 
     g_assert(tcg_enabled());
-    parallel_cpus = false;
+    tcg_cpu_init_cflags(cpu, false);
 
     if (!single_tcg_cpu_thread) {
         cpu->thread = g_malloc0(sizeof(QemuThread));
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 6144d9df87..6cdcaa2855 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -41,6 +41,14 @@
 
 /* common functionality among all TCG variants */
 
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
+{
+    uint32_t cflags = cpu->cluster_index << CF_CLUSTER_SHIFT;
+    cflags |= parallel ? CF_PARALLEL : 0;
+    cflags |= icount_enabled() ? CF_USE_ICOUNT : 0;
+    cpu->tcg_cflags = cflags;
+}
+
 void tcg_cpus_destroy(CPUState *cpu)
 {
     cpu_thread_signal_destroyed(cpu);
diff --git a/accel/tcg/tcg-accel-ops.h b/accel/tcg/tcg-accel-ops.h
index 48130006de..6a5fcef889 100644
--- a/accel/tcg/tcg-accel-ops.h
+++ b/accel/tcg/tcg-accel-ops.h
@@ -17,5 +17,6 @@
 void tcg_cpus_destroy(CPUState *cpu);
 int tcg_cpus_exec(CPUState *cpu);
 void tcg_handle_interrupt(CPUState *cpu, int mask);
+void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);
 
 #endif /* TCG_CPUS_H */
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 0b0bfd35ab..f32df8b240 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -224,7 +224,6 @@ static void *l1_map[V_L1_MAX_SIZE];
 TCGContext tcg_init_ctx;
 __thread TCGContext *tcg_ctx;
 TBContext tb_ctx;
-bool parallel_cpus;
 
 static void page_table_config_init(void)
 {
@@ -1867,9 +1866,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
         cflags = (cflags & ~CF_COUNT_MASK) | 1;
     }
 
-    cflags &= ~CF_CLUSTER_MASK;
-    cflags |= cpu->cluster_index << CF_CLUSTER_SHIFT;
-
     max_insns = cflags & CF_COUNT_MASK;
     if (max_insns == 0) {
         max_insns = CF_COUNT_MASK;
```
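A note on the cpu-exec.c hunks above: the removed lines used to flip the global `parallel_cpus` off and back on around the exclusive single-step. With cflags now precomputed per vCPU and carried per TB, the same effect can be expressed purely through the cflags requested for that one translation. The sketch below only illustrates that idea; `step_atomic_cflags()` is a hypothetical helper, not the upstream replacement, which is spread across other patches in this series and not visible in this accel-limited diff.

```c
/*
 * Hypothetical helper (not in the patch): build the cflags an exclusive
 * single-step would request, instead of toggling a global flag.
 */
static uint32_t step_atomic_cflags(CPUState *cpu)
{
    /* Start from the per-vCPU value set by tcg_cpu_init_cflags(). */
    uint32_t cflags = cpu->tcg_cflags;

    /* The atomic step must execute as if single-threaded. */
    cflags &= ~CF_PARALLEL;

    /*
     * The real code additionally bounds the translation's instruction
     * count via the CF_COUNT bits; that detail is omitted here.
     */
    return cflags;
}
```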