author     Richard Henderson <richard.henderson@linaro.org>   2019-12-07 14:36:01 -0800
committer  Richard Henderson <richard.henderson@linaro.org>   2020-01-21 14:21:59 -1000
commit     3c3959f2d9af919e562d37f6d40322a80a90469d
tree       5e0ffc2dd9b12141997dac1f8202581c7a140d82 /accel
parent     3c16304af4241f242eeacae646457b9720aa71db
cputlb: Hoist timestamp outside of loops over tlbs
Do not call get_clock_realtime() in tlb_mmu_resize_locked; instead,
hoist the call outside of any loop over a set of TLBs.  There are
only two (indirect) callers, tlb_flush_by_mmuidx_async_work and
tlb_flush_page_locked, so this is not onerous.
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
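For context, the pattern being applied is simply to read the clock once in the caller and pass the timestamp down to the per-mmuidx helpers, so the clock is no longer read inside the loop. A minimal standalone sketch of that pattern (hypothetical names, not QEMU code) might look like:

/* Minimal sketch of the hoisting pattern (hypothetical names, not QEMU code):
 * the caller reads the clock once and passes the timestamp down, so the
 * per-index helper no longer calls the clock inside the loop. */
#define _POSIX_C_SOURCE 199309L
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Stand-in for get_clock_realtime(): nanoseconds since the epoch. */
static int64_t clock_realtime_ns(void)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    return (int64_t)ts.tv_sec * 1000000000LL + ts.tv_nsec;
}

/* Per-index helper takes 'now' as a parameter instead of reading the clock. */
static void resize_one(int idx, int64_t now)
{
    printf("resize idx %d at %" PRId64 " ns\n", idx, now);
}

/* Caller hoists the clock read outside the loop over all indexes. */
static void flush_all(int n)
{
    int64_t now = clock_realtime_ns();   /* one clock read per flush */

    for (int idx = 0; idx < n; idx++) {
        resize_one(idx, now);
    }
}

int main(void)
{
    flush_all(4);
    return 0;
}

With the clock read hoisted, a flush over N indexes performs one clock read instead of N, which is the effect the patch below achieves for tlb_flush_by_mmuidx_async_work.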
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 5a35386224..e3b5750c3b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -137,12 +137,12 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
  * high), since otherwise we are likely to have a significant amount of
  * conflict misses.
  */
-static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
+static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
+                                  int64_t now)
 {
     size_t old_size = tlb_n_entries(fast);
     size_t rate;
     size_t new_size = old_size;
-    int64_t now = get_clock_realtime();
     int64_t window_len_ms = 100;
     int64_t window_len_ns = window_len_ms * 1000 * 1000;
     bool window_expired = now > desc->window_begin_ns + window_len_ns;
@@ -222,12 +222,13 @@ static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
     memset(desc->vtable, -1, sizeof(desc->vtable));
 }
 
-static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
+static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
+                                        int64_t now)
 {
     CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
     CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];
 
-    tlb_mmu_resize_locked(desc, fast);
+    tlb_mmu_resize_locked(desc, fast, now);
     tlb_mmu_flush_locked(desc, fast);
 }
 
@@ -310,6 +311,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
     CPUArchState *env = cpu->env_ptr;
     uint16_t asked = data.host_int;
     uint16_t all_dirty, work, to_clean;
+    int64_t now = get_clock_realtime();
 
     assert_cpu_is_self(cpu);
 
@@ -324,7 +326,7 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
 
     for (work = to_clean; work != 0; work &= work - 1) {
         int mmu_idx = ctz32(work);
-        tlb_flush_one_mmuidx_locked(env, mmu_idx);
+        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
     }
 
     qemu_spin_unlock(&env_tlb(env)->c.lock);
@@ -446,7 +448,7 @@ static void tlb_flush_page_locked(CPUArchState *env, int midx,
         tlb_debug("forcing full flush midx %d ("
                   TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                   midx, lp_addr, lp_mask);
-        tlb_flush_one_mmuidx_locked(env, midx);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
     } else {
         if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
             tlb_n_used_entries_dec(env, midx);