author     Richard Henderson <richard.henderson@linaro.org>    2019-12-07 13:22:19 -0800
committer  Richard Henderson <richard.henderson@linaro.org>    2020-01-21 14:21:59 -1000
commit     56e89f76fdf0dc8162e28105055570a83a93b15e (patch)
tree       9a6653875e24cafefbd958da8c2ab8a5ea33725c /accel
parent     bbf021b04a57e95b6b4fde882b33c5363f94373f (diff)
cputlb: Partially merge tlb_dyn_init into tlb_init
Merge into the only caller, but at the same time split out
tlb_mmu_init to initialize a single tlb entry.

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
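For readers skimming past the diff below, the shape of the change reduces to a minimal, self-contained C sketch. The struct fields, constants, and function names here are simplified stand-ins, not QEMU's actual types (QEMU uses CPUTLBDesc, CPUTLBDescFast, CPU_TLB_DYN_DEFAULT_BITS, and CPU_TLB_ENTRY_BITS):

    #include <stdint.h>
    #include <stdlib.h>

    /* Simplified stand-ins for QEMU's per-mmu-index TLB bookkeeping. */
    #define NB_MMU_MODES          4
    #define TLB_DYN_DEFAULT_BITS  8   /* stand-in for CPU_TLB_DYN_DEFAULT_BITS */
    #define TLB_ENTRY_BITS        5   /* stand-in for CPU_TLB_ENTRY_BITS */

    typedef struct {
        int64_t window_begin_ns;
        size_t window_max_entries;
        size_t n_used_entries;
        void **iotlb;
    } Desc;

    typedef struct {
        uintptr_t mask;
        void *table;
    } DescFast;

    /* After the patch: initialize exactly one MMU index.  The caller owns
     * the loop and passes in a single timestamp, so the clock is read once
     * rather than once per index as the old tlb_dyn_init did. */
    static void mmu_init_one(Desc *desc, DescFast *fast, int64_t now)
    {
        size_t n_entries = 1 << TLB_DYN_DEFAULT_BITS;

        desc->window_begin_ns = now;   /* what tlb_window_reset(desc, now, 0) does */
        desc->window_max_entries = 0;
        desc->n_used_entries = 0;
        fast->mask = (n_entries - 1) << TLB_ENTRY_BITS;
        fast->table = calloc(n_entries, 1 << TLB_ENTRY_BITS);
        desc->iotlb = calloc(n_entries, sizeof(void *));
    }

    void init_all(Desc d[NB_MMU_MODES], DescFast f[NB_MMU_MODES], int64_t now)
    {
        for (int i = 0; i < NB_MMU_MODES; i++) {
            mmu_init_one(&d[i], &f[i], now);   /* the loop now lives in the caller */
        }
    }

Beyond removing the duplicated loop, hoisting the clock read means all NB_MMU_MODES windows start from the same timestamp, and (presumably the point of the split) a single-index initializer can later be reused by per-mmuidx flush or resize paths without touching the other indices.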
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 28cfff1556..360495468e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -97,22 +97,6 @@ static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
     desc->window_max_entries = max_entries;
 }
 
-static void tlb_dyn_init(CPUArchState *env)
-{
-    int i;
-
-    for (i = 0; i < NB_MMU_MODES; i++) {
-        CPUTLBDesc *desc = &env_tlb(env)->d[i];
-        size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
-
-        tlb_window_reset(desc, get_clock_realtime(), 0);
-        desc->n_used_entries = 0;
-        env_tlb(env)->f[i].mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
-        env_tlb(env)->f[i].table = g_new(CPUTLBEntry, n_entries);
-        env_tlb(env)->d[i].iotlb = g_new(CPUIOTLBEntry, n_entries);
-    }
-}
-
 /**
  * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
  * @desc: The CPUTLBDesc portion of the TLB
@@ -247,6 +231,17 @@ static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx)
     tlb_mmu_flush_locked(desc, fast);
 }
 
+static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
+{
+    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;
+
+    tlb_window_reset(desc, now, 0);
+    desc->n_used_entries = 0;
+    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
+    fast->table = g_new(CPUTLBEntry, n_entries);
+    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
+}
+
 static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
 {
     env_tlb(env)->d[mmu_idx].n_used_entries++;
@@ -260,13 +255,17 @@ static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
 void tlb_init(CPUState *cpu)
 {
     CPUArchState *env = cpu->env_ptr;
+    int64_t now = get_clock_realtime();
+    int i;
 
     qemu_spin_init(&env_tlb(env)->c.lock);
 
     /* Ensure that cpu_reset performs a full flush. */
     env_tlb(env)->c.dirty = ALL_MMUIDX_BITS;
 
-    tlb_dyn_init(env);
+    for (i = 0; i < NB_MMU_MODES; i++) {
+        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
+    }
 }
 
 /* flush_all_helper: run fn across all cpus