author     Richard Henderson <richard.henderson@linaro.org>  2020-10-16 14:07:53 -0700
committer  Peter Maydell <peter.maydell@linaro.org>          2020-10-20 16:12:00 +0100
commit     3ab6e68cd035de244d9bf999900349a69939ad41
tree       8e46f33644cf8b694277a2993f01c10fd7040fe5 /accel
parent     722bde6789c55f9f872026f796ecabecbec5d82b
accel/tcg: Add tlb_flush_page_bits_by_mmuidx*
On ARM, the Top Byte Ignore feature means that only the low 56 bits
of a virtual address are significant. We are
required to give the entire 64-bit address to FAR_ELx on fault,
which means that we do not "clean" the top byte early in TCG.
This new interface allows us to flush all 256 possible aliases
for a given page, currently missed by tlb_flush_page*.
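
For illustration only: with TBI enabled, a caller that knows only the low
56 bits of a guest virtual address are significant can drop every top-byte
alias of a page with one call to the new interface. The helper name,
CPUState and mmuidx bitmap below are placeholders for this sketch, not code
from this patch, and the target/arm callers are wired up separately.

    /* Hypothetical caller, not from this patch: flush a TBI-tagged page. */
    static void example_flush_tagged_page(CPUState *cs, uint64_t va,
                                          uint16_t idxmap)
    {
        /* Only bits [55:0] are significant, so all 2^8 = 256 top-byte
         * aliases of this page are flushed by the one call. */
        tlb_flush_page_bits_by_mmuidx(cs, va, idxmap, 56);
    }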
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 20201016210754.818257-2-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c | 275
1 file changed, 266 insertions(+), 9 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2bbbb3ab29..42ab79c1a5 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -409,12 +409,21 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu)
     tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
 }
 
+static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
+                                      target_ulong page, target_ulong mask)
+{
+    page &= mask;
+    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;
+
+    return (page == (tlb_entry->addr_read & mask) ||
+            page == (tlb_addr_write(tlb_entry) & mask) ||
+            page == (tlb_entry->addr_code & mask));
+}
+
 static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                         target_ulong page)
 {
-    return tlb_hit_page(tlb_entry->addr_read, page) ||
-           tlb_hit_page(tlb_addr_write(tlb_entry), page) ||
-           tlb_hit_page(tlb_entry->addr_code, page);
+    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
 }
 
 /**
@@ -427,31 +436,45 @@ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
 }
 
 /* Called with tlb_c.lock held */
-static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
-                                          target_ulong page)
+static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
+                                        target_ulong page,
+                                        target_ulong mask)
 {
-    if (tlb_hit_page_anyprot(tlb_entry, page)) {
+    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
         memset(tlb_entry, -1, sizeof(*tlb_entry));
         return true;
     }
     return false;
 }
 
+static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
+                                          target_ulong page)
+{
+    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
+}
+
 /* Called with tlb_c.lock held */
-static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
-                                              target_ulong page)
+static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
+                                            target_ulong page,
+                                            target_ulong mask)
 {
     CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
     int k;
 
     assert_cpu_is_self(env_cpu(env));
     for (k = 0; k < CPU_VTLB_SIZE; k++) {
-        if (tlb_flush_entry_locked(&d->vtable[k], page)) {
+        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
             tlb_n_used_entries_dec(env, mmu_idx);
         }
     }
 }
 
+static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
+                                              target_ulong page)
+{
+    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
+}
+
 static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                   target_ulong page)
 {
@@ -666,6 +689,240 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }
 
+static void tlb_flush_page_bits_locked(CPUArchState *env, int midx,
+                                       target_ulong page, unsigned bits)
+{
+    CPUTLBDesc *d = &env_tlb(env)->d[midx];
+    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
+    target_ulong mask = MAKE_64BIT_MASK(0, bits);
+
+    /*
+     * If @bits is smaller than the tlb size, there may be multiple entries
+     * within the TLB; otherwise all addresses that match under @mask hit
+     * the same TLB entry.
+     *
+     * TODO: Perhaps allow bits to be a few bits less than the size.
+     * For now, just flush the entire TLB.
+     */
+    if (mask < f->mask) {
+        tlb_debug("forcing full flush midx %d ("
+                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+                  midx, page, mask);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+        return;
+    }
+
+    /* Check if we need to flush due to large pages.  */
+    if ((page & d->large_page_mask) == d->large_page_addr) {
+        tlb_debug("forcing full flush midx %d ("
+                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+                  midx, d->large_page_addr, d->large_page_mask);
+        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
+        return;
+    }
+
+    if (tlb_flush_entry_mask_locked(tlb_entry(env, midx, page), page, mask)) {
+        tlb_n_used_entries_dec(env, midx);
+    }
+    tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
+}
+
+typedef struct {
+    target_ulong addr;
+    uint16_t idxmap;
+    uint16_t bits;
+} TLBFlushPageBitsByMMUIdxData;
+
+static void
+tlb_flush_page_bits_by_mmuidx_async_0(CPUState *cpu,
+                                      TLBFlushPageBitsByMMUIdxData d)
+{
+    CPUArchState *env = cpu->env_ptr;
+    int mmu_idx;
+
+    assert_cpu_is_self(cpu);
+
+    tlb_debug("page addr:" TARGET_FMT_lx "/%u mmu_map:0x%x\n",
+              d.addr, d.bits, d.idxmap);
+
+    qemu_spin_lock(&env_tlb(env)->c.lock);
+    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
+        if ((d.idxmap >> mmu_idx) & 1) {
+            tlb_flush_page_bits_locked(env, mmu_idx, d.addr, d.bits);
+        }
+    }
+    qemu_spin_unlock(&env_tlb(env)->c.lock);
+
+    tb_flush_jmp_cache(cpu, d.addr);
+}
+
+static bool encode_pbm_to_runon(run_on_cpu_data *out,
+                                TLBFlushPageBitsByMMUIdxData d)
+{
+    /* We need 6 bits to hold to hold @bits up to 63. */
+    if (d.idxmap <= MAKE_64BIT_MASK(0, TARGET_PAGE_BITS - 6)) {
+        *out = RUN_ON_CPU_TARGET_PTR(d.addr | (d.idxmap << 6) | d.bits);
+        return true;
+    }
+    return false;
+}
+
+static TLBFlushPageBitsByMMUIdxData
+decode_runon_to_pbm(run_on_cpu_data data)
+{
+    target_ulong addr_map_bits = (target_ulong) data.target_ptr;
+    return (TLBFlushPageBitsByMMUIdxData){
+        .addr = addr_map_bits & TARGET_PAGE_MASK,
+        .idxmap = (addr_map_bits & ~TARGET_PAGE_MASK) >> 6,
+        .bits = addr_map_bits & 0x3f
+    };
+}
+
+static void tlb_flush_page_bits_by_mmuidx_async_1(CPUState *cpu,
+                                                  run_on_cpu_data runon)
+{
+    tlb_flush_page_bits_by_mmuidx_async_0(cpu, decode_runon_to_pbm(runon));
+}
+
+static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
+                                                  run_on_cpu_data data)
+{
+    TLBFlushPageBitsByMMUIdxData *d = data.host_ptr;
+    tlb_flush_page_bits_by_mmuidx_async_0(cpu, *d);
+    g_free(d);
+}
+
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+                                   uint16_t idxmap, unsigned bits)
+{
+    TLBFlushPageBitsByMMUIdxData d;
+    run_on_cpu_data runon;
+
+    /* If all bits are significant, this devolves to tlb_flush_page. */
+    if (bits >= TARGET_LONG_BITS) {
+        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
+        return;
+    }
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx(cpu, idxmap);
+        return;
+    }
+
+    /* This should already be page aligned */
+    d.addr = addr & TARGET_PAGE_MASK;
+    d.idxmap = idxmap;
+    d.bits = bits;
+
+    if (qemu_cpu_is_self(cpu)) {
+        tlb_flush_page_bits_by_mmuidx_async_0(cpu, d);
+    } else if (encode_pbm_to_runon(&runon, d)) {
+        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
+    } else {
+        TLBFlushPageBitsByMMUIdxData *p
+            = g_new(TLBFlushPageBitsByMMUIdxData, 1);
+
+        /* Otherwise allocate a structure, freed by the worker. */
+        *p = d;
+        async_run_on_cpu(cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+                         RUN_ON_CPU_HOST_PTR(p));
+    }
+}
+
+void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
+                                            target_ulong addr,
+                                            uint16_t idxmap,
+                                            unsigned bits)
+{
+    TLBFlushPageBitsByMMUIdxData d;
+    run_on_cpu_data runon;
+
+    /* If all bits are significant, this devolves to tlb_flush_page. */
+    if (bits >= TARGET_LONG_BITS) {
+        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
+        return;
+    }
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
+        return;
+    }
+
+    /* This should already be page aligned */
+    d.addr = addr & TARGET_PAGE_MASK;
+    d.idxmap = idxmap;
+    d.bits = bits;
+
+    if (encode_pbm_to_runon(&runon, d)) {
+        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
+    } else {
+        CPUState *dst_cpu;
+        TLBFlushPageBitsByMMUIdxData *p;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
+                *p = d;
+                async_run_on_cpu(dst_cpu,
+                                 tlb_flush_page_bits_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(p));
+            }
+        }
+    }
+
+    tlb_flush_page_bits_by_mmuidx_async_0(src_cpu, d);
+}
+
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                                   target_ulong addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits)
+{
+    TLBFlushPageBitsByMMUIdxData d;
+    run_on_cpu_data runon;
+
+    /* If all bits are significant, this devolves to tlb_flush_page. */
+    if (bits >= TARGET_LONG_BITS) {
+        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
+        return;
+    }
+    /* If no page bits are significant, this devolves to tlb_flush. */
+    if (bits < TARGET_PAGE_BITS) {
+        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
+        return;
+    }
+
+    /* This should already be page aligned */
+    d.addr = addr & TARGET_PAGE_MASK;
+    d.idxmap = idxmap;
+    d.bits = bits;
+
+    if (encode_pbm_to_runon(&runon, d)) {
+        flush_all_helper(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1, runon);
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_1,
+                              runon);
+    } else {
+        CPUState *dst_cpu;
+        TLBFlushPageBitsByMMUIdxData *p;
+
+        /* Allocate a separate data block for each destination cpu. */
+        CPU_FOREACH(dst_cpu) {
+            if (dst_cpu != src_cpu) {
+                p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
+                *p = d;
+                async_run_on_cpu(dst_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+                                 RUN_ON_CPU_HOST_PTR(p));
+            }
+        }
+
+        p = g_new(TLBFlushPageBitsByMMUIdxData, 1);
+        *p = d;
+        async_safe_run_on_cpu(src_cpu, tlb_flush_page_bits_by_mmuidx_async_2,
+                              RUN_ON_CPU_HOST_PTR(p));
+    }
+}
+
 /* update the TLBs so that writes to code in the virtual
    page 'addr' can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
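
The run_on_cpu_data packing used by encode_pbm_to_runon() and
decode_runon_to_pbm() above can be exercised on its own: a page-aligned
address leaves TARGET_PAGE_BITS low bits free, 6 of which carry @bits and
the rest @idxmap. A minimal standalone sketch follows; TARGET_PAGE_BITS = 12,
a 64-bit address, and the sample values are assumptions made for this
example, not taken from the patch.

    /* Standalone sketch of the addr | (idxmap << 6) | bits packing. */
    #include <assert.h>
    #include <inttypes.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12                         /* assumed for the example */
    #define TARGET_PAGE_MASK ((uint64_t)-1 << TARGET_PAGE_BITS)

    int main(void)
    {
        uint64_t addr = 0x0000ffffdeadb000ULL;          /* page-aligned address */
        uint16_t idxmap = 0x3;                          /* two MMU indexes */
        uint16_t bits = 56;                             /* significant address bits */

        /* Encode: mirrors encode_pbm_to_runon(); only legal when idxmap
         * fits in TARGET_PAGE_BITS - 6 bits. */
        assert(idxmap <= ((1u << (TARGET_PAGE_BITS - 6)) - 1));
        uint64_t packed = addr | ((uint64_t)idxmap << 6) | bits;

        /* Decode: mirrors decode_runon_to_pbm(). */
        uint64_t d_addr = packed & TARGET_PAGE_MASK;
        uint16_t d_idxmap = (packed & ~TARGET_PAGE_MASK) >> 6;
        uint16_t d_bits = packed & 0x3f;

        assert(d_addr == addr && d_idxmap == idxmap && d_bits == bits);
        printf("addr=0x%" PRIx64 " idxmap=0x%x bits=%u\n",
               d_addr, (unsigned)d_idxmap, (unsigned)d_bits);
        return 0;
    }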