author     Nicholas Piggin <npiggin@gmail.com>    2024-03-27 00:18:14 +1000
committer  Nicholas Piggin <npiggin@gmail.com>    2024-05-24 08:57:50 +1000
commit     30933c4fb4f3df95ae44c4c3c86a5df049852c01
tree       e5d311497b3eb11fa1ccca5db1487fea8b5031c4 /accel/tcg
parent     99cd12ced16d15a1ffde055f842497747f070f91
tcg/cputlb: remove other-cpu capability from TLB flushing
Some TLB flush operations can flush other CPUs. The problem with this
is that they use the non-synced variants of the flushes, i.e., variants
that return before the destination CPU has completed the flush. Since
all users that flush other CPUs need the _synced variants, and the last
user (ppc) of the non-synced flush was buggy, this is a footgun waiting
to go off. There do not seem to be any callers left that flush other
CPUs, so remove the capability.
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
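
For context, here is a minimal sketch (not part of this patch; the helper
name demo_tlb_shootdown is made up) of what the new contract means for
callers: local flushes must run on the vCPU's own thread, and anything
that needs to invalidate other vCPUs' TLBs is expected to use the existing
_all_cpus_synced variants rather than the removed cross-CPU path.

/*
 * Illustrative sketch only, not part of the patch; demo_tlb_shootdown()
 * is a made-up helper.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"

static void demo_tlb_shootdown(CPUState *cs, uint16_t idxmap)
{
    /* Local flush: runs inline and now asserts that we are on cs's thread. */
    tlb_flush_by_mmuidx(cs, idxmap);

    /*
     * Cross-CPU flush: schedules the flush as synced work on every vCPU,
     * so the caller does not need its own async_run_on_cpu() plumbing.
     */
    tlb_flush_by_mmuidx_all_cpus_synced(cs, idxmap);
}

Since tlb_flush() and tlb_flush_page() are thin wrappers around the
by_mmuidx variants, they inherit the same calling restriction.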
Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cputlb.c | 42
1 file changed, 9 insertions(+), 33 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 45799869eb..117b516739 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -418,12 +418,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
 {
     tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);
 
-    if (cpu->created && !qemu_cpu_is_self(cpu)) {
-        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
-                         RUN_ON_CPU_HOST_INT(idxmap));
-    } else {
-        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
-    }
+    assert_cpu_is_self(cpu);
+
+    tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
 }
 
 void tlb_flush(CPUState *cpu)
@@ -612,28 +609,12 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap)
 {
     tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap);
 
+    assert_cpu_is_self(cpu);
+
     /* This should already be page aligned */
     addr &= TARGET_PAGE_MASK;
 
-    if (qemu_cpu_is_self(cpu)) {
-        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
-    } else if (idxmap < TARGET_PAGE_SIZE) {
-        /*
-         * Most targets have only a few mmu_idx. In the case where
-         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
-         * allocating memory for this operation.
-         */
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
-                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
-    } else {
-        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);
-
-        /* Otherwise allocate a structure, freed by the worker. */
-        d->addr = addr;
-        d->idxmap = idxmap;
-        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
-                         RUN_ON_CPU_HOST_PTR(d));
-    }
+    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
 }
 
 void tlb_flush_page(CPUState *cpu, vaddr addr)
@@ -796,6 +777,8 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
 {
     TLBFlushRangeData d;
 
+    assert_cpu_is_self(cpu);
+
     /*
      * If all bits are significant, and len is small,
      * this devolves to tlb_flush_page.
@@ -816,14 +799,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
     d.idxmap = idxmap;
     d.bits = bits;
 
-    if (qemu_cpu_is_self(cpu)) {
-        tlb_flush_range_by_mmuidx_async_0(cpu, d);
-    } else {
-        /* Otherwise allocate a structure, freed by the worker. */
-        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
-        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
-                         RUN_ON_CPU_HOST_PTR(p));
-    }
+    tlb_flush_range_by_mmuidx_async_0(cpu, d);
 }
 
 void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
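
For reference, the assert_cpu_is_self() check added above is, at the time
of this series, roughly the following debug-gated macro in
accel/tcg/cputlb.c, so the new restriction is only enforced at runtime in
builds with DEBUG_TLB defined:

/* Roughly the existing definition in accel/tcg/cputlb.c (debug-gated). */
#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
#else
# define DEBUG_TLB_GATE 0
#endif

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)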