From 60a2ad7d86e7379e6669806bedaa6cfdf4f2c2f4 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Sat, 20 Oct 2018 13:54:46 -0700
Subject: cputlb: Move cpu->pending_tlb_flush to env->tlb_c.pending_flush

Protect it with the tlb_lock instead of using atomics.  The move
puts it in or near the same cacheline as the lock; using the lock
means we don't need a second atomic operation in order to perform
the update.  This also makes it cheap to update pending_flush in
tlb_flush_by_mmuidx_async_work.

Tested-by: Emilio G. Cota
Reviewed-by: Emilio G. Cota
Signed-off-by: Richard Henderson
---
 include/exec/cpu-defs.h | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

(limited to 'include/exec/cpu-defs.h')

diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 9005923b4d..659c73d2a1 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -145,8 +145,14 @@ typedef struct CPUIOTLBEntry {
  * Data elements that are shared between all MMU modes.
  */
 typedef struct CPUTLBCommon {
-    /* lock serializes updates to tlb_table and tlb_v_table */
+    /* Serialize updates to tlb_table and tlb_v_table, and others as noted. */
     QemuSpin lock;
+    /*
+     * Within pending_flush, for each bit N, there exists an outstanding
+     * cross-cpu flush for mmu_idx N.  Further cross-cpu flushes to that
+     * mmu_idx may be discarded.  Protected by tlb_c.lock.
+     */
+    uint16_t pending_flush;
 } CPUTLBCommon;

 /*
--
cgit v1.2.3
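
The sketch below illustrates the locking pattern the commit message describes: once the pending-flush bitmask lives next to, and is protected by, the spin lock that already guards the TLB tables, setting a bit rides on the lock acquisition the flush path takes anyway, instead of needing a separate atomic read-modify-write.  This is a minimal, self-contained example, not QEMU's cputlb.c code; the names (tlb_common, queue_pending_flush) and the use of pthread_spinlock_t in place of QemuSpin are illustrative assumptions.

    /* Minimal sketch of a flush-pending bitmask protected by the TLB lock.
     * Hypothetical names; pthread_spinlock_t stands in for QemuSpin. */
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tlb_common {
        pthread_spinlock_t lock;   /* also guards the (omitted) TLB tables */
        uint16_t pending_flush;    /* bit N set: flush for mmu_idx N queued */
    };

    /* Returns true if the caller should queue a cross-CPU flush work item;
     * false means a flush for this mmu_idx is already outstanding and the
     * new request may be discarded, as the field's comment describes. */
    static bool queue_pending_flush(struct tlb_common *c, unsigned mmu_idx)
    {
        bool need_work;

        pthread_spin_lock(&c->lock);
        need_work = !(c->pending_flush & (1u << mmu_idx));
        c->pending_flush |= 1u << mmu_idx;   /* no extra atomic op needed */
        pthread_spin_unlock(&c->lock);

        return need_work;
    }

    int main(void)
    {
        struct tlb_common c = { .pending_flush = 0 };

        pthread_spin_init(&c.lock, PTHREAD_PROCESS_PRIVATE);
        printf("first request queued: %d\n", queue_pending_flush(&c, 3));   /* 1 */
        printf("duplicate discarded:  %d\n", !queue_pending_flush(&c, 3));  /* 1 */
        pthread_spin_destroy(&c.lock);
        return 0;
    }

Because the flush path must take the lock anyway to touch tlb_table and tlb_v_table, folding the bitmask update under the same critical section costs nothing extra, which is the point of moving pending_flush into CPUTLBCommon.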