 accel/tcg/cputlb.c      | 20 +++++++++++++++-----
 include/exec/exec-all.h | 19 +++++++++++++++++++
 2 files changed, 34 insertions(+), 5 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a49a72b035..464e744591 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -797,13 +797,17 @@ static void tlb_flush_page_bits_by_mmuidx_async_2(CPUState *cpu,
     g_free(d);
 }
 
-void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
-                                   uint16_t idxmap, unsigned bits)
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                               target_ulong len, uint16_t idxmap,
+                               unsigned bits)
 {
     TLBFlushRangeData d;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
         return;
     }
@@ -815,7 +819,7 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -829,6 +833,12 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
     }
 }
 
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
+                                   uint16_t idxmap, unsigned bits)
+{
+    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
+}
+
 void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                             target_ulong addr,
                                             uint16_t idxmap,
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 6b036cae8f..5a5f6d4c1a 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -263,6 +263,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced
     (CPUState *cpu, target_ulong addr, uint16_t idxmap, unsigned bits);
 
 /**
+ * tlb_flush_range_by_mmuidx
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                               target_ulong len, uint16_t idxmap,
+                               unsigned bits);
+/**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
  * @vaddr: virtual address of page to add entry for
@@ -365,6 +379,11 @@ tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                               uint16_t idxmap, unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
+                                             target_ulong len, uint16_t idxmap,
+                                             unsigned bits)
+{
+}
 #endif
 /**
  * probe_access:
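
A minimal usage sketch of the new interface, assuming a hypothetical caller that already holds a CPUState *cpu and a page-aligned target_ulong base; neither variable is part of this commit:

    /* Hypothetical example: flush a 2 MiB virtual range for MMU index 0,
     * comparing all address bits.  Per the cputlb.c change above, the call
     * devolves to tlb_flush_page_by_mmuidx() only when all bits are
     * significant and len <= TARGET_PAGE_SIZE, which a 2 MiB len avoids. */
    tlb_flush_range_by_mmuidx(cpu, base, 2 * 1024 * 1024, 1u << 0,
                              TARGET_LONG_BITS);

The wrapper added in cputlb.c spells out the intended equivalence: tlb_flush_page_bits_by_mmuidx(cpu, addr, idxmap, bits) is now exactly tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits).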