author     Richard Henderson <richard.henderson@linaro.org>  2019-08-23 15:12:32 -0700
committer  Richard Henderson <richard.henderson@linaro.org>  2019-09-03 08:30:39 -0700
commit     30d7e098d5c38644359820317fcf72e3e129ec53 (patch)
tree       55ac8fed388ffa8cb3e92a8c37dea9edce816182 /include/exec/cpu-all.h
parent     0026348b48fe532279e8c12b100c16c1aa991373 (diff)
cputlb: Fold TLB_RECHECK into TLB_INVALID_MASK
We had two different mechanisms to force a recheck of the tlb.

Before TLB_RECHECK was introduced, we had a PAGE_WRITE_INV bit that would
immediately set TLB_INVALID_MASK, which automatically means that a second
check of the tlb entry fails.

We can use the same mechanism to handle small pages. Conserve TLB_* bits
by removing TLB_RECHECK.

Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
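To illustrate the mechanism the message describes, here is a minimal,
self-contained C sketch of how flag bits folded into the low bits of a
page-aligned TLB entry address make the fast-path comparison fail once
TLB_INVALID_MASK is set. This is not QEMU's code: the 12-bit page size and
the names tlb_entry, fast_path_hit and invalidate_entry are illustrative
assumptions.

    /* Sketch (not QEMU source): flags live in the low bits left free
     * by page alignment, so setting TLB_INVALID_MASK guarantees the
     * next fast-path compare misses and the mmu lookup is repeated. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TARGET_PAGE_BITS 12                     /* assumed page size */
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
    #define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))

    typedef struct {
        uint64_t addr;   /* page-aligned address | flag bits */
    } tlb_entry;

    /* Including the invalid bit in the compare mask means an
     * invalidated entry can never match the page-aligned address. */
    static bool fast_path_hit(const tlb_entry *e, uint64_t addr)
    {
        return (addr & TARGET_PAGE_MASK) ==
               (e->addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
    }

    static void invalidate_entry(tlb_entry *e)
    {
        e->addr |= TLB_INVALID_MASK;   /* next access takes the slow path */
    }

    int main(void)
    {
        tlb_entry e = { .addr = 0x1000 };
        printf("before: %d\n", fast_path_hit(&e, 0x1234));  /* prints 1 */
        invalidate_entry(&e);
        printf("after:  %d\n", fast_path_hit(&e, 0x1234));  /* prints 0 */
        return 0;
    }

Because the invalid bit participates in the comparison the fast path already
performs, forcing a recheck costs no extra branch, which is why a separate
TLB_RECHECK bit is unnecessary.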
Diffstat (limited to 'include/exec/cpu-all.h')
-rw-r--r--  include/exec/cpu-all.h  5
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 8323094648..8d07ae23a5 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -329,14 +329,11 @@ CPUArchState *cpu_copy(CPUArchState *env);
 #define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS - 2))
 /* Set if TLB entry is an IO callback. */
 #define TLB_MMIO            (1 << (TARGET_PAGE_BITS - 3))
-/* Set if TLB entry must have MMU lookup repeated for every access */
-#define TLB_RECHECK         (1 << (TARGET_PAGE_BITS - 4))
 
 /* Use this mask to check interception with an alignment mask
  * in a TCG backend.
  */
-#define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
-                         | TLB_RECHECK)
+#define TLB_FLAGS_MASK  (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
 
 /**
  * tlb_hit_page: return true if page aligned @addr is a hit against the
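As background for the comment preserved in this hunk ("check interception
with an alignment mask in a TCG backend"), a hedged sketch of the trick:
since every TLB_* flag now sits inside TLB_FLAGS_MASK, a single comparison
can catch both flagged pages and misaligned accesses. The names
fast_path_ok, entry_addr and a_mask are illustrative, not QEMU's actual
backend code.

    /* Sketch (illustrative): the guest address is masked with
     * page mask | alignment mask and compared against the stored
     * entry. Misaligned low bits survive the mask, and flag bits are
     * present only in the entry, so either condition breaks equality
     * and sends the access to the slow path. */
    #include <stdbool.h>
    #include <stdint.h>

    #define TARGET_PAGE_BITS 12                     /* assumed page size */
    #define TARGET_PAGE_MASK (~(((uint64_t)1 << TARGET_PAGE_BITS) - 1))
    #define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
    #define TLB_NOTDIRTY     (1 << (TARGET_PAGE_BITS - 2))
    #define TLB_MMIO         (1 << (TARGET_PAGE_BITS - 3))
    #define TLB_FLAGS_MASK   (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)

    /* entry_addr: page-aligned address ORed with any TLB_* flags.
     * a_mask:     alignment mask, e.g. 3 for a 4-byte access. */
    static bool fast_path_ok(uint64_t addr, uint64_t entry_addr,
                             uint64_t a_mask)
    {
        return (addr & (TARGET_PAGE_MASK | a_mask)) == entry_addr;
    }

With entry_addr = 0x1000, fast_path_ok(0x1234, 0x1000, 3) is true; making the
access misaligned (0x1235) or setting TLB_MMIO on the entry (0x1200) both
fail the same compare. This works only because every flag fits in the bits
below TARGET_PAGE_BITS, which is why the commit conserves TLB_* bits.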