author     Peter Maydell <peter.maydell@linaro.org>   2018-07-13 15:16:36 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2018-07-16 17:26:01 +0100
commit     3474c98a2a2afcefa7c665f02ad2bed2a43ab0f7
tree       a23a5c19f169e81bd8b802a697bcbb13ad923745 /accel/tcg/cputlb.c
parent     b493ccf1fc82674ef73564b3c61e309105c9336b
accel/tcg: Assert that tlb fill gave us a valid TLB entry
In commit 4b1a3e1e34ad97 we added a check for whether the TLB entry
we had following a tlb_fill had the INVALID bit set. This could
happen in some circumstances because a stale or wrong TLB entry was
pulled out of the victim cache. However, after commit 68fea038553039e
(which prevents stale entries being in the victim cache) and the
previous commit (which ensures we don't incorrectly hit in the victim
cache) this should never be possible.

Drop the check on TLB_INVALID_MASK from the "is this a TLB_RECHECK?"
condition, and instead assert that the tlb fill procedure has given
us a valid TLB entry (or longjumped out with a guest exception).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20180713141636.18665-3-peter.maydell@linaro.org
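[Editorial note] The assert is sufficient to replace the old explicit
TLB_INVALID_MASK check because tlb_hit() already treats an entry with
TLB_INVALID_MASK set as a miss. A minimal sketch of that helper,
paraphrased from QEMU's include/exec/cpu-all.h of roughly this era
(an approximation for illustration, not a verbatim quote of the tree
at this commit):

/* Sketch: how tlb_hit() rejects invalid entries (paraphrased, not verbatim) */
static inline bool tlb_hit_page(target_ulong tlb_addr, target_ulong page)
{
    /* Folding TLB_INVALID_MASK into the mask means a set invalid bit
     * makes the comparison fail, so an invalid entry never hits.
     */
    return page == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

static inline bool tlb_hit(target_ulong tlb_addr, target_ulong addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}

This is also why the TLB_RECHECK condition in the hunk below can drop
TLB_INVALID_MASK: by the time it runs, the new assert has already
guaranteed the entry is valid.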
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r--   accel/tcg/cputlb.c   4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 2d5fb15d9a..563fa30117 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -970,10 +970,10 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
         if (!VICTIM_TLB_HIT(addr_code, addr)) {
             tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
         }
+        assert(tlb_hit(env->tlb_table[mmu_idx][index].addr_code, addr));
     }
 
-    if (unlikely((env->tlb_table[mmu_idx][index].addr_code &
-                  (TLB_RECHECK | TLB_INVALID_MASK)) == TLB_RECHECK)) {
+    if (unlikely(env->tlb_table[mmu_idx][index].addr_code & TLB_RECHECK)) {
         /*
          * This is a TLB_RECHECK access, where the MMU protection
          * covers a smaller range than a target page, and we must