author:    Richard Henderson <richard.henderson@linaro.org>  2019-04-03 10:16:56 +0700
committer: Richard Henderson <richard.henderson@linaro.org>  2019-05-10 11:12:50 -0700
commit:    4811e9095c0491bc6f5450e5012c9c4796b9e59d
tree:      c29149b731e5babf0487ab2459d7c03265996a8e  /include/exec/cpu_ldst.h
parent:    69963f5709a0645934c169784820d0bee22208ba
tcg: Use tlb_fill probe from tlb_vaddr_to_host
Most of the existing users would continue around a loop which would
fault the tlb entry in via a normal load/store.

But for AArch64 SVE we have an existing emulation bug wherein we
would mark the first element of a no-fault vector load as faulted
(within the FFR, not via exception) just because we did not have its
address in the TLB.  Now we can properly only mark it as faulted if
there really is no valid, readable translation, while still not
raising an exception.

(Note that beyond the first element of the vector, the hardware may
report a fault for any reason whatsoever; with at least one element
loaded, forward progress is guaranteed.)

Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
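By way of illustration, here is a minimal sketch (not code from this
patch) of how a no-fault first-element probe can use
tlb_vaddr_to_host() under the new semantics; the helper name and the
caller-side FFR bookkeeping are assumptions for this example.

/*
 * Illustrative sketch only, not from the patch.  Only
 * tlb_vaddr_to_host() is the real interface; the helper name here is
 * hypothetical.  The caller would clear the FFR bit for the first
 * element when this returns false, instead of raising an exception.
 */
static bool sve_first_elem_readable(CPUArchState *env, abi_ptr addr,
                                    int mmu_idx)
{
    /*
     * With this patch, the probe attempts a TLB fill itself, so a
     * NULL return now means there genuinely is no valid, readable
     * RAM translation -- not merely that the entry was absent from
     * the TLB.  No guest exception is raised either way.
     */
    return tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx) != NULL;
}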
Diffstat (limited to 'include/exec/cpu_ldst.h')
include/exec/cpu_ldst.h | 50
1 file changed, 10 insertions(+), 40 deletions(-)
diff --git a/include/exec/cpu_ldst.h b/include/exec/cpu_ldst.h
index d78041d7a0..7b28a839d2 100644
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -433,50 +433,20 @@ static inline CPUTLBEntry *tlb_entry(CPUArchState *env, uintptr_t mmu_idx,
* @mmu_idx: MMU index to use for lookup
*
* Look up the specified guest virtual index in the TCG softmmu TLB.
- * If the TLB contains a host virtual address suitable for direct RAM
- * access, then return it. Otherwise (TLB miss, TLB entry is for an
- * I/O access, etc) return NULL.
- *
- * This is the equivalent of the initial fast-path code used by
- * TCG backends for guest load and store accesses.
+ * If we can translate a host virtual address suitable for direct RAM
+ * access, without causing a guest exception, then return it.
+ * Otherwise (TLB entry is for an I/O access, guest software
+ * TLB fill required, etc) return NULL.
*/
+#ifdef CONFIG_USER_ONLY
static inline void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
- int access_type, int mmu_idx)
+ MMUAccessType access_type, int mmu_idx)
{
-#if defined(CONFIG_USER_ONLY)
return g2h(addr);
-#else
- CPUTLBEntry *tlbentry = tlb_entry(env, mmu_idx, addr);
- abi_ptr tlb_addr;
- uintptr_t haddr;
-
- switch (access_type) {
- case 0:
- tlb_addr = tlbentry->addr_read;
- break;
- case 1:
- tlb_addr = tlb_addr_write(tlbentry);
- break;
- case 2:
- tlb_addr = tlbentry->addr_code;
- break;
- default:
- g_assert_not_reached();
- }
-
- if (!tlb_hit(tlb_addr, addr)) {
- /* TLB entry is for a different page */
- return NULL;
- }
-
- if (tlb_addr & ~TARGET_PAGE_MASK) {
- /* IO access */
- return NULL;
- }
-
- haddr = addr + tlbentry->addend;
- return (void *)haddr;
-#endif /* defined(CONFIG_USER_ONLY) */
}
+#else
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx);
+#endif
#endif /* CPU_LDST_H */
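For reference, a hedged usage sketch of the interface as documented
above: take the fast path through host RAM when the probe succeeds,
otherwise fall back to a full load.  do_full_load() is a hypothetical
stand-in for a target's slow path, not a real QEMU helper.

/*
 * Usage sketch, assuming the cpu_ldst.h context above (which supplies
 * CPUArchState, abi_ptr, and, via osdep.h, memcpy).  Byte-swapping for
 * guest endianness is omitted for brevity.
 */
static uint32_t load_u32_fast(CPUArchState *env, abi_ptr addr, int mmu_idx)
{
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
    uint32_t val;

    if (host) {
        /* Valid, readable RAM translation: read host memory directly. */
        memcpy(&val, host, sizeof(val));
        return val;
    }
    /* I/O access or no valid translation: use the full load path. */
    return do_full_load(env, addr, mmu_idx);
}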