Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c     88
-rw-r--r--  accel/tcg/user-exec.c  36
2 files changed, 89 insertions(+), 35 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index a083324768..cdcc377102 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -855,6 +855,25 @@ static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
return ram_addr;
}
+/*
+ * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
+ * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
+ * be discarded and looked up again (e.g. via tlb_entry()).
+ */
+static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+{
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+ bool ok;
+
+ /*
+ * This is not a probe, so the only valid return is success; failure
+ * should result in an exception + longjmp to the cpu loop.
+ */
+ ok = cc->tlb_fill(cpu, addr, size, access_type, mmu_idx, false, retaddr);
+ assert(ok);
+}
+
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
int mmu_idx, target_ulong addr, uintptr_t retaddr,
MMUAccessType access_type, int size)
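For illustration (not part of the patch): the comment on tlb_fill() above implies a re-lookup pattern in callers. A minimal sketch, assuming the existing tlb_entry(), tlb_hit() and ENV_GET_CPU() helpers from this tree; the function name is hypothetical:
/* Sketch of the caller pattern tlb_fill() requires: any CPUTLBEntry
 * pointer held across the call is stale, since the fill may resize
 * and reallocate the TLB table.
 */
static void example_load_refill(CPUArchState *env, target_ulong addr,
                                int size, int mmu_idx, uintptr_t retaddr)
{
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(entry->addr_read, addr)) {
        tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Re-fetch: the old pointer may now reference freed memory. */
        entry = tlb_entry(env, mmu_idx, addr);
    }
    /* ... safe to use entry->addr_read and entry->addend here ... */
}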
@@ -938,6 +957,16 @@ static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
}
}
+static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
+{
+#if TCG_OVERSIZED_GUEST
+ return *(target_ulong *)((uintptr_t)entry + ofs);
+#else
+ /* ofs might correspond to .addr_write, so use atomic_read */
+ return atomic_read((target_ulong *)((uintptr_t)entry + ofs));
+#endif
+}
+
/* Return true if ADDR is present in the victim tlb, and has been copied
back to the main tlb. */
static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
@@ -948,14 +977,7 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
assert_cpu_is_self(ENV_GET_CPU(env));
for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx];
- target_ulong cmp;
-
- /* elt_ofs might correspond to .addr_write, so use atomic_read */
-#if TCG_OVERSIZED_GUEST
- cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
-#else
- cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
-#endif
+ target_ulong cmp = tlb_read_ofs(vtlb, elt_ofs);
if (cmp == page) {
/* Found entry in victim tlb, swap tlb and iotlb. */
@@ -1039,6 +1061,56 @@ void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
}
}
+void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+ MMUAccessType access_type, int mmu_idx)
+{
+ CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
+ uintptr_t tlb_addr, page;
+ size_t elt_ofs;
+
+ switch (access_type) {
+ case MMU_DATA_LOAD:
+ elt_ofs = offsetof(CPUTLBEntry, addr_read);
+ break;
+ case MMU_DATA_STORE:
+ elt_ofs = offsetof(CPUTLBEntry, addr_write);
+ break;
+ case MMU_INST_FETCH:
+ elt_ofs = offsetof(CPUTLBEntry, addr_code);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ page = addr & TARGET_PAGE_MASK;
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+
+ if (!tlb_hit_page(tlb_addr, page)) {
+ uintptr_t index = tlb_index(env, mmu_idx, addr);
+
+ if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page)) {
+ CPUState *cs = ENV_GET_CPU(env);
+ CPUClass *cc = CPU_GET_CLASS(cs);
+
+ if (!cc->tlb_fill(cs, addr, 0, access_type, mmu_idx, true, 0)) {
+ /* Non-faulting page table read failed. */
+ return NULL;
+ }
+
+ /* TLB resize via tlb_fill may have moved the entry. */
+ entry = tlb_entry(env, mmu_idx, addr);
+ }
+ tlb_addr = tlb_read_ofs(entry, elt_ofs);
+ }
+
+ if (tlb_addr & ~TARGET_PAGE_MASK) {
+ /* IO access */
+ return NULL;
+ }
+
+ return (void *)((uintptr_t)addr + entry->addend);
+}
+
/* Probe for a read-modify-write atomic operation. Do not allow unaligned
* operations, or io operations to proceed. Return the host address. */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
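For context (not part of the patch): a hedged sketch of how a helper might use the new tlb_vaddr_to_host(), falling back to the ordinary load path when it returns NULL. The function name is hypothetical, cpu_ldub_data_ra() is the existing per-byte accessor, and the access is assumed not to cross a page boundary:
static void example_copy_from_guest(CPUArchState *env, uint8_t *buf,
                                    target_ulong addr, int len,
                                    int mmu_idx, uintptr_t retaddr)
{
    /* Assumes [addr, addr + len) lies within a single guest page. */
    void *host = tlb_vaddr_to_host(env, addr, MMU_DATA_LOAD, mmu_idx);
    int i;

    if (host) {
        /* Page is resident and directly addressable from the host. */
        memcpy(buf, host, len);
    } else {
        /* Non-resident or I/O page: take the full, faulting load path. */
        for (i = 0; i < len; i++) {
            buf[i] = cpu_ldub_data_ra(env, addr + i, retaddr);
        }
    }
}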
diff --git a/accel/tcg/user-exec.c b/accel/tcg/user-exec.c
index 0789984fe6..8cfbeb1b56 100644
--- a/accel/tcg/user-exec.c
+++ b/accel/tcg/user-exec.c
@@ -63,8 +63,8 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
{
CPUState *cpu = current_cpu;
CPUClass *cc;
- int ret;
unsigned long address = (unsigned long)info->si_addr;
+ MMUAccessType access_type;
/* We must handle PC addresses from two different sources:
* a call return address and a signal frame address.
@@ -147,35 +147,17 @@ static inline int handle_cpu_signal(uintptr_t pc, siginfo_t *info,
are still valid segv ones */
address = h2g_nocheck(address);
- cc = CPU_GET_CLASS(cpu);
- /* see if it is an MMU fault */
- g_assert(cc->handle_mmu_fault);
- ret = cc->handle_mmu_fault(cpu, address, 0, is_write, MMU_USER_IDX);
-
- if (ret == 0) {
- /* The MMU fault was handled without causing real CPU fault.
- * Retain helper_retaddr for a possible second fault.
- */
- return 1;
- }
-
- /* All other paths lead to cpu_exit; clear helper_retaddr
- * for next execution.
+ /*
+ * There is no way the target can handle this other than raising
+ * an exception. Undo signal and retaddr state prior to longjmp.
*/
- helper_retaddr = 0;
-
- if (ret < 0) {
- return 0; /* not an MMU fault */
- }
-
- /* Now we have a real cpu fault. */
- cpu_restore_state(cpu, pc, true);
-
sigprocmask(SIG_SETMASK, old_set, NULL);
- cpu_loop_exit(cpu);
+ helper_retaddr = 0;
- /* never comes here */
- return 1;
+ cc = CPU_GET_CLASS(cpu);
+ access_type = is_write ? MMU_DATA_STORE : MMU_DATA_LOAD;
+ cc->tlb_fill(cpu, address, 0, access_type, MMU_USER_IDX, false, pc);
+ g_assert_not_reached();
}
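Also for context (not part of the patch): both files now depend on the CPUClass::tlb_fill contract, sketched below for a hypothetical target. mycpu_walk_page_table() and mycpu_raise_mmu_fault() are placeholder names; tlb_set_page() is the existing API for installing a translation:
static bool mycpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                           MMUAccessType access_type, int mmu_idx,
                           bool probe, uintptr_t retaddr)
{
    hwaddr phys;
    int prot;

    if (mycpu_walk_page_table(cs, addr, access_type, mmu_idx,
                              &phys, &prot)) {
        /* Translation exists: install it and report success. */
        tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                     prot, mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        /* Non-faulting probe: report failure, raise nothing. */
        return false;
    }
    /* Real access: raise the architectural fault; does not return. */
    mycpu_raise_mmu_fault(cs, addr, access_type, mmu_idx, retaddr);
}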
#if defined(__i386__)