author     Richard Henderson <richard.henderson@linaro.org>   2022-08-10 14:13:30 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2022-09-06 08:04:26 +0100
commit     7e0d9973ea665bf459b2dbd173d0e51bc6ca5216
tree       561211b6e0cd4e4a0305b5db9e31566d3af2206a
parent     97e03465f7dac073434373428388eb6e0998ecea
accel/tcg: Use probe_access_internal for softmmu get_page_addr_code_hostp
Simplify the implementation of get_page_addr_code_hostp
by reusing the existing probe_access infrastructure.

Acked-by: Ilya Leoshkevich <iii@linux.ibm.com>
Tested-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
 accel/tcg/cputlb.c | 76 ++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 50 deletions(-)
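For context, probe_access_internal() is a static helper defined earlier in
cputlb.c. Roughly, the contract this patch relies on looks like the sketch
below (prototype and behaviour paraphrased from the tree the patch applies
to; treat it as a summary, not the authoritative source):

    /*
     * probe_access_internal() looks up (addr, mmu_idx) in the TLB,
     * refilling via tlb_fill() on a miss; with nonfault == false a
     * failed translation raises the guest exception rather than
     * returning.  It returns the TLB flags for the page and stores a
     * usable host pointer in *phost -- or NULL when the page is not
     * plain RAM (TLB_MMIO, or TLB_INVALID_MASK when the MMU protection
     * covers less than a full target page).
     */
    static int probe_access_internal(CPUArchState *env, target_ulong addr,
                                     int fault_size, MMUAccessType access_type,
                                     int mmu_idx, bool nonfault,
                                     void **phost, uintptr_t retaddr);

That NULL convention is why the single "p == NULL" test in the new code
below can replace both the TLB_INVALID_MASK and TLB_MMIO branches of the
old implementation.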
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index 80a3eb4f1c..8fad2d9b83 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1482,56 +1482,6 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
     victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
                    (ADDR) & TARGET_PAGE_MASK)

-/*
- * Return a ram_addr_t for the virtual address for execution.
- *
- * Return -1 if we can't translate and execute from an entire page
- * of RAM. This will force us to execute by loading and translating
- * one insn at a time, without caching.
- *
- * NOTE: This function will trigger an exception if the page is
- * not executable.
- */
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
-                                        void **hostp)
-{
-    uintptr_t mmu_idx = cpu_mmu_index(env, true);
-    uintptr_t index = tlb_index(env, mmu_idx, addr);
-    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
-    void *p;
-
-    if (unlikely(!tlb_hit(entry->addr_code, addr))) {
-        if (!VICTIM_TLB_HIT(addr_code, addr)) {
-            tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
-            index = tlb_index(env, mmu_idx, addr);
-            entry = tlb_entry(env, mmu_idx, addr);
-
-            if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
-                /*
-                 * The MMU protection covers a smaller range than a target
-                 * page, so we must redo the MMU check for every insn.
-                 */
-                return -1;
-            }
-        }
-        assert(tlb_hit(entry->addr_code, addr));
-    }
-
-    if (unlikely(entry->addr_code & TLB_MMIO)) {
-        /* The region is not backed by RAM. */
-        if (hostp) {
-            *hostp = NULL;
-        }
-        return -1;
-    }
-
-    p = (void *)((uintptr_t)addr + entry->addend);
-    if (hostp) {
-        *hostp = p;
-    }
-    return qemu_ram_addr_from_host_nofail(p);
-}
-
 static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
                            CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
{
@@ -1687,6 +1637,32 @@ void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
     return flags ? NULL : host;
 }

+/*
+ * Return a ram_addr_t for the virtual address for execution.
+ *
+ * Return -1 if we can't translate and execute from an entire page
+ * of RAM. This will force us to execute by loading and translating
+ * one insn at a time, without caching.
+ *
+ * NOTE: This function will trigger an exception if the page is
+ * not executable.
+ */
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+                                        void **hostp)
+{
+    void *p;
+
+    (void)probe_access_internal(env, addr, 1, MMU_INST_FETCH,
+                                cpu_mmu_index(env, true), false, &p, 0);
+    if (p == NULL) {
+        return -1;
+    }
+    if (hostp) {
+        *hostp = p;
+    }
+    return qemu_ram_addr_from_host_nofail(p);
+}
+
 #ifdef CONFIG_PLUGIN
 /*
  * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
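
For reference, the usual way into this function at the time was the
get_page_addr_code() inline in include/exec/exec-all.h, which simply passes
a NULL hostp; paraphrased below (a sketch, check the tree for the exact
form):

    /* Translate addr for execution; the host pointer is not needed. */
    static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
                                                    target_ulong addr)
    {
        return get_page_addr_code_hostp(env, addr, NULL);
    }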