author     Peter Maydell <peter.maydell@linaro.org>   2019-05-16 13:15:08 +0100
committer  Peter Maydell <peter.maydell@linaro.org>   2019-05-16 13:15:08 +0100
commit  d8276573da58e8ce78dab8c46dd660efd664bcb7 (patch)
tree    4f02aab577980667493c8fe1152a4d0470df6da4 /target/arm
parent  c1497fba36465d0259d4d04f2bf09ea59ed42680 (diff)
parent  4811e9095c0491bc6f5450e5012c9c4796b9e59d (diff)
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20190510' into staging
Add CPUClass::tlb_fill.
Improve tlb_vaddr_to_host for use by ARM SVE no-fault loads.

# gpg: Signature made Fri 10 May 2019 19:48:37 BST
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* remotes/rth/tags/pull-tcg-20190510: (27 commits)
  tcg: Use tlb_fill probe from tlb_vaddr_to_host
  tcg: Remove CPUClass::handle_mmu_fault
  tcg: Use CPUClass::tlb_fill in cputlb.c
  target/xtensa: Convert to CPUClass::tlb_fill
  target/unicore32: Convert to CPUClass::tlb_fill
  target/tricore: Convert to CPUClass::tlb_fill
  target/tilegx: Convert to CPUClass::tlb_fill
  target/sparc: Convert to CPUClass::tlb_fill
  target/sh4: Convert to CPUClass::tlb_fill
  target/s390x: Convert to CPUClass::tlb_fill
  target/riscv: Convert to CPUClass::tlb_fill
  target/ppc: Convert to CPUClass::tlb_fill
  target/openrisc: Convert to CPUClass::tlb_fill
  target/nios2: Convert to CPUClass::tlb_fill
  target/moxie: Convert to CPUClass::tlb_fill
  target/mips: Convert to CPUClass::tlb_fill
  target/mips: Tidy control flow in mips_cpu_handle_mmu_fault
  target/mips: Pass a valid error to raise_mmu_exception for user-only
  target/microblaze: Convert to CPUClass::tlb_fill
  target/m68k: Convert to CPUClass::tlb_fill
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
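The new hook replaces CPUClass::handle_mmu_fault. Its shape can be read off the ARM conversion below; as a sketch (the authoritative declaration lives in include/qom/cpu.h and may differ in detail):

/* Sketch of the hook added by this series, inferred from the ARM
 * implementation below.  Returns true if the page was entered into
 * the TLB.  On failure it raises a guest fault, unless probe is true,
 * in which case it simply returns false so that callers such as SVE
 * no-fault loads can back off without taking an exception.
 */
bool (*tlb_fill)(CPUState *cpu, vaddr address, int size,
                 MMUAccessType access_type, int mmu_idx,
                 bool probe, uintptr_t retaddr);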
Diffstat (limited to 'target/arm')
-rw-r--r--  target/arm/cpu.c         |  22
-rw-r--r--  target/arm/helper.c      |  90
-rw-r--r--  target/arm/internals.h   |  10
-rw-r--r--  target/arm/op_helper.c   |  29
-rw-r--r--  target/arm/sve_helper.c  |   6
5 files changed, 66 insertions(+), 91 deletions(-)
diff --git a/target/arm/cpu.c b/target/arm/cpu.c
index a181fa8dc1..8eee1d8c59 100644
--- a/target/arm/cpu.c
+++ b/target/arm/cpu.c
@@ -2133,23 +2133,6 @@ static Property arm_cpu_properties[] = {
DEFINE_PROP_END_OF_LIST()
};
-#ifdef CONFIG_USER_ONLY
-static int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
- int rw, int mmu_idx)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- env->exception.vaddress = address;
- if (rw == 2) {
- cs->exception_index = EXCP_PREFETCH_ABORT;
- } else {
- cs->exception_index = EXCP_DATA_ABORT;
- }
- return 1;
-}
-#endif
-
static gchar *arm_gdb_arch_name(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
@@ -2182,9 +2165,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->synchronize_from_tb = arm_cpu_synchronize_from_tb;
cc->gdb_read_register = arm_cpu_gdb_read_register;
cc->gdb_write_register = arm_cpu_gdb_write_register;
-#ifdef CONFIG_USER_ONLY
- cc->handle_mmu_fault = arm_cpu_handle_mmu_fault;
-#else
+#ifndef CONFIG_USER_ONLY
cc->do_interrupt = arm_cpu_do_interrupt;
cc->do_unaligned_access = arm_cpu_do_unaligned_access;
cc->do_transaction_failed = arm_cpu_do_transaction_failed;
@@ -2209,6 +2190,7 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data)
cc->disas_set_info = arm_disas_set_info;
#ifdef CONFIG_TCG
cc->tcg_initialize = arm_translate_init;
+ cc->tlb_fill = arm_cpu_tlb_fill;
#endif
}
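Each per-target patch in this pull follows the same pattern as the cpu.c hunk above: the CONFIG_USER_ONLY handle_mmu_fault implementation is deleted and the unified hook is registered under CONFIG_TCG. A generic sketch (the "foo" target is hypothetical):

static void foo_cpu_class_init(ObjectClass *oc, void *data)
{
    CPUClass *cc = CPU_CLASS(oc);

#ifdef CONFIG_TCG
    cc->tcg_initialize = foo_translate_init;
    cc->tlb_fill = foo_cpu_tlb_fill;   /* replaces cc->handle_mmu_fault */
#endif
}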
diff --git a/target/arm/helper.c b/target/arm/helper.c
index 1e6eb0d0f3..e2d5c8e34f 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -12596,43 +12596,6 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
}
}
-/* Walk the page table and (if the mapping exists) add the page
- * to the TLB. Return false on success, or true on failure. Populate
- * fsr with ARM DFSR/IFSR fault register format value on failure.
- */
-bool arm_tlb_fill(CPUState *cs, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- hwaddr phys_addr;
- target_ulong page_size;
- int prot;
- int ret;
- MemTxAttrs attrs = {};
-
- ret = get_phys_addr(env, address, access_type,
- core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fi, NULL);
- if (!ret) {
- /*
- * Map a single [sub]page. Regions smaller than our declared
- * target page size are handled specially, so for those we
- * pass in the exact addresses.
- */
- if (page_size >= TARGET_PAGE_SIZE) {
- phys_addr &= TARGET_PAGE_MASK;
- address &= TARGET_PAGE_MASK;
- }
- tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
- prot, mmu_idx, page_size);
- return 0;
- }
-
- return ret;
-}
-
hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
MemTxAttrs *attrs)
{
@@ -13111,6 +13074,59 @@ uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
#endif
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+
+#ifdef CONFIG_USER_ONLY
+ cpu->env.exception.vaddress = address;
+ if (access_type == MMU_INST_FETCH) {
+ cs->exception_index = EXCP_PREFETCH_ABORT;
+ } else {
+ cs->exception_index = EXCP_DATA_ABORT;
+ }
+ cpu_loop_exit_restore(cs, retaddr);
+#else
+ hwaddr phys_addr;
+ target_ulong page_size;
+ int prot, ret;
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+
+ /*
+ * Walk the page table and (if the mapping exists) add the page
+ * to the TLB. On success, return true. Otherwise, if probing,
+ * return false. Otherwise populate fsr with ARM DFSR/IFSR fault
+ * register format, and signal the fault.
+ */
+ ret = get_phys_addr(&cpu->env, address, access_type,
+ core_to_arm_mmu_idx(&cpu->env, mmu_idx),
+ &phys_addr, &attrs, &prot, &page_size, &fi, NULL);
+ if (likely(!ret)) {
+ /*
+ * Map a single [sub]page. Regions smaller than our declared
+ * target page size are handled specially, so for those we
+ * pass in the exact addresses.
+ */
+ if (page_size >= TARGET_PAGE_SIZE) {
+ phys_addr &= TARGET_PAGE_MASK;
+ address &= TARGET_PAGE_MASK;
+ }
+ tlb_set_page_with_attrs(cs, address, phys_addr, attrs,
+ prot, mmu_idx, page_size);
+ return true;
+ } else if (probe) {
+ return false;
+ } else {
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
+ arm_deliver_fault(cpu, address, access_type, mmu_idx, &fi);
+ }
+#endif
+}
+
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
{
/* Implement DC ZVA, which zeroes a fixed-length block of memory.
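For context, the companion cputlb.c patch in this series ("tcg: Use tlb_fill probe from tlb_vaddr_to_host") relies on the probe=true path of the function above. A hypothetical sketch of such a non-faulting probe (the helper name is invented for illustration):

/* Hypothetical helper: try to fill the TLB without raising a fault.
 * With probe=true, arm_cpu_tlb_fill returns false on a translation
 * failure instead of delivering an exception, so a caller such as
 * tlb_vaddr_to_host can fall back gracefully (e.g. return NULL).
 */
static bool tlb_fill_nofault(CPUState *cs, vaddr addr,
                             MMUAccessType access_type, int mmu_idx)
{
    CPUClass *cc = CPU_GET_CLASS(cs);

    return cc->tlb_fill(cs, addr, 0, access_type, mmu_idx,
                        true /* probe */, 0 /* retaddr unused */);
}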
diff --git a/target/arm/internals.h b/target/arm/internals.h
index 587a1ddf58..5a02f458f3 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -761,10 +761,12 @@ static inline bool arm_extabort_type(MemTxResult result)
return result != MEMTX_DECODE_ERROR;
}
-/* Do a page table walk and add page to TLB if possible */
-bool arm_tlb_fill(CPUState *cpu, vaddr address,
- MMUAccessType access_type, int mmu_idx,
- ARMMMUFaultInfo *fi);
+bool arm_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+ MMUAccessType access_type, int mmu_idx,
+ bool probe, uintptr_t retaddr);
+
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi) QEMU_NORETURN;
/* Return true if the stage 1 translation regime is using LPAE format page
* tables */
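The QEMU_NORETURN on arm_deliver_fault is what lets arm_cpu_tlb_fill above fall off the end of a bool-returning function in its failure path without a compiler warning: the attribute tells the compiler that control never comes back. A minimal illustration:

/* QEMU_NORETURN wraps the compiler's noreturn attribute. */
void always_raises(void) QEMU_NORETURN;

bool fill_or_fault(bool probe)
{
    if (probe) {
        return false;          /* report failure to the caller */
    }
    always_raises();           /* unwinds back to the cpu loop */
    /* no return statement needed: this point is unreachable */
}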
diff --git a/target/arm/op_helper.c b/target/arm/op_helper.c
index 8698b4dc83..8ee15a4bd4 100644
--- a/target/arm/op_helper.c
+++ b/target/arm/op_helper.c
@@ -126,8 +126,8 @@ static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
return syn;
}
-static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
- int mmu_idx, ARMMMUFaultInfo *fi)
+void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
{
CPUARMState *env = &cpu->env;
int target_el;
@@ -179,27 +179,6 @@ static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
raise_exception(env, exc, syn, target_el);
}
-/* try to fill the TLB and return an exception if error. If retaddr is
- * NULL, it means that the function was called in C code (i.e. not
- * from generated code or from helper.c)
- */
-void tlb_fill(CPUState *cs, target_ulong addr, int size,
- MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
- bool ret;
- ARMMMUFaultInfo fi = {};
-
- ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
- if (unlikely(ret)) {
- ARMCPU *cpu = ARM_CPU(cs);
-
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr, true);
-
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
- }
-}
-
/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
MMUAccessType access_type,
@@ -212,7 +191,7 @@ void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
cpu_restore_state(cs, retaddr, true);
fi.type = ARMFault_Alignment;
- deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}
/* arm_cpu_do_transaction_failed: handle a memory system error response
@@ -233,7 +212,7 @@ void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
fi.ea = arm_extabort_type(response);
fi.type = ARMFault_SyncExternal;
- deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
+ arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
#endif /* !defined(CONFIG_USER_ONLY) */
diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c
index bc847250dd..fd434c66ea 100644
--- a/target/arm/sve_helper.c
+++ b/target/arm/sve_helper.c
@@ -4598,11 +4598,7 @@ static void sve_ldnf1_r(CPUARMState *env, void *vg, const target_ulong addr,
* in the real world, obviously.)
*
* Then there are the annoying special cases with watchpoints...
- *
- * TODO: Add a form of tlb_fill that does not raise an exception,
- * with a form of tlb_vaddr_to_host and a set of loads to match.
- * The non_fault_vaddr_to_host would handle everything, usually,
- * and the loads would handle the iomem path for watchpoints.
+ * TODO: Add a form of non-faulting loads using cc->tlb_fill(probe=true).
*/
host = tlb_vaddr_to_host(env, addr + mem_off, MMU_DATA_LOAD, mmu_idx);
split = max_for_page(addr, mem_off, mem_max);
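The rewritten TODO points at the mechanism this series makes possible: with probe=true, a first-fault or no-fault SVE load could test a page and stop loading elements instead of taking an exception. A rough sketch of that idea (names are illustrative, not from this commit):

/* Illustrative only: probe one page for a no-fault SVE load.  If the
 * translation would fault, the caller marks the remaining elements as
 * not loaded (first-fault register semantics) rather than raising an
 * exception.
 */
static bool sve_probe_page(CPUState *cs, target_ulong addr, int mmu_idx)
{
    CPUClass *cc = CPU_GET_CLASS(cs);

    return cc->tlb_fill(cs, addr, 0, MMU_DATA_LOAD, mmu_idx,
                        true /* probe */, 0);
}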