author    Richard Henderson <richard.henderson@linaro.org>  2024-07-09 16:56:48 -0700
committer Richard Henderson <richard.henderson@linaro.org>  2024-07-23 10:56:16 +1000
commit    f6bcc5b8f91e3c1b855c3078d9133f3918080276 (patch)
tree      0d01d6a4ca533b476d0b11ab3771d6c752d18acb
parent    c6d84fd7cfb46a67c5c0404e93ed024cd3a14e6e (diff)
target/ppc: Improve helper_dcbz for user-only
Mark the reserve_addr check unlikely.

Use tlb_vaddr_to_host instead of probe_write, relying on the memset
itself to test for page writability.

Use set/clear_helper_retaddr so that we can properly unwind on segfault.

With this, a trivial loop around guest memset will no longer spend
nearly 25% of runtime within page_get_flags.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
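For context (not part of this commit), the workload described above, a trivial loop around guest memset, might look roughly like the sketch below when run as a user-mode guest binary; the buffer size and iteration count are arbitrary assumptions:

/*
 * Guest-side microbenchmark sketch: zero a buffer in a tight loop so that
 * the memset/dcbz path dominates runtime.  Sizes here are illustrative only.
 */
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t len = 1 << 20;               /* 1 MiB buffer (arbitrary) */
    char *buf = malloc(len);

    if (!buf) {
        return 1;
    }
    for (int i = 0; i < 10000; i++) {   /* arbitrary iteration count */
        memset(buf, 0, len);
    }
    free(buf);
    return 0;
}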
-rw-r--r--  target/ppc/mem_helper.c  |  15
1 file changed, 11 insertions(+), 4 deletions(-)
diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c
index 24bae3b80c..953dd08d5d 100644
--- a/target/ppc/mem_helper.c
+++ b/target/ppc/mem_helper.c
@@ -280,20 +280,27 @@ static void dcbz_common(CPUPPCState *env, target_ulong addr,
     addr &= mask;
 
     /* Check reservation */
-    if ((env->reserve_addr & mask) == addr) {
+    if (unlikely((env->reserve_addr & mask) == addr)) {
         env->reserve_addr = (target_ulong)-1ULL;
     }
 
     /* Try fast path translate */
+#ifdef CONFIG_USER_ONLY
+    haddr = tlb_vaddr_to_host(env, addr, MMU_DATA_STORE, mmu_idx);
+#else
     haddr = probe_write(env, addr, dcbz_size, mmu_idx, retaddr);
-    if (haddr) {
-        memset(haddr, 0, dcbz_size);
-    } else {
+    if (unlikely(!haddr)) {
         /* Slow path */
         for (int i = 0; i < dcbz_size; i += 8) {
             cpu_stq_mmuidx_ra(env, addr + i, 0, mmu_idx, retaddr);
         }
+        return;
     }
+#endif
+
+    set_helper_retaddr(retaddr);
+    memset(haddr, 0, dcbz_size);
+    clear_helper_retaddr();
 }
 
 void helper_dcbz(CPUPPCState *env, target_ulong addr, int mmu_idx)