author     Richard Henderson <richard.henderson@linaro.org>  2022-11-01 12:51:04 +1100
committer  Richard Henderson <richard.henderson@linaro.org>  2023-05-16 15:21:39 -0700
commit     0cadc1eda1a3120c37c713ab6d6b7a02da0d2e6f (patch)
tree       480c1a82e17bf8dd990c96c9c98321f9373e6cbb /accel
parent     5b36f2684cbb07fcd75cc29b618651c43e7a80ed (diff)
tcg: Unify helper_{be,le}_{ld,st}*
With the current structure of cputlb.c, there is no difference between
the little-endian and big-endian entry points, aside from the assert.
Unify the pairs of functions.

Hoist the qemu_{ld,st}_helpers arrays to tcg.c.

Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
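The same transformation repeats for every access size, for loads and stores alike: each {be,le} pair of entry points differed only in the MemOp it asserted, so the pair collapses into one helper that checks just MO_SIZE, while the byte-swap flag still travels in the MemOpIdx and is honoured inside the common do_ld*/do_st* path. Condensed from the 16-bit load hunks below (context trimmed, not a standalone build):

    /* Before: two entry points whose bodies are identical apart from
     * the MemOp they validate. */
    tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
                                        MemOpIdx oi, uintptr_t retaddr)
    {
        validate_memop(oi, MO_LEUW);
        return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
    }

    tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
                                        MemOpIdx oi, uintptr_t retaddr)
    {
        validate_memop(oi, MO_BEUW);
        return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
    }

    /* After: a single entry point; only the access size is checked, and
     * the byte-swap flag still reaches do_ld2_mmu() via the MemOpIdx. */
    tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr,
                                     MemOpIdx oi, uintptr_t retaddr)
    {
        tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
        return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
    }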
Diffstat (limited to 'accel')
-rw-r--r--  accel/tcg/cputlb.c  190
1 file changed, 61 insertions(+), 129 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index d910464c36..34796ef568 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2013,25 +2013,6 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
}
/*
- * Verify that we have passed the correct MemOp to the correct function.
- *
- * In the case of the helper_*_mmu functions, we will have done this by
- * using the MemOp to look up the helper during code generation.
- *
- * In the case of the cpu_*_mmu functions, this is up to the caller.
- * We could present one function to target code, and dispatch based on
- * the MemOp, but so far we have worked hard to avoid an indirect function
- * call along the memory path.
- */
-static void validate_memop(MemOpIdx oi, MemOp expected)
-{
-#ifdef CONFIG_DEBUG_TCG
- MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
- assert(have == expected);
-#endif
-}
-
-/*
* Load Helpers
*
* We support two different access types. SOFTMMU_CODE_ACCESS is
@@ -2303,10 +2284,10 @@ static uint8_t do_ld1_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return do_ld_1(env, &l.page[0], l.mmu_idx, access_type, ra);
}
-tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
return do_ld1_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2334,17 +2315,10 @@ static uint16_t do_ld2_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return ret;
}
-tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUW);
- return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
return do_ld2_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2368,17 +2342,10 @@ static uint32_t do_ld4_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return ret;
}
-tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_LEUL);
- return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
return do_ld4_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2402,17 +2369,10 @@ static uint64_t do_ld8_mmu(CPUArchState *env, target_ulong addr, MemOpIdx oi,
return ret;
}
-uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUQ);
- return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
-}
-
-uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+uint64_t helper_ldq_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
return do_ld8_mmu(env, addr, oi, retaddr, MMU_DATA_LOAD);
}
@@ -2421,35 +2381,22 @@ uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
*/
-
-tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
+ return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
}
-tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
+ return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
}
-tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, target_ulong addr,
+ MemOpIdx oi, uintptr_t retaddr)
{
- return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
-}
-
-tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
- MemOpIdx oi, uintptr_t retaddr)
-{
- return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
+ return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
}
/*
@@ -2465,7 +2412,7 @@ uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
{
uint8_t ret;
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
ret = do_ld1_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2476,7 +2423,7 @@ uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint16_t ret;
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2487,7 +2434,7 @@ uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint32_t ret;
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2498,7 +2445,7 @@ uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
{
uint64_t ret;
- validate_memop(oi, MO_BEUQ);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2509,7 +2456,7 @@ uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint16_t ret;
- validate_memop(oi, MO_LEUW);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
ret = do_ld2_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2520,7 +2467,7 @@ uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint32_t ret;
- validate_memop(oi, MO_LEUL);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
ret = do_ld4_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2531,7 +2478,7 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
{
uint64_t ret;
- validate_memop(oi, MO_LEUQ);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
ret = do_ld8_mmu(env, addr, oi, ra, MMU_DATA_LOAD);
plugin_load_cb(env, addr, oi);
return ret;
@@ -2559,8 +2506,8 @@ Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
new_oi = make_memop_idx(mop, mmu_idx);
- h = helper_be_ldq_mmu(env, addr, new_oi, ra);
- l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);
+ h = helper_ldq_mmu(env, addr, new_oi, ra);
+ l = helper_ldq_mmu(env, addr + 8, new_oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return int128_make128(l, h);
@@ -2588,8 +2535,8 @@ Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
new_oi = make_memop_idx(mop, mmu_idx);
- l = helper_le_ldq_mmu(env, addr, new_oi, ra);
- h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);
+ l = helper_ldq_mmu(env, addr, new_oi, ra);
+ h = helper_ldq_mmu(env, addr + 8, new_oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
return int128_make128(l, h);
@@ -2738,13 +2685,13 @@ static void do_st_8(CPUArchState *env, MMULookupPageData *p, uint64_t val,
}
}
-void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t ra)
+void helper_stb_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ MemOpIdx oi, uintptr_t ra)
{
MMULookupLocals l;
bool crosspage;
- validate_memop(oi, MO_UB);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
crosspage = mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE, &l);
tcg_debug_assert(!crosspage);
@@ -2773,17 +2720,10 @@ static void do_st2_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
do_st_1(env, &l.page[1], b, l.mmu_idx, ra);
}
-void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUW);
- do_st2_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stw_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUW);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
do_st2_mmu(env, addr, val, oi, retaddr);
}
@@ -2807,17 +2747,10 @@ static void do_st4_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUL);
- do_st4_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUL);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
do_st4_mmu(env, addr, val, oi, retaddr);
}
@@ -2841,17 +2774,10 @@ static void do_st8_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
(void) do_st_leN(env, &l.page[1], val, l.mmu_idx, l.memop, ra);
}
-void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
-{
- validate_memop(oi, MO_LEUQ);
- do_st8_mmu(env, addr, val, oi, retaddr);
-}
-
-void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
- MemOpIdx oi, uintptr_t retaddr)
+void helper_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
+ MemOpIdx oi, uintptr_t retaddr)
{
- validate_memop(oi, MO_BEUQ);
+ tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
do_st8_mmu(env, addr, val, oi, retaddr);
}
@@ -2867,49 +2793,55 @@ static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_ret_stb_mmu(env, addr, val, oi, retaddr);
+ helper_stb_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stw_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUW);
+ do_st2_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stl_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUL);
+ do_st4_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_be_stq_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_BEUQ);
+ do_st8_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stw_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUW);
+ do_st2_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stl_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUL);
+ do_st4_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
MemOpIdx oi, uintptr_t retaddr)
{
- helper_le_stq_mmu(env, addr, val, oi, retaddr);
+ tcg_debug_assert((get_memop(oi) & (MO_BSWAP | MO_SIZE)) == MO_LEUQ);
+ do_st8_mmu(env, addr, val, oi, retaddr);
plugin_store_cb(env, addr, oi);
}
@@ -2934,8 +2866,8 @@ void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
new_oi = make_memop_idx(mop, mmu_idx);
- helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
- helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
+ helper_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
+ helper_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}
@@ -2961,8 +2893,8 @@ void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
new_oi = make_memop_idx(mop, mmu_idx);
- helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
- helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
+ helper_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
+ helper_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
}