From 504f73f7b3724c885317b6b236620e9048f50c0a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20Benn=C3=A9e?= Date: Fri, 28 Jun 2019 20:54:11 +0100 Subject: trace: add mmu_index to mem_info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We are going to re-use mem_info later for plugins and will need to track the mmu_idx for softmmu code. Signed-off-by: Alex Bennée --- tcg/tcg-op.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'tcg') diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index e87c327fbf..1388bd344d 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -2795,7 +2795,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, 0)); + addr, trace_mem_get_info(memop, idx, 0)); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2832,7 +2832,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, 1)); + addr, trace_mem_get_info(memop, idx, 1)); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i32(); @@ -2875,7 +2875,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, 0)); + addr, trace_mem_get_info(memop, idx, 0)); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2923,7 +2923,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, 1)); + addr, trace_mem_get_info(memop, idx, 1)); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i64(); -- cgit v1.2.3 From c87fb14fde63071234afc984ec76181f33751a13 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Fri, 7 Dec 2018 15:06:05 -0500 Subject: tcg: add tcg_gen_st_ptr MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Will gain a user soon. Signed-off-by: Emilio G. Cota Reviewed-by: Richard Henderson Signed-off-by: Alex Bennée --- tcg/tcg-op.h | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'tcg') diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index e9cf172762..7c778f96f3 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -1249,6 +1249,11 @@ static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o) glue(tcg_gen_ld_,PTR)((NAT)r, a, o); } +static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o) +{ + glue(tcg_gen_st_, PTR)((NAT)r, a, o); +} + static inline void tcg_gen_discard_ptr(TCGv_ptr a) { glue(tcg_gen_discard_,PTR)((NAT)a); -- cgit v1.2.3 From 38b47b19ec3adf6a96d68726dc29096b3aad780a Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Fri, 7 Dec 2018 15:33:56 -0500 Subject: plugin-gen: add module for TCG-related code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We first inject empty instrumentation from translator_loop. 
After translation, we go through the plugins to see what they want to register for, filling in the empty instrumentation. If it turns out that some instrumentation remains unused, we remove it. This approach supports the following features: - Inlining TCG code for simple operations. Note that we do not export TCG ops to plugins. Instead, we give them a C API to insert inlined ops. So far we only support adding an immediate to a u64, e.g. to count events. - "Direct" callbacks. These are callbacks that do not go via a helper. Instead, the helper is defined at run-time, so that the plugin code is directly called from TCG. This makes direct callbacks as efficient as possible; they are therefore used for very frequent events, e.g. memory callbacks. - Passing the host address to memory callbacks. Most of this is implemented in a later patch though. - Instrumentation of memory accesses performed from helpers. See the corresponding comment, as well as a later patch. Signed-off-by: Emilio G. Cota [AJB: add alloc_tcg_plugin_context, use glib, rm hwaddr] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- tcg/tcg-op.h | 11 +++++++++++ tcg/tcg-opc.h | 3 +++ tcg/tcg.c | 22 ++++++++++++++++++++++ tcg/tcg.h | 20 ++++++++++++++++++++ 4 files changed, 56 insertions(+) (limited to 'tcg') diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h index 7c778f96f3..4af272daa5 100644 --- a/tcg/tcg-op.h +++ b/tcg/tcg-op.h @@ -833,6 +833,17 @@ void tcg_gen_goto_tb(unsigned idx); */ void tcg_gen_lookup_and_goto_ptr(void); +static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type, + unsigned wr) +{ + tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr); +} + +static inline void tcg_gen_plugin_cb_end(void) +{ + tcg_emit_op(INDEX_op_plugin_cb_end); +} + #if TARGET_LONG_BITS == 32 #define tcg_temp_new() tcg_temp_new_i32() #define tcg_global_reg_new tcg_global_reg_new_i32 diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h index 242d608e6d..9288a04946 100644 --- a/tcg/tcg-opc.h +++ b/tcg/tcg-opc.h @@ -198,6 +198,9 @@ DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END) DEF(goto_ptr, 0, 1, 0, TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr)) +DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT) +DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT) + DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1, TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS) DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1, diff --git a/tcg/tcg.c b/tcg/tcg.c index 16b2d0e0ec..5475d49ed1 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -736,6 +736,15 @@ void tcg_region_init(void) #endif } +static void alloc_tcg_plugin_context(TCGContext *s) +{ +#ifdef CONFIG_PLUGIN + s->plugin_tb = g_new0(struct qemu_plugin_tb, 1); + s->plugin_tb->insns = + g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn); +#endif +} + /* * All TCG threads except the parent (i.e.
the one that called tcg_context_init * and registered the target's TCG globals) must register with this function @@ -780,6 +789,10 @@ void tcg_register_thread(void) g_assert(n < ms->smp.max_cpus); atomic_set(&tcg_ctxs[n], s); + if (n > 0) { + alloc_tcg_plugin_context(s); + } + tcg_ctx = s; qemu_mutex_lock(&region.lock); err = tcg_region_initial_alloc__locked(tcg_ctx); @@ -976,6 +989,8 @@ void tcg_context_init(TCGContext *s) indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i]; } + alloc_tcg_plugin_context(s); + tcg_ctx = s; /* * In user-mode we simply share the init context among threads, since we @@ -1681,6 +1696,13 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args) flags = info->flags; sizemask = info->sizemask; +#ifdef CONFIG_PLUGIN + /* detect non-plugin helpers */ + if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) { + tcg_ctx->plugin_insn->calls_helpers = true; + } +#endif + #if defined(__sparc__) && !defined(__arch64__) \ && !defined(CONFIG_TCG_INTERPRETER) /* We have 64-bit values in one register, but need to pass as two diff --git a/tcg/tcg.h b/tcg/tcg.h index a37181c899..d234b4c9a5 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -538,6 +538,9 @@ typedef struct TCGOp { /* Next and previous opcodes. */ QTAILQ_ENTRY(TCGOp) link; +#ifdef CONFIG_PLUGIN + QSIMPLEQ_ENTRY(TCGOp) plugin_link; +#endif /* Arguments for the opcode. */ TCGArg args[MAX_OPC_PARAM]; @@ -639,6 +642,23 @@ struct TCGContext { TCGLabel *exitreq_label; +#ifdef CONFIG_PLUGIN + /* + * We keep one plugin_tb struct per TCGContext. Note that on every TB + * translation we clear but do not free its contents; this way we + * avoid a lot of malloc/free churn, since after a few TB's it's + * unlikely that we'll need to allocate either more instructions or more + * space for instructions (for variable-instruction-length ISAs). + */ + struct qemu_plugin_tb *plugin_tb; + + /* descriptor of the instruction being translated */ + struct qemu_plugin_insn *plugin_insn; + + /* list to quickly access the injected ops */ + QSIMPLEQ_HEAD(, TCGOp) plugin_ops; +#endif + TCGTempSet free_temps[TCG_TYPE_COUNT * 2]; TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */ -- cgit v1.2.3 From e6d86bed50d20101c565e149c33e07a5cc764c72 Mon Sep 17 00:00:00 2001 From: "Emilio G. Cota" Date: Sun, 21 Oct 2018 13:24:26 -0400 Subject: tcg: let plugins instrument virtual memory accesses MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To capture all memory accesses we need to hook into all the various helper functions that are involved in memory operations as well as the injected inline helper calls. A later commit will allow us to resolve the actual guest HW addresses by replaying the lookup. Signed-off-by: Emilio G. Cota [AJB: drop haddr handling, just deal in vaddr] Signed-off-by: Alex Bennée Reviewed-by: Richard Henderson --- tcg/tcg-op.c | 40 +++++++++++++++++++++++++++++++--------- tcg/tcg.h | 1 + 2 files changed, 32 insertions(+), 9 deletions(-) (limited to 'tcg') diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c index 1388bd344d..c245126f98 100644 --- a/tcg/tcg-op.c +++ b/tcg/tcg-op.c @@ -30,6 +30,7 @@ #include "tcg-mo.h" #include "trace-tcg.h" #include "trace/mem.h" +#include "exec/plugin-gen.h" /* Reduce the number of ifdefs below.
This assumes that all uses of TCGV_HIGH and TCGV_LOW are properly protected by a conditional that @@ -2684,6 +2685,7 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx) tcg_debug_assert(idx == TB_EXIT_REQUESTED); } + plugin_gen_disable_mem_helpers(); tcg_gen_op1i(INDEX_op_exit_tb, val); } @@ -2696,6 +2698,7 @@ void tcg_gen_goto_tb(unsigned idx) tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0); tcg_ctx->goto_tb_issue_mask |= 1 << idx; #endif + plugin_gen_disable_mem_helpers(); /* When not chaining, we simply fall through to the "fallback" exit. */ if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { tcg_gen_op1i(INDEX_op_goto_tb, idx); @@ -2705,7 +2708,10 @@ void tcg_gen_goto_tb(unsigned idx) void tcg_gen_lookup_and_goto_ptr(void) { if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) { - TCGv_ptr ptr = tcg_temp_new_ptr(); + TCGv_ptr ptr; + + plugin_gen_disable_mem_helpers(); + ptr = tcg_temp_new_ptr(); gen_helper_lookup_tb_ptr(ptr, cpu_env); tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr)); tcg_temp_free_ptr(ptr); @@ -2788,14 +2794,24 @@ static void tcg_gen_req_mo(TCGBar type) } } +static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info) +{ +#ifdef CONFIG_PLUGIN + if (tcg_ctx->plugin_insn == NULL) { + return; + } + plugin_gen_empty_mem_callback(vaddr, info); +#endif +} + void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; + uint16_t info = trace_mem_get_info(memop, idx, 0); tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 0, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 0)); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2807,6 +2823,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2828,11 +2845,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i32 swap = NULL; + uint16_t info = trace_mem_get_info(memop, idx, 1); tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 0, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 1)); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i32(); @@ -2852,6 +2869,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if (swap) { tcg_temp_free_i32(swap); @@ -2861,6 +2879,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { MemOp orig_memop; + uint16_t info; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop); @@ -2874,8 +2893,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD); memop = tcg_canonicalize_memop(memop, 1, 0); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 0)); + info = 
trace_mem_get_info(memop, idx, 0); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); orig_memop = memop; if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { @@ -2887,6 +2906,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if ((orig_memop ^ memop) & MO_BSWAP) { switch (orig_memop & MO_SIZE) { @@ -2914,6 +2934,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) { TCGv_i64 swap = NULL; + uint16_t info; if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) { tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop); @@ -2922,8 +2943,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST); memop = tcg_canonicalize_memop(memop, 1, 1); - trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, - addr, trace_mem_get_info(memop, idx, 1)); + info = trace_mem_get_info(memop, idx, 1); + trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info); if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) { swap = tcg_temp_new_i64(); @@ -2947,6 +2968,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop) } gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx); + plugin_gen_mem_callbacks(addr, info); if (swap) { tcg_temp_free_i64(swap); diff --git a/tcg/tcg.h b/tcg/tcg.h index d234b4c9a5..a38659ea5b 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -29,6 +29,7 @@ #include "exec/memop.h" #include "exec/tb-context.h" #include "qemu/bitops.h" +#include "qemu/plugin.h" #include "qemu/queue.h" #include "tcg-mo.h" #include "tcg-target.h" -- cgit v1.2.3 From 7dec71d5ff06493184352cec5ec6332889128e3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alex=20Benn=C3=A9e?= Date: Mon, 21 Oct 2019 16:09:10 +0100 Subject: cputlb: ensure _cmmu helper functions follow the naming standard MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We document this in docs/devel/load-stores.rst so let's follow it. The 32-bit and 64-bit access functions have historically not included the sign, so we leave those as is. We also introduce some signed helpers which are used for loading immediate values in the translator.
Fixes: 282dffc8 Signed-off-by: Alex Bennée Message-Id: <20191021150910.23216-1-alex.bennee@linaro.org> Reviewed-by: Richard Henderson --- tcg/tcg.h | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) (limited to 'tcg') diff --git a/tcg/tcg.h b/tcg/tcg.h index a38659ea5b..92ca10dffc 100644 --- a/tcg/tcg.h +++ b/tcg/tcg.h @@ -1290,16 +1290,22 @@ void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, TCGMemOpIdx oi, uintptr_t retaddr); -uint8_t helper_ret_ldb_cmmu(CPUArchState *env, target_ulong addr, +uint8_t helper_ret_ldub_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); -uint16_t helper_le_ldw_cmmu(CPUArchState *env, target_ulong addr, +int8_t helper_ret_ldsb_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); +uint16_t helper_le_lduw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +int16_t helper_le_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); uint32_t helper_le_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); uint64_t helper_le_ldq_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); -uint16_t helper_be_ldw_cmmu(CPUArchState *env, target_ulong addr, - TCGMemOpIdx oi, uintptr_t retaddr); +uint16_t helper_be_lduw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); +int16_t helper_be_ldsw_cmmu(CPUArchState *env, target_ulong addr, + TCGMemOpIdx oi, uintptr_t retaddr); uint32_t helper_be_ldl_cmmu(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi, uintptr_t retaddr); uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, @@ -1316,7 +1322,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, # define helper_ret_stw_mmu helper_be_stw_mmu # define helper_ret_stl_mmu helper_be_stl_mmu # define helper_ret_stq_mmu helper_be_stq_mmu -# define helper_ret_ldw_cmmu helper_be_ldw_cmmu +# define helper_ret_lduw_cmmu helper_be_lduw_cmmu +# define helper_ret_ldsw_cmmu helper_be_ldsw_cmmu # define helper_ret_ldl_cmmu helper_be_ldl_cmmu # define helper_ret_ldq_cmmu helper_be_ldq_cmmu #else @@ -1329,7 +1336,8 @@ uint64_t helper_be_ldq_cmmu(CPUArchState *env, target_ulong addr, # define helper_ret_stw_mmu helper_le_stw_mmu # define helper_ret_stl_mmu helper_le_stl_mmu # define helper_ret_stq_mmu helper_le_stq_mmu -# define helper_ret_ldw_cmmu helper_le_ldw_cmmu +# define helper_ret_lduw_cmmu helper_le_lduw_cmmu +# define helper_ret_ldsw_cmmu helper_le_ldsw_cmmu # define helper_ret_ldl_cmmu helper_le_ldl_cmmu # define helper_ret_ldq_cmmu helper_le_ldq_cmmu #endif -- cgit v1.2.3
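For readers following the plugin work above, here is a minimal, hypothetical sketch of a plugin that consumes this instrumentation: the per-instruction inline "add immediate to u64" op and a direct per-access memory callback fed by the vaddr/info pair that plugin_gen_mem_callbacks() injects. The qemu-plugin.h entry points used below (qemu_plugin_register_vcpu_tb_trans_cb, qemu_plugin_register_vcpu_insn_exec_inline, qemu_plugin_register_vcpu_mem_cb, etc.) come from other patches in this series rather than the tcg/ hunks shown here, so treat the exact names and signatures as assumptions, not as part of these diffs.

    /* hypothetical counting plugin -- illustrative sketch, not part of these patches */
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <qemu-plugin.h>

    QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

    static uint64_t insn_count;   /* bumped by the inlined TCG op, no helper call */
    static uint64_t mem_count;    /* bumped from the direct memory callback */

    static void vcpu_mem_cb(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *udata)
    {
        mem_count++;              /* one hit per guest load/store, vaddr available */
    }

    static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    {
        size_t i, n = qemu_plugin_tb_n_insns(tb);

        for (i = 0; i < n; i++) {
            struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

            /* inline op: add an immediate (1) to a u64 counter */
            qemu_plugin_register_vcpu_insn_exec_inline(
                insn, QEMU_PLUGIN_INLINE_ADD_U64, &insn_count, 1);

            /* direct callback for every load/store issued by this instruction */
            qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
                                             QEMU_PLUGIN_CB_NO_REGS,
                                             QEMU_PLUGIN_MEM_RW, NULL);
        }
    }

    static void plugin_exit(qemu_plugin_id_t id, void *p)
    {
        fprintf(stderr, "insns: %" PRIu64 " mem accesses: %" PRIu64 "\n",
                insn_count, mem_count);
    }

    QEMU_PLUGIN_EXPORT
    int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                            int argc, char **argv)
    {
        qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
        qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
        return 0;
    }

Built as a shared object and loaded with QEMU's -plugin option, this would exercise both paths described in the plugin-gen commit message: the instruction counter is incremented by the inlined op with no helper call at run time, while each guest load or store reaches vcpu_mem_cb with its virtual address.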