Diffstat (limited to 'tcg')
-rw-r--r--   tcg/tcg-op.c    40
-rw-r--r--   tcg/tcg-op.h    16
-rw-r--r--   tcg/tcg-opc.h    3
-rw-r--r--   tcg/tcg.c       22
-rw-r--r--   tcg/tcg.h       21
5 files changed, 93 insertions, 9 deletions
diff --git a/tcg/tcg-op.c b/tcg/tcg-op.c
index e87c327fbf..c245126f98 100644
--- a/tcg/tcg-op.c
+++ b/tcg/tcg-op.c
@@ -30,6 +30,7 @@
 #include "tcg-mo.h"
 #include "trace-tcg.h"
 #include "trace/mem.h"
+#include "exec/plugin-gen.h"

 /* Reduce the number of ifdefs below.  This assumes that all uses of
    TCGV_HIGH and TCGV_LOW are properly protected by a conditional that
@@ -2684,6 +2685,7 @@ void tcg_gen_exit_tb(TranslationBlock *tb, unsigned idx)
         tcg_debug_assert(idx == TB_EXIT_REQUESTED);
     }

+    plugin_gen_disable_mem_helpers();
     tcg_gen_op1i(INDEX_op_exit_tb, val);
 }

@@ -2696,6 +2698,7 @@ void tcg_gen_goto_tb(unsigned idx)
     tcg_debug_assert((tcg_ctx->goto_tb_issue_mask & (1 << idx)) == 0);
     tcg_ctx->goto_tb_issue_mask |= 1 << idx;
 #endif
+    plugin_gen_disable_mem_helpers();
     /* When not chaining, we simply fall through to the "fallback" exit.  */
     if (!qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
         tcg_gen_op1i(INDEX_op_goto_tb, idx);
@@ -2705,7 +2708,10 @@ void tcg_gen_goto_tb(unsigned idx)
 void tcg_gen_lookup_and_goto_ptr(void)
 {
     if (TCG_TARGET_HAS_goto_ptr && !qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
-        TCGv_ptr ptr = tcg_temp_new_ptr();
+        TCGv_ptr ptr;
+
+        plugin_gen_disable_mem_helpers();
+        ptr = tcg_temp_new_ptr();
         gen_helper_lookup_tb_ptr(ptr, cpu_env);
         tcg_gen_op1i(INDEX_op_goto_ptr, tcgv_ptr_arg(ptr));
         tcg_temp_free_ptr(ptr);
@@ -2788,14 +2794,24 @@ static void tcg_gen_req_mo(TCGBar type)
     }
 }

+static inline void plugin_gen_mem_callbacks(TCGv vaddr, uint16_t info)
+{
+#ifdef CONFIG_PLUGIN
+    if (tcg_ctx->plugin_insn == NULL) {
+        return;
+    }
+    plugin_gen_empty_mem_callback(vaddr, info);
+#endif
+}
+
 void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     MemOp orig_memop;
+    uint16_t info = trace_mem_get_info(memop, idx, 0);

     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 0, 0);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
-                               addr, trace_mem_get_info(memop, 0));
+    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);

     orig_memop = memop;
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2807,6 +2823,7 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
     }

     gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
+    plugin_gen_mem_callbacks(addr, info);

     if ((orig_memop ^ memop) & MO_BSWAP) {
         switch (orig_memop & MO_SIZE) {
@@ -2828,11 +2845,11 @@ void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     TCGv_i32 swap = NULL;
+    uint16_t info = trace_mem_get_info(memop, idx, 1);

     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 0, 1);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
-                               addr, trace_mem_get_info(memop, 1));
+    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);

     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
         swap = tcg_temp_new_i32();
@@ -2852,6 +2869,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
     }

     gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
+    plugin_gen_mem_callbacks(addr, info);

     if (swap) {
         tcg_temp_free_i32(swap);
@@ -2861,6 +2879,7 @@ void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     MemOp orig_memop;
+    uint16_t info;

     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2874,8 +2893,8 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
     memop = tcg_canonicalize_memop(memop, 1, 0);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
-                               addr, trace_mem_get_info(memop, 0));
+    info = trace_mem_get_info(memop, idx, 0);
+    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);

     orig_memop = memop;
     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
@@ -2887,6 +2906,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
     }

     gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
+    plugin_gen_mem_callbacks(addr, info);

     if ((orig_memop ^ memop) & MO_BSWAP) {
         switch (orig_memop & MO_SIZE) {
@@ -2914,6 +2934,7 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
 {
     TCGv_i64 swap = NULL;
+    uint16_t info;

     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
         tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
@@ -2922,8 +2943,8 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)

     tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
     memop = tcg_canonicalize_memop(memop, 1, 1);
-    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
-                               addr, trace_mem_get_info(memop, 1));
+    info = trace_mem_get_info(memop, idx, 1);
+    trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env, addr, info);

     if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
         swap = tcg_temp_new_i64();
@@ -2947,6 +2968,7 @@ void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, MemOp memop)
     }

     gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
+    plugin_gen_mem_callbacks(addr, info);

     if (swap) {
         tcg_temp_free_i64(swap);
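Note that the load/store expanders above only reserve an *empty* callback slot via plugin_gen_empty_mem_callback(); it is up to a loaded plugin to fill it in or leave it to be discarded. For context, here is what a minimal consumer of these hooks looks like from the plugin side. This is a hedged sketch against the public qemu-plugin.h API introduced by the same series; qemu_plugin_register_vcpu_mem_cb() and friends are from that API, not from this diff:

#include <inttypes.h>
#include <stdio.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static uint64_t loads, stores; /* unsynchronized; fine for a sketch */

/* runs for every guest access instrumented by the hooks in this file */
static void vcpu_mem(unsigned int cpu_index, qemu_plugin_meminfo_t info,
                     uint64_t vaddr, void *udata)
{
    if (qemu_plugin_mem_is_store(info)) {
        stores++;
    } else {
        loads++;
    }
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t i, n = qemu_plugin_tb_n_insns(tb);

    for (i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);

        /* each registration lands in a slot reserved by
           plugin_gen_empty_mem_callback() above */
        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         QEMU_PLUGIN_MEM_RW, NULL);
    }
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    char buf[64];

    snprintf(buf, sizeof(buf), "loads: %" PRIu64 ", stores: %" PRIu64 "\n",
             loads, stores);
    qemu_plugin_outs(buf);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

Built as a shared object and loaded with -plugin, every guest load/store emitted through tcg_gen_qemu_ld/st_* then reaches vcpu_mem().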
diff --git a/tcg/tcg-op.h b/tcg/tcg-op.h
index e9cf172762..4af272daa5 100644
--- a/tcg/tcg-op.h
+++ b/tcg/tcg-op.h
@@ -833,6 +833,17 @@ void tcg_gen_goto_tb(unsigned idx);
  */
 void tcg_gen_lookup_and_goto_ptr(void);

+static inline void tcg_gen_plugin_cb_start(unsigned from, unsigned type,
+                                           unsigned wr)
+{
+    tcg_gen_op3(INDEX_op_plugin_cb_start, from, type, wr);
+}
+
+static inline void tcg_gen_plugin_cb_end(void)
+{
+    tcg_emit_op(INDEX_op_plugin_cb_end);
+}
+
 #if TARGET_LONG_BITS == 32
 #define tcg_temp_new() tcg_temp_new_i32()
 #define tcg_global_reg_new tcg_global_reg_new_i32
@@ -1249,6 +1260,11 @@ static inline void tcg_gen_ld_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
     glue(tcg_gen_ld_,PTR)((NAT)r, a, o);
 }

+static inline void tcg_gen_st_ptr(TCGv_ptr r, TCGv_ptr a, intptr_t o)
+{
+    glue(tcg_gen_st_, PTR)((NAT)r, a, o);
+}
+
 static inline void tcg_gen_discard_ptr(TCGv_ptr a)
 {
     glue(tcg_gen_discard_,PTR)((NAT)a);
diff --git a/tcg/tcg-opc.h b/tcg/tcg-opc.h
index 242d608e6d..9288a04946 100644
--- a/tcg/tcg-opc.h
+++ b/tcg/tcg-opc.h
@@ -198,6 +198,9 @@ DEF(goto_tb, 0, 0, 1, TCG_OPF_BB_EXIT | TCG_OPF_BB_END)
 DEF(goto_ptr, 0, 1, 0,
     TCG_OPF_BB_EXIT | TCG_OPF_BB_END | IMPL(TCG_TARGET_HAS_goto_ptr))

+DEF(plugin_cb_start, 0, 0, 3, TCG_OPF_NOT_PRESENT)
+DEF(plugin_cb_end, 0, 0, 0, TCG_OPF_NOT_PRESENT)
+
 DEF(qemu_ld_i32, 1, TLADDR_ARGS, 1,
     TCG_OPF_CALL_CLOBBER | TCG_OPF_SIDE_EFFECTS)
 DEF(qemu_st_i32, 0, TLADDR_ARGS + 1, 1,
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -736,6 +736,15 @@ void tcg_region_init(void)
 #endif
 }

+static void alloc_tcg_plugin_context(TCGContext *s)
+{
+#ifdef CONFIG_PLUGIN
+    s->plugin_tb = g_new0(struct qemu_plugin_tb, 1);
+    s->plugin_tb->insns =
+        g_ptr_array_new_with_free_func(qemu_plugin_insn_cleanup_fn);
+#endif
+}
+
 /*
  * All TCG threads except the parent (i.e. the one that called tcg_context_init
  * and registered the target's TCG globals) must register with this function
@@ -780,6 +789,10 @@ void tcg_register_thread(void)
     g_assert(n < ms->smp.max_cpus);
     atomic_set(&tcg_ctxs[n], s);

+    if (n > 0) {
+        alloc_tcg_plugin_context(s);
+    }
+
     tcg_ctx = s;
     qemu_mutex_lock(&region.lock);
     err = tcg_region_initial_alloc__locked(tcg_ctx);
@@ -976,6 +989,8 @@ void tcg_context_init(TCGContext *s)
         indirect_reg_alloc_order[i] = tcg_target_reg_alloc_order[i];
     }

+    alloc_tcg_plugin_context(s);
+
     tcg_ctx = s;
     /*
      * In user-mode we simply share the init context among threads, since we
@@ -1681,6 +1696,13 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
     flags = info->flags;
     sizemask = info->sizemask;

+#ifdef CONFIG_PLUGIN
+    /* detect non-plugin helpers */
+    if (tcg_ctx->plugin_insn && unlikely(strncmp(info->name, "plugin_", 7))) {
+        tcg_ctx->plugin_insn->calls_helpers = true;
+    }
+#endif
+
 #if defined(__sparc__) && !defined(__arch64__) \
     && !defined(CONFIG_TCG_INTERPRETER)
     /* We have 64-bit values in one register, but need to pass as two
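The plugin_cb_start/plugin_cb_end opcodes carry TCG_OPF_NOT_PRESENT, so no backend ever has to lower them: they merely bracket injected instrumentation so that a later pass can locate, patch, or delete it as a unit. Roughly, the emitting side is expected to look like the following hedged sketch; the PLUGIN_GEN_FROM_INSN / PLUGIN_GEN_CB_UDATA discriminators and gen_helper_plugin_vcpu_udata_cb() are assumptions modeled on accel/tcg/plugin-gen.c from the same series, not definitions made by this diff:

/* hedged sketch: emit an "empty" per-insn callback, bracketed by markers */
static void gen_empty_insn_cb(void)
{
    TCGv_i32 cpu_index = tcg_temp_new_i32();
    TCGv_ptr udata = tcg_const_ptr(NULL); /* placeholder, patched later */

    /* open a region that a later pass can find and rewrite */
    tcg_gen_plugin_cb_start(PLUGIN_GEN_FROM_INSN, PLUGIN_GEN_CB_UDATA, 0);

    gen_helper_plugin_vcpu_udata_cb(cpu_index, udata); /* stub call */

    /* close the region */
    tcg_gen_plugin_cb_end();

    tcg_temp_free_ptr(udata);
    tcg_temp_free_i32(cpu_index);
}

Note also the strncmp() check in tcg_gen_callN() above: any helper whose name does not start with "plugin_" marks the current instruction as calls_helpers, which is what lets plugin_gen_disable_mem_helpers() at TB exits stay conservative.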
diff --git a/tcg/tcg.h b/tcg/tcg.h
--- a/tcg/tcg.h
+++ b/tcg/tcg.h
@@ -29,6 +29,7 @@
 #include "exec/memop.h"
 #include "exec/tb-context.h"
 #include "qemu/bitops.h"
+#include "qemu/plugin.h"
 #include "qemu/queue.h"
 #include "tcg-mo.h"
 #include "tcg-target.h"
@@ -538,6 +539,9 @@ typedef struct TCGOp {

     /* Next and previous opcodes.  */
     QTAILQ_ENTRY(TCGOp) link;
+#ifdef CONFIG_PLUGIN
+    QSIMPLEQ_ENTRY(TCGOp) plugin_link;
+#endif

     /* Arguments for the opcode.  */
     TCGArg args[MAX_OPC_PARAM];
@@ -639,6 +643,23 @@ struct TCGContext {

     TCGLabel *exitreq_label;

+#ifdef CONFIG_PLUGIN
+    /*
+     * We keep one plugin_tb struct per TCGContext. Note that on every TB
+     * translation we clear but do not free its contents; this way we
+     * avoid a lot of malloc/free churn, since after a few TB's it's
+     * unlikely that we'll need to allocate either more instructions or more
+     * space for instructions (for variable-instruction-length ISAs).
+     */
+    struct qemu_plugin_tb *plugin_tb;
+
+    /* descriptor of the instruction being translated */
+    struct qemu_plugin_insn *plugin_insn;
+
+    /* list to quickly access the injected ops */
+    QSIMPLEQ_HEAD(, TCGOp) plugin_ops;
+#endif
+
     TCGTempSet free_temps[TCG_TYPE_COUNT * 2];
     TCGTemp temps[TCG_MAX_TEMPS]; /* globals first, temps after */
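The per-context plugin_tb/plugin_insn descriptors keep translation-time plugin state lock-free, and the plugin_ops queue, threaded through the new plugin_link field of TCGOp, lets injected ops be revisited without rescanning the whole op stream. A pass can still recover the bracketed regions from the main op list; a hedged sketch, where process_region() stands in for a hypothetical consumer:

/* hedged sketch: locate plugin-injected regions by their marker ops */
static void find_plugin_regions(TCGContext *s)
{
    TCGOp *op, *begin = NULL;

    QTAILQ_FOREACH(op, &s->ops, link) {
        switch (op->opc) {
        case INDEX_op_plugin_cb_start:
            begin = op; /* args[0..2] hold from, type, wr */
            break;
        case INDEX_op_plugin_cb_end:
            process_region(s, begin, op); /* hypothetical consumer */
            begin = NULL;
            break;
        default:
            break;
        }
    }
}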