Diffstat (limited to 'accel/tcg/plugin-gen.c')
-rw-r--r-- | accel/tcg/plugin-gen.c | 136
1 file changed, 105 insertions, 31 deletions
diff --git a/accel/tcg/plugin-gen.c b/accel/tcg/plugin-gen.c
index 54b08ffc9e..cc1634e7a6 100644
--- a/accel/tcg/plugin-gen.c
+++ b/accel/tcg/plugin-gen.c
@@ -101,49 +101,111 @@ static void gen_disable_mem_helper(void)
                    offsetof(ArchCPU, env));
 }
 
-static void gen_udata_cb(struct qemu_plugin_dyn_cb *cb)
+static TCGv_i32 gen_cpu_index(void)
 {
     TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
-
     tcg_gen_ld_i32(cpu_index, tcg_env,
                    -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-    tcg_gen_call2(cb->regular.f.vcpu_udata, cb->regular.info, NULL,
+    return cpu_index;
+}
+
+static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
+{
+    TCGv_i32 cpu_index = gen_cpu_index();
+    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                   tcgv_i32_temp(cpu_index),
                   tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
     tcg_temp_free_i32(cpu_index);
 }
 
-static void gen_inline_cb(struct qemu_plugin_dyn_cb *cb)
+static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
 {
-    GArray *arr = cb->inline_insn.entry.score->data;
-    size_t offset = cb->inline_insn.entry.offset;
-    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
-    TCGv_i64 val = tcg_temp_ebb_new_i64();
     TCGv_ptr ptr = tcg_temp_ebb_new_ptr();
 
-    tcg_gen_ld_i32(cpu_index, tcg_env,
-                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-    tcg_gen_muli_i32(cpu_index, cpu_index, g_array_get_element_size(arr));
+    GArray *arr = entry.score->data;
+    char *base_ptr = arr->data + entry.offset;
+    size_t entry_size = g_array_get_element_size(arr);
+
+    TCGv_i32 cpu_index = gen_cpu_index();
+    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
     tcg_gen_ext_i32_ptr(ptr, cpu_index);
     tcg_temp_free_i32(cpu_index);
+    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);
 
-    tcg_gen_addi_ptr(ptr, ptr, (intptr_t)arr->data);
-    tcg_gen_ld_i64(val, ptr, offset);
-    tcg_gen_addi_i64(val, val, cb->inline_insn.imm);
-    tcg_gen_st_i64(val, ptr, offset);
+    return ptr;
+}
+
+static TCGCond plugin_cond_to_tcgcond(enum qemu_plugin_cond cond)
+{
+    switch (cond) {
+    case QEMU_PLUGIN_COND_EQ:
+        return TCG_COND_EQ;
+    case QEMU_PLUGIN_COND_NE:
+        return TCG_COND_NE;
+    case QEMU_PLUGIN_COND_LT:
+        return TCG_COND_LTU;
+    case QEMU_PLUGIN_COND_LE:
+        return TCG_COND_LEU;
+    case QEMU_PLUGIN_COND_GT:
+        return TCG_COND_GTU;
+    case QEMU_PLUGIN_COND_GE:
+        return TCG_COND_GEU;
+    default:
+        /* ALWAYS and NEVER conditions should never reach */
+        g_assert_not_reached();
+    }
+}
+
+static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
+{
+    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
+    TCGv_i64 val = tcg_temp_ebb_new_i64();
+    TCGLabel *after_cb = gen_new_label();
+
+    /* Condition should be negated, as calling the cb is the "else" path */
+    TCGCond cond = tcg_invert_cond(plugin_cond_to_tcgcond(cb->cond));
+
+    tcg_gen_ld_i64(val, ptr, 0);
+    tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
+    TCGv_i32 cpu_index = gen_cpu_index();
+    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
+                  tcgv_i32_temp(cpu_index),
+                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
+    tcg_temp_free_i32(cpu_index);
+    gen_set_label(after_cb);
 
     tcg_temp_free_i64(val);
     tcg_temp_free_ptr(ptr);
 }
 
-static void gen_mem_cb(struct qemu_plugin_dyn_cb *cb,
-                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
+static void gen_inline_add_u64_cb(struct qemu_plugin_inline_cb *cb)
 {
-    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
+    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
+    TCGv_i64 val = tcg_temp_ebb_new_i64();
 
-    tcg_gen_ld_i32(cpu_index, tcg_env,
-                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
-    tcg_gen_call4(cb->regular.f.vcpu_mem, cb->regular.info, NULL,
+    tcg_gen_ld_i64(val, ptr, 0);
+    tcg_gen_addi_i64(val, val, cb->imm);
+    tcg_gen_st_i64(val, ptr, 0);
+
+    tcg_temp_free_i64(val);
+    tcg_temp_free_ptr(ptr);
+}
+
+static void gen_inline_store_u64_cb(struct qemu_plugin_inline_cb *cb)
+{
+    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
+    TCGv_i64 val = tcg_constant_i64(cb->imm);
+
+    tcg_gen_st_i64(val, ptr, 0);
+
+    tcg_temp_free_ptr(ptr);
+}
+
+static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
+                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
+{
+    TCGv_i32 cpu_index = gen_cpu_index();
+    tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
                   tcgv_i32_temp(cpu_index),
                   tcgv_i32_temp(tcg_constant_i32(meminfo)),
                   tcgv_i64_temp(addr),
@@ -156,10 +218,16 @@ static void inject_cb(struct qemu_plugin_dyn_cb *cb)
 {
     switch (cb->type) {
     case PLUGIN_CB_REGULAR:
-        gen_udata_cb(cb);
+        gen_udata_cb(&cb->regular);
+        break;
+    case PLUGIN_CB_COND:
+        gen_udata_cond_cb(&cb->cond);
         break;
-    case PLUGIN_CB_INLINE:
-        gen_inline_cb(cb);
+    case PLUGIN_CB_INLINE_ADD_U64:
+        gen_inline_add_u64_cb(&cb->inline_insn);
+        break;
+    case PLUGIN_CB_INLINE_STORE_U64:
+        gen_inline_store_u64_cb(&cb->inline_insn);
         break;
     default:
         g_assert_not_reached();
@@ -170,15 +238,21 @@ static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                           enum qemu_plugin_mem_rw rw,
                           qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
 {
-    if (cb->rw & rw) {
-        switch (cb->type) {
-        case PLUGIN_CB_MEM_REGULAR:
-            gen_mem_cb(cb, meminfo, addr);
-            break;
-        default:
+    switch (cb->type) {
+    case PLUGIN_CB_MEM_REGULAR:
+        if (rw && cb->regular.rw) {
+            gen_mem_cb(&cb->regular, meminfo, addr);
+        }
+        break;
+    case PLUGIN_CB_INLINE_ADD_U64:
+    case PLUGIN_CB_INLINE_STORE_U64:
+        if (rw && cb->inline_insn.rw) {
             inject_cb(cb);
-            break;
         }
+        break;
+    default:
+        g_assert_not_reached();
+        break;
     }
 }
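
For reference, the three new generators above (inline add, inline store, conditional callback) are driven by the per-vCPU plugin API from the same series. The sketch below is not part of this patch: it is a minimal, hypothetical plugin, written under the assumption that qemu_plugin_register_vcpu_tb_exec_cond_cb and qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu keep the signatures introduced by that series, showing which registrations end up in gen_inline_add_u64_cb(), gen_inline_store_u64_cb() and gen_udata_cond_cb() at translation time.

/*
 * Illustrative plugin sketch, not part of this patch.  Every executed TB
 * bumps a per-vCPU counter with an inline add and marks the vCPU as seen
 * with an inline store; a conditional callback fires only when the counter
 * equals the immediate given at registration.  The cond_cb argument order
 * is an assumption based on the series this diff belongs to.
 */
#include <stdint.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static struct qemu_plugin_scoreboard *tb_count; /* one uint64_t per vCPU */
static struct qemu_plugin_scoreboard *tb_seen;  /* one uint64_t per vCPU */

/* Reached through the code emitted by gen_udata_cond_cb() */
static void on_threshold(unsigned int vcpu_index, void *udata)
{
    qemu_plugin_outs("vcpu reached the TB execution threshold\n");
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    qemu_plugin_u64 count = qemu_plugin_scoreboard_u64(tb_count);
    qemu_plugin_u64 seen = qemu_plugin_scoreboard_u64(tb_seen);

    /* count[vcpu] += 1 on each TB execution -> gen_inline_add_u64_cb() */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, count, 1);

    /* seen[vcpu] = 1 on each TB execution -> gen_inline_store_u64_cb() */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_STORE_U64, seen, 1);

    /*
     * Helper call only when count[vcpu] == 1000; otherwise the branch
     * emitted in gen_udata_cond_cb() skips over the call entirely.
     */
    qemu_plugin_register_vcpu_tb_exec_cond_cb(
        tb, on_threshold, QEMU_PLUGIN_CB_NO_REGS,
        QEMU_PLUGIN_COND_EQ, count, 1000, NULL);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    tb_count = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    tb_seen = qemu_plugin_scoreboard_new(sizeof(uint64_t));
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    return 0;
}

Registered this way, the two inline operations never leave generated code (no helper call), while the conditional callback costs one load and one branch per TB execution and only traps into on_threshold() when the scoreboard value matches the immediate.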