author    Richard Henderson <richard.henderson@linaro.org>    2024-05-16 10:02:56 +0200
committer Richard Henderson <richard.henderson@linaro.org>    2024-05-16 10:02:56 +0200
commit    85ef20f1673feaa083f4acab8cf054df77b0dbed (patch)
tree      430167c8c1652a05c1aaad5300e19de2b8e0dd8f /tests/plugin/inline.c
parent    922582ace2df59572a671f5c0c5c6c5c706995e5 (diff)
parent    09afe9677e6aeb7629eeeab5abccc17f67cb4875 (diff)
Merge tag 'pull-maintainer-may24-160524-2' of https://gitlab.com/stsquad/qemu into staging
plugin and testing updates

- don't duplicate options for microbit test
- don't spam the linux source tree when importing headers
- add STORE_U64 inline op to TCG plugins
- add conditional callback op to TCG plugins

# -----BEGIN PGP SIGNATURE-----
#
# iQEzBAABCgAdFiEEZoWumedRZ7yvyN81+9DbCVqeKkQFAmZFvCMACgkQ+9DbCVqe
# KkSrYQf/aj9+eCWCKZk3Hym0lT+qNKxUeNSx3juUN8h7iG1vkA1f/XaQle5XvKDr
# ROIdo8urcr8onJ4PBH+4C7VZhUmnpL8zLH80pCuuTkF03MCNhaW/5qJ67niWmPVM
# QJHVqNomkykKOMBh+WtD5M0m/BYPT5lsa10sE3bDH8ziGjp0An2v24R89tzYEXnf
# 1QePItQN5vzEvhrZj6oKWVmeucqLsqS6yqS8V3sEpmF0+zqNjGZlrI86A4SAp74k
# 8vuduVuRbeyki7zWBTOLUeoiuHM2Zmh7v74zm/Hc1ITBaDjWMwPctcI/vFjsrCI/
# yoFRhgrV87DtIZdkrJzk5qBYFOWoeQ==
# =znN0
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 16 May 2024 09:56:19 AM CEST
# gpg:                using RSA key 6685AE99E75167BCAFC8DF35FBD0DB095A9E2A44
# gpg: Good signature from "Alex Bennée (Master Work Key) <alex.bennee@linaro.org>" [full]

* tag 'pull-maintainer-may24-160524-2' of https://gitlab.com/stsquad/qemu:
  plugins: remove op from qemu_plugin_inline_cb
  plugins: extract cpu_index generate
  plugins: distinct types for callbacks
  tests/plugin/inline: add test for conditional callback
  plugins: conditional callbacks
  tests/plugin/inline: add test for STORE_U64 inline op
  plugins: add new inline op STORE_U64
  plugins: extract generate ptr for qemu_plugin_u64
  plugins: prepare introduction of new inline ops
  scripts/update-linux-header.sh: be more src tree friendly
  tests/tcg: don't append QEMU_OPTS for armv6m-undef test

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
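The diff below exercises the two new plugin facilities through every callback site of the test. For orientation, here is a minimal standalone sketch (not part of this series) of how the new STORE_U64 inline op and conditional callback fit together for the TB case only; the names Counts, every, tb_count, trigger_count and last_tb are illustrative, while the qemu_plugin_* calls and their argument order are taken from the test as changed below.

/*
 * Minimal sketch, assuming the plugin API as used by tests/plugin/inline.c:
 * count TB executions inline, record the last executed TB pointer with
 * STORE_U64, and fire a callback only every 'every' executions via a
 * COND_EQ conditional callback that resets the counter.
 */
#include <stdint.h>
#include <glib.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

typedef struct {
    uint64_t tb_count;
    uint64_t trigger_count;
    uint64_t last_tb;
} Counts;

static struct qemu_plugin_scoreboard *score;
static qemu_plugin_u64 tb_count, trigger_count, last_tb;
static const uint64_t every = 100;

/* Only runs when tb_count == every: reset the counter, record the trigger. */
static void tb_cond_exec(unsigned int cpu_index, void *udata)
{
    g_assert(qemu_plugin_u64_get(tb_count, cpu_index) == every);
    qemu_plugin_u64_set(tb_count, cpu_index, 0);
    qemu_plugin_u64_add(trigger_count, cpu_index, 1);
}

static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* STORE_U64: write the TB pointer into the per-vcpu slot at exec time. */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_STORE_U64, last_tb, (uintptr_t) tb);
    /* ADD_U64: per-vcpu counter incremented inline, no callback cost. */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_count, 1);
    /* Conditional callback: invoked only when tb_count equals 'every'. */
    qemu_plugin_register_vcpu_tb_exec_cond_cb(
        tb, tb_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
        QEMU_PLUGIN_COND_EQ, tb_count, every, NULL);
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    score = qemu_plugin_scoreboard_new(sizeof(Counts));
    tb_count = qemu_plugin_scoreboard_u64_in_struct(score, Counts, tb_count);
    trigger_count =
        qemu_plugin_scoreboard_u64_in_struct(score, Counts, trigger_count);
    last_tb = qemu_plugin_scoreboard_u64_in_struct(score, Counts, last_tb);
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
    return 0;
}

The test below applies the same pattern to TB, instruction and memory callbacks, and additionally cross-checks in plugin_exit() that trigger count * limit + remainder matches the plain and inline per-vcpu counters.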
Diffstat (limited to 'tests/plugin/inline.c')
-rw-r--r--  tests/plugin/inline.c  130
1 file changed, 123 insertions(+), 7 deletions(-)
diff --git a/tests/plugin/inline.c b/tests/plugin/inline.c
index 0163e9b51c..cd63827b7d 100644
--- a/tests/plugin/inline.c
+++ b/tests/plugin/inline.c
@@ -20,8 +20,20 @@ typedef struct {
uint64_t count_insn_inline;
uint64_t count_mem;
uint64_t count_mem_inline;
+ uint64_t tb_cond_num_trigger;
+ uint64_t tb_cond_track_count;
+ uint64_t insn_cond_num_trigger;
+ uint64_t insn_cond_track_count;
} CPUCount;
+static const uint64_t cond_trigger_limit = 100;
+
+typedef struct {
+ uint64_t data_insn;
+ uint64_t data_tb;
+ uint64_t data_mem;
+} CPUData;
+
static struct qemu_plugin_scoreboard *counts;
static qemu_plugin_u64 count_tb;
static qemu_plugin_u64 count_tb_inline;
@@ -29,6 +41,14 @@ static qemu_plugin_u64 count_insn;
static qemu_plugin_u64 count_insn_inline;
static qemu_plugin_u64 count_mem;
static qemu_plugin_u64 count_mem_inline;
+static qemu_plugin_u64 tb_cond_num_trigger;
+static qemu_plugin_u64 tb_cond_track_count;
+static qemu_plugin_u64 insn_cond_num_trigger;
+static qemu_plugin_u64 insn_cond_track_count;
+static struct qemu_plugin_scoreboard *data;
+static qemu_plugin_u64 data_insn;
+static qemu_plugin_u64 data_tb;
+static qemu_plugin_u64 data_mem;
static uint64_t global_count_tb;
static uint64_t global_count_insn;
@@ -46,12 +66,19 @@ static void stats_insn(void)
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_insn);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_insn_inline);
+ const uint64_t cond_num_trigger =
+ qemu_plugin_u64_sum(insn_cond_num_trigger);
+ const uint64_t cond_track_left = qemu_plugin_u64_sum(insn_cond_track_count);
+ const uint64_t conditional =
+ cond_num_trigger * cond_trigger_limit + cond_track_left;
printf("insn: %" PRIu64 "\n", expected);
printf("insn: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("insn: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ printf("insn: %" PRIu64 " (cond cb)\n", conditional);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
+ g_assert(conditional == expected);
}
static void stats_tb(void)
@@ -60,12 +87,18 @@ static void stats_tb(void)
const uint64_t per_vcpu = qemu_plugin_u64_sum(count_tb);
const uint64_t inl_per_vcpu =
qemu_plugin_u64_sum(count_tb_inline);
+ const uint64_t cond_num_trigger = qemu_plugin_u64_sum(tb_cond_num_trigger);
+ const uint64_t cond_track_left = qemu_plugin_u64_sum(tb_cond_track_count);
+ const uint64_t conditional =
+ cond_num_trigger * cond_trigger_limit + cond_track_left;
printf("tb: %" PRIu64 "\n", expected);
printf("tb: %" PRIu64 " (per vcpu)\n", per_vcpu);
printf("tb: %" PRIu64 " (per vcpu inline)\n", inl_per_vcpu);
+ printf("tb: %" PRIu64 " (conditional cb)\n", conditional);
g_assert(expected > 0);
g_assert(per_vcpu == expected);
g_assert(inl_per_vcpu == expected);
+ g_assert(conditional == expected);
}
static void stats_mem(void)
@@ -94,14 +127,35 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata)
const uint64_t insn_inline = qemu_plugin_u64_get(count_insn_inline, i);
const uint64_t mem = qemu_plugin_u64_get(count_mem, i);
const uint64_t mem_inline = qemu_plugin_u64_get(count_mem_inline, i);
- printf("cpu %d: tb (%" PRIu64 ", %" PRIu64 ") | "
- "insn (%" PRIu64 ", %" PRIu64 ") | "
+ const uint64_t tb_cond_trigger =
+ qemu_plugin_u64_get(tb_cond_num_trigger, i);
+ const uint64_t tb_cond_left =
+ qemu_plugin_u64_get(tb_cond_track_count, i);
+ const uint64_t insn_cond_trigger =
+ qemu_plugin_u64_get(insn_cond_num_trigger, i);
+ const uint64_t insn_cond_left =
+ qemu_plugin_u64_get(insn_cond_track_count, i);
+ printf("cpu %d: tb (%" PRIu64 ", %" PRIu64
+ ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+ ") | "
+ "insn (%" PRIu64 ", %" PRIu64
+ ", %" PRIu64 " * %" PRIu64 " + %" PRIu64
+ ") | "
"mem (%" PRIu64 ", %" PRIu64 ")"
"\n",
- i, tb, tb_inline, insn, insn_inline, mem, mem_inline);
+ i,
+ tb, tb_inline,
+ tb_cond_trigger, cond_trigger_limit, tb_cond_left,
+ insn, insn_inline,
+ insn_cond_trigger, cond_trigger_limit, insn_cond_left,
+ mem, mem_inline);
g_assert(tb == tb_inline);
g_assert(insn == insn_inline);
g_assert(mem == mem_inline);
+ g_assert(tb_cond_trigger == tb / cond_trigger_limit);
+ g_assert(tb_cond_left == tb % cond_trigger_limit);
+ g_assert(insn_cond_trigger == insn / cond_trigger_limit);
+ g_assert(insn_cond_left == insn % cond_trigger_limit);
}
stats_tb();
@@ -109,20 +163,41 @@ static void plugin_exit(qemu_plugin_id_t id, void *udata)
stats_mem();
qemu_plugin_scoreboard_free(counts);
+ qemu_plugin_scoreboard_free(data);
}
static void vcpu_tb_exec(unsigned int cpu_index, void *udata)
{
qemu_plugin_u64_add(count_tb, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
g_mutex_lock(&tb_lock);
max_cpu_index = MAX(max_cpu_index, cpu_index);
global_count_tb++;
g_mutex_unlock(&tb_lock);
}
+static void vcpu_tb_cond_exec(unsigned int cpu_index, void *udata)
+{
+ g_assert(qemu_plugin_u64_get(tb_cond_track_count, cpu_index) ==
+ cond_trigger_limit);
+ g_assert(qemu_plugin_u64_get(data_tb, cpu_index) == (uintptr_t) udata);
+ qemu_plugin_u64_set(tb_cond_track_count, cpu_index, 0);
+ qemu_plugin_u64_add(tb_cond_num_trigger, cpu_index, 1);
+}
+
+static void vcpu_insn_cond_exec(unsigned int cpu_index, void *udata)
+{
+ g_assert(qemu_plugin_u64_get(insn_cond_track_count, cpu_index) ==
+ cond_trigger_limit);
+ g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
+ qemu_plugin_u64_set(insn_cond_track_count, cpu_index, 0);
+ qemu_plugin_u64_add(insn_cond_num_trigger, cpu_index, 1);
+}
+
static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
{
qemu_plugin_u64_add(count_insn, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_insn, cpu_index) == (uintptr_t) udata);
g_mutex_lock(&insn_lock);
global_count_insn++;
g_mutex_unlock(&insn_lock);
@@ -131,9 +206,10 @@ static void vcpu_insn_exec(unsigned int cpu_index, void *udata)
static void vcpu_mem_access(unsigned int cpu_index,
qemu_plugin_meminfo_t info,
uint64_t vaddr,
- void *userdata)
+ void *udata)
{
qemu_plugin_u64_add(count_mem, cpu_index, 1);
+ g_assert(qemu_plugin_u64_get(data_mem, cpu_index) == (uintptr_t) udata);
g_mutex_lock(&mem_lock);
global_count_mem++;
g_mutex_unlock(&mem_lock);
@@ -141,20 +217,47 @@ static void vcpu_mem_access(unsigned int cpu_index,
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
+ void *tb_store = tb;
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_STORE_U64, data_tb, (uintptr_t) tb_store);
qemu_plugin_register_vcpu_tb_exec_cb(
- tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, 0);
+ tb, vcpu_tb_exec, QEMU_PLUGIN_CB_NO_REGS, tb_store);
qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
tb, QEMU_PLUGIN_INLINE_ADD_U64, count_tb_inline, 1);
+ qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+ tb, QEMU_PLUGIN_INLINE_ADD_U64, tb_cond_track_count, 1);
+ qemu_plugin_register_vcpu_tb_exec_cond_cb(
+ tb, vcpu_tb_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_EQ, tb_cond_track_count, cond_trigger_limit, tb_store);
+
for (int idx = 0; idx < qemu_plugin_tb_n_insns(tb); ++idx) {
struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, idx);
+ void *insn_store = insn;
+ void *mem_store = (char *)insn_store + 0xff;
+
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_STORE_U64, data_insn,
+ (uintptr_t) insn_store);
qemu_plugin_register_vcpu_insn_exec_cb(
- insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, 0);
+ insn, vcpu_insn_exec, QEMU_PLUGIN_CB_NO_REGS, insn_store);
qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
insn, QEMU_PLUGIN_INLINE_ADD_U64, count_insn_inline, 1);
+
+ qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+ insn, QEMU_PLUGIN_INLINE_ADD_U64, insn_cond_track_count, 1);
+ qemu_plugin_register_vcpu_insn_exec_cond_cb(
+ insn, vcpu_insn_cond_exec, QEMU_PLUGIN_CB_NO_REGS,
+ QEMU_PLUGIN_COND_EQ, insn_cond_track_count, cond_trigger_limit,
+ insn_store);
+
+ qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+ insn, QEMU_PLUGIN_MEM_RW,
+ QEMU_PLUGIN_INLINE_STORE_U64,
+ data_mem, (uintptr_t) mem_store);
qemu_plugin_register_vcpu_mem_cb(insn, &vcpu_mem_access,
QEMU_PLUGIN_CB_NO_REGS,
- QEMU_PLUGIN_MEM_RW, 0);
+ QEMU_PLUGIN_MEM_RW, mem_store);
qemu_plugin_register_vcpu_mem_inline_per_vcpu(
insn, QEMU_PLUGIN_MEM_RW,
QEMU_PLUGIN_INLINE_ADD_U64,
@@ -179,6 +282,19 @@ int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
counts, CPUCount, count_insn_inline);
count_mem_inline = qemu_plugin_scoreboard_u64_in_struct(
counts, CPUCount, count_mem_inline);
+ tb_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, tb_cond_num_trigger);
+ tb_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, tb_cond_track_count);
+ insn_cond_num_trigger = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, insn_cond_num_trigger);
+ insn_cond_track_count = qemu_plugin_scoreboard_u64_in_struct(
+ counts, CPUCount, insn_cond_track_count);
+ data = qemu_plugin_scoreboard_new(sizeof(CPUData));
+ data_insn = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_insn);
+ data_tb = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_tb);
+ data_mem = qemu_plugin_scoreboard_u64_in_struct(data, CPUData, data_mem);
+
qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);