/*
 * QEMU Plugin Core code
 *
 * This is the core code that deals with injecting instrumentation into
 * the translated code.
 *
 * Copyright (C) 2017, Emilio G. Cota <cota@braap.org>
 * Copyright (C) 2019, Linaro
 *
 * License: GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qemu/config-file.h"
#include "qapi/error.h"
#include "qemu/lockable.h"
#include "qemu/option.h"
#include "qemu/rcu_queue.h"
#include "qemu/xxhash.h"
#include "qemu/rcu.h"
#include "hw/core/cpu.h"
#include "exec/cpu-common.h"
#include "exec/exec-all.h"
#include "exec/tb-flush.h"
#include "exec/helper-proto.h"
#include "tcg/tcg.h"
#include "tcg/tcg-op.h"
#include "plugin.h"
#include "qemu/compiler.h"

/* A single callback that a plugin has registered for one event type. */
struct qemu_plugin_cb {
    struct qemu_plugin_ctx *ctx;       /* plugin that owns the callback */
    union qemu_plugin_cb_sig f;        /* the callback itself */
    void *udata;                       /* opaque pointer handed back to it */
    QLIST_ENTRY(qemu_plugin_cb) entry;
};

/* Global plugin state; registration changes are serialized by plugin.lock. */
struct qemu_plugin_state plugin;

struct qemu_plugin_ctx *plugin_id_to_ctx_locked(qemu_plugin_id_t id)
{
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_id_t *id_p;

    id_p = g_hash_table_lookup(plugin.id_ht, &id);
    ctx = container_of(id_p, struct qemu_plugin_ctx, id);
    if (ctx == NULL) {
        error_report("plugin: invalid plugin id %" PRIu64, id);
        abort();
    }
    return ctx;
}

/*
 * Copy the current global event mask into the vCPU's private mask and
 * flush its cached translations so instrumentation is re-evaluated.
 */
static void plugin_cpu_update__async(CPUState *cpu, run_on_cpu_data data)
{
    bitmap_copy(cpu->plugin_mask, &data.host_ulong, QEMU_PLUGIN_EV_MAX);
    tcg_flush_jmp_cache(cpu);
}

static void plugin_cpu_update__locked(gpointer k, gpointer v, gpointer udata)
{
    CPUState *cpu = container_of(k, CPUState, cpu_index);
    run_on_cpu_data mask = RUN_ON_CPU_HOST_ULONG(*plugin.mask);

    if (cpu->created) {
        async_run_on_cpu(cpu, plugin_cpu_update__async, mask);
    } else {
        plugin_cpu_update__async(cpu, mask);
    }
}

/*
 * Drop @ctx's callback for @ev. If that leaves the event's list empty,
 * clear the global event bit and resync every vCPU's mask.
 */
void plugin_unregister_cb__locked(struct qemu_plugin_ctx *ctx,
                                  enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb = ctx->callbacks[ev];

    if (cb == NULL) {
        return;
    }
    QLIST_REMOVE_RCU(cb, entry);
    g_free(cb);
    ctx->callbacks[ev] = NULL;
    if (QLIST_EMPTY_RCU(&plugin.cb_lists[ev])) {
        clear_bit(ev, plugin.mask);
        g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked, NULL);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_vcpu_cb__simple(CPUState *cpu, enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_VCPU_INIT:
    case QEMU_PLUGIN_EV_VCPU_EXIT:
    case QEMU_PLUGIN_EV_VCPU_IDLE:
    case QEMU_PLUGIN_EV_VCPU_RESUME:
        /* iterate safely; plugins might uninstall themselves at any time */
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_vcpu_simple_cb_t func = cb->f.vcpu_simple;

            func(cb->ctx->id, cpu->cpu_index);
        }
        break;
    default:
        g_assert_not_reached();
    }
}
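
/*
 * The events above reach a plugin only after it has registered for them
 * through the public API, e.g. (hypothetical plugin code, not part of
 * this file):
 *
 *   qemu_plugin_register_vcpu_init_cb(id, vcpu_init_cb);
 *   qemu_plugin_register_vcpu_idle_cb(id, vcpu_idle_cb);
 */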

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__simple(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_FLUSH:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_simple_cb_t func = cb->f.simple;

            func(cb->ctx->id);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
static void plugin_cb__udata(enum qemu_plugin_event ev)
{
    struct qemu_plugin_cb *cb, *next;

    switch (ev) {
    case QEMU_PLUGIN_EV_ATEXIT:
        QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
            qemu_plugin_udata_cb_t func = cb->f.udata;

            func(cb->ctx->id, cb->udata);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Install, replace or (when @func is NULL) remove a plugin's callback
 * for event @ev. Setting a previously clear event bit also resyncs
 * every vCPU's event mask.
 */
static void
do_plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                      void *func, void *udata)
{
    struct qemu_plugin_ctx *ctx;

    QEMU_LOCK_GUARD(&plugin.lock);
    ctx = plugin_id_to_ctx_locked(id);
    /* if the plugin is on its way out, ignore this request */
    if (unlikely(ctx->uninstalling)) {
        return;
    }
    if (func) {
        struct qemu_plugin_cb *cb = ctx->callbacks[ev];

        if (cb) {
            cb->f.generic = func;
            cb->udata = udata;
        } else {
            cb = g_new(struct qemu_plugin_cb, 1);
            cb->ctx = ctx;
            cb->f.generic = func;
            cb->udata = udata;
            ctx->callbacks[ev] = cb;
            QLIST_INSERT_HEAD_RCU(&plugin.cb_lists[ev], cb, entry);
            if (!test_bit(ev, plugin.mask)) {
                set_bit(ev, plugin.mask);
                g_hash_table_foreach(plugin.cpu_ht, plugin_cpu_update__locked,
                                     NULL);
            }
        }
    } else {
        plugin_unregister_cb__locked(ctx, ev);
    }
}
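
/*
 * Seen from the plugin API: registering a callback installs it, and
 * registering NULL for the same event removes it again (hypothetical
 * plugin code, not part of this file):
 *
 *   qemu_plugin_register_vcpu_idle_cb(id, my_idle_cb);
 *   qemu_plugin_register_vcpu_idle_cb(id, NULL);
 */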

void plugin_register_cb(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                        void *func)
{
    do_plugin_register_cb(id, ev, func, NULL);
}

void
plugin_register_cb_udata(qemu_plugin_id_t id, enum qemu_plugin_event ev,
                         void *func, void *udata)
{
    do_plugin_register_cb(id, ev, func, udata);
}

/*
 * Called when a vCPU comes up: publish the current event mask to it,
 * track it in the CPU hash table, then fire the VCPU_INIT callbacks.
 */
void qemu_plugin_vcpu_init_hook(CPUState *cpu)
{
    bool success;

    qemu_rec_mutex_lock(&plugin.lock);
    plugin_cpu_update__locked(&cpu->cpu_index, NULL, NULL);
    success = g_hash_table_insert(plugin.cpu_ht, &cpu->cpu_index,
                                  &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_INIT);
}

void qemu_plugin_vcpu_exit_hook(CPUState *cpu)
{
    bool success;

    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_EXIT);

    qemu_rec_mutex_lock(&plugin.lock);
    success = g_hash_table_remove(plugin.cpu_ht, &cpu->cpu_index);
    g_assert(success);
    qemu_rec_mutex_unlock(&plugin.lock);
}

struct plugin_for_each_args {
    struct qemu_plugin_ctx *ctx;
    qemu_plugin_vcpu_simple_cb_t cb;
};

static void plugin_vcpu_for_each(gpointer k, gpointer v, gpointer udata)
{
    struct plugin_for_each_args *args = udata;
    int cpu_index = *(int *)k;

    args->cb(args->ctx->id, cpu_index);
}

void qemu_plugin_vcpu_for_each(qemu_plugin_id_t id,
                               qemu_plugin_vcpu_simple_cb_t cb)
{
    struct plugin_for_each_args args;

    if (cb == NULL) {
        return;
    }
    qemu_rec_mutex_lock(&plugin.lock);
    args.ctx = plugin_id_to_ctx_locked(id);
    args.cb = cb;
    g_hash_table_foreach(plugin.cpu_ht, plugin_vcpu_for_each, &args);
    qemu_rec_mutex_unlock(&plugin.lock);
}
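
/*
 * Example (hypothetical plugin code): visit every vCPU that currently
 * exists, e.g. from the plugin's install function:
 *
 *   static void on_vcpu(qemu_plugin_id_t id, unsigned int vcpu_index)
 *   {
 *       ...
 *   }
 *
 *   qemu_plugin_vcpu_for_each(id, on_vcpu);
 */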

/* Allocate and return a callback record */
static struct qemu_plugin_dyn_cb *plugin_get_dyn_cb(GArray **arr)
{
    GArray *cbs = *arr;

    if (!cbs) {
        cbs = g_array_sized_new(false, false,
                                sizeof(struct qemu_plugin_dyn_cb), 1);
        *arr = cbs;
    }

    g_array_set_size(cbs, cbs->len + 1);
    return &g_array_index(cbs, struct qemu_plugin_dyn_cb, cbs->len - 1);
}

void plugin_register_inline_op(GArray **arr,
                               enum qemu_plugin_mem_rw rw,
                               enum qemu_plugin_op op, void *ptr,
                               uint64_t imm)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = ptr;
    dyn_cb->type = PLUGIN_CB_INLINE;
    dyn_cb->rw = rw;
    dyn_cb->inline_insn.op = op;
    dyn_cb->inline_insn.imm = imm;
}
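
/*
 * For reference, a plugin requests an inline op through the public API,
 * e.g. bumping a counter on every executed instruction without the cost
 * of a helper call (hypothetical plugin code; "count" is plugin state):
 *
 *   static uint64_t count;
 *
 *   qemu_plugin_register_vcpu_insn_exec_inline(insn,
 *       QEMU_PLUGIN_INLINE_ADD_U64, &count, 1);
 */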

void plugin_register_dyn_cb__udata(GArray **arr,
                                   qemu_plugin_vcpu_udata_cb_t cb,
                                   enum qemu_plugin_cb_flags flags,
                                   void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb = plugin_get_dyn_cb(arr);

    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->f.vcpu_udata = cb;
    dyn_cb->type = PLUGIN_CB_REGULAR;
}
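
/*
 * This is how e.g. per-instruction execution callbacks registered via
 * the public API end up in the dynamic callback array (hypothetical
 * plugin code):
 *
 *   qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec_cb,
 *                                          QEMU_PLUGIN_CB_NO_REGS, udata);
 */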

void plugin_register_vcpu_mem_cb(GArray **arr,
                                 void *cb,
                                 enum qemu_plugin_cb_flags flags,
                                 enum qemu_plugin_mem_rw rw,
                                 void *udata)
{
    struct qemu_plugin_dyn_cb *dyn_cb;

    dyn_cb = plugin_get_dyn_cb(arr);
    dyn_cb->userp = udata;
    /* Note flags are discarded as unused. */
    dyn_cb->type = PLUGIN_CB_REGULAR;
    dyn_cb->rw = rw;
    dyn_cb->f.generic = cb;
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_tb_trans_cb(CPUState *cpu, struct qemu_plugin_tb *tb)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_TB_TRANS;

    /* no plugin_mask check here; caller should have checked */

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_tb_trans_cb_t func = cb->f.vcpu_tb_trans;

        func(cb->ctx->id, tb);
    }
}
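
/*
 * A plugin hooks translation and instruments each instruction in the
 * block (hypothetical plugin code):
 *
 *   static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
 *   {
 *       size_t n = qemu_plugin_tb_n_insns(tb);
 *
 *       for (size_t i = 0; i < n; i++) {
 *           struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
 *           ...
 *       }
 *   }
 *
 *   qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
 */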

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void
qemu_plugin_vcpu_syscall(CPUState *cpu, int64_t num, uint64_t a1, uint64_t a2,
                         uint64_t a3, uint64_t a4, uint64_t a5,
                         uint64_t a6, uint64_t a7, uint64_t a8)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_cb_t func = cb->f.vcpu_syscall;

        func(cb->ctx->id, cpu->cpu_index, num, a1, a2, a3, a4, a5, a6, a7, a8);
    }
}

/*
 * Disable CFI checks.
 * The callback function has been loaded from an external library so we do not
 * have type information.
 */
QEMU_DISABLE_CFI
void qemu_plugin_vcpu_syscall_ret(CPUState *cpu, int64_t num, int64_t ret)
{
    struct qemu_plugin_cb *cb, *next;
    enum qemu_plugin_event ev = QEMU_PLUGIN_EV_VCPU_SYSCALL_RET;

    if (!test_bit(ev, cpu->plugin_mask)) {
        return;
    }

    QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
        qemu_plugin_vcpu_syscall_ret_cb_t func = cb->f.vcpu_syscall_ret;

        func(cb->ctx->id, cpu->cpu_index, num, ret);
    }
}

void qemu_plugin_vcpu_idle_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_IDLE);
}

void qemu_plugin_vcpu_resume_cb(CPUState *cpu)
{
    plugin_vcpu_cb__simple(cpu, QEMU_PLUGIN_EV_VCPU_RESUME);
}

void qemu_plugin_register_vcpu_idle_cb(qemu_plugin_id_t id,
                                       qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_IDLE, cb);
}

void qemu_plugin_register_vcpu_resume_cb(qemu_plugin_id_t id,
                                         qemu_plugin_vcpu_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_VCPU_RESUME, cb);
}

void qemu_plugin_register_flush_cb(qemu_plugin_id_t id,
                                   qemu_plugin_simple_cb_t cb)
{
    plugin_register_cb(id, QEMU_PLUGIN_EV_FLUSH, cb);
}

static bool free_dyn_cb_arr(void *p, uint32_t h, void *userp)
{
    g_array_free((GArray *) p, true);
    return true;
}

/*
 * Called on TB flush: drop all dynamic callback arrays, then notify
 * plugins that registered a flush callback.
 */
void qemu_plugin_flush_cb(void)
{
    qht_iter_remove(&plugin.dyn_cb_arr_ht, free_dyn_cb_arr, NULL);
    qht_reset(&plugin.dyn_cb_arr_ht);

    plugin_cb__simple(QEMU_PLUGIN_EV_FLUSH);
}

/* Apply an inline operation (currently only ADD_U64) to its target. */
void exec_inline_op(struct qemu_plugin_dyn_cb *cb)
{
    uint64_t *val = cb->userp;

    switch (cb->inline_insn.op) {
    case QEMU_PLUGIN_INLINE_ADD_U64:
        *val += cb->inline_insn.imm;
        break;
    default:
        g_assert_not_reached();
    }
}

void qemu_plugin_vcpu_mem_cb(CPUState *cpu, uint64_t vaddr,
                             MemOpIdx oi, enum qemu_plugin_mem_rw rw)
{
    GArray *arr = cpu->plugin_mem_cbs;
    size_t i;

    if (arr == NULL) {
        return;
    }
    for (i = 0; i < arr->len; i++) {
        struct qemu_plugin_dyn_cb *cb =
            &g_array_index(arr, struct qemu_plugin_dyn_cb, i);

        if (!(rw & cb->rw)) {
            break;
        }
        switch (cb->type) {
        case PLUGIN_CB_REGULAR:
            cb->f.vcpu_mem(cpu->cpu_index, make_plugin_meminfo(oi, rw),
                           vaddr, cb->userp);
            break;
        case PLUGIN_CB_INLINE:
            exec_inline_op(cb);
            break;
        default:
            g_assert_not_reached();
        }
    }
}
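
/*
 * The dispatch above pairs with per-access registration on the plugin
 * side, e.g. (hypothetical plugin code) tracing both loads and stores:
 *
 *   qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_cb,
 *                                    QEMU_PLUGIN_CB_NO_REGS,
 *                                    QEMU_PLUGIN_MEM_RW, udata);
 */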

void qemu_plugin_atexit_cb(void)
{
    plugin_cb__udata(QEMU_PLUGIN_EV_ATEXIT);
}

void qemu_plugin_register_atexit_cb(qemu_plugin_id_t id,
                                    qemu_plugin_udata_cb_t cb,
                                    void *udata)
{
    plugin_register_cb_udata(id, QEMU_PLUGIN_EV_ATEXIT, cb, udata);
}
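
/*
 * Typical use from a plugin (hypothetical plugin code): flush collected
 * statistics when QEMU exits.
 *
 *   static void plugin_exit(qemu_plugin_id_t id, void *p)
 *   {
 *       g_autofree gchar *out = g_strdup_printf("insns: %" PRIu64 "\n",
 *                                               count);
 *       qemu_plugin_outs(out);
 *   }
 *
 *   qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
 */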

/*
 * Handle exit from linux-user. Unlike the normal atexit() mechanism
 * we need to handle the clean-up manually as it's possible threads
 * are still running. We need to remove all callbacks from code
 * generation, flush the current translations and then we can safely
 * trigger the exit callbacks.
 */
void qemu_plugin_user_exit(void)
{
    enum qemu_plugin_event ev;
    CPUState *cpu;

    /*
     * Locking order: we must acquire locks in an order that is consistent
     * with the one in fork_start(). That is:
     * - start_exclusive(), which acquires qemu_cpu_list_lock,
     *   must be called before acquiring plugin.lock.
     * - tb_flush(), which acquires mmap_lock(), must be called
     *   while plugin.lock is not held.
     */
    start_exclusive();

    qemu_rec_mutex_lock(&plugin.lock);
    /* un-register all callbacks except the final AT_EXIT one */
    for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) {
        if (ev != QEMU_PLUGIN_EV_ATEXIT) {
            struct qemu_plugin_cb *cb, *next;

            QLIST_FOREACH_SAFE_RCU(cb, &plugin.cb_lists[ev], entry, next) {
                plugin_unregister_cb__locked(cb->ctx, ev);
            }
        }
    }
    CPU_FOREACH(cpu) {
        qemu_plugin_disable_mem_helpers(cpu);
    }
    qemu_rec_mutex_unlock(&plugin.lock);

    tb_flush(current_cpu);
    end_exclusive();

    /* now it's safe to handle the exit case */
    qemu_plugin_atexit_cb();
}

/*
 * Helpers for *-user to ensure locks are sane across fork() events.
 */
void qemu_plugin_user_prefork_lock(void)
{
    qemu_rec_mutex_lock(&plugin.lock);
}

void qemu_plugin_user_postfork(bool is_child)
{
    if (is_child) {
        /* should we just reset via plugin_init? */
        qemu_rec_mutex_init(&plugin.lock);
    } else {
        qemu_rec_mutex_unlock(&plugin.lock);
    }
}
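
/*
 * The pre/postfork helpers are expected to bracket fork() in *-user
 * mode, roughly (sketch of the call sites, which live in linux-user):
 *
 *   qemu_plugin_user_prefork_lock();
 *   pid = fork();
 *   qemu_plugin_user_postfork(pid == 0);
 */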

/*
 * Call this function after longjmp'ing to the main loop. It's possible that the
 * last instruction of a TB might have used helpers, and therefore the
 * "disable" instruction will never execute because it ended up as dead code.
 */
void qemu_plugin_disable_mem_helpers(CPUState *cpu)
{
    cpu->plugin_mem_cbs = NULL;
}

/* qht comparison function: dynamic callback arrays are compared by identity */
static bool plugin_dyn_cb_arr_cmp(const void *ap, const void *bp)
{
    return ap == bp;
}

/* Set up global plugin state early, before any plugin can be loaded. */
static void __attribute__((__constructor__)) plugin_init(void)
{
    int i;

    for (i = 0; i < QEMU_PLUGIN_EV_MAX; i++) {
        QLIST_INIT(&plugin.cb_lists[i]);
    }
    qemu_rec_mutex_init(&plugin.lock);
    plugin.id_ht = g_hash_table_new(g_int64_hash, g_int64_equal);
    plugin.cpu_ht = g_hash_table_new(g_int_hash, g_int_equal);
    QTAILQ_INIT(&plugin.ctxs);
    qht_init(&plugin.dyn_cb_arr_ht, plugin_dyn_cb_arr_cmp, 16,
             QHT_MODE_AUTO_RESIZE);
    atexit(qemu_plugin_atexit_cb);
}