/*
* Memory region management for Tiny Code Generator for QEMU
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "tcg-internal.h"
struct tcg_region_tree {
QemuMutex lock;
GTree *tree;
/* padding to avoid false sharing is computed at run-time */
};
/*
* We divide code_gen_buffer into equally-sized "regions" that TCG threads
* dynamically allocate from as demand dictates. Given appropriate region
* sizing, this minimizes flushes even when some TCG threads generate a lot
* more code than others.
*/
struct tcg_region_state {
QemuMutex lock;
/* fields set at init time */
void *start_aligned;
void *after_prologue;
size_t n;
size_t size; /* size of one region */
size_t stride; /* .size + guard size */
size_t total_size; /* size of entire buffer, >= n * stride */
/* fields protected by the lock */
size_t current; /* current region index */
size_t agg_size_full; /* aggregate size of full regions */
};
static struct tcg_region_state region;
/*
* This is an array of struct tcg_region_tree's, with padding.
* We use void * to simplify the computation of region_trees[i]; each
* struct is found every tree_size bytes.
*/
static void *region_trees;
static size_t tree_size;
bool in_code_gen_buffer(const void *p)
{
/*
* Much like it is valid to have a pointer to the byte past the
* end of an array (so long as you don't dereference it), allow
* a pointer to the byte past the end of the code gen buffer.
*/
return (size_t)(p - region.start_aligned) <= region.total_size;
}
#ifdef CONFIG_DEBUG_TCG
const void *tcg_splitwx_to_rx(void *rw)
{
/* Pass NULL pointers unchanged. */
if (rw) {
g_assert(in_code_gen_buffer(rw));
rw += tcg_splitwx_diff;
}
return rw;
}
void *tcg_splitwx_to_rw(const void *rx)
{
/* Pass NULL pointers unchanged. */
if (rx) {
rx -= tcg_splitwx_diff;
/* Assert that we end with a pointer in the rw region. */
g_assert(in_code_gen_buffer(rx));
}
return (void *)rx;
}
#endif /* CONFIG_DEBUG_TCG */
/* compare a pointer @ptr and a tb_tc @s */
static int ptr_cmp_tb_tc(const void *ptr, const struct tb_tc *s)
{
if (ptr >= s->ptr + s->size) {
return 1;
} else if (ptr < s->ptr) {
return -1;
}
return 0;
}
static gint tb_tc_cmp(gconstpointer ap, gconstpointer bp)
{
const struct tb_tc *a = ap;
const struct tb_tc *b = bp;
/*
* When both sizes are set, we know this isn't a lookup.
* This is the most likely case: every TB must be inserted; lookups
* are a lot less frequent.
*/
if (likely(a->size && b->size)) {
if (a->ptr > b->ptr) {
return 1;
} else if (a->ptr < b->ptr) {
return -1;
}
/* a->ptr == b->ptr should happen only on deletions */
g_assert(a->size == b->size);
return 0;
}
/*
* All lookups have one of the two .size fields set to 0.
* From the glib sources we see that @ap is always the lookup key. However
* the docs provide no guarantee, so we just mark this case as likely.
*/
if (likely(a->size == 0)) {
return ptr_cmp_tb_tc(a->ptr, b);
}
return ptr_cmp_tb_tc(b->ptr, a);
}
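/*
* Allocate one tcg_region_tree per region, rounding each up to a multiple
* of the host dcache line size so that the per-region locks do not share
* cache lines.
*/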
static void tcg_region_trees_init(void)
{
size_t i;
tree_size = ROUND_UP(sizeof(struct tcg_region_tree), qemu_dcache_linesize);
region_trees = qemu_memalign(qemu_dcache_linesize, region.n * tree_size);
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
qemu_mutex_init(&rt->lock);
rt->tree = g_tree_new(tb_tc_cmp);
}
}
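/*
* Map a host code pointer to the tree of the region that contains it.
* Both rw and rx pointers are accepted; returns NULL if the pointer does
* not fall within the code_gen_buffer at all.
*/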
static struct tcg_region_tree *tc_ptr_to_region_tree(const void *p)
{
size_t region_idx;
/*
* Like tcg_splitwx_to_rw, with no assert. The pc may come from
* a signal handler over which the caller has no control.
*/
if (!in_code_gen_buffer(p)) {
p -= tcg_splitwx_diff;
if (!in_code_gen_buffer(p)) {
return NULL;
}
}
if (p < region.start_aligned) {
region_idx = 0;
} else {
ptrdiff_t offset = p - region.start_aligned;
if (offset > region.stride * (region.n - 1)) {
region_idx = region.n - 1;
} else {
region_idx = offset / region.stride;
}
}
return region_trees + region_idx * tree_size;
}
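/* Record a newly generated TB in the tree of the region that holds it. */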
void tcg_tb_insert(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_insert(rt->tree, &tb->tc, tb);
qemu_mutex_unlock(&rt->lock);
}
void tcg_tb_remove(TranslationBlock *tb)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree(tb->tc.ptr);
g_assert(rt != NULL);
qemu_mutex_lock(&rt->lock);
g_tree_remove(rt->tree, &tb->tc);
qemu_mutex_unlock(&rt->lock);
}
/*
* Find the TB 'tb' such that
* tb->tc.ptr <= tc_ptr < tb->tc.ptr + tb->tc.size
* Return NULL if not found.
*/
TranslationBlock *tcg_tb_lookup(uintptr_t tc_ptr)
{
struct tcg_region_tree *rt = tc_ptr_to_region_tree((void *)tc_ptr);
TranslationBlock *tb;
struct tb_tc s = { .ptr = (void *)tc_ptr };
if (rt == NULL) {
return NULL;
}
qemu_mutex_lock(&rt->lock);
tb = g_tree_lookup(rt->tree, &s);
qemu_mutex_unlock(&rt->lock);
return tb;
}
static void tcg_region_tree_lock_all(void)
{
size_t i;
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
qemu_mutex_lock(&rt->lock);
}
}
static void tcg_region_tree_unlock_all(void)
{
size_t i;
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
qemu_mutex_unlock(&rt->lock);
}
}
void tcg_tb_foreach(GTraverseFunc func, gpointer user_data)
{
size_t i;
tcg_region_tree_lock_all();
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
g_tree_foreach(rt->tree, func, user_data);
}
tcg_region_tree_unlock_all();
}
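/* Return the number of TBs currently registered across all regions. */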
size_t tcg_nb_tbs(void)
{
size_t nb_tbs = 0;
size_t i;
tcg_region_tree_lock_all();
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
nb_tbs += g_tree_nnodes(rt->tree);
}
tcg_region_tree_unlock_all();
return nb_tbs;
}
static gboolean tcg_region_tree_traverse(gpointer k, gpointer v, gpointer data)
{
TranslationBlock *tb = v;
tb_destroy(tb);
return FALSE;
}
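/*
* Destroy all TBs and empty every region tree. Taking a reference before
* g_tree_destroy() removes all nodes while keeping the GTree itself alive
* for reuse.
*/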
static void tcg_region_tree_reset_all(void)
{
size_t i;
tcg_region_tree_lock_all();
for (i = 0; i < region.n; i++) {
struct tcg_region_tree *rt = region_trees + i * tree_size;
g_tree_foreach(rt->tree, tcg_region_tree_traverse, NULL);
/* Increment the refcount first so that destroy acts as a reset */
g_tree_ref(rt->tree);
g_tree_destroy(rt->tree);
}
tcg_region_tree_unlock_all();
}
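/*
* Compute the [start, end) bounds of region @curr_region. Region 0 starts
* after the prologue, and the last region absorbs any pages left over from
* rounding the region size down to a page multiple.
*/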
static void tcg_region_bounds(size_t curr_region, void **pstart, void **pend)
{
void *start, *end;
start = region.start_aligned + curr_region * region.stride;
end = start + region.size;
if (curr_region == 0) {
start = region.after_prologue;
}
/* The final region may have a few extra pages due to earlier rounding. */
if (curr_region == region.n - 1) {
end = region.start_aligned + region.total_size;
}
*pstart = start;
*pend = end;
}
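/* Point TCGContext @s at region @curr_region for code generation. */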
static void tcg_region_assign(TCGContext *s, size_t curr_region)
{
void *start, *end;
tcg_region_bounds(curr_region, &start, &end);
s->code_gen_buffer = start;
s->code_gen_ptr = start;
s->code_gen_buffer_size = end - start;
s->code_gen_highwater = end - TCG_HIGHWATER;
}
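/*
* Hand the next unused region to @s. Returns true if all regions have
* already been handed out. Called with region.lock held.
*/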
static bool tcg_region_alloc__locked(TCGContext *s)
{
if (region.current == region.n) {
return true;
}
tcg_region_assign(s, region.current);
region.current++;
return false;
}
/*
* Request a new region once the one in use has filled up.
* Returns true on error.
*/
bool tcg_region_alloc(TCGContext *s)
{
bool err;
/* read the region size now; alloc__locked will overwrite it on success */
size_t size_full = s->code_gen_buffer_size;
qemu_mutex_lock(&region.lock);
err = tcg_region_alloc__locked(s);
if (!err) {
region.agg_size_full += size_full - TCG_HIGHWATER;
}
qemu_mutex_unlock(&region.lock);
return err;
}
/*
* Perform a context's first region allocation.
* This function does _not_ increment region.agg_size_full.
*/
static void tcg_region_initial_alloc__locked(TCGContext *s)
{
bool err = tcg_region_alloc__locked(s);
g_assert(!err);
}
void tcg_region_initial_alloc(TCGContext *s)
{
qemu_mutex_lock(&region.lock);
tcg_region_initial_alloc__locked(s);
qemu_mutex_unlock(&region.lock);
}
/* Call from a safe-work context */
void tcg_region_reset_all(void)
{
unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
qemu_mutex_lock(&region.lock);
region.current = 0;
region.agg_size_full = 0;
for (i = 0; i < n_ctxs; i++) {
TCGContext *s = qatomic_read(&tcg_ctxs[i]);
tcg_region_initial_alloc__locked(s);
}
qemu_mutex_unlock(&region.lock);
tcg_region_tree_reset_all();
}
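/* Decide how many regions to split code_gen_buffer into. */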
static size_t tcg_n_regions(size_t tb_size, unsigned max_cpus)
{
#ifdef CONFIG_USER_ONLY
return 1;
#else
size_t n_regions;
/*
* It is likely that some vCPUs will translate more code than others,
* so we first try to set more regions than max_cpus, with those regions
* being of reasonable size. If that's not possible we make do by evenly
* dividing the code_gen_buffer among the vCPUs.
*/
/* Use a single region if all we have is one vCPU thread */
if (max_cpus == 1 || !qemu_tcg_mttcg_enabled()) {
return 1;
}
/*
* Try to have more regions than max_cpus, with each region being >= 2 MB.
* If we can't, then just allocate one region per vCPU thread.
*/
n_regions = tb_size / (2 * MiB);
if (n_regions <= max_cpus) {
return max_cpus;
}
return MIN(n_regions, max_cpus * 8);
#endif
}
/*
* Minimum size of the code gen buffer. This number is somewhat arbitrary,
* but not so small that we can't keep a fair number of TBs live.
*
* Maximum size, MAX_CODE_GEN_BUFFER_SIZE, is defined in tcg-target.h.
* Unless otherwise indicated, this is constrained by the range of
* direct branches on the host cpu, as used by the TCG implementation
* of goto_tb.
*/
#define MIN_CODE_GEN_BUFFER_SIZE (1 * MiB)
#if TCG_TARGET_REG_BITS == 32
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32 * MiB)
#ifdef CONFIG_USER_ONLY
/*
* For user mode on smaller 32-bit systems we may run into trouble
* allocating big chunks of data in the right place. On these systems
* we utilise a static code generation buffer directly in the binary.
*/
#define USE_STATIC_CODE_GEN_BUFFER
#endif
#else /* TCG_TARGET_REG_BITS == 64 */
#ifdef CONFIG_USER_ONLY
/*
* As user-mode emulation typically means running multiple instances
* of the translator, don't be too greedy with the default code gen
* buffer size lest we make things too hard for the OS.
*/
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (128 * MiB)
#else
/*
* We expect most system emulation to run one or two guests per host.
* Users running large scale system emulation may want to tweak their
* runtime setup via the tb-size control on the command line.
*/
#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (1 * GiB)
#endif
#endif
#define DEFAULT_CODE_GEN_BUFFER_SIZE \
(DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)
#ifdef __mips__
/*
* In order to use J and JAL within the code_gen_buffer, we require
* that the buffer not cross a 256MB boundary.
*/
static inline bool cross_256mb(void *addr, size_t size)
{
return ((uintptr_t)addr ^ ((uintptr_t)addr + size)) & ~0x0ffffffful;
}
/*
* We weren't able to allocate a buffer without crossing that boundary,
* so make do with the larger portion of the buffer that doesn't cross.
* Returns the new base and size of the buffer in *obuf and *osize.
*/
static inline void split_cross_256mb(void **obuf, size_t *osize,
void *buf1, size_t size1)
{
void *buf2 = (void *)(((uintptr_t)buf1 + size1) & ~0x0ffffffful);
size_t size2 = buf1 + size1 - buf2;
size1 = buf2 - buf1;
if (size1 < size2) {
size1 = size2;
buf1 = buf2;
}
*obuf = buf1;
*osize = size1;
}
#endif
#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
__attribute__((aligned(CODE_GEN_ALIGN)));
static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
{
void *buf, *end;
size_t size;
if (splitwx > 0) {
error_setg(errp, "jit split-wx not supported");
return -1;
}
/* page-align the beginning and end of the buffer */
buf = static_code_gen_buffer;
end = static_code_gen_buffer + sizeof(static_code_gen_buffer);
buf = QEMU_ALIGN_PTR_UP(buf, qemu_real_host_page_size);
end = QEMU_ALIGN_PTR_DOWN(end, qemu_real_host_page_size);
size = end - buf;
/* Honor a command-line option limiting the size of the buffer. */
if (size > tb_size) {
size = QEMU_ALIGN_DOWN(tb_size, qemu_real_host_page_size);
}
#ifdef __mips__
if (cross_256mb(buf, size)) {
split_cross_256mb(&buf, &size, buf, size);
}
#endif
region.start_aligned = buf;
region.total_size = size;
return PROT_READ | PROT_WRITE;
}
#elif defined(_WIN32)
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
void *buf;
if (splitwx > 0) {
error_setg(errp, "jit split-wx not supported");
return -1;
}
buf = VirtualAlloc(NULL, size, MEM_RESERVE | MEM_COMMIT,
PAGE_EXECUTE_READWRITE);
if (buf == NULL) {
error_setg_win32(errp, GetLastError(),
"allocate %zu bytes for jit buffer", size);
return -1;
}
region.start_aligned = buf;
region.total_size = size;
return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}
#else
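/*
* Map the code_gen_buffer with a plain anonymous mmap, working around the
* MIPS 256MB branch-range restriction when necessary. Returns the mmap
* protection used, or -1 on failure.
*/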
static int alloc_code_gen_buffer_anon(size_t size, int prot,
int flags, Error **errp)
{
void *buf;
buf = mmap(NULL, size, prot, flags, -1, 0);
if (buf == MAP_FAILED) {
error_setg_errno(errp, errno,
"allocate %zu bytes for jit buffer", size);
return -1;
}
#ifdef __mips__
if (cross_256mb(buf, size)) {
/*
* Try again, with the original still mapped, to avoid re-acquiring
* the same 256mb crossing.
*/
size_t size2;
void *buf2 = mmap(NULL, size, prot, flags, -1, 0);
switch ((int)(buf2 != MAP_FAILED)) {
case 1:
if (!cross_256mb(buf2, size)) {
/* Success! Use the new buffer. */
munmap(buf, size);
break;
}
/* Failure. Work with what we had. */
munmap(buf2, size);
/* fallthru */
default:
/* Split the original buffer. Free the smaller half. */
split_cross_256mb(&buf2, &size2, buf, size);
if (buf == buf2) {
munmap(buf + size2, size - size2);
} else {
munmap(buf, size - size2);
}
size = size2;
break;
}
buf = buf2;
}
#endif
region.start_aligned = buf;
region.total_size = size;
return prot;
}
#ifndef CONFIG_TCG_INTERPRETER
#ifdef CONFIG_POSIX
#include "qemu/memfd.h"
static int alloc_code_gen_buffer_splitwx_memfd(size_t size, Error **errp)
{
void *buf_rw = NULL, *buf_rx = MAP_FAILED;
int fd = -1;
#ifdef __mips__
/* Find space for the RX mapping, vs the 256MiB regions. */
if (alloc_code_gen_buffer_anon(size, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS |
MAP_NORESERVE, errp) < 0) {
return -1;
}
/* The size of the mapping may have been adjusted. */
buf_rx = region.start_aligned;
size = region.total_size;
#endif
buf_rw = qemu_memfd_alloc("tcg-jit", size, 0, &fd, errp);
if (buf_rw == NULL) {
goto fail;
}
#ifdef __mips__
void *tmp = mmap(buf_rx, size, PROT_READ | PROT_EXEC,
MAP_SHARED | MAP_FIXED, fd, 0);
if (tmp != buf_rx) {
goto fail_rx;
}
#else
buf_rx = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
if (buf_rx == MAP_FAILED) {
goto fail_rx;
}
#endif
close(fd);
region.start_aligned = buf_rw;
region.total_size = size;
tcg_splitwx_diff = buf_rx - buf_rw;
return PROT_READ | PROT_WRITE;
fail_rx:
error_setg_errno(errp, errno, "failed to map shared memory for execute");
fail:
if (buf_rx != MAP_FAILED) {
munmap(buf_rx, size);
}
if (buf_rw) {
munmap(buf_rw, size);
}
if (fd >= 0) {
close(fd);
}
return -1;
}
#endif /* CONFIG_POSIX */
#ifdef CONFIG_DARWIN
#include <mach/mach.h>
extern kern_return_t mach_vm_remap(vm_map_t target_task,
mach_vm_address_t *target_address,
mach_vm_size_t size,
mach_vm_offset_t mask,
int flags,
vm_map_t src_task,
mach_vm_address_t src_address,
boolean_t copy,
vm_prot_t *cur_protection,
vm_prot_t *max_protection,
vm_inherit_t inheritance);
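/*
* Split-wx on Darwin: allocate the read-write buffer anonymously, then use
* mach_vm_remap to alias the same pages at a second address and make that
* alias read-execute.
*/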
static int alloc_code_gen_buffer_splitwx_vmremap(size_t size, Error **errp)
{
kern_return_t ret;
mach_vm_address_t buf_rw, buf_rx;
vm_prot_t cur_prot, max_prot;
/* Map the read-write portion via normal anon memory. */
if (alloc_code_gen_buffer_anon(size, PROT_READ | PROT_WRITE,
MAP_PRIVATE | MAP_ANONYMOUS, errp) < 0) {
return -1;
}
buf_rw = (mach_vm_address_t)region.start_aligned;
buf_rx = 0;
ret = mach_vm_remap(mach_task_self(),
&buf_rx,
size,
0,
VM_FLAGS_ANYWHERE,
mach_task_self(),
buf_rw,
false,
&cur_prot,
&max_prot,
VM_INHERIT_NONE);
if (ret != KERN_SUCCESS) {
/* TODO: Convert "ret" to a human readable error message. */
error_setg(errp, "vm_remap for jit splitwx failed");
munmap((void *)buf_rw, size);
return -1;
}
if (mprotect((void *)buf_rx, size, PROT_READ | PROT_EXEC) != 0) {
error_setg_errno(errp, errno, "mprotect for jit splitwx");
munmap((void *)buf_rx, size);
munmap((void *)buf_rw, size);
return -1;
}
tcg_splitwx_diff = buf_rx - buf_rw;
return PROT_READ | PROT_WRITE;
}
#endif /* CONFIG_DARWIN */
#endif /* CONFIG_TCG_INTERPRETER */
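/* Try the platform-specific split-wx allocators, if any are built in. */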
static int alloc_code_gen_buffer_splitwx(size_t size, Error **errp)
{
#ifndef CONFIG_TCG_INTERPRETER
# ifdef CONFIG_DARWIN
return alloc_code_gen_buffer_splitwx_vmremap(size, errp);
# endif
# ifdef CONFIG_POSIX
return alloc_code_gen_buffer_splitwx_memfd(size, errp);
# endif
#endif
error_setg(errp, "jit split-wx not supported");
return -1;
}
static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
{
ERRP_GUARD();
int prot, flags;
if (splitwx) {
prot = alloc_code_gen_buffer_splitwx(size, errp);
if (prot >= 0) {
return prot;
}
/*
* If splitwx force-on (1), fail;
* if splitwx default-on (-1), fall through to splitwx off.
*/
if (splitwx > 0) {
return -1;
}
error_free_or_abort(errp);
}
prot = PROT_READ | PROT_WRITE | PROT_EXEC;
flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef CONFIG_TCG_INTERPRETER
/* The tcg interpreter does not need execute permission. */
prot = PROT_READ | PROT_WRITE;
#elif defined(CONFIG_DARWIN)
/* Applicable to both iOS and macOS (Apple Silicon). */
if (!splitwx) {
flags |= MAP_JIT;
}
#endif
return alloc_code_gen_buffer_anon(size, prot, flags, errp);
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, WIN32, POSIX */
/*
* Initializes region partitioning.
*
* Called at init time from the parent thread (i.e. the one calling
* tcg_context_init), after the target's TCG globals have been set.
*
* Region partitioning works by splitting code_gen_buffer into separate regions,
* and then assigning regions to TCG threads so that the threads can translate
* code in parallel without synchronization.
*
* In softmmu the number of TCG threads is bounded by max_cpus, so we use at
* least max_cpus regions in MTTCG. In !MTTCG we use a single region.
* Note that the TCG options from the command-line (i.e. -accel accel=tcg,[...])
* must have been parsed before calling this function, since it calls
* qemu_tcg_mttcg_enabled().
*
* In user-mode we use a single region. Having multiple regions in user-mode
* is not supported, because the number of vCPU threads (recall that each thread
* spawned by the guest corresponds to a vCPU thread) is only bounded by the
* OS, and usually this number is huge (tens of thousands is not uncommon).
* Thus, given this large bound on the number of vCPU threads and the fact
* that code_gen_buffer is allocated at compile-time, we cannot guarantee
* the availability of at least one region per vCPU thread.
*
* However, this user-mode limitation is unlikely to be a significant problem
* in practice. Multi-threaded guests share most if not all of their translated
* code, which makes parallel code generation less appealing than in softmmu.
*/
void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
{
const size_t page_size = qemu_real_host_page_size;
size_t region_size;
int have_prot, need_prot;
/* Size the buffer. */
if (tb_size == 0) {
size_t phys_mem = qemu_get_host_physmem();
if (phys_mem == 0) {
tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
} else {
tb_size = QEMU_ALIGN_DOWN(phys_mem / 8, page_size);
tb_size = MIN(DEFAULT_CODE_GEN_BUFFER_SIZE, tb_size);
}
}
if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
tb_size = MIN_CODE_GEN_BUFFER_SIZE;
}
if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
tb_size = MAX_CODE_GEN_BUFFER_SIZE;
}
have_prot = alloc_code_gen_buffer(tb_size, splitwx, &error_fatal);
assert(have_prot >= 0);
/* Request large pages for the buffer and the splitwx. */
qemu_madvise(region.start_aligned, region.total_size, QEMU_MADV_HUGEPAGE);
if (tcg_splitwx_diff) {
qemu_madvise(region.start_aligned + tcg_splitwx_diff,
region.total_size, QEMU_MADV_HUGEPAGE);
}
/*
* Make region_size a multiple of page_size, using region.start_aligned as the start.
* As a result of this we might end up with a few extra pages at the end of
* the buffer; we will assign those to the last region.
*/
region.n = tcg_n_regions(tb_size, max_cpus);
region_size = tb_size / region.n;
region_size = QEMU_ALIGN_DOWN(region_size, page_size);
/* A region must have at least 2 pages; one code, one guard */
g_assert(region_size >= 2 * page_size);
region.stride = region_size;
/* Reserve space for guard pages. */
region.size = region_size - page_size;
region.total_size -= page_size;
/*
* The first region will be smaller than the others, because the prologue,
* which has yet to be allocated, is carved out of its start. For now, the
* first region begins at the aligned start of the buffer.
*/
region.after_prologue = region.start_aligned;
/* init the region struct */
qemu_mutex_init(&region.lock);
/*
* Set guard pages in the rw buffer, as that's the one into which
* buffer overruns could occur. Do not set guard pages in the rx
* buffer -- let that one use hugepages throughout.
* Work with the page protections set up with the initial mapping.
*/
need_prot = PAGE_READ | PAGE_WRITE;
#ifndef CONFIG_TCG_INTERPRETER
if (tcg_splitwx_diff == 0) {
need_prot |= PAGE_EXEC;
}
#endif
for (size_t i = 0, n = region.n; i < n; i++) {
void *start, *end;
tcg_region_bounds(i, &start, &end);
if (have_prot != need_prot) {
int rc;
if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
rc = qemu_mprotect_rwx(start, end - start);
} else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
rc = qemu_mprotect_rw(start, end - start);
} else {
g_assert_not_reached();
}
if (rc) {
error_setg_errno(&error_fatal, errno,
"mprotect of jit buffer");
}
}
if (have_prot != 0) {
/*
* macOS 11.2 has a bug (Apple Feedback FB8994773) in which mprotect
* rejects a permission change from RWX -> NONE. Guard pages are
* nice for bug detection but are not essential; ignore any failure.
*/
(void)qemu_mprotect_none(end, page_size);
}
}
tcg_region_trees_init();
/*
* Leave the initial context initialized to the first region.
* This will be the context into which we generate the prologue.
* It is also the only context for CONFIG_USER_ONLY.
*/
tcg_region_initial_alloc__locked(&tcg_init_ctx);
}
void tcg_region_prologue_set(TCGContext *s)
{
/* Deduct the prologue from the first region. */
g_assert(region.start_aligned == s->code_gen_buffer);
region.after_prologue = s->code_ptr;
/* Recompute boundaries of the first region. */
tcg_region_assign(s, 0);
/* Register the balance of the buffer with gdb. */
tcg_register_jit(tcg_splitwx_to_rx(region.after_prologue),
region.start_aligned + region.total_size -
region.after_prologue);
}
/*
* Returns the size (in bytes) of all translated code (i.e. from all regions)
* currently in the cache.
* See also: tcg_code_capacity()
* Do not confuse with tcg_current_code_size(); that one applies to a single
* TCG context.
*/
size_t tcg_code_size(void)
{
unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
size_t total;
qemu_mutex_lock(&region.lock);
total = region.agg_size_full;
for (i = 0; i < n_ctxs; i++) {
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
size_t size;
size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
g_assert(size <= s->code_gen_buffer_size);
total += size;
}
qemu_mutex_unlock(&region.lock);
return total;
}
/*
* Returns the code capacity (in bytes) of the entire cache, i.e. including all
* regions.
* See also: tcg_code_size()
*/
size_t tcg_code_capacity(void)
{
size_t guard_size, capacity;
/* no need for synchronization; these variables are set at init time */
guard_size = region.stride - region.size;
capacity = region.total_size;
capacity -= (region.n - 1) * guard_size;
capacity -= region.n * TCG_HIGHWATER;
return capacity;
}
size_t tcg_tb_phys_invalidate_count(void)
{
unsigned int n_ctxs = qatomic_read(&tcg_cur_ctxs);
unsigned int i;
size_t total = 0;
for (i = 0; i < n_ctxs; i++) {
const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
total += qatomic_read(&s->tb_phys_invalidate_count);
}
return total;
}