/*
 * Utility function to get QEMU's own process map
 *
 * Copyright (c) 2020 Linaro Ltd
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */
#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/selfmap.h"
IntervalTreeRoot *read_self_maps(void)
{
    IntervalTreeRoot *root;
    gchar *maps, **lines;
    guint i, nlines;

    if (!g_file_get_contents("/proc/self/maps", &maps, NULL, NULL)) {
        return NULL;
    }

    root = g_new0(IntervalTreeRoot, 1);
    lines = g_strsplit(maps, "\n", 0);
    nlines = g_strv_length(lines);

    for (i = 0; i < nlines; i++) {
        /* Each line: "start-end perms offset dev inode [path]". */
        gchar **fields = g_strsplit(lines[i], " ", 6);
        guint nfields = g_strv_length(fields);

        if (nfields > 4) {
            uint64_t start, end, offset, inode;
            unsigned dev_maj, dev_min;
            int errors = 0;
            const char *p;

            errors |= qemu_strtou64(fields[0], &p, 16, &start);
            errors |= qemu_strtou64(p + 1, NULL, 16, &end);
            errors |= qemu_strtou64(fields[2], NULL, 16, &offset);
            errors |= qemu_strtoui(fields[3], &p, 16, &dev_maj);
            errors |= qemu_strtoui(p + 1, NULL, 16, &dev_min);
            errors |= qemu_strtou64(fields[4], NULL, 10, &inode);

            if (!errors) {
                size_t path_len;
                MapInfo *e;

                if (nfields == 6) {
                    /* Skip the padding between the inode and the path. */
                    p = fields[5];
                    p += strspn(p, " ");
                    path_len = strlen(p) + 1;
                } else {
                    p = NULL;
                    path_len = 0;
                }

                /* Allocate the MapInfo and its path storage in one chunk. */
                e = g_malloc0(sizeof(*e) + path_len);
                e->itree.start = start;
                e->itree.last = end - 1;
                e->offset = offset;
                e->dev = makedev(dev_maj, dev_min);
                e->inode = inode;

                e->is_read  = fields[1][0] == 'r';
                e->is_write = fields[1][1] == 'w';
                e->is_exec  = fields[1][2] == 'x';
                e->is_priv  = fields[1][3] == 'p';

                if (path_len) {
                    e->path = memcpy(e + 1, p, path_len);
                }

                interval_tree_insert(&e->itree, root);
            }
        }
        g_strfreev(fields);
    }
    g_strfreev(lines);
    g_free(maps);

    return root;
}

/**
 * free_self_maps:
 * @root: an interval tree
 *
 * Free a tree of MapInfo structures.
 * Since we allocated each MapInfo in one chunk, we need not consider the
 * contents and can simply free each RBNode.
 */
static void free_rbnode(RBNode *n)
{
    if (n) {
        free_rbnode(n->rb_left);
        free_rbnode(n->rb_right);
        g_free(n);
    }
}

void free_self_maps(IntervalTreeRoot *root)
{
    if (root) {
        free_rbnode(root->rb_root.rb_node);
        g_free(root);
    }
}
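
/*
 * Illustrative usage sketch (not part of the original file): find the
 * mapping that covers a given host address.  This assumes the interval
 * tree iterator interval_tree_iter_first() from "qemu/interval-tree.h"
 * and container_of() from the osdep headers; the address below is a
 * hypothetical example.
 *
 *     IntervalTreeRoot *map = read_self_maps();
 *     uint64_t addr = (uintptr_t)some_pointer;
 *     IntervalTreeNode *n = map ? interval_tree_iter_first(map, addr, addr)
 *                               : NULL;
 *     if (n) {
 *         MapInfo *mi = container_of(n, MapInfo, itree);
 *         printf("%" PRIx64 "-%" PRIx64 " %c%c%c %s\n",
 *                mi->itree.start, mi->itree.last + 1,
 *                mi->is_read ? 'r' : '-',
 *                mi->is_write ? 'w' : '-',
 *                mi->is_exec ? 'x' : '-',
 *                mi->path ? mi->path : "");
 *     }
 *     free_self_maps(map);
 */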