path: root/linux-user/syscall.c
author    Richard Henderson <richard.henderson@linaro.org> 2023-08-08 20:02:19 -0700
committer Richard Henderson <richard.henderson@linaro.org> 2023-09-01 13:34:16 -0700
commit    7b7a3366e142d3baeb3fd1d3660a50e7956c19eb (patch)
tree      630fee27ea298e5b305da9116bf424eb5aba2992 /linux-user/syscall.c
parent    79be812bdb6d476b35b0a0a9cda5432673b1f5f3 (diff)
linux-user: Use walk_memory_regions for open_self_maps
Replace the by-hand method of region identification with the official
user-exec interface. Cross-check the region provided to the callback
with the interval tree from read_self_maps().

Tested-by: Helge Deller <deller@gmx.de>
Reviewed-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
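For context, the patch drives the output through QEMU's user-exec walker,
walk_memory_regions(opaque, callback), which invokes the callback once per
contiguously-flagged guest range, and cross-checks each range against the
MapInfo interval tree returned by read_self_maps(), querying it with
interval_tree_iter_first() and splitting the guest range wherever it crosses
a host-mapping boundary (see open_self_maps_2 below). The stand-alone C
sketch that follows only illustrates that splitting idea; host_frag,
frag_for, emit_region, the flag bits and the hard-coded fragment table are
hypothetical stand-ins invented for the sketch, not QEMU APIs.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the host /proc/self/maps fragments; in the
     * patch these are MapInfo nodes in the tree from read_self_maps(). */
    typedef struct { uint64_t start, last; const char *path; } host_frag;

    static const host_frag host_maps[] = {
        { 0x1000, 0x3fff, "/lib/libc.so" },   /* file-backed fragment */
        { 0x4000, 0x7fff, NULL },             /* anonymous fragment */
    };

    /* Find the fragment containing addr; stands in for
     * interval_tree_iter_first(d->host_maps, addr, addr) in the patch. */
    static const host_frag *frag_for(uint64_t addr)
    {
        for (size_t i = 0; i < sizeof(host_maps) / sizeof(host_maps[0]); i++) {
            if (addr >= host_maps[i].start && addr <= host_maps[i].last) {
                return &host_maps[i];
            }
        }
        return NULL;
    }

    /* Callback in the shape used with walk_memory_regions(): split
     * [start, end) wherever it crosses a fragment boundary and emit one
     * maps-style line per piece.  The flag bits (1 = read, 2 = write) are
     * made up for this sketch. */
    static int emit_region(void *opaque, uint64_t start, uint64_t end,
                           unsigned flags)
    {
        FILE *out = opaque;

        while (start < end) {
            const host_frag *f = frag_for(start);
            uint64_t piece_end = (f && f->last + 1 < end) ? f->last + 1 : end;

            fprintf(out, "%08" PRIx64 "-%08" PRIx64 " %c%c %s\n",
                    start, piece_end,
                    (flags & 1) ? 'r' : '-',
                    (flags & 2) ? 'w' : '-',
                    (f && f->path) ? f->path : "");
            start = piece_end;
        }
        return 0;   /* non-zero would abort the walk, as with walk_memory_regions() */
    }

    int main(void)
    {
        /* One guest range spanning both fragments comes out as two lines,
         * which is what open_self_maps_2() does against the real tree. */
        return emit_region(stdout, 0x1000, 0x8000, 1 | 2);
    }

Built with any C99 compiler, the sketch prints the 0x1000-0x4000 and
0x4000-0x8000 pieces. The real callback additionally adjusts the file offset
per fragment and, when read_self_maps() fails, falls back to a single
anonymous MapInfo via open_self_maps_3().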
Diffstat (limited to 'linux-user/syscall.c')
-rw-r--r--  linux-user/syscall.c | 190
1 file changed, 114 insertions(+), 76 deletions(-)
diff --git a/linux-user/syscall.c b/linux-user/syscall.c
index a562920a84..0b91f996b7 100644
--- a/linux-user/syscall.c
+++ b/linux-user/syscall.c
@@ -8095,12 +8095,66 @@ static int open_self_cmdline(CPUArchState *cpu_env, int fd)
return 0;
}
-static void show_smaps(int fd, unsigned long size)
+struct open_self_maps_data {
+ TaskState *ts;
+ IntervalTreeRoot *host_maps;
+ int fd;
+ bool smaps;
+};
+
+/*
+ * Subroutine to output one line of /proc/self/maps,
+ * or one region of /proc/self/smaps.
+ */
+
+#ifdef TARGET_HPPA
+# define test_stack(S, E, L) (E == L)
+#else
+# define test_stack(S, E, L) (S == L)
+#endif
+
+static void open_self_maps_4(const struct open_self_maps_data *d,
+ const MapInfo *mi, abi_ptr start,
+ abi_ptr end, unsigned flags)
{
- unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
- unsigned long size_kb = size >> 10;
+ const struct image_info *info = d->ts->info;
+ const char *path = mi->path;
+ uint64_t offset;
+ int fd = d->fd;
+ int count;
+
+ if (test_stack(start, end, info->stack_limit)) {
+ path = "[stack]";
+ }
+
+ /* Except null device (MAP_ANON), adjust offset for this fragment. */
+ offset = mi->offset;
+ if (mi->dev) {
+ uintptr_t hstart = (uintptr_t)g2h_untagged(start);
+ offset += hstart - mi->itree.start;
+ }
+
+ count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
+ " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
+ start, end,
+ (flags & PAGE_READ) ? 'r' : '-',
+ (flags & PAGE_WRITE_ORG) ? 'w' : '-',
+ (flags & PAGE_EXEC) ? 'x' : '-',
+ mi->is_priv ? 'p' : 's',
+ offset, major(mi->dev), minor(mi->dev),
+ (uint64_t)mi->inode);
+ if (path) {
+ dprintf(fd, "%*s%s\n", 73 - count, "", path);
+ } else {
+ dprintf(fd, "\n");
+ }
- dprintf(fd, "Size: %lu kB\n"
+ if (d->smaps) {
+ unsigned long size = end - start;
+ unsigned long page_size_kb = TARGET_PAGE_SIZE >> 10;
+ unsigned long size_kb = size >> 10;
+
+ dprintf(fd, "Size: %lu kB\n"
"KernelPageSize: %lu kB\n"
"MMUPageSize: %lu kB\n"
"Rss: 0 kB\n"
@@ -8121,91 +8175,75 @@ static void show_smaps(int fd, unsigned long size)
"Swap: 0 kB\n"
"SwapPss: 0 kB\n"
"Locked: 0 kB\n"
- "THPeligible: 0\n", size_kb, page_size_kb, page_size_kb);
+ "THPeligible: 0\n"
+ "VmFlags:%s%s%s%s%s%s%s%s\n",
+ size_kb, page_size_kb, page_size_kb,
+ (flags & PAGE_READ) ? " rd" : "",
+ (flags & PAGE_WRITE_ORG) ? " wr" : "",
+ (flags & PAGE_EXEC) ? " ex" : "",
+ mi->is_priv ? "" : " sh",
+ (flags & PAGE_READ) ? " mr" : "",
+ (flags & PAGE_WRITE_ORG) ? " mw" : "",
+ (flags & PAGE_EXEC) ? " me" : "",
+ mi->is_priv ? "" : " ms");
+ }
}
-static int open_self_maps_1(CPUArchState *cpu_env, int fd, bool smaps)
+/*
+ * Callback for walk_memory_regions, when read_self_maps() fails.
+ * Proceed without the benefit of host /proc/self/maps cross-check.
+ */
+static int open_self_maps_3(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
{
- CPUState *cpu = env_cpu(cpu_env);
- TaskState *ts = cpu->opaque;
- IntervalTreeRoot *map_info = read_self_maps();
- IntervalTreeNode *s;
- int count;
+ static const MapInfo mi = { .is_priv = true };
- for (s = interval_tree_iter_first(map_info, 0, -1); s;
- s = interval_tree_iter_next(s, 0, -1)) {
- MapInfo *e = container_of(s, MapInfo, itree);
+ open_self_maps_4(opaque, &mi, guest_start, guest_end, flags);
+ return 0;
+}
- if (h2g_valid(e->itree.start)) {
- unsigned long min = e->itree.start;
- unsigned long max = e->itree.last + 1;
- int flags = page_get_flags(h2g(min));
- const char *path;
+/*
+ * Callback for walk_memory_regions, when read_self_maps() succeeds.
+ */
+static int open_self_maps_2(void *opaque, target_ulong guest_start,
+ target_ulong guest_end, unsigned long flags)
+{
+ const struct open_self_maps_data *d = opaque;
+ uintptr_t host_start = (uintptr_t)g2h_untagged(guest_start);
+ uintptr_t host_last = (uintptr_t)g2h_untagged(guest_end - 1);
- max = h2g_valid(max - 1) ?
- max : (uintptr_t) g2h_untagged(GUEST_ADDR_MAX) + 1;
+ while (1) {
+ IntervalTreeNode *n =
+ interval_tree_iter_first(d->host_maps, host_start, host_start);
+ MapInfo *mi = container_of(n, MapInfo, itree);
+ uintptr_t this_hlast = MIN(host_last, n->last);
+ target_ulong this_gend = h2g(this_hlast) + 1;
- if (!page_check_range(h2g(min), max - min, flags)) {
- continue;
- }
+ open_self_maps_4(d, mi, guest_start, this_gend, flags);
-#ifdef TARGET_HPPA
- if (h2g(max) == ts->info->stack_limit) {
-#else
- if (h2g(min) == ts->info->stack_limit) {
-#endif
- path = "[stack]";
- } else {
- path = e->path;
- }
-
- count = dprintf(fd, TARGET_ABI_FMT_ptr "-" TARGET_ABI_FMT_ptr
- " %c%c%c%c %08" PRIx64 " %02x:%02x %"PRId64,
- h2g(min), h2g(max - 1) + 1,
- (flags & PAGE_READ) ? 'r' : '-',
- (flags & PAGE_WRITE_ORG) ? 'w' : '-',
- (flags & PAGE_EXEC) ? 'x' : '-',
- e->is_priv ? 'p' : 's',
- (uint64_t)e->offset,
- major(e->dev), minor(e->dev),
- (uint64_t)e->inode);
- if (path) {
- dprintf(fd, "%*s%s\n", 73 - count, "", path);
- } else {
- dprintf(fd, "\n");
- }
- if (smaps) {
- show_smaps(fd, max - min);
- dprintf(fd, "VmFlags:%s%s%s%s%s%s%s%s\n",
- (flags & PAGE_READ) ? " rd" : "",
- (flags & PAGE_WRITE_ORG) ? " wr" : "",
- (flags & PAGE_EXEC) ? " ex" : "",
- e->is_priv ? "" : " sh",
- (flags & PAGE_READ) ? " mr" : "",
- (flags & PAGE_WRITE_ORG) ? " mw" : "",
- (flags & PAGE_EXEC) ? " me" : "",
- e->is_priv ? "" : " ms");
- }
+ if (this_hlast == host_last) {
+ return 0;
}
+ host_start = this_hlast + 1;
+ guest_start = h2g(host_start);
}
+}
- free_self_maps(map_info);
+static int open_self_maps_1(CPUArchState *env, int fd, bool smaps)
+{
+ struct open_self_maps_data d = {
+ .ts = env_cpu(env)->opaque,
+ .host_maps = read_self_maps(),
+ .fd = fd,
+ .smaps = smaps
+ };
-#ifdef TARGET_VSYSCALL_PAGE
- /*
- * We only support execution from the vsyscall page.
- * This is as if CONFIG_LEGACY_VSYSCALL_XONLY=y from v5.3.
- */
- count = dprintf(fd, TARGET_FMT_lx "-" TARGET_FMT_lx
- " --xp 00000000 00:00 0",
- TARGET_VSYSCALL_PAGE, TARGET_VSYSCALL_PAGE + TARGET_PAGE_SIZE);
- dprintf(fd, "%*s%s\n", 73 - count, "", "[vsyscall]");
- if (smaps) {
- show_smaps(fd, TARGET_PAGE_SIZE);
- dprintf(fd, "VmFlags: ex\n");
+ if (d.host_maps) {
+ walk_memory_regions(&d, open_self_maps_2);
+ free_self_maps(d.host_maps);
+ } else {
+ walk_memory_regions(&d, open_self_maps_3);
}
-#endif
-
return 0;
}