author    Alex Bennée <alex.bennee@linaro.org>  2020-06-05 16:49:27 +0100
committer Alex Bennée <alex.bennee@linaro.org>  2020-06-08 17:04:19 +0100
commit    5c3e87f345ac93de9260f12c408d2afd87a6ab3b (patch)
tree      831edba4c5b969482fe678501f894d9a49275526 /linux-user/elfload.c
parent    ad592e37dfccf730378a44c5fa79acb603a7678d (diff)
linux-user: deal with address wrap for ARM_COMMPAGE on 32 bit
We rely on the pointer to wrap when accessing the high address of the
COMMPAGE so it lands somewhere reasonable. However, on 32-bit hosts we
cannot afford to map the entire 4 GiB address range. The old mmap
trial-and-error code handled this by simply checking that we could map
both the guest_base and the computed COMMPAGE address.

We can't just manipulate loadaddr to get what we want, so we introduce
an offset which pgb_find_hole can apply when looking for a gap for
guest_base, ensuring there is space left to map the COMMPAGE afterwards.

This is arguably a little inefficient for the one 32-bit value
(kuser_helper_version) we need to keep there, given that all the actual
code entries are picked up during the translation phase.

Fixes: ee94743034b
Bug: https://bugs.launchpad.net/qemu/+bug/1880225
Cc: Bug 1880225 <1880225@bugs.launchpad.net>
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Tested-by: Aleksandar Markovic <aleksandar.qemu.devel@gmail.com>
Cc: Richard Henderson <richard.henderson@linaro.org>
Cc: Peter Maydell <peter.maydell@linaro.org>
Message-Id: <20200605154929.26910-13-alex.bennee@linaro.org>
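The wrap the commit message relies on can be shown with a small standalone
sketch. This is an illustration only, not code from the patch: the
ARM_COMMPAGE value mirrors QEMU's constant, but the page alignment and
hole_start are made-up example numbers, and the arithmetic is done in
uint32_t to model a 32-bit host.

/*
 * Illustrative sketch only (not QEMU code): model the 32-bit address
 * wrap described above.  On a 32-bit host g2h(x) is effectively
 * guest_base + x modulo 2^32, so if the hole search keeps `offset`
 * bytes free *below* guest_base, the wrapped commpage address lands
 * inside that reserved gap.
 */
#include <stdint.h>
#include <stdio.h>

#define ARM_COMMPAGE 0xffff0f00u   /* mirrors QEMU's constant */

int main(void)
{
    uint32_t align  = 0x1000u;                    /* example page alignment */
    uint32_t offset = -(ARM_COMMPAGE & -align);   /* 0x00010000: gap to keep free */

    uint32_t hole_start = 0x60000000u;            /* pretend hole found for guest_base */
    uint32_t guest_base = hole_start + offset;    /* base sits `offset` above the hole */

    /* 32-bit addition wraps, so this lands back below guest_base. */
    uint32_t commpage_host = guest_base + (ARM_COMMPAGE & -align);

    printf("guest_base    = 0x%08x\n", guest_base);
    printf("commpage host = 0x%08x (== hole_start, inside the reserved gap)\n",
           commpage_host);
    return 0;
}

With these example numbers the sketch prints guest_base = 0x60010000 and a
commpage host address of 0x60000000, i.e. the wrapped mapping request falls
exactly into the space the offset reserved below guest_base.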
Diffstat (limited to 'linux-user/elfload.c')
-rw-r--r--  linux-user/elfload.c  31
1 file changed, 17 insertions(+), 14 deletions(-)
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 475d243f3b..b5cb21384a 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -389,7 +389,7 @@ static bool init_guest_commpage(void)
{
void *want = g2h(ARM_COMMPAGE & -qemu_host_page_size);
void *addr = mmap(want, qemu_host_page_size, PROT_READ | PROT_WRITE,
- MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
if (addr == MAP_FAILED) {
perror("Allocating guest commpage");
@@ -2113,7 +2113,8 @@ static void pgb_have_guest_base(const char *image_name, abi_ulong guest_loaddr,
* only dumbly iterate up the host address space seeing if the
* allocation would work.
*/
-static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, long align)
+static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk,
+ long align, uintptr_t offset)
{
uintptr_t base;
@@ -2123,7 +2124,7 @@ static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, lon
while (true) {
uintptr_t align_start, end;
align_start = ROUND_UP(base, align);
- end = align_start + guest_size;
+ end = align_start + guest_size + offset;
/* if brk is anywhere in the range give ourselves some room to grow. */
if (align_start <= brk && brk < end) {
@@ -2138,7 +2139,7 @@ static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, lon
PROT_NONE, flags, -1, 0);
if (mmap_start != MAP_FAILED) {
munmap((void *) align_start, guest_size);
- return (uintptr_t) mmap_start;
+ return (uintptr_t) mmap_start + offset;
}
base += qemu_host_page_size;
}
@@ -2147,7 +2148,7 @@ static uintptr_t pgd_find_hole_fallback(uintptr_t guest_size, uintptr_t brk, lon
/* Return value for guest_base, or -1 if no hole found. */
static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
- long align)
+ long align, uintptr_t offset)
{
GSList *maps, *iter;
uintptr_t this_start, this_end, next_start, brk;
@@ -2161,7 +2162,7 @@ static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
brk = (uintptr_t)sbrk(0);
if (!maps) {
- return pgd_find_hole_fallback(guest_size, brk, align);
+ return pgd_find_hole_fallback(guest_size, brk, align, offset);
}
/* The first hole is before the first map entry. */
@@ -2173,7 +2174,7 @@ static uintptr_t pgb_find_hole(uintptr_t guest_loaddr, uintptr_t guest_size,
this_end = ((MapInfo *)iter->data)->start;
next_start = ((MapInfo *)iter->data)->end;
- align_start = ROUND_UP(this_start, align);
+ align_start = ROUND_UP(this_start + offset, align);
/* Skip holes that are too small. */
if (align_start >= this_end) {
@@ -2223,6 +2224,7 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
{
uintptr_t loaddr = orig_loaddr;
uintptr_t hiaddr = orig_hiaddr;
+ uintptr_t offset = 0;
uintptr_t addr;
if (hiaddr != orig_hiaddr) {
@@ -2236,18 +2238,19 @@ static void pgb_static(const char *image_name, abi_ulong orig_loaddr,
if (ARM_COMMPAGE) {
/*
* Extend the allocation to include the commpage.
- * For a 64-bit host, this is just 4GiB; for a 32-bit host,
- * the address arithmetic will wrap around, but the difference
- * will produce the correct allocation size.
+ * For a 64-bit host, this is just 4GiB; for a 32-bit host we
+ * need to ensure there is space below the guest_base so we
+ * can map the commpage in the place needed when the address
+ * arithmetic wraps around.
*/
if (sizeof(uintptr_t) == 8 || loaddr >= 0x80000000u) {
- hiaddr = (uintptr_t)4 << 30;
+ hiaddr = (uintptr_t) 4 << 30;
} else {
- loaddr = ARM_COMMPAGE & -align;
+ offset = -(ARM_COMMPAGE & -align);
}
}
- addr = pgb_find_hole(loaddr, hiaddr - loaddr, align);
+ addr = pgb_find_hole(loaddr, hiaddr - loaddr, align, offset);
if (addr == -1) {
/*
* If ARM_COMMPAGE, there *might* be a non-consecutive allocation
@@ -2282,7 +2285,7 @@ static void pgb_dynamic(const char *image_name, long align)
* just above that, and maximises the positive guest addresses.
*/
commpage = ARM_COMMPAGE & -align;
- addr = pgb_find_hole(commpage, -commpage, align);
+ addr = pgb_find_hole(commpage, -commpage, align, 0);
assert(addr != -1);
guest_base = addr;
}