author     Richard Henderson <richard.henderson@linaro.org>   2019-05-19 13:19:52 -0700
committer  Laurent Vivier <laurent@vivier.eu>                 2019-05-24 13:16:21 +0200
commit     30ab9ef2967dde22193f609b6ec56101c156b061
tree       128863abc52b116065b5d5c162e0335592e60254 /linux-user/elfload.c
parent     abcac736c1505254ec3f9587aff04fbe4705a55e
linux-user: Fix shmat emulation by honoring host SHMLBA
For those hosts with SHMLBA > getpagesize, we don't automatically
select a guest address that is compatible with the host. We can
achieve this by boosting the alignment of guest_base and by adding
an extra alignment argument to mmap_find_vma.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20190519201953.20161-13-richard.henderson@linaro.org>
Signed-off-by: Laurent Vivier <laurent@vivier.eu>
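The core of the change is visible in the first hunk below: the reservation alignment for the guest address space becomes MAX(SHMLBA, qemu_host_page_size) rather than the host page size alone, and every later alignment check or round-up uses that value. The following standalone sketch shows the arithmetic involved; it is not QEMU code. sysconf(_SC_PAGESIZE) stands in for QEMU's qemu_host_page_size, and round_up() re-implements the ROUND_UP macro used in the patch.

/* Standalone sketch of the alignment arithmetic behind the patch.
 * SHMLBA comes from <sys/shm.h>; sysconf(_SC_PAGESIZE) stands in for
 * QEMU's qemu_host_page_size, and round_up() mirrors ROUND_UP(). */
#include <stdio.h>
#include <sys/shm.h>
#include <unistd.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Round addr up to the next multiple of align (align is a power of two). */
static unsigned long round_up(unsigned long addr, unsigned long align)
{
    return (addr + align - 1) & -align;
}

int main(void)
{
    unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);

    /* In order to use host shmat(2), reservations must honor SHMLBA. */
    unsigned long align = MAX(SHMLBA, page);
    unsigned long addr  = 0x40001234UL;

    printf("align           = 0x%lx\n", align);
    printf("addr rounded up = 0x%lx\n", round_up(addr, align));
    printf("addr misaligned?  %s\n", (addr & (align - 1)) ? "yes" : "no");
    return 0;
}

Once guest_base honors this boosted alignment, a later shmat(2) on the host can place guest shared-memory segments without violating the host's SHMLBA constraint, which is what the commit subject refers to.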
Diffstat (limited to 'linux-user/elfload.c')
-rw-r--r--  linux-user/elfload.c | 17
1 file changed, 10 insertions, 7 deletions
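For orientation before reading the raw hunks, here is a condensed, hypothetical illustration of the reserve-and-realign pattern that init_guest_space follows: reserve an anonymous region, and if its start is misaligned, drop it and reserve again with extra room so the start can be rounded up. The patch re-bases this pattern from qemu_host_page_size onto the boosted align value. The helper name reserve_aligned() and the trimming of the over-sized mapping are inventions of this sketch, not QEMU's actual code.

/* Hypothetical, condensed version of the reserve-and-realign idea used by
 * init_guest_space(); not the QEMU function itself.  The patch below makes
 * the real code perform these checks against `align` instead of the host
 * page size. */
#include <stdio.h>
#include <sys/mman.h>
#include <sys/shm.h>
#include <unistd.h>

static void *reserve_aligned(size_t size, unsigned long align)
{
    int flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;

    /* First attempt: let the kernel pick an address. */
    void *p = mmap(NULL, size, PROT_NONE, flags, -1, 0);
    if (p == MAP_FAILED) {
        return NULL;
    }
    if (((unsigned long)p & (align - 1)) == 0) {
        return p;                     /* already suitably aligned */
    }

    /* Misaligned: release it and reserve with extra room so that the
     * start address can be rounded up to the next align boundary. */
    munmap(p, size);
    p = mmap(NULL, size + align, PROT_NONE, flags, -1, 0);
    if (p == MAP_FAILED) {
        return NULL;
    }
    unsigned long start = ((unsigned long)p + align - 1) & -align;

    /* Trim the unused head and tail of the over-sized reservation. */
    if (start > (unsigned long)p) {
        munmap(p, start - (unsigned long)p);
    }
    munmap((void *)(start + size), (unsigned long)p + align - start);
    return (void *)start;
}

int main(void)
{
    unsigned long page = (unsigned long)sysconf(_SC_PAGESIZE);
    unsigned long align = SHMLBA > page ? SHMLBA : page;

    void *base = reserve_aligned(16 * 1024 * 1024, align);
    printf("reserved at %p (align 0x%lx)\n", base, align);
    return 0;
}

The real function is more involved: it also supports a fixed or preferred guest_start, and when the kernel cannot supply a usable region it retries the whole search at current_start += align, as the final hunk shows.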
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 02832adfbc..a23aa4493e 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -3,6 +3,7 @@
#include <sys/param.h>
#include <sys/resource.h>
+#include <sys/shm.h>
#include "qemu.h"
#include "disas/disas.h"
@@ -2012,6 +2013,8 @@ unsigned long init_guest_space(unsigned long host_start,
unsigned long guest_start,
bool fixed)
{
+ /* In order to use host shmat, we must be able to honor SHMLBA. */
+ unsigned long align = MAX(SHMLBA, qemu_host_page_size);
unsigned long current_start, aligned_start;
int flags;
@@ -2029,7 +2032,7 @@ unsigned long init_guest_space(unsigned long host_start,
}
/* Setup the initial flags and start address. */
- current_start = host_start & qemu_host_page_mask;
+ current_start = host_start & -align;
flags = MAP_ANONYMOUS | MAP_PRIVATE | MAP_NORESERVE;
if (fixed) {
flags |= MAP_FIXED;
@@ -2065,8 +2068,8 @@ unsigned long init_guest_space(unsigned long host_start,
return (unsigned long)-1;
}
munmap((void *)real_start, host_full_size);
- if (real_start & ~qemu_host_page_mask) {
- /* The same thing again, but with an extra qemu_host_page_size
+ if (real_start & (align - 1)) {
+ /* The same thing again, but with extra
* so that we can shift around alignment.
*/
unsigned long real_size = host_full_size + qemu_host_page_size;
@@ -2079,7 +2082,7 @@ unsigned long init_guest_space(unsigned long host_start,
return (unsigned long)-1;
}
munmap((void *)real_start, real_size);
- real_start = HOST_PAGE_ALIGN(real_start);
+ real_start = ROUND_UP(real_start, align);
}
current_start = real_start;
}
@@ -2106,7 +2109,7 @@ unsigned long init_guest_space(unsigned long host_start,
}
/* Ensure the address is properly aligned. */
- if (real_start & ~qemu_host_page_mask) {
+ if (real_start & (align - 1)) {
/* Ideally, we adjust like
*
* pages: [ ][ ][ ][ ][ ]
@@ -2134,7 +2137,7 @@ unsigned long init_guest_space(unsigned long host_start,
if (real_start == (unsigned long)-1) {
return (unsigned long)-1;
}
- aligned_start = HOST_PAGE_ALIGN(real_start);
+ aligned_start = ROUND_UP(real_start, align);
} else {
aligned_start = real_start;
}
@@ -2171,7 +2174,7 @@ unsigned long init_guest_space(unsigned long host_start,
* because of trouble with ARM commpage setup.
*/
munmap((void *)real_start, real_size);
- current_start += qemu_host_page_size;
+ current_start += align;
if (host_start == current_start) {
/* Theoretically possible if host doesn't have any suitably
* aligned areas. Normally the first mmap will fail.