aboutsummaryrefslogtreecommitdiff
path: root/linux-user/mmap.c
diff options
context:
space:
mode:
authorRichard Henderson <richard.henderson@linaro.org>2019-05-19 13:19:52 -0700
committerLaurent Vivier <laurent@vivier.eu>2019-05-24 13:16:21 +0200
commit30ab9ef2967dde22193f609b6ec56101c156b061 (patch)
tree128863abc52b116065b5d5c162e0335592e60254 /linux-user/mmap.c
parentabcac736c1505254ec3f9587aff04fbe4705a55e (diff)
linux-user: Fix shmat emulation by honoring host SHMLBA
For those hosts with SHMLBA > getpagesize, we don't automatically select a guest address that is compatible with the host. We can achieve this by boosting the alignment of guest_base and by adding an extra alignment argument to mmap_find_vma. Signed-off-by: Richard Henderson <richard.henderson@linaro.org> Message-Id: <20190519201953.20161-13-richard.henderson@linaro.org> Signed-off-by: Laurent Vivier <laurent@vivier.eu>
Diffstat (limited to 'linux-user/mmap.c')
-rw-r--r--linux-user/mmap.c70
1 file changed, 37 insertions, 33 deletions
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index e0249efe4f..10796b37ac 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -202,49 +202,52 @@ unsigned long last_brk;
/* Subroutine of mmap_find_vma, used when we have pre-allocated a chunk
of guest address space. */
-static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
+static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size,
+ abi_ulong align)
{
- abi_ulong addr;
- abi_ulong end_addr;
+ abi_ulong addr, end_addr, incr = qemu_host_page_size;
int prot;
- int looped = 0;
+ bool looped = false;
if (size > reserved_va) {
return (abi_ulong)-1;
}
- size = HOST_PAGE_ALIGN(size);
+ /* Note that start and size have already been aligned by mmap_find_vma. */
+
end_addr = start + size;
- if (end_addr > reserved_va) {
- end_addr = reserved_va;
+ if (start > reserved_va - size) {
+ /* Start at the top of the address space. */
+ end_addr = ((reserved_va - size) & -align) + size;
+ looped = true;
}
- addr = end_addr - qemu_host_page_size;
+ /* Search downward from END_ADDR, checking to see if a page is in use. */
+ addr = end_addr;
while (1) {
+ addr -= incr;
if (addr > end_addr) {
if (looped) {
+ /* Failure. The entire address space has been searched. */
return (abi_ulong)-1;
}
- end_addr = reserved_va;
- addr = end_addr - qemu_host_page_size;
- looped = 1;
- continue;
- }
- prot = page_get_flags(addr);
- if (prot) {
- end_addr = addr;
- }
- if (addr && addr + size == end_addr) {
- break;
+ /* Re-start at the top of the address space. */
+ addr = end_addr = ((reserved_va - size) & -align) + size;
+ looped = true;
+ } else {
+ prot = page_get_flags(addr);
+ if (prot) {
+ /* Page in use. Restart below this page. */
+ addr = end_addr = ((addr - size) & -align) + size;
+ } else if (addr && addr + size == end_addr) {
+ /* Success! All pages between ADDR and END_ADDR are free. */
+ if (start == mmap_next_start) {
+ mmap_next_start = addr;
+ }
+ return addr;
+ }
}
- addr -= qemu_host_page_size;
}
-
- if (start == mmap_next_start) {
- mmap_next_start = addr;
- }
-
- return addr;
}
/*
@@ -253,7 +256,7 @@ static abi_ulong mmap_find_vma_reserved(abi_ulong start, abi_ulong size)
* It must be called with mmap_lock() held.
* Return -1 if error.
*/
-abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
+abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size, abi_ulong align)
{
void *ptr, *prev;
abi_ulong addr;
@@ -265,11 +268,12 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
} else {
start &= qemu_host_page_mask;
}
+ start = ROUND_UP(start, align);
size = HOST_PAGE_ALIGN(size);
if (reserved_va) {
- return mmap_find_vma_reserved(start, size);
+ return mmap_find_vma_reserved(start, size, align);
}
addr = start;
@@ -299,7 +303,7 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
if (h2g_valid(ptr + size - 1)) {
addr = h2g(ptr);
- if ((addr & ~TARGET_PAGE_MASK) == 0) {
+ if ((addr & (align - 1)) == 0) {
/* Success. */
if (start == mmap_next_start && addr >= TASK_UNMAPPED_BASE) {
mmap_next_start = addr + size;
@@ -313,12 +317,12 @@ abi_ulong mmap_find_vma(abi_ulong start, abi_ulong size)
/* Assume the result that the kernel gave us is the
first with enough free space, so start again at the
next higher target page. */
- addr = TARGET_PAGE_ALIGN(addr);
+ addr = ROUND_UP(addr, align);
break;
case 1:
/* Sometimes the kernel decides to perform the allocation
at the top end of memory instead. */
- addr &= TARGET_PAGE_MASK;
+ addr &= -align;
break;
case 2:
/* Start over at low memory. */
@@ -416,7 +420,7 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
if (!(flags & MAP_FIXED)) {
host_len = len + offset - host_offset;
host_len = HOST_PAGE_ALIGN(host_len);
- start = mmap_find_vma(real_start, host_len);
+ start = mmap_find_vma(real_start, host_len, TARGET_PAGE_SIZE);
if (start == (abi_ulong)-1) {
errno = ENOMEM;
goto fail;
@@ -710,7 +714,7 @@ abi_long target_mremap(abi_ulong old_addr, abi_ulong old_size,
} else if (flags & MREMAP_MAYMOVE) {
abi_ulong mmap_start;
- mmap_start = mmap_find_vma(0, new_size);
+ mmap_start = mmap_find_vma(0, new_size, TARGET_PAGE_SIZE);
if (mmap_start == -1) {
errno = ENOMEM;