| author | Paul Brook <paul@codesourcery.com> | 2010-05-05 16:32:59 +0100 |
|---|---|---|
| committer | Paul Brook <paul@codesourcery.com> | 2010-05-05 16:32:59 +0100 |
| commit | 2e9a5713f0567fffaa3518f495b8d16a2b74f84a (patch) | |
| tree | e7c2651dc7f83db54d27af18a7f8fac7bf848437 | |
| parent | 048d179f20c1499c7f55957df125392de664b6a7 (diff) | |
Remove PAGE_RESERVED
The usermode PAGE_RESERVED code is not required by the current mmap
implementation, and is already broken when guest_base != 0.
Unfortunately the bsd emulation still uses the old mmap implementation,
so we can't rip it out altogether.
Signed-off-by: Paul Brook <paul@codesourcery.com>
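
The breakage the commit message refers to hinges on QEMU's usermode host-to-guest address translation: the removed code tagged host allocations as PAGE_RESERVED only when h2g_valid() said they overlapped guest address space, and that test is relative to guest_base. The following is a minimal, self-contained sketch, not QEMU source: guest_base, GUEST_ADDR_MAX, and the h2g()/h2g_valid() definitions below are simplified stand-ins with assumed values, meant only to show the arithmetic that the removed page_set_flags(h2g(p), ..., PAGE_RESERVED) calls in the diff relied on.

```c
/*
 * Toy sketch (not QEMU code): simplified stand-ins for the usermode
 * guest_base / h2g() / h2g_valid() helpers.  All values and definitions
 * here are illustrative assumptions.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static uintptr_t guest_base = 0x10000;   /* assume a non-zero relocation */
#define GUEST_ADDR_MAX 0xffffffffUL      /* assume a 32-bit guest */

/* Guest address = host address minus the relocation offset. */
static uint32_t h2g(uintptr_t host_addr)
{
    return (uint32_t)(host_addr - guest_base);
}

/* A host address maps into guest space only if it lies inside
   [guest_base, guest_base + GUEST_ADDR_MAX]. */
static int h2g_valid(uintptr_t host_addr)
{
    return host_addr >= guest_base &&
           host_addr - guest_base <= GUEST_ADDR_MAX;
}

int main(void)
{
    /* Two hypothetical host allocations: one below guest_base,
       one inside the relocated guest window. */
    uintptr_t outside = 0x8000;
    uintptr_t inside  = guest_base + 0x4000;

    printf("0x%" PRIxPTR " -> h2g_valid=%d\n",
           outside, h2g_valid(outside));
    printf("0x%" PRIxPTR " -> h2g_valid=%d, guest addr 0x%" PRIx32 "\n",
           inside, h2g_valid(inside), h2g(inside));
    return 0;
}
```

With guest_base fixed at 0, every host allocation that fell inside the guest address range could be marked reserved; once guest_base is relocatable, the bookkeeping no longer lines up with the current mmap implementation, which is why the diff below simply drops it everywhere except the BSD usermode path.
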
| -rw-r--r-- | cpu-all.h | 3 |
| -rw-r--r-- | exec.c | 31 |
| -rw-r--r-- | linux-user/elfload.c | 6 |
| -rw-r--r-- | linux-user/mmap.c | 19 |
4 files changed, 5 insertions, 54 deletions
diff --git a/cpu-all.h b/cpu-all.h
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -742,7 +742,10 @@ extern unsigned long qemu_host_page_mask;
 /* original state of the write flag (used when tracking self-modifying
    code */
 #define PAGE_WRITE_ORG 0x0010
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
+/* FIXME: Code that sets/uses this is broken and needs to go away. */
 #define PAGE_RESERVED 0x0020
+#endif
 
 #if defined(CONFIG_USER_ONLY)
 void page_dump(FILE *f);
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -288,7 +288,7 @@ static void page_init(void)
         qemu_host_page_bits++;
     qemu_host_page_mask = ~(qemu_host_page_size - 1);
 
-#if !defined(_WIN32) && defined(CONFIG_USER_ONLY)
+#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
     {
 #ifdef HAVE_KINFO_GETVMMAP
         struct kinfo_vmentry *freep;
@@ -324,11 +324,7 @@ static void page_init(void)
 
         last_brk = (unsigned long)sbrk(0);
 
-#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
         f = fopen("/compat/linux/proc/self/maps", "r");
-#else
-        f = fopen("/proc/self/maps", "r");
-#endif
         if (f) {
             mmap_lock();
 
@@ -365,24 +361,11 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     int i;
 
 #if defined(CONFIG_USER_ONLY)
-    /* We can't use qemu_malloc because it may recurse into a locked mutex.
-       Neither can we record the new pages we reserve while allocating a
-       given page because that may recurse into an unallocated page table
-       entry.  Stuff the allocations we do make into a queue and process
-       them after having completed one entire page table allocation. */
-
-    unsigned long reserve[2 * (V_L1_SHIFT / L2_BITS)];
-    int reserve_idx = 0;
-
+    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
 # define ALLOC(P, SIZE)                                 \
     do {                                                \
         P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
-        if (h2g_valid(P)) {                             \
-            reserve[reserve_idx] = h2g(P);              \
-            reserve[reserve_idx + 1] = SIZE;            \
-            reserve_idx += 2;                           \
-        }                                               \
     } while (0)
 #else
 # define ALLOC(P, SIZE) \
@@ -417,16 +400,6 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
     }
 #undef ALLOC
 
-#if defined(CONFIG_USER_ONLY)
-    for (i = 0; i < reserve_idx; i += 2) {
-        unsigned long addr = reserve[i];
-        unsigned long len = reserve[i + 1];
-
-        page_set_flags(addr & TARGET_PAGE_MASK,
-                       TARGET_PAGE_ALIGN(addr + len),
-                       PAGE_RESERVED);
-    }
-#endif
 
     return pd + (index & (L2_SIZE - 1));
 }
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 4d3dd89b2b..13f63cf77e 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -2159,12 +2159,6 @@ static int vma_walker(void *priv, abi_ulong start, abi_ulong end,
 {
     struct mm_struct *mm = (struct mm_struct *)priv;
 
-    /*
-     * Don't dump anything that qemu has reserved for internal use.
-     */
-    if (flags & PAGE_RESERVED)
-        return (0);
-
     vma_add_mapping(mm, start, end, flags);
     return (0);
 }
diff --git a/linux-user/mmap.c b/linux-user/mmap.c
index 46923c707c..9c062e7078 100644
--- a/linux-user/mmap.c
+++ b/linux-user/mmap.c
@@ -85,14 +85,6 @@ void *qemu_vmalloc(size_t size)
     /* Use map and mark the pages as used. */
     p = mmap(NULL, size, PROT_READ | PROT_WRITE,
              MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-
-    if (h2g_valid(p)) {
-        /* Allocated region overlaps guest address space. This may recurse. */
-        abi_ulong addr = h2g(p);
-        page_set_flags(addr & TARGET_PAGE_MASK, TARGET_PAGE_ALIGN(addr + size),
-                       PAGE_RESERVED);
-    }
-
     mmap_unlock();
     return p;
 }
@@ -484,9 +476,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
         }
         start = h2g(host_start);
     } else {
-        int flg;
-        target_ulong addr;
-
         if (start & ~TARGET_PAGE_MASK) {
             errno = EINVAL;
             goto fail;
@@ -504,14 +493,6 @@ abi_long target_mmap(abi_ulong start, abi_ulong len, int prot,
             goto fail;
         }
 
-        for(addr = real_start; addr < real_end; addr += TARGET_PAGE_SIZE) {
-            flg = page_get_flags(addr);
-            if (flg & PAGE_RESERVED) {
-                errno = ENXIO;
-                goto fail;
-            }
-        }
-
         /* worst case: we cannot map the file because the offset is not
            aligned, so we read it */
         if (!(flags & MAP_ANONYMOUS) &&