From 2044c3e7116eeac0449dcb4a4130cc8f8b9310da Mon Sep 17 00:00:00 2001
From: Murilo Opsfelder Araujo
Date: Wed, 30 Jan 2019 21:36:04 -0200
Subject: mmap-alloc: unfold qemu_ram_mmap()

Unfold parts of qemu_ram_mmap() for the sake of readability: move the
declarations to the top and keep the architecture-specific logic in the
ifdef-else blocks. No change in the function's behaviour.

Give ptr and ptr1 meaningful names:

  ptr  -> guardptr : pointer to the PROT_NONE guard region
  ptr1 -> ptr      : pointer to the mapped memory returned to the caller

Signed-off-by: Murilo Opsfelder Araujo
Reviewed-by: Greg Kurz
Signed-off-by: David Gibson
---
 util/mmap-alloc.c | 53 ++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 34 insertions(+), 19 deletions(-)

(limited to 'util/mmap-alloc.c')

diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index fd329eccd8..f71ea038c8 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -77,11 +77,19 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
 
 void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
 {
+    int flags;
+    int guardfd;
+    size_t offset;
+    size_t total;
+    void *guardptr;
+    void *ptr;
+
     /*
      * Note: this always allocates at least one extra page of virtual address
      * space, even if size is already aligned.
      */
-    size_t total = size + align;
+    total = size + align;
+
 #if defined(__powerpc64__) && defined(__linux__)
     /* On ppc64 mappings in the same segment (aka slice) must share the same
      * page size. Since we will be re-allocating part of this segment
@@ -91,16 +99,22 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      * We do this unless we are using the system page size, in which case
      * anonymous memory is OK.
      */
-    int anonfd = fd == -1 || qemu_fd_getpagesize(fd) == getpagesize() ? -1 : fd;
-    int flags = anonfd == -1 ? MAP_ANONYMOUS : MAP_NORESERVE;
-    void *ptr = mmap(0, total, PROT_NONE, flags | MAP_PRIVATE, anonfd, 0);
+    flags = MAP_PRIVATE;
+    if (fd == -1 || qemu_fd_getpagesize(fd) == getpagesize()) {
+        guardfd = -1;
+        flags |= MAP_ANONYMOUS;
+    } else {
+        guardfd = fd;
+        flags |= MAP_NORESERVE;
+    }
 #else
-    void *ptr = mmap(0, total, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+    guardfd = -1;
+    flags = MAP_PRIVATE | MAP_ANONYMOUS;
 #endif
-    size_t offset;
-    void *ptr1;
 
-    if (ptr == MAP_FAILED) {
+    guardptr = mmap(0, total, PROT_NONE, flags, guardfd, 0);
+
+    if (guardptr == MAP_FAILED) {
         return MAP_FAILED;
     }
 
@@ -108,19 +122,20 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     /* Always align to host page size */
     assert(align >= getpagesize());
 
-    offset = QEMU_ALIGN_UP((uintptr_t)ptr, align) - (uintptr_t)ptr;
-    ptr1 = mmap(ptr + offset, size, PROT_READ | PROT_WRITE,
-                MAP_FIXED |
-                (fd == -1 ? MAP_ANONYMOUS : 0) |
-                (shared ? MAP_SHARED : MAP_PRIVATE),
-                fd, 0);
-    if (ptr1 == MAP_FAILED) {
-        munmap(ptr, total);
+    flags = MAP_FIXED;
+    flags |= fd == -1 ? MAP_ANONYMOUS : 0;
+    flags |= shared ? MAP_SHARED : MAP_PRIVATE;
+    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
+
+    ptr = mmap(guardptr + offset, size, PROT_READ | PROT_WRITE, flags, fd, 0);
+
+    if (ptr == MAP_FAILED) {
+        munmap(guardptr, total);
         return MAP_FAILED;
     }
 
     if (offset > 0) {
-        munmap(ptr, offset);
+        munmap(guardptr, offset);
     }
 
     /*
@@ -129,10 +144,10 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      */
     total -= offset;
     if (total > size + getpagesize()) {
-        munmap(ptr1 + size + getpagesize(), total - size - getpagesize());
+        munmap(ptr + size + getpagesize(), total - size - getpagesize());
     }
 
-    return ptr1;
+    return ptr;
 }
 
 void qemu_ram_munmap(void *ptr, size_t size)
-- cgit v1.2.3
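The refactored qemu_ram_mmap() above implements a reserve-then-overmap pattern:
reserve size + align bytes with PROT_NONE, map the real region at the first
align-boundary inside that reservation with MAP_FIXED, then trim the slack
while leaving one inaccessible page past the end as a guard. Below is a
minimal standalone sketch of the same pattern for the anonymous-memory case;
the helper name alloc_aligned_with_guard and the hard-coded 2 MiB alignment
are illustrative, not QEMU API.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* Illustrative only: mirrors the shape of qemu_ram_mmap() for the
     * anonymous-memory case; align must be a power of two >= page size. */
    static void *alloc_aligned_with_guard(size_t size, size_t align)
    {
        size_t total = size + align;   /* slack guarantees an aligned window */
        size_t pagesize = (size_t)getpagesize();
        size_t offset;
        void *guardptr;
        void *ptr;

        /* 1. Reserve address space only; PROT_NONE makes it inaccessible. */
        guardptr = mmap(NULL, total, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (guardptr == MAP_FAILED) {
            return MAP_FAILED;
        }

        /* 2. Distance to the first align-boundary inside the reservation
         *    (what QEMU_ALIGN_UP computes for a power-of-two align). */
        offset = -(uintptr_t)guardptr & (align - 1);

        /* 3. Overmap the usable region at that boundary. */
        ptr = mmap((char *)guardptr + offset, size, PROT_READ | PROT_WRITE,
                   MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ptr == MAP_FAILED) {
            munmap(guardptr, total);
            return MAP_FAILED;
        }

        /* 4. Trim the slack: everything before the aligned region... */
        if (offset > 0) {
            munmap(guardptr, offset);
        }
        /* ...and everything after it, except one PROT_NONE guard page. */
        total -= offset;
        if (total > size + pagesize) {
            munmap((char *)ptr + size + pagesize, total - size - pagesize);
        }
        return ptr;   /* caller later munmaps size + one guard page */
    }

    int main(void)
    {
        size_t align = 2 * 1024 * 1024;   /* illustrative 2 MiB boundary */
        void *p = alloc_aligned_with_guard(65536, align);
        assert(p != MAP_FAILED && ((uintptr_t)p & (align - 1)) == 0);
        printf("aligned mapping at %p\n", p);
        return 0;
    }

A read or write one byte past ptr + size faults on the PROT_NONE guard page
instead of silently touching a neighbouring mapping, which is the point of
the extra page the function deliberately keeps mapped.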
From 53adb9d43e1abba187387a51f238e878e934c647 Mon Sep 17 00:00:00 2001
From: Murilo Opsfelder Araujo
Date: Wed, 30 Jan 2019 21:36:05 -0200
Subject: mmap-alloc: fix hugetlbfs misaligned length in ppc64

Commit 7197fb4058bcb68986bae2bb2c04d6370f3e7218 ("util/mmap-alloc: fix
hugetlb support on ppc64") fixed Huge TLB mappings on ppc64. However, we
still need to consider the underlying huge page size during munmap(),
because munmap() requires that both address and length be multiples of
the underlying huge page size for Huge TLB mappings. Quoting the "Huge
page (Huge TLB) mappings" paragraph in the NOTES section of the
munmap(2) manual:

  "For munmap(), addr and length must both be a multiple of the
  underlying huge page size."

On ppc64, the munmap() in qemu_ram_munmap() does not work for Huge TLB
mappings because the mapped segment can be aligned to the underlying
huge page size rather than to the native system page size returned by
getpagesize(). This has the side effect of not releasing huge pages back
to the pool after a hugetlbfs file-backed memory device is hot-unplugged.

This patch fixes the situation in qemu_ram_mmap() and qemu_ram_munmap()
by considering the underlying page size on ppc64. After this patch,
memory hot-unplug releases huge pages back to the pool.

Fixes: 7197fb4058bcb68986bae2bb2c04d6370f3e7218
Signed-off-by: Murilo Opsfelder Araujo
Reviewed-by: Greg Kurz
Signed-off-by: David Gibson
---
 util/mmap-alloc.c | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

(limited to 'util/mmap-alloc.c')

diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index f71ea038c8..8565885420 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -80,6 +80,7 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     int flags;
     int guardfd;
     size_t offset;
+    size_t pagesize;
     size_t total;
     void *guardptr;
     void *ptr;
@@ -100,7 +101,8 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      * anonymous memory is OK.
      */
     flags = MAP_PRIVATE;
-    if (fd == -1 || qemu_fd_getpagesize(fd) == getpagesize()) {
+    pagesize = qemu_fd_getpagesize(fd);
+    if (fd == -1 || pagesize == getpagesize()) {
         guardfd = -1;
         flags |= MAP_ANONYMOUS;
     } else {
@@ -109,6 +111,7 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     }
 #else
     guardfd = -1;
+    pagesize = getpagesize();
     flags = MAP_PRIVATE | MAP_ANONYMOUS;
 #endif
 
@@ -120,7 +123,7 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
     assert(is_power_of_2(align));
 
     /* Always align to host page size */
-    assert(align >= getpagesize());
+    assert(align >= pagesize);
 
     flags = MAP_FIXED;
     flags |= fd == -1 ? MAP_ANONYMOUS : 0;
@@ -143,17 +146,24 @@ void *qemu_ram_mmap(int fd, size_t size, size_t align, bool shared)
      * a guard page guarding against potential buffer overflows.
*/ total -= offset; - if (total > size + getpagesize()) { - munmap(ptr + size + getpagesize(), total - size - getpagesize()); + if (total > size + pagesize) { + munmap(ptr + size + pagesize, total - size - pagesize); } return ptr; } -void qemu_ram_munmap(void *ptr, size_t size) +void qemu_ram_munmap(int fd, void *ptr, size_t size) { + size_t pagesize; + if (ptr) { /* Unmap both the RAM block and the guard page */ - munmap(ptr, size + getpagesize()); +#if defined(__powerpc64__) && defined(__linux__) + pagesize = qemu_fd_getpagesize(fd); +#else + pagesize = getpagesize(); +#endif + munmap(ptr, size + pagesize); } } -- cgit v1.2.3
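The whole fix hinges on knowing the page size of the filesystem backing fd.
The sketch below shows one way to probe it; fd_pagesize() approximates what
QEMU's qemu_fd_getpagesize() does on Linux and is an illustrative stand-in,
not the actual QEMU helper (QEMU's version also retries fstatfs() on EINTR).

    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/vfs.h>             /* fstatfs(), struct statfs */
    #include <unistd.h>

    #ifndef HUGETLBFS_MAGIC
    #define HUGETLBFS_MAGIC 0x958458f6   /* value from linux/magic.h */
    #endif

    /* Ask the filesystem backing fd for its block size; hugetlbfs reports
     * the underlying huge page size there. Fall back to the native page
     * size for anonymous memory or regular files. */
    static size_t fd_pagesize(int fd)
    {
        struct statfs fs;

        if (fd != -1 && fstatfs(fd, &fs) == 0 &&
            fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;       /* e.g. 16 MiB on older ppc64 hosts */
        }
        return (size_t)getpagesize();
    }

    /* Shape of the patched qemu_ram_munmap(): size the trailing guard page
     * with the fd's page size so the munmap() length stays a multiple of
     * the underlying (possibly huge) page size. */
    static void ram_munmap(int fd, void *ptr, size_t size)
    {
        if (ptr) {
            /* unmap the RAM block and its guard page together */
            munmap(ptr, size + fd_pagesize(fd));
        }
    }

    int main(void)
    {
        /* With no fd we fall back to the native page size. */
        printf("fallback page size: %zu\n", fd_pagesize(-1));
        return 0;
    }

With plain getpagesize() in that last munmap(), a hugetlbfs-backed block on
ppc64 would be unmapped with a length of size plus the base page size
(typically 64 KiB), which is not a multiple of the huge page size; that is
the misaligned length of the patch title, and it left huge pages unreleased
after memory hot-unplug.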