author     Murilo Opsfelder Araujo <muriloo@linux.ibm.com>    2019-01-30 21:36:05 -0200
committer  David Gibson <david@gibson.dropbear.id.au>         2019-02-04 18:44:20 +1100
commit     53adb9d43e1abba187387a51f238e878e934c647 (patch)
tree       a21a48ecf9d9f7bee7a085b5f6a945b2e48c9de7 /exec.c
parent     2044c3e7116eeac0449dcb4a4130cc8f8b9310da (diff)
mmap-alloc: fix hugetlbfs misaligned length in ppc64
The commit 7197fb4058bcb68986bae2bb2c04d6370f3e7218 ("util/mmap-alloc: fix hugetlb support on ppc64") fixed Huge TLB mappings on ppc64.

However, we still need to consider the underlying huge page size during munmap() because it requires that both address and length be a multiple of the underlying huge page size for Huge TLB mappings. Quote from "Huge page (Huge TLB) mappings" paragraph under NOTES section of the munmap(2) manual:

    "For munmap(), addr and length must both be a multiple of the
    underlying huge page size."

On ppc64, the munmap() in qemu_ram_munmap() does not work for Huge TLB mappings because the mapped segment can be aligned with the underlying huge page size, not aligned with the native system page size, as returned by getpagesize(). This has the side effect of not releasing huge pages back to the pool after a hugetlbfs file-backed memory device is hot-unplugged.

This patch fixes the situation in qemu_ram_mmap() and qemu_ram_munmap() by considering the underlying page size on ppc64.

After this patch, memory hot-unplug releases huge pages back to the pool.

Fixes: 7197fb4058bcb68986bae2bb2c04d6370f3e7218
Signed-off-by: Murilo Opsfelder Araujo <muriloo@linux.ibm.com>
Reviewed-by: Greg Kurz <groug@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
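To make the quoted rule concrete, here is a small standalone test, not part of the patch. It assumes a mounted hugetlbfs at /dev/hugepages (path hypothetical, reserved huge pages required) and the 16 MiB huge pages used on ppc64; unmapping with a length rounded only to the native page size is the failure mode described above:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    /* Hypothetical hugetlbfs path; adjust to the local mount point. */
    int fd = open("/dev/hugepages/demo", O_CREAT | O_RDWR, 0600);
    if (fd < 0) {
        perror("open");
        return 1;
    }

    size_t len = 16 * 1024 * 1024;   /* one 16 MiB huge page, as on ppc64 */
    void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    /* A length that is a multiple of getpagesize() but not of the huge
     * page size violates the munmap(2) rule quoted above: the call is
     * expected to fail with EINVAL, leaving the huge page mapped. */
    if (munmap(p, getpagesize()) != 0) {
        printf("short munmap failed: %s\n", strerror(errno));
    }

    /* Unmapping the full, huge-page-multiple length succeeds, and the
     * page goes back to the hugetlbfs pool. */
    if (munmap(p, len) == 0) {
        printf("full munmap: ok\n");
    }

    close(fd);
    unlink("/dev/hugepages/demo");
    return 0;
}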
Diffstat (limited to 'exec.c')
-rw-r--r--    exec.c    4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/exec.c b/exec.c
index 25f3938a27..03dd673d36 100644
--- a/exec.c
+++ b/exec.c
@@ -1873,7 +1873,7 @@ static void *file_ram_alloc(RAMBlock *block,
     if (mem_prealloc) {
         os_mem_prealloc(fd, area, memory, smp_cpus, errp);
         if (errp && *errp) {
-            qemu_ram_munmap(area, memory);
+            qemu_ram_munmap(fd, area, memory);
             return NULL;
         }
     }
@@ -2394,7 +2394,7 @@ static void reclaim_ramblock(RAMBlock *block)
         xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
     } else if (block->fd >= 0) {
-        qemu_ram_munmap(block->host, block->max_length);
+        qemu_ram_munmap(block->fd, block->host, block->max_length);
         close(block->fd);
 #endif
     } else {
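The exec.c hunks above only adapt the two callers to the new signature; the substance of the fix lives in util/mmap-alloc.c (not covered by this diffstat), where qemu_ram_munmap() gains the fd so the unmap path can derive the underlying page size from the backing file instead of assuming getpagesize(). A minimal sketch of that idea, with illustrative helper names (fd_getpagesize, ram_munmap) rather than the exact QEMU code:

#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>
#include <sys/vfs.h>     /* fstatfs(), struct statfs */
#include <unistd.h>

#ifndef HUGETLBFS_MAGIC
#define HUGETLBFS_MAGIC 0x958458f6
#endif

/* Page size backing fd: the hugetlbfs block size when the file lives on
 * hugetlbfs, the native page size otherwise. */
static size_t fd_getpagesize(int fd)
{
    struct statfs fs;
    int ret;

    if (fd != -1) {
        do {
            ret = fstatfs(fd, &fs);
        } while (ret != 0 && errno == EINTR);

        if (ret == 0 && fs.f_type == HUGETLBFS_MAGIC) {
            return fs.f_bsize;   /* underlying huge page size */
        }
    }
    return getpagesize();
}

/* Unmap a mapping created from fd, rounding the length up to the
 * underlying page size (assumed power of two) so that both address and
 * length satisfy the Huge TLB rule from munmap(2). */
static void ram_munmap(int fd, void *ptr, size_t size)
{
    size_t pagesize = fd_getpagesize(fd);

    if (ptr) {
        munmap(ptr, (size + pagesize - 1) & ~(pagesize - 1));
    }
}

With the fd threaded through, the length passed to munmap() is a huge page multiple whenever the file sits on hugetlbfs, which is what lets the kernel return the pages to the pool on hot-unplug.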