| author | David Hildenbrand <david@redhat.com> | 2021-05-10 13:43:20 +0200 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2021-06-15 20:27:38 +0200 |
| commit | b444f5c079fdb8019d2c59ffa6b67069e857f4e1 (patch) | |
| tree | 83e9c285a457e6e02cae8d47b6c97335b2a9f1b7 /util/mmap-alloc.c | |
| parent | ebef62d0e527d4a021f94a405fb38db263f3c4a5 (diff) |
util/mmap-alloc: Pass flags instead of separate bools to qemu_ram_mmap()
Let's pass flags instead of bools, to prepare for passing additional flags, and
update the documentation of qemu_ram_mmap(). Introduce new QEMU_MAP_
flags that abstract the mmap() PROT_ and MAP_ flag handling and simplify
it.
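
For orientation, a minimal sketch of what such QEMU_MAP_ flag definitions could look like. The flag names match the diff below; the exact bit values here are assumptions for illustration, not taken from the patch.

```c
/* Sketch only: names as used in the diff below; bit values assumed. */
#define QEMU_MAP_READONLY   (1 << 0)  /* map without PROT_WRITE */
#define QEMU_MAP_SHARED     (1 << 1)  /* use MAP_SHARED instead of MAP_PRIVATE */
#define QEMU_MAP_SYNC       (1 << 2)  /* try MAP_SYNC | MAP_SHARED_VALIDATE */
```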
We expose only flags that are currently supported by qemu_ram_mmap().
Maybe we'll also see a qemu_mmap() in the future that can implement these
flags.
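
As a hedged sketch of the caller side, a hypothetical helper (not part of this patch) could translate the old bool parameters into the new flags argument, using the QEMU_MAP_* values sketched above:

```c
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: build a qemu_map_flags value from the bools that
 * callers used to pass to qemu_ram_mmap() directly. */
static uint32_t to_qemu_map_flags(bool readonly, bool shared, bool is_pmem)
{
    uint32_t flags = 0;

    flags |= readonly ? QEMU_MAP_READONLY : 0;
    flags |= shared ? QEMU_MAP_SHARED : 0;
    flags |= is_pmem ? QEMU_MAP_SYNC : 0;

    return flags;
}
```

A call such as qemu_ram_mmap(fd, size, align, to_qemu_map_flags(false, true, true), 0) would then request a writable, shared mapping with MAP_SYNC semantics, matching what the old bool-based interface expressed.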
Note: We don't use raw MAP_ flags, as some of them (e.g., MAP_SYNC) are only
defined on some systems, and we want to always be able to identify
these flags reliably inside qemu_ram_mmap() -- for example, to properly
warn when future flags are not available or not effective on a given system.
This way we can also simplify the PROT_ handling.
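
To illustrate the portability point, here is a sketch (assumed, not taken from the patch) of how the abstracted flag lets qemu_ram_mmap() recognize a MAP_SYNC request even on hosts whose headers don't define MAP_SYNC; warn_report() is QEMU's warning helper from qemu/error-report.h, and check_map_sync_support() is a hypothetical name.

```c
/* Sketch only: QEMU_MAP_SYNC is always defined, so a sync request can be
 * detected and warned about even where the raw mmap() flag is missing. */
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif

static void check_map_sync_support(uint32_t qemu_map_flags)
{
    if ((qemu_map_flags & QEMU_MAP_SYNC) && MAP_SYNC == 0) {
        warn_report("MAP_SYNC requested but not supported by this host");
    }
}
```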
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Reviewed-by: Peter Xu <peterx@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com> for memory backend and machine core
Signed-off-by: David Hildenbrand <david@redhat.com>
Message-Id: <20210510114328.21835-8-david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'util/mmap-alloc.c')
-rw-r--r-- | util/mmap-alloc.c | 15 |
1 file changed, 8 insertions, 7 deletions
```diff
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index 0e2bd7bc0e..1ddc0e2a1e 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -118,9 +118,12 @@ static void *mmap_reserve(size_t size, int fd)
  * Activate memory in a reserved region from the given fd (if any), to make
  * it accessible.
  */
-static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
-                           bool shared, bool is_pmem, off_t map_offset)
+static void *mmap_activate(void *ptr, size_t size, int fd,
+                           uint32_t qemu_map_flags, off_t map_offset)
 {
+    const bool readonly = qemu_map_flags & QEMU_MAP_READONLY;
+    const bool shared = qemu_map_flags & QEMU_MAP_SHARED;
+    const bool sync = qemu_map_flags & QEMU_MAP_SYNC;
     const int prot = PROT_READ | (readonly ? 0 : PROT_WRITE);
     int map_sync_flags = 0;
     int flags = MAP_FIXED;
@@ -128,7 +131,7 @@ static void *mmap_activate(void *ptr, size_t size, int fd, bool readonly,
 
     flags |= fd == -1 ? MAP_ANONYMOUS : 0;
     flags |= shared ? MAP_SHARED : MAP_PRIVATE;
-    if (shared && is_pmem) {
+    if (shared && sync) {
         map_sync_flags = MAP_SYNC | MAP_SHARED_VALIDATE;
     }
 
@@ -173,9 +176,7 @@ static inline size_t mmap_guard_pagesize(int fd)
 void *qemu_ram_mmap(int fd,
                     size_t size,
                     size_t align,
-                    bool readonly,
-                    bool shared,
-                    bool is_pmem,
+                    uint32_t qemu_map_flags,
                     off_t map_offset)
 {
     const size_t guard_pagesize = mmap_guard_pagesize(fd);
@@ -199,7 +200,7 @@ void *qemu_ram_mmap(int fd,
 
     offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
 
-    ptr = mmap_activate(guardptr + offset, size, fd, readonly, shared, is_pmem,
+    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                         map_offset);
     if (ptr == MAP_FAILED) {
         munmap(guardptr, total);
```