Diffstat (limited to 'system')
-rw-r--r--  system/memory.c  |  5
-rw-r--r--  system/physmem.c | 34
2 files changed, 36 insertions, 3 deletions
diff --git a/system/memory.c b/system/memory.c
index a229a79988..c756950c0c 100644
--- a/system/memory.c
+++ b/system/memory.c
@@ -1850,6 +1850,11 @@ bool memory_region_is_protected(MemoryRegion *mr)
return mr->ram && (mr->ram_block->flags & RAM_PROTECTED);
}
+bool memory_region_has_guest_memfd(MemoryRegion *mr)
+{
+ return mr->ram_block && mr->ram_block->guest_memfd >= 0;
+}
+
uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
{
uint8_t mask = mr->dirty_log_mask;
diff --git a/system/physmem.c b/system/physmem.c
index a4fe3d2bf8..f5dfa20e57 100644
--- a/system/physmem.c
+++ b/system/physmem.c
@@ -1808,6 +1808,7 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
const bool shared = qemu_ram_is_shared(new_block);
RAMBlock *block;
RAMBlock *last_block = NULL;
+ bool free_on_error = false;
ram_addr_t old_ram_size, new_ram_size;
Error *err = NULL;
@@ -1837,6 +1838,19 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
return;
}
memory_try_enable_merging(new_block->host, new_block->max_length);
+ free_on_error = true;
+ }
+ }
+
+ if (new_block->flags & RAM_GUEST_MEMFD) {
+ assert(kvm_enabled());
+ assert(new_block->guest_memfd < 0);
+
+ new_block->guest_memfd = kvm_create_guest_memfd(new_block->max_length,
+ 0, errp);
+ if (new_block->guest_memfd < 0) {
+ qemu_mutex_unlock_ramlist();
+ goto out_free;
}
}
@@ -1888,6 +1902,13 @@ static void ram_block_add(RAMBlock *new_block, Error **errp)
ram_block_notify_add(new_block->host, new_block->used_length,
new_block->max_length);
}
+ return;
+
+out_free:
+ if (free_on_error) {
+ qemu_anon_ram_free(new_block->host, new_block->max_length);
+ new_block->host = NULL;
+ }
}
#ifdef CONFIG_POSIX
@@ -1902,7 +1923,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
/* Just support these ram flags by now. */
assert((ram_flags & ~(RAM_SHARED | RAM_PMEM | RAM_NORESERVE |
RAM_PROTECTED | RAM_NAMED_FILE | RAM_READONLY |
- RAM_READONLY_FD)) == 0);
+ RAM_READONLY_FD | RAM_GUEST_MEMFD)) == 0);
if (xen_enabled()) {
error_setg(errp, "-mem-path not supported with Xen");
@@ -1939,6 +1960,7 @@ RAMBlock *qemu_ram_alloc_from_fd(ram_addr_t size, MemoryRegion *mr,
new_block->used_length = size;
new_block->max_length = size;
new_block->flags = ram_flags;
+ new_block->guest_memfd = -1;
new_block->host = file_ram_alloc(new_block, size, fd, !file_size, offset,
errp);
if (!new_block->host) {
@@ -2018,7 +2040,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
int align;
assert((ram_flags & ~(RAM_SHARED | RAM_RESIZEABLE | RAM_PREALLOC |
- RAM_NORESERVE)) == 0);
+ RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
assert(!host ^ (ram_flags & RAM_PREALLOC));
align = qemu_real_host_page_size();
@@ -2033,6 +2055,7 @@ RAMBlock *qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
new_block->max_length = max_size;
assert(max_size >= size);
new_block->fd = -1;
+ new_block->guest_memfd = -1;
new_block->page_size = qemu_real_host_page_size();
new_block->host = host;
new_block->flags = ram_flags;
@@ -2055,7 +2078,7 @@ RAMBlock *qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
RAMBlock *qemu_ram_alloc(ram_addr_t size, uint32_t ram_flags,
MemoryRegion *mr, Error **errp)
{
- assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE)) == 0);
+ assert((ram_flags & ~(RAM_SHARED | RAM_NORESERVE | RAM_GUEST_MEMFD)) == 0);
return qemu_ram_alloc_internal(size, size, NULL, NULL, ram_flags, mr, errp);
}
@@ -2083,6 +2106,11 @@ static void reclaim_ramblock(RAMBlock *block)
} else {
qemu_anon_ram_free(block->host, block->max_length);
}
+
+ if (block->guest_memfd >= 0) {
+ close(block->guest_memfd);
+ }
+
g_free(block);
}
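As an aside, here is a minimal, self-contained sketch of the error-handling shape the ram_block_add() hunk above introduces: a first allocation succeeds, a later step (guest_memfd creation) may still fail, and a free_on_error flag plus a single out_free label decide whether the earlier allocation must be rolled back. The helper names below (fake_anon_ram_alloc, fake_create_guest_memfd, setup_block) are hypothetical stand-ins for illustration only, not QEMU APIs.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for the anonymous RAM allocation step. */
static void *fake_anon_ram_alloc(size_t len)
{
    return malloc(len);
}

/* Hypothetical stand-in for kvm_create_guest_memfd(); returns -1 on failure. */
static int fake_create_guest_memfd(bool should_fail)
{
    return should_fail ? -1 : 3;   /* pretend file descriptor */
}

/*
 * Mirrors the shape of the error path added to ram_block_add():
 * free_on_error records whether the first allocation happened in this
 * function, and the single out_free label rolls it back if a later
 * step fails, leaving nothing half-initialized.
 */
static int setup_block(size_t len, bool fail_fd, void **host, int *fd)
{
    bool free_on_error = false;

    *host = fake_anon_ram_alloc(len);
    if (!*host) {
        return -1;
    }
    free_on_error = true;

    *fd = fake_create_guest_memfd(fail_fd);
    if (*fd < 0) {
        goto out_free;
    }
    return 0;

out_free:
    if (free_on_error) {
        free(*host);
        *host = NULL;
    }
    return -1;
}

int main(void)
{
    void *host = NULL;
    int fd = -1;

    if (setup_block(4096, true, &host, &fd) < 0) {
        printf("second step failed, first allocation rolled back\n");
    }
    return 0;
}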