| author | Michael S. Tsirkin <mst@redhat.com> | 2015-09-10 16:41:17 +0300 |
|---|---|---|
| committer | Michael S. Tsirkin <mst@redhat.com> | 2015-10-01 16:16:52 +0300 |
| commit | 8561c9244ddf1122dfe7ccac9b23f506062f1499 | |
| tree | 80ad18ab7472e7cdc9bab615093c29cd92067cdb /exec.c | |
| parent | 9fac18f03a9040b67ec38e14d3e1ed34db9c7e06 | |
exec: allocate PROT_NONE pages on top of RAM
This inserts a read- and write-protected page between RAM and QEMU
memory, for file-backed RAM.
This makes it harder to exploit QEMU bugs resulting from buffer
overflows in devices using variants of cpu_physical_memory_map,
dma_memory_map, etc.
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
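
For context, here is a minimal, self-contained sketch of the guard-page technique the patch applies (plain POSIX C, not QEMU code; the buffer size and variable names are illustrative, and the actual patch below uses a hugepage-aware variant). A buffer overflow that runs off the end of `area` hits the PROT_NONE page and faults immediately instead of silently corrupting adjacent QEMU memory:

```c
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    size_t page = (size_t)sysconf(_SC_PAGESIZE);
    size_t memory = 4 * page;      /* usable buffer */
    size_t total = memory + page;  /* buffer plus one guard page */

    /* Reserve the whole range with no access rights first... */
    void *ptr = mmap(NULL, total, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ptr == MAP_FAILED) {
        perror("mmap (reserve)");
        return 1;
    }

    /* ...then map the usable part read/write on top of it with
     * MAP_FIXED, leaving the last page PROT_NONE as a guard. */
    char *area = mmap(ptr, memory, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    if (area == MAP_FAILED) {
        perror("mmap (map)");
        return 1;
    }

    memset(area, 0, memory);           /* fine: stays inside the buffer */
    printf("in-bounds write OK\n");

    area[memory] = 0;                  /* one byte past the end: SIGSEGV */
    return 0;                          /* never reached */
}
```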
Diffstat (limited to 'exec.c')
-rw-r--r-- | exec.c | 42 |
1 file changed, 39 insertions(+), 3 deletions(-)
```diff
@@ -84,6 +84,9 @@ static MemoryRegion io_mem_unassigned;
  */
 #define RAM_RESIZEABLE (1 << 2)
 
+/* An extra page is mapped on top of this RAM.
+ */
+#define RAM_EXTRA (1 << 3)
 #endif
 
 struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
@@ -1185,10 +1188,13 @@ static void *file_ram_alloc(RAMBlock *block,
     char *filename;
     char *sanitized_name;
     char *c;
+    void *ptr;
     void *area = NULL;
     int fd;
     uint64_t hpagesize;
+    uint64_t total;
     Error *local_err = NULL;
+    size_t offset;
 
     hpagesize = gethugepagesize(path, &local_err);
     if (local_err) {
@@ -1232,6 +1238,7 @@ static void *file_ram_alloc(RAMBlock *block,
     g_free(filename);
 
     memory = ROUND_UP(memory, hpagesize);
+    total = memory + hpagesize;
 
     /*
      * ftruncate is not supported by hugetlbfs in older
@@ -1243,16 +1250,40 @@ static void *file_ram_alloc(RAMBlock *block,
         perror("ftruncate");
     }
 
-    area = mmap(0, memory, PROT_READ | PROT_WRITE,
-                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
+    ptr = mmap(0, total, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS,
+               -1, 0);
+    if (ptr == MAP_FAILED) {
+        error_setg_errno(errp, errno,
+                         "unable to allocate memory range for hugepages");
+        close(fd);
+        goto error;
+    }
+
+    offset = QEMU_ALIGN_UP((uintptr_t)ptr, hpagesize) - (uintptr_t)ptr;
+
+    area = mmap(ptr + offset, memory, PROT_READ | PROT_WRITE,
+                (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE) |
+                MAP_FIXED,
                 fd, 0);
     if (area == MAP_FAILED) {
         error_setg_errno(errp, errno,
                          "unable to map backing store for hugepages");
+        munmap(ptr, total);
         close(fd);
         goto error;
     }
 
+    if (offset > 0) {
+        munmap(ptr, offset);
+    }
+    ptr += offset;
+    total -= offset;
+
+    if (total > memory + getpagesize()) {
+        munmap(ptr + memory + getpagesize(),
+               total - memory - getpagesize());
+    }
+
     if (mem_prealloc) {
         os_mem_prealloc(fd, area, memory);
     }
@@ -1570,6 +1601,7 @@ ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
     new_block->used_length = size;
     new_block->max_length = size;
     new_block->flags = share ? RAM_SHARED : 0;
+    new_block->flags |= RAM_EXTRA;
     new_block->host = file_ram_alloc(new_block, size,
                                      mem_path, errp);
     if (!new_block->host) {
@@ -1671,7 +1703,11 @@ static void reclaim_ramblock(RAMBlock *block)
         xen_invalidate_map_cache_entry(block->host);
 #ifndef _WIN32
     } else if (block->fd >= 0) {
-        munmap(block->host, block->max_length);
+        if (block->flags & RAM_EXTRA) {
+            munmap(block->host, block->max_length + getpagesize());
+        } else {
+            munmap(block->host, block->max_length);
+        }
         close(block->fd);
 #endif
     } else {
```
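
The pointer arithmetic in `file_ram_alloc()` is easier to follow with concrete numbers. The sketch below is not QEMU code: `ALIGN_UP` is a local stand-in for QEMU's `QEMU_ALIGN_UP` macro, and the addresses and sizes are made up for illustration. It shows that wherever the anonymous PROT_NONE reservation lands, after the leading padding and the excess tail are munmap'ed, exactly one host page of the reserved slack survives above the `memory` bytes of RAM; that page is the guard which `reclaim_ramblock()` later unmaps via the RAM_EXTRA flag:

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's QEMU_ALIGN_UP: round x up to a multiple of n. */
#define ALIGN_UP(x, n) (((x) + (n) - 1) / (n) * (n))

static void walk(uintptr_t ptr)  /* ptr: illustrative 64-bit mmap() result */
{
    const uint64_t hpagesize = 0x200000;   /* 2 MiB huge page */
    const uint64_t pagesize  = 0x1000;     /* 4 KiB host page */
    uint64_t memory = 4 * hpagesize;       /* RAM size, already rounded up */
    uint64_t total  = memory + hpagesize;  /* reserve one extra huge page */

    /* Leading padding so the file mapping starts huge-page aligned. */
    size_t offset = ALIGN_UP(ptr, hpagesize) - ptr;
    ptr   += offset;                       /* the padding is munmap'ed */
    total -= offset;

    /* Anything beyond RAM plus one guard page is munmap'ed at the top. */
    uint64_t trim = total > memory + pagesize ? total - memory - pagesize : 0;
    printf("offset=0x%zx trim=0x%llx -> RAM + exactly one PROT_NONE page\n",
           offset, (unsigned long long)trim);
}

int main(void)
{
    walk(0x7f0000200000);  /* already aligned: offset=0, trim=0x1ff000 */
    walk(0x7f0000201000);  /* misaligned: offset=0x1ff000, trim=0 */
    return 0;
}
```

Because `mmap()` returns page-aligned addresses, `offset` is always a multiple of the host page size and at most `hpagesize - pagesize`, so at least one page of slack (and, after the trim, exactly one) always remains for the guard.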