author		Markus Armbruster <armbru@redhat.com>	2016-03-07 20:25:13 +0100
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-03-15 18:23:33 +0100
commit		fd97fd4408040a9a6dfaf2fdaeca1c566db6d0aa (patch)
tree		90d1f57593b7fc19a8bae19ea4df301a533d707f
parent		2ae823d4f707df05f28509dfa7ae7293b8e9164f (diff)
exec: Fix memory allocation when memory path names new file
Commit 8d31d6b extended file_ram_alloc() to accept file names in
addition to directory names.  Even though it passes O_CREAT to open(),
it actually works only for existing files.

Reproducer adapted from the commit's qemu-doc.texi update:

    $ qemu-system-x86_64 -object memory-backend-file,size=2M,mem-path=/dev/hugepages/my-shmem-file,id=mb1
    qemu-system-x86_64: -object memory-backend-file,size=2M,mem-path=/dev/hugepages/my-shmem-file,id=mb1: failed to get page size of file /dev/hugepages/my-shmem-file: No such file or directory

This is because we first get the page size for @path, then open the
actual file.  Unwise even before the flawed commit, because the
directory could change in between, invalidating the page size.
Unlikely to bite in practice.

Rearrange the code to create the file (if necessary) before getting
its page size.  Carefully avoid TOCTTOU conditions with a method
suggested by Paolo Bonzini.

While there, replace "hugepages" by "guest RAM" in error messages,
because host memory backends can be used for purposes other than huge
pages, e.g. /dev/shm/ shared memory.  Help text of -mem-path agrees.

Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <1457378754-21649-2-git-send-email-armbru@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	exec.c	107
1 files changed, 64 insertions, 43 deletions
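The gist of the fix is to open (or create) the backing file first and only then ask for its page size, via fstatfs() on the already-open descriptor, retrying the open() on EINTR and on the EEXIST race between the plain open() and the O_CREAT | O_EXCL one. The following is a minimal standalone sketch of that pattern, for illustration only: the helper names open_backing_file() and backing_page_size() are invented here, and the QEMU specifics (Error reporting, the mkstemp() directory case, RAMBlock bookkeeping) are left out. The actual change is the diff below.

/*
 * Illustration only: simplified sketch of the open/create strategy
 * described in the commit message.  Not QEMU code.
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <sys/vfs.h>
#include <unistd.h>

/*
 * Open @path read-write, creating it if it does not exist yet.
 * *created is set when we created the file, so the caller can unlink
 * it again if a later step fails.  Returns an fd, or -1 with errno set.
 */
static int open_backing_file(const char *path, int *created)
{
    int fd;

    *created = 0;
    for (;;) {
        fd = open(path, O_RDWR);
        if (fd >= 0) {
            return fd;                  /* file already exists, use it */
        }
        if (errno == ENOENT) {
            /* Doesn't exist: create it atomically */
            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
            if (fd >= 0) {
                *created = 1;
                return fd;
            }
        }
        if (errno != EEXIST && errno != EINTR) {
            return -1;
        }
        /*
         * EEXIST: somebody else created the file between our two open()
         * calls, so the plain open() will now succeed.  EINTR: retry.
         */
    }
}

/* Page size of the filesystem backing @fd; the file exists by now,
 * so the answer cannot be invalidated by changes to the path. */
static long backing_page_size(int fd)
{
    struct statfs fs;
    int ret;

    do {
        ret = fstatfs(fd, &fs);
    } while (ret != 0 && errno == EINTR);
    return ret != 0 ? -1 : fs.f_bsize;
}

int main(int argc, char **argv)
{
    const char *path = argc > 1 ? argv[1] : "/dev/shm/demo-backing";
    int created;
    int fd = open_backing_file(path, &created);

    if (fd < 0) {
        perror(path);
        return 1;
    }
    printf("%s: page size %ld\n", path, backing_page_size(fd));
    close(fd);
    if (created) {
        unlink(path);
    }
    return 0;
}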
diff --git a/exec.c b/exec.c
index f09dd4e928..3380836748 100644
--- a/exec.c
+++ b/exec.c
@@ -1233,19 +1233,17 @@ void qemu_mutex_unlock_ramlist(void)
 
 #define HUGETLBFS_MAGIC 0x958458f6
 
-static long gethugepagesize(const char *path, Error **errp)
+static long gethugepagesize(int fd)
 {
     struct statfs fs;
     int ret;
 
     do {
-        ret = statfs(path, &fs);
+        ret = fstatfs(fd, &fs);
     } while (ret != 0 && errno == EINTR);
 
     if (ret != 0) {
-        error_setg_errno(errp, errno, "failed to get page size of file %s",
-                         path);
-        return 0;
+        return -1;
     }
 
     return fs.f_bsize;
@@ -1256,60 +1254,79 @@ static void *file_ram_alloc(RAMBlock *block,
                             const char *path,
                             Error **errp)
 {
-    struct stat st;
+    bool unlink_on_error = false;
     char *filename;
     char *sanitized_name;
     char *c;
     void *area;
     int fd;
-    uint64_t hpagesize;
-    Error *local_err = NULL;
-
-    hpagesize = gethugepagesize(path, &local_err);
-    if (local_err) {
-        error_propagate(errp, local_err);
-        goto error;
-    }
-    block->mr->align = hpagesize;
-
-    if (memory < hpagesize) {
-        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
-                   "or larger than huge page size 0x%" PRIx64,
-                   memory, hpagesize);
-        goto error;
-    }
+    int64_t hpagesize;
 
     if (kvm_enabled() && !kvm_has_sync_mmu()) {
         error_setg(errp,
                    "host lacks kvm mmu notifiers, -mem-path unsupported");
-        goto error;
+        return NULL;
     }
 
-    if (!stat(path, &st) && S_ISDIR(st.st_mode)) {
-        /* Make name safe to use with mkstemp by replacing '/' with '_'. */
-        sanitized_name = g_strdup(memory_region_name(block->mr));
-        for (c = sanitized_name; *c != '\0'; c++) {
-            if (*c == '/') {
-                *c = '_';
-            }
+    for (;;) {
+        fd = open(path, O_RDWR);
+        if (fd >= 0) {
+            /* @path names an existing file, use it */
+            break;
         }
+        if (errno == ENOENT) {
+            /* @path names a file that doesn't exist, create it */
+            fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644);
+            if (fd >= 0) {
+                unlink_on_error = true;
+                break;
+            }
+        } else if (errno == EISDIR) {
+            /* @path names a directory, create a file there */
+            /* Make name safe to use with mkstemp by replacing '/' with '_'. */
+            sanitized_name = g_strdup(memory_region_name(block->mr));
+            for (c = sanitized_name; *c != '\0'; c++) {
+                if (*c == '/') {
+                    *c = '_';
+                }
+            }
 
-        filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
-                                   sanitized_name);
-        g_free(sanitized_name);
+            filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
+                                       sanitized_name);
+            g_free(sanitized_name);
 
-        fd = mkstemp(filename);
-        if (fd >= 0) {
-            unlink(filename);
+            fd = mkstemp(filename);
+            if (fd >= 0) {
+                unlink(filename);
+                g_free(filename);
+                break;
+            }
+            g_free(filename);
         }
-        g_free(filename);
-    } else {
-        fd = open(path, O_RDWR | O_CREAT, 0644);
+        if (errno != EEXIST && errno != EINTR) {
+            error_setg_errno(errp, errno,
+                             "can't open backing store %s for guest RAM",
+                             path);
+            goto error;
+        }
+        /*
+         * Try again on EINTR and EEXIST. The latter happens when
+         * something else creates the file between our two open().
+         */
     }
 
-    if (fd < 0) {
-        error_setg_errno(errp, errno,
-                         "unable to create backing store for hugepages");
+    hpagesize = gethugepagesize(fd);
+    if (hpagesize < 0) {
+        error_setg_errno(errp, errno, "can't get page size for %s",
+                         path);
+        goto error;
+    }
+    block->mr->align = hpagesize;
+
+    if (memory < hpagesize) {
+        error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
+                   "or larger than page size 0x%" PRIx64,
+                   memory, hpagesize);
         goto error;
     }
 
@@ -1328,7 +1345,7 @@ static void *file_ram_alloc(RAMBlock *block,
     area = qemu_ram_mmap(fd, memory, hpagesize, block->flags & RAM_SHARED);
     if (area == MAP_FAILED) {
         error_setg_errno(errp, errno,
-                         "unable to map backing store for hugepages");
+                         "unable to map backing store for guest RAM");
         close(fd);
         goto error;
     }
@@ -1341,6 +1358,10 @@ static void *file_ram_alloc(RAMBlock *block,
     return area;
 
 error:
+    if (unlink_on_error) {
+        unlink(path);
+    }
+    close(fd);
     return NULL;
 }
 #endif