diff options
author | Dr. David Alan Gilbert <dgilbert@redhat.com> | 2015-11-05 18:11:22 +0000 |
---|---|---|
committer | Juan Quintela <quintela@redhat.com> | 2015-11-10 15:00:28 +0100 |
commit | 58b7c17e226aa4d3b943ea22c1d1309126de146b (patch) | |
tree | 92d3ed7a85849b125370020ee2af9ace3ff7573d | |
parent | e9bef235d91bff87517770c93d74eb98e5a33278 (diff) |
Disable mlock around incoming postcopy
Userfault doesn't work with mlock: mlock is designed to nail down pages
so they don't move, while userfault is designed to tell you when they're
not there.
munlock the pages we userfault-protect before postcopy begins, and
mlock everything again at the end if mlock is enabled.
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Amit Shah <amit.shah@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
-rw-r--r-- | include/sysemu/sysemu.h | 1 | ||||
-rw-r--r-- | migration/postcopy-ram.c | 24 |
2 files changed, 25 insertions, 0 deletions
diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 05d1982bda..f992494e10 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -172,6 +172,7 @@ extern int boot_menu; extern bool boot_strict; extern uint8_t *boot_splash_filedata; extern size_t boot_splash_filedata_size; +extern bool enable_mlock; extern uint8_t qemu_extra_params_fw[2]; extern QEMUClockType rtc_clock; extern const char *mem_path; diff --git a/migration/postcopy-ram.c b/migration/postcopy-ram.c index 8e107fe8e9..1a24b0937e 100644 --- a/migration/postcopy-ram.c +++ b/migration/postcopy-ram.c @@ -87,6 +87,11 @@ static bool ufd_version_check(int ufd) return true; } +/* + * Note: This has the side effect of munlock'ing all of RAM, that's + * normally fine since if the postcopy succeeds it gets turned back on at the + * end. + */ bool postcopy_ram_supported_by_host(void) { long pagesize = getpagesize(); @@ -115,6 +120,15 @@ bool postcopy_ram_supported_by_host(void) } /* + * userfault and mlock don't go together; we'll put it back later if + * it was enabled. + */ + if (munlockall()) { + error_report("%s: munlockall: %s", __func__, strerror(errno)); + return -1; + } + + /* * We need to check that the ops we need are supported on anon memory * To do that we need to register a chunk and see the flags that * are returned. @@ -294,6 +308,16 @@ int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) mis->have_fault_thread = false; } + if (enable_mlock) { + if (os_mlock() < 0) { + error_report("mlock: %s", strerror(errno)); + /* + * It doesn't feel right to fail at this point, we have a valid + * VM state. + */ + } + } + postcopy_state_set(POSTCOPY_INCOMING_END); migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0); |