author     Roman Kagan <rkagan@virtuozzo.com>      2018-09-21 11:22:11 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>     2018-10-19 13:44:14 +0200
commit     267e071bd6d675c15e7ffbf8aaf44d488ebd5c83
tree       cee516311465fd6a9167a13dd2769e547e8911ea /target/i386/hyperv.c
parent     9b4cf107b09d18ac30f46fd1c4de8585ccba030c
hyperv: make overlay pages for SynIC
Per the Hyper-V spec, SynIC message and event flag pages are to be implemented as so-called overlay pages. That is, they are owned by the hypervisor and, when mapped into the guest physical address space, overlay the guest physical pages such that:

1) the overlaid guest page becomes invisible to the guest CPUs until the overlay page is turned off;

2) the contents of the overlay page are preserved when it's turned off and back on, even at a different address; it is only zeroed at vcpu reset.

This particular nature of the SynIC message and event flag pages is ignored in the current code, and guest physical pages are used directly instead. This happens to (mostly) work because actual guests do not seem to depend on the features listed above.

This patch implements those pages as the spec mandates.

Since the extra RAM regions, which introduce migration incompatibility, are only added at SynIC object creation, which in turn only happens when hyperv_synic_kvm_only == false, no extra compat logic is necessary.

Signed-off-by: Roman Kagan <rkagan@virtuozzo.com>
Message-Id: <20180921082217.29481-5-rkagan@virtuozzo.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
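For illustration, the overlay behaviour described above maps naturally onto QEMU's memory API: a hypervisor-owned RAM region is placed over the guest page at the requested GPA with a higher priority, so the guest page is hidden while the overlay is mapped, and the overlay's backing RAM keeps its contents when it is moved to another address. The following is only a hedged sketch of that idea; the SynICOverlay struct and synic_overlay_* helpers are hypothetical names, not the ones used by this patch (the real code lives in hw/hyperv/hyperv.c).

/*
 * Hypothetical sketch: an overlay page as a high-priority RAM subregion.
 * Struct and function names are illustrative only.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#define OVERLAY_PAGE_SIZE 0x1000   /* Hyper-V pages are 4 KiB */

typedef struct SynICOverlay {
    MemoryRegion mr;      /* hypervisor-owned backing RAM, one page */
    hwaddr mapped_at;     /* current guest physical address, 0 = unmapped */
} SynICOverlay;

static void synic_overlay_init(SynICOverlay *ov, Object *owner,
                               const char *name)
{
    /*
     * The backing RAM outlives any particular mapping, so its contents
     * are preserved when the overlay is turned off and back on.
     */
    memory_region_init_ram(&ov->mr, owner, name, OVERLAY_PAGE_SIZE,
                           &error_abort);
    ov->mapped_at = 0;
}

static void synic_overlay_update(SynICOverlay *ov, hwaddr gpa)
{
    if (ov->mapped_at == gpa) {
        return;
    }
    if (ov->mapped_at) {
        memory_region_del_subregion(get_system_memory(), &ov->mr);
    }
    if (gpa) {
        /*
         * Higher priority than plain guest RAM, so the overlaid guest
         * page becomes invisible until the overlay is removed again.
         */
        memory_region_add_subregion_overlap(get_system_memory(), gpa,
                                            &ov->mr, 10);
    }
    ov->mapped_at = gpa;
}

Because such an update changes the memory hierarchy, the diff below replaces the direct hyperv_x86_synic_update() call with a deferred async_safe_run_on_cpu() call, so the remapping runs in the vCPU thread only once all CPUs are quiescent.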
Diffstat (limited to 'target/i386/hyperv.c')
-rw-r--r--  target/i386/hyperv.c | 20
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c
index 0216735d67..3f76c3e266 100644
--- a/target/i386/hyperv.c
+++ b/target/i386/hyperv.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
+#include "qemu/main-loop.h"
#include "hyperv.h"
#include "hw/hyperv/hyperv.h"
#include "hyperv-proto.h"
@@ -38,6 +39,13 @@ void hyperv_x86_synic_update(X86CPU *cpu)
hyperv_synic_update(CPU(cpu), enable, msg_page_addr, event_page_addr);
}
+static void async_synic_update(CPUState *cs, run_on_cpu_data data)
+{
+ qemu_mutex_lock_iothread();
+ hyperv_x86_synic_update(X86_CPU(cs));
+ qemu_mutex_unlock_iothread();
+}
+
int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
{
CPUX86State *env = &cpu->env;
@@ -48,11 +56,6 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
return -1;
}
- /*
- * For now just track changes in SynIC control and msg/evt pages msr's.
- * When SynIC messaging/events processing will be added in future
- * here we will do messages queues flushing and pages remapping.
- */
switch (exit->u.synic.msr) {
case HV_X64_MSR_SCONTROL:
env->msr_hv_synic_control = exit->u.synic.control;
@@ -67,7 +70,12 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
return -1;
}
- hyperv_x86_synic_update(cpu);
+ /*
+ * this will run in this cpu thread before it returns to KVM, but in a
+ * safe environment (i.e. when all cpus are quiescent) -- this is
+ * necessary because memory hierarchy is being changed
+ */
+ async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL);
return 0;
case KVM_EXIT_HYPERV_HCALL: {