Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/Makefile.objs |   2
-rw-r--r--  target-i386/cpu-qom.h     |   2
-rw-r--r--  target-i386/cpu.c         |   4
-rw-r--r--  target-i386/cpu.h         |   7
-rw-r--r--  target-i386/hyperv.c      | 127
-rw-r--r--  target-i386/hyperv.h      |  42
-rw-r--r--  target-i386/kvm.c         | 184
-rw-r--r--  target-i386/kvm_i386.h    |   2
-rw-r--r--  target-i386/machine.c     |  66
9 files changed, 422 insertions(+), 14 deletions(-)
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index 437d9975b9..2255f46a9e 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -3,5 +3,5 @@ obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
 obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
 obj-y += gdbstub.o
 obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
-obj-$(CONFIG_KVM) += kvm.o
+obj-$(CONFIG_KVM) += kvm.o hyperv.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d07e..5f9d960b25 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -94,6 +94,8 @@ typedef struct X86CPU {
     bool hyperv_reset;
     bool hyperv_vpindex;
     bool hyperv_runtime;
+    bool hyperv_synic;
+    bool hyperv_stimer;
     bool check_cpuid;
     bool enforce_cpuid;
     bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 11e5e39a75..0d447b5690 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -2743,7 +2743,7 @@ static void x86_cpu_apic_create(X86CPU *cpu, Error **errp)
     APICCommonState *apic;
     const char *apic_type = "apic";
 
-    if (kvm_irqchip_in_kernel()) {
+    if (kvm_apic_in_kernel()) {
         apic_type = "kvm-apic";
     } else if (xen_enabled()) {
         apic_type = "xen-apic";
@@ -3146,6 +3146,8 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
     DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
     DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+    DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
+    DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
     DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
     DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
     DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
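With the properties above in place, SynIC and the synthetic timers become opt-in per CPU, like the other hv-* enlightenments. A hypothetical invocation (machine type, disk, and other options elided) would look like:

    qemu-system-x86_64 -enable-kvm \
        -cpu host,hv-relaxed,hv-vapic,hv-synic,hv-stimer

Both properties default to off; as the kvm.c hunks below show, each is rejected at vCPU init time if the host kernel does not advertise the matching MSRs and capability.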
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 84edfd0d8a..595891e78d 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -920,6 +920,13 @@ typedef struct CPUX86State {
     uint64_t msr_hv_tsc;
     uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
+    uint64_t msr_hv_synic_control;
+    uint64_t msr_hv_synic_version;
+    uint64_t msr_hv_synic_evt_page;
+    uint64_t msr_hv_synic_msg_page;
+    uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
+    uint64_t msr_hv_stimer_config[HV_SYNIC_STIMER_COUNT];
+    uint64_t msr_hv_stimer_count[HV_SYNIC_STIMER_COUNT];
 
     /* exception/interrupt handling */
     int error_code;
diff --git a/target-i386/hyperv.c b/target-i386/hyperv.c
new file mode 100644
index 0000000000..e79b173a81
--- /dev/null
+++ b/target-i386/hyperv.c
@@ -0,0 +1,127 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *  Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "hyperv.h"
+#include "standard-headers/asm-x86/hyperv.h"
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
+{
+    CPUX86State *env = &cpu->env;
+
+    switch (exit->type) {
+    case KVM_EXIT_HYPERV_SYNIC:
+        if (!cpu->hyperv_synic) {
+            return -1;
+        }
+
+        /*
+         * For now, just track changes to the SynIC control and msg/evt page
+         * MSRs. Once SynIC message/event processing is added in the future,
+         * message queue flushing and page remapping will happen here.
+         */
+        switch (exit->u.synic.msr) {
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = exit->u.synic.control;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = exit->u.synic.msg_page;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = exit->u.synic.evt_page;
+            break;
+        default:
+            return -1;
+        }
+        return 0;
+    default:
+        return -1;
+    }
+}
+
+static void kvm_hv_sint_ack_handler(EventNotifier *notifier)
+{
+    HvSintRoute *sint_route = container_of(notifier, HvSintRoute,
+                                           sint_ack_notifier);
+    event_notifier_test_and_clear(notifier);
+    if (sint_route->sint_ack_clb) {
+        sint_route->sint_ack_clb(sint_route);
+    }
+}
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+                                      HvSintAckClb sint_ack_clb)
+{
+    HvSintRoute *sint_route;
+    int r, gsi;
+
+    sint_route = g_malloc0(sizeof(*sint_route));
+    r = event_notifier_init(&sint_route->sint_set_notifier, false);
+    if (r) {
+        goto err;
+    }
+
+    r = event_notifier_init(&sint_route->sint_ack_notifier, false);
+    if (r) {
+        goto err_sint_set_notifier;
+    }
+
+    event_notifier_set_handler(&sint_route->sint_ack_notifier,
+                               kvm_hv_sint_ack_handler);
+
+    gsi = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu_id, sint);
+    if (gsi < 0) {
+        goto err_gsi;
+    }
+
+    r = kvm_irqchip_add_irqfd_notifier_gsi(kvm_state,
+                                           &sint_route->sint_set_notifier,
+                                           &sint_route->sint_ack_notifier, gsi);
+    if (r) {
+        goto err_irqfd;
+    }
+    sint_route->gsi = gsi;
+    sint_route->sint_ack_clb = sint_ack_clb;
+    sint_route->vcpu_id = vcpu_id;
+    sint_route->sint = sint;
+
+    return sint_route;
+
+err_irqfd:
+    kvm_irqchip_release_virq(kvm_state, gsi);
+err_gsi:
+    event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
+    event_notifier_cleanup(&sint_route->sint_ack_notifier);
+err_sint_set_notifier:
+    event_notifier_cleanup(&sint_route->sint_set_notifier);
+err:
+    g_free(sint_route);
+
+    return NULL;
+}
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route)
+{
+    kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state,
+                                          &sint_route->sint_set_notifier,
+                                          sint_route->gsi);
+    kvm_irqchip_release_virq(kvm_state, sint_route->gsi);
+    event_notifier_set_handler(&sint_route->sint_ack_notifier, NULL);
+    event_notifier_cleanup(&sint_route->sint_ack_notifier);
+    event_notifier_cleanup(&sint_route->sint_set_notifier);
+    g_free(sint_route);
+}
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route)
+{
+    return event_notifier_set(&sint_route->sint_set_notifier);
+}
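kvm_hv_handle_exit() above only mirrors guest-initiated state into CPUX86State so it can be migrated; the writes themselves come from the guest. As a rough guest-side sketch (wrmsr(), the page addresses, and the single-bit ENABLE macros are assumptions following the Hyper-V TLFS layout, not part of this patch):

    /* Guest sketch: point SynIC at its overlay pages, then enable it. */
    wrmsr(HV_X64_MSR_SIMP, msg_page_gpa | HV_SYNIC_SIMP_ENABLE);
    wrmsr(HV_X64_MSR_SIEFP, evt_page_gpa | HV_SYNIC_SIEFP_ENABLE);
    wrmsr(HV_X64_MSR_SCONTROL, HV_SYNIC_CONTROL_ENABLE);

KVM intercepts these MSRs and raises KVM_EXIT_HYPERV_SYNIC, which is how the handler above learns the new page addresses.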
diff --git a/target-i386/hyperv.h b/target-i386/hyperv.h
new file mode 100644
index 0000000000..b26201f8b9
--- /dev/null
+++ b/target-i386/hyperv.h
@@ -0,0 +1,42 @@
+/*
+ * QEMU KVM Hyper-V support
+ *
+ * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * Authors:
+ *  Andrey Smetanin <asmetanin@virtuozzo.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef HYPERV_I386_H
+#define HYPERV_I386_H
+
+#include "cpu.h"
+#include "sysemu/kvm.h"
+#include "qemu/event_notifier.h"
+
+typedef struct HvSintRoute HvSintRoute;
+typedef void (*HvSintAckClb)(HvSintRoute *sint_route);
+
+struct HvSintRoute {
+    uint32_t sint;
+    uint32_t vcpu_id;
+    int gsi;
+    EventNotifier sint_set_notifier;
+    EventNotifier sint_ack_notifier;
+    HvSintAckClb sint_ack_clb;
+};
+
+int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit);
+
+HvSintRoute *kvm_hv_sint_route_create(uint32_t vcpu_id, uint32_t sint,
+                                      HvSintAckClb sint_ack_clb);
+
+void kvm_hv_sint_route_destroy(HvSintRoute *sint_route);
+
+int kvm_hv_sint_route_set_sint(HvSintRoute *sint_route);
+
+#endif
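hyperv.h is the entire consumer-facing API: create a route for a (vcpu, SINT) pair, kick it to inject the interrupt, and receive the ack callback once the guest EOIs. A minimal caller sketch (the SINT number and callback body are made up for illustration; no in-tree user exists at this point in the series):

    static void my_sint_ack(HvSintRoute *sint_route)
    {
        /* Guest acknowledged the interrupt; next message may be queued. */
    }

    static void example(uint32_t vcpu_id)
    {
        HvSintRoute *route;

        route = kvm_hv_sint_route_create(vcpu_id, 2 /* SINT */, my_sint_ack);
        if (!route) {
            return; /* no kernel SynIC support, or irqfd setup failed */
        }
        kvm_hv_sint_route_set_sint(route); /* inject the interrupt */
        /* ... */
        kvm_hv_sint_route_destroy(route);
    }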
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 6dc9846398..ab65a6ebe5 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -25,6 +25,8 @@
 #include "sysemu/kvm_int.h"
 #include "kvm_i386.h"
 #include "cpu.h"
+#include "hyperv.h"
+
 #include "exec/gdbstub.h"
 #include "qemu/host-utils.h"
 #include "qemu/config-file.h"
@@ -33,9 +35,11 @@
 #include "hw/i386/apic.h"
 #include "hw/i386/apic_internal.h"
 #include "hw/i386/apic-msidef.h"
+
 #include "exec/ioport.h"
 #include "standard-headers/asm-x86/hyperv.h"
 #include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
 #include "migration/migration.h"
 #include "exec/memattrs.h"
 
@@ -86,6 +90,8 @@
 static bool has_msr_hv_crash;
 static bool has_msr_hv_reset;
 static bool has_msr_hv_vpindex;
 static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
+static bool has_msr_hv_stimer;
 static bool has_msr_mtrr;
 static bool has_msr_xss;
@@ -521,7 +527,9 @@ static bool hyperv_enabled(X86CPU *cpu)
             cpu->hyperv_crash ||
             cpu->hyperv_reset ||
             cpu->hyperv_vpindex ||
-            cpu->hyperv_runtime);
+            cpu->hyperv_runtime ||
+            cpu->hyperv_synic ||
+            cpu->hyperv_stimer);
 }
 
 static Error *invtsc_mig_blocker;
@@ -610,6 +618,28 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (cpu->hyperv_runtime && has_msr_hv_runtime) {
         c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
     }
+    if (cpu->hyperv_synic) {
+        int sint;
+
+        if (!has_msr_hv_synic ||
+            kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+            fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+            return -ENOSYS;
+        }
+
+        c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+        env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+            env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+        }
+    }
+    if (cpu->hyperv_stimer) {
+        if (!has_msr_hv_stimer) {
+            fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
+            return -ENOSYS;
+        }
+        c->eax |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+    }
     c = &cpuid_data.entries[cpuid_i++];
     c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
     if (cpu->hyperv_relaxed_timing) {
@@ -956,6 +986,14 @@ static int kvm_get_supported_msrs(KVMState *s)
                 has_msr_hv_runtime = true;
                 continue;
             }
+            if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+                has_msr_hv_synic = true;
+                continue;
+            }
+            if (kvm_msr_list->indices[i] == HV_X64_MSR_STIMER0_CONFIG) {
+                has_msr_hv_stimer = true;
+                continue;
+            }
         }
     }
 
@@ -1107,7 +1145,7 @@ static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs)
     lhs->l = (flags >> DESC_L_SHIFT) & 1;
     lhs->g = (flags & DESC_G_MASK) != 0;
     lhs->avl = (flags & DESC_AVL_MASK) != 0;
-    lhs->unusable = 0;
+    lhs->unusable = !lhs->present;
     lhs->padding = 0;
 }
 
@@ -1116,14 +1154,18 @@ static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs)
     lhs->selector = rhs->selector;
     lhs->base = rhs->base;
     lhs->limit = rhs->limit;
-    lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
-                 (rhs->present * DESC_P_MASK) |
-                 (rhs->dpl << DESC_DPL_SHIFT) |
-                 (rhs->db << DESC_B_SHIFT) |
-                 (rhs->s * DESC_S_MASK) |
-                 (rhs->l << DESC_L_SHIFT) |
-                 (rhs->g * DESC_G_MASK) |
-                 (rhs->avl * DESC_AVL_MASK);
+    if (rhs->unusable) {
+        lhs->flags = 0;
+    } else {
+        lhs->flags = (rhs->type << DESC_TYPE_SHIFT) |
+                     (rhs->present * DESC_P_MASK) |
+                     (rhs->dpl << DESC_DPL_SHIFT) |
+                     (rhs->db << DESC_B_SHIFT) |
+                     (rhs->s * DESC_S_MASK) |
+                     (rhs->l << DESC_L_SHIFT) |
+                     (rhs->g * DESC_G_MASK) |
+                     (rhs->avl * DESC_AVL_MASK);
+    }
 }
 
 static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set)
@@ -1517,6 +1559,36 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
             kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
                               env->msr_hv_runtime);
         }
+        if (cpu->hyperv_synic) {
+            int j;
+
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+                              env->msr_hv_synic_control);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+                              env->msr_hv_synic_version);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+                              env->msr_hv_synic_evt_page);
+            kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+                              env->msr_hv_synic_msg_page);
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+                                  env->msr_hv_synic_sint[j]);
+            }
+        }
+        if (has_msr_hv_stimer) {
+            int j;
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_CONFIG + j*2,
+                                  env->msr_hv_stimer_config[j]);
+            }
+
+            for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) {
+                kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_STIMER0_COUNT + j*2,
+                                  env->msr_hv_stimer_count[j]);
+            }
+        }
         if (has_msr_mtrr) {
             kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
             kvm_msr_entry_set(&msrs[n++],
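The j*2 stride in the stimer loops comes straight from the MSR numbering: the TLFS interleaves the four timers' registers as STIMER0_CONFIG, STIMER0_COUNT, STIMER1_CONFIG, STIMER1_COUNT, and so on. A sketch of the arithmetic both loops (and the /2 in the decode switch below) rely on:

    /* i-th synthetic timer, 0 <= i < HV_SYNIC_STIMER_COUNT */
    config_msr = HV_X64_MSR_STIMER0_CONFIG + i * 2;
    count_msr  = HV_X64_MSR_STIMER0_COUNT  + i * 2;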
@@ -1885,6 +1957,25 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_hv_runtime) {
         msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
     }
+    if (cpu->hyperv_synic) {
+        uint32_t msr;
+
+        msrs[n++].index = HV_X64_MSR_SCONTROL;
+        msrs[n++].index = HV_X64_MSR_SVERSION;
+        msrs[n++].index = HV_X64_MSR_SIEFP;
+        msrs[n++].index = HV_X64_MSR_SIMP;
+        for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+            msrs[n++].index = msr;
+        }
+    }
+    if (has_msr_hv_stimer) {
+        uint32_t msr;
+
+        for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT;
+             msr++) {
+            msrs[n++].index = msr;
+        }
+    }
     if (has_msr_mtrr) {
         msrs[n++].index = MSR_MTRRdefType;
         msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -2041,6 +2132,35 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_VP_RUNTIME:
             env->msr_hv_runtime = msrs[i].data;
             break;
+        case HV_X64_MSR_SCONTROL:
+            env->msr_hv_synic_control = msrs[i].data;
+            break;
+        case HV_X64_MSR_SVERSION:
+            env->msr_hv_synic_version = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIEFP:
+            env->msr_hv_synic_evt_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SIMP:
+            env->msr_hv_synic_msg_page = msrs[i].data;
+            break;
+        case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+            env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+            break;
+        case HV_X64_MSR_STIMER0_CONFIG:
+        case HV_X64_MSR_STIMER1_CONFIG:
+        case HV_X64_MSR_STIMER2_CONFIG:
+        case HV_X64_MSR_STIMER3_CONFIG:
+            env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] =
+                msrs[i].data;
+            break;
+        case HV_X64_MSR_STIMER0_COUNT:
+        case HV_X64_MSR_STIMER1_COUNT:
+        case HV_X64_MSR_STIMER2_COUNT:
+        case HV_X64_MSR_STIMER3_COUNT:
+            env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] =
+                msrs[i].data;
+            break;
         case MSR_MTRRdefType:
             env->mtrr_deftype = msrs[i].data;
             break;
@@ -2482,7 +2602,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         }
     }
 
-    if (!kvm_irqchip_in_kernel()) {
+    if (!kvm_pic_in_kernel()) {
         qemu_mutex_lock_iothread();
     }
 
@@ -2500,7 +2620,7 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
         }
     }
 
-    if (!kvm_irqchip_in_kernel()) {
+    if (!kvm_pic_in_kernel()) {
         /* Try to inject an interrupt if the guest can accept it */
         if (run->ready_for_interrupt_injection &&
             (cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
@@ -2899,6 +3019,13 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
         ret = kvm_handle_debug(cpu, &run->debug.arch);
         qemu_mutex_unlock_iothread();
         break;
+    case KVM_EXIT_HYPERV:
+        ret = kvm_hv_handle_exit(cpu, &run->hyperv);
+        break;
+    case KVM_EXIT_IOAPIC_EOI:
+        ioapic_eoi_broadcast(run->eoi.vector);
+        ret = 0;
+        break;
     default:
         fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
         ret = -1;
@@ -2933,6 +3060,39 @@ void kvm_arch_init_irq_routing(KVMState *s)
      */
     kvm_msi_via_irqfd_allowed = true;
     kvm_gsi_routing_allowed = true;
+
+    if (kvm_irqchip_is_split()) {
+        int i;
+
+        /* If the ioapic is in QEMU and the lapics are in KVM, reserve
+           MSI routes for signaling interrupts to the local apics. */
+        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
+            struct MSIMessage msg = { 0x0, 0x0 };
+            if (kvm_irqchip_add_msi_route(s, msg, NULL) < 0) {
+                error_report("Could not enable split IRQ mode.");
+                exit(1);
+            }
+        }
+    }
+}
+
+int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
+{
+    int ret;
+    if (machine_kernel_irqchip_split(ms)) {
+        ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24);
+        if (ret) {
+            error_report("Could not enable split irqchip mode: %s\n",
+                         strerror(-ret));
+            exit(1);
+        } else {
+            DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n");
+            kvm_split_irqchip = true;
+            return 1;
+        }
+    } else {
+        return 0;
+    }
 }
 
 /* Classic KVM device assignment interface. Will remain x86 only. */
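kvm_arch_irqchip_create() returning 1 tells the generic KVM layer that the irqchip was created in split mode: the LAPICs stay in the kernel while the PIC and IOAPIC are emulated in QEMU, which is why kvm_arch_init_irq_routing() pre-reserves one MSI route per IOAPIC pin and why KVM_EXIT_IOAPIC_EOI is broadcast back to the userspace IOAPIC. The literal 24 passed with KVM_CAP_SPLIT_IRQCHIP is the number of reserved IOAPIC pins. Assuming the standard machine-property wiring, the mode would be requested as:

    qemu-system-x86_64 -enable-kvm -machine pc,kernel-irqchip=split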
diff --git a/target-i386/kvm_i386.h b/target-i386/kvm_i386.h
index c1b312ba2a..42b00af1b1 100644
--- a/target-i386/kvm_i386.h
+++ b/target-i386/kvm_i386.h
@@ -13,6 +13,8 @@
 
 #include "sysemu/kvm.h"
 
+#define kvm_apic_in_kernel() (kvm_irqchip_in_kernel())
+
 bool kvm_allows_irq0_override(void);
 bool kvm_has_smm(void);
 void kvm_synchronize_all_tsc(void);
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a18e16e0de..6126d96d7f 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -710,6 +710,70 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = {
     }
 };
 
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_hv_synic_control != 0 ||
+        env->msr_hv_synic_evt_page != 0 ||
+        env->msr_hv_synic_msg_page != 0) {
+        return true;
+    }
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+        if (env->msr_hv_synic_sint[i] != 0) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+    .name = "cpu/msr_hyperv_synic",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_synic_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+        VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+                             HV_SYNIC_SINT_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static bool hyperv_stimer_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(env->msr_hv_stimer_config); i++) {
+        if (env->msr_hv_stimer_config[i] || env->msr_hv_stimer_count[i]) {
+            return true;
+        }
+    }
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_stimer = {
+    .name = "cpu/msr_hyperv_stimer",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = hyperv_stimer_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_config,
+                             X86CPU, HV_SYNIC_STIMER_COUNT),
+        VMSTATE_UINT64_ARRAY(env.msr_hv_stimer_count,
+                             X86CPU, HV_SYNIC_STIMER_COUNT),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 static bool avx512_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
@@ -893,6 +957,8 @@ VMStateDescription vmstate_x86_cpu = {
         &vmstate_msr_hyperv_time,
         &vmstate_msr_hyperv_crash,
         &vmstate_msr_hyperv_runtime,
+        &vmstate_msr_hyperv_synic,
+        &vmstate_msr_hyperv_stimer,
         &vmstate_avx512,
         &vmstate_xss,
         NULL
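Both new vmstate blocks follow QEMU's optional-subsection pattern: a subsection goes on the wire only when its .needed callback returns true, so migration to an older QEMU keeps working as long as the guest never touched the SynIC or stimer MSRs. A generic sketch of the pattern (the msr_foo field is hypothetical):

    static bool foo_needed(void *opaque)
    {
        X86CPU *cpu = opaque;

        return cpu->env.msr_foo != 0;   /* only send non-default state */
    }

    static const VMStateDescription vmstate_foo = {
        .name = "cpu/foo",
        .version_id = 1,
        .minimum_version_id = 1,
        .needed = foo_needed,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(env.msr_foo, X86CPU),
            VMSTATE_END_OF_LIST()
        }
    };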