author    Peter Maydell <peter.maydell@linaro.org>  2019-06-21 13:32:10 +0100
committer Peter Maydell <peter.maydell@linaro.org>  2019-06-21 13:32:10 +0100
commit    68d7ff0cff0c4905802104843cf0100543b47314 (patch)
tree      67df4f58d1ba8c382d775decdda785e169902018
parent    33d609990621dea6c7d056c86f707b8811320ac1 (diff)
parent    8e8cbed09ad9d577955691b4c061b61b602406d1 (diff)
Merge remote-tracking branch 'remotes/bonzini/tags/for-upstream' into staging
* Nuke hw_compat_4_0_1 and pc_compat_4_0_1 (Greg)
* Static analysis fixes (Igor, Lidong)
* X86 Hyper-V CPUID improvements (Vitaly)
* X86 nested virt migration (Liran)
* New MSR-based features (Xiaoyao)

# gpg: Signature made Fri 21 Jun 2019 12:25:42 BST
# gpg:                using RSA key BFFBD25F78C7AE83
# gpg: Good signature from "Paolo Bonzini <bonzini@gnu.org>" [full]
# gpg:                 aka "Paolo Bonzini <pbonzini@redhat.com>" [full]
# Primary key fingerprint: 46F5 9FBD 57D6 12E7 BFD4 E2F7 7E15 100C CD36 69B1
#      Subkey fingerprint: F133 3857 4B66 2389 866C 7682 BFFB D25F 78C7 AE83

* remotes/bonzini/tags/for-upstream: (25 commits)
  hw: Nuke hw_compat_4_0_1 and pc_compat_4_0_1
  util/main-loop: Fix incorrect assertion
  sd: Fix out-of-bounds assertions
  target/i386: kvm: Add nested migration blocker only when kernel lacks required capabilities
  target/i386: kvm: Add support for KVM_CAP_EXCEPTION_PAYLOAD
  target/i386: kvm: Add support for save and restore nested state
  vmstate: Add support for kernel integer types
  linux-headers: sync with latest KVM headers from Linux 5.2
  target/i386: kvm: Block migration for vCPUs exposed with nested virtualization
  target/i386: kvm: Re-inject #DB to guest with updated DR6
  target/i386: kvm: Use symbolic constant for #DB/#BP exception constants
  KVM: Introduce kvm_arch_destroy_vcpu()
  target/i386: kvm: Delete VMX migration blocker on vCPU init failure
  target/i386: define a new MSR based feature word - FEAT_CORE_CAPABILITY
  i386/kvm: add support for Direct Mode for Hyper-V synthetic timers
  i386/kvm: hv-evmcs requires hv-vapic
  i386/kvm: hv-tlbflush/ipi require hv-vpindex
  i386/kvm: hv-stimer requires hv-time and hv-synic
  i386/kvm: implement 'hv-passthrough' mode
  i386/kvm: document existing Hyper-V enlightenments
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
-rw-r--r--  accel/kvm/kvm-all.c            25
-rw-r--r--  cpus.c                          1
-rw-r--r--  docs/hyperv.txt               201
-rw-r--r--  hw/core/machine.c               5
-rw-r--r--  hw/i386/pc.c                    6
-rw-r--r--  hw/i386/pc_q35.c               12
-rw-r--r--  hw/sd/sd.c                      4
-rw-r--r--  include/hw/boards.h             3
-rw-r--r--  include/hw/i386/pc.h            3
-rw-r--r--  include/migration/vmstate.h    26
-rw-r--r--  include/sysemu/kvm.h            2
-rw-r--r--  linux-headers/asm-x86/kvm.h    33
-rw-r--r--  target/arm/kvm32.c              5
-rw-r--r--  target/arm/kvm64.c              5
-rw-r--r--  target/i386/cpu.c              81
-rw-r--r--  target/i386/cpu.h              65
-rw-r--r--  target/i386/hax-all.c          36
-rw-r--r--  target/i386/hvf/hvf.c          10
-rw-r--r--  target/i386/hvf/x86hvf.c        4
-rw-r--r--  target/i386/hyperv-proto.h      1
-rw-r--r--  target/i386/hyperv.c            2
-rw-r--r--  target/i386/kvm.c             999
-rw-r--r--  target/i386/machine.c         284
-rw-r--r--  target/mips/kvm.c               5
-rw-r--r--  target/ppc/kvm.c                5
-rw-r--r--  target/s390x/kvm.c             10
-rw-r--r--  util/main-loop.c                2
27 files changed, 1506 insertions, 329 deletions
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index b0c4bed6e3..e3cf72883b 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -87,6 +87,7 @@ struct KVMState
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
+ int max_nested_state_len;
int many_ioeventfds;
int intx_set_mask;
bool sync_mmu;
@@ -291,6 +292,11 @@ int kvm_destroy_vcpu(CPUState *cpu)
DPRINTF("kvm_destroy_vcpu\n");
+ ret = kvm_arch_destroy_vcpu(cpu);
+ if (ret < 0) {
+ goto err;
+ }
+
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
ret = mmap_size;
@@ -863,8 +869,8 @@ static void kvm_mem_ioeventfd_add(MemoryListener *listener,
data, true, int128_get64(section->size),
match_data);
if (r < 0) {
- fprintf(stderr, "%s: error adding ioeventfd: %s\n",
- __func__, strerror(-r));
+ fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
+ __func__, strerror(-r), -r);
abort();
}
}
@@ -881,6 +887,8 @@ static void kvm_mem_ioeventfd_del(MemoryListener *listener,
data, false, int128_get64(section->size),
match_data);
if (r < 0) {
+ fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
+ __func__, strerror(-r), -r);
abort();
}
}
@@ -897,8 +905,8 @@ static void kvm_io_ioeventfd_add(MemoryListener *listener,
data, true, int128_get64(section->size),
match_data);
if (r < 0) {
- fprintf(stderr, "%s: error adding ioeventfd: %s\n",
- __func__, strerror(-r));
+ fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
+ __func__, strerror(-r), -r);
abort();
}
}
@@ -916,6 +924,8 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
data, false, int128_get64(section->size),
match_data);
if (r < 0) {
+ fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
+ __func__, strerror(-r), -r);
abort();
}
}
@@ -1672,6 +1682,8 @@ static int kvm_init(MachineState *ms)
s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif
+ s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);
+
#ifdef KVM_CAP_IRQ_ROUTING
kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif
@@ -2239,6 +2251,11 @@ int kvm_has_debugregs(void)
return kvm_state->debugregs;
}
+int kvm_max_nested_state_length(void)
+{
+ return kvm_state->max_nested_state_len;
+}
+
int kvm_has_many_ioeventfds(void)
{
if (!kvm_enabled()) {
diff --git a/cpus.c b/cpus.c
index dde3b7b981..1af51b73dd 100644
--- a/cpus.c
+++ b/cpus.c
@@ -1594,7 +1594,6 @@ static void *qemu_hax_cpu_thread_fn(void *arg)
cpu->thread_id = qemu_get_thread_id();
cpu->created = true;
- cpu->halted = 0;
current_cpu = cpu;
hax_init_vcpu(cpu);
diff --git a/docs/hyperv.txt b/docs/hyperv.txt
new file mode 100644
index 0000000000..8fdf25c829
--- /dev/null
+++ b/docs/hyperv.txt
@@ -0,0 +1,201 @@
+Hyper-V Enlightenments
+======================
+
+
+1. Description
+===============
+In some cases, when implementing a hardware interface in software is slow, KVM
+implements its own paravirtualized interfaces. This works well for Linux as
+guest support for such features is added simultaneously with the feature itself.
+It may, however, be hard or even impossible to add support for these interfaces
+to proprietary OSes, namely, Microsoft Windows.
+
+KVM on x86 implements Hyper-V Enlightenments for Windows guests. These features
+make Windows and Hyper-V guests think they're running on top of a Hyper-V
+compatible hypervisor and use Hyper-V specific features.
+
+
+2. Setup
+=========
+No Hyper-V enlightenments are enabled by default by either KVM or QEMU. In
+QEMU, individual enlightenments can be enabled through CPU flags, e.g.:
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv_relaxed,hv_vpindex,hv_time, ...
+
+Sometimes there are dependencies between enlightenments; QEMU is supposed to
+check that the supplied configuration is sane.
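+
+For example, per the dependencies listed in section 3 below, hv-synic needs
+hv-vpindex; an illustrative invocation (the exact flag set is an arbitrary
+example) that satisfies this looks like:
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-vpindex,hv-synic, ...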
+
+When any set of the Hyper-V enlightenments is enabled, QEMU changes hypervisor
+identification (CPUID 0x40000000..0x4000000A) to Hyper-V. KVM identification
+and features are kept in leaves 0x40000100..0x40000101.
+
+
+3. Existing enlightenments
+===========================
+
+3.1. hv-relaxed
+================
+This feature tells the guest OS to disable watchdog timeouts as it is running
+on a hypervisor. It is known that some Windows versions will do this even when
+they see the 'hypervisor' CPU flag.
+
+3.2. hv-vapic
+==============
+Provides the so-called VP Assist page MSR to the guest, allowing it to work
+with the APIC more efficiently. In particular, this enlightenment allows
+paravirtualized (exit-less) EOI processing.
+
+3.3. hv-spinlocks=xxx
+======================
+Enables paravirtualized spinlocks. The parameter indicates how many times
+spinlock acquisition should be attempted before indicating the situation to the
+hypervisor. A special value 0xffffffff indicates "never to retry".
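+
+An illustrative invocation (the retry count 0xfff here is an arbitrary example
+value, not a recommendation):
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-spinlocks=0xfff, ...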
+
+3.4. hv-vpindex
+================
+Provides HV_X64_MSR_VP_INDEX (0x40000002) MSR to the guest which has Virtual
+processor index information. This enlightenment makes sense in conjunction with
+hv-synic, hv-stimer and other enlightenments which require the guest to know its
+Virtual Processor indices (e.g. when VP index needs to be passed in a
+hypercall).
+
+3.5. hv-runtime
+================
+Provides HV_X64_MSR_VP_RUNTIME (0x40000010) MSR to the guest. The MSR keeps the
+virtual processor run time in 100ns units. This gives the guest operating
+system an idea of how much time was 'stolen' from it (when the virtual CPU was
+preempted to perform some other work).
+
+3.6. hv-crash
+==============
+Provides HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P4 (0x40000100..0x40000104) and
+HV_X64_MSR_CRASH_CTL (0x40000105) MSRs to the guest. These MSRs are written to
+by the guest when it crashes; HV_X64_MSR_CRASH_P0..HV_X64_MSR_CRASH_P4 MSRs
+contain additional crash information. This information is output to the QEMU
+log and through QAPI.
+Note: unlike under genuine Hyper-V, a write to HV_X64_MSR_CRASH_CTL causes the
+guest to shut down. This effectively blocks crash dump generation by Windows.
+
+3.7. hv-time
+=============
+Enables two Hyper-V-specific clocksources available to the guest: the MSR-based
+Hyper-V clocksource (HV_X64_MSR_TIME_REF_COUNT, 0x40000020) and the Reference
+TSC page (enabled via MSR HV_X64_MSR_REFERENCE_TSC, 0x40000021). Both
+clocksources are per-guest; the Reference TSC page clocksource allows for
+exit-less time stamp readings. Using this enlightenment leads to a significant
+speedup of all timestamp-related operations.
+
+3.8. hv-synic
+==============
+Enables the Hyper-V Synthetic interrupt controller - an extension of the local
+APIC. When enabled, this enlightenment provides additional communication
+facilities to the guest: SynIC messages and Events. This is a prerequisite for
+implementing VMBus devices (not yet in QEMU). Additionally, this enlightenment
+is needed to enable Hyper-V synthetic timers. SynIC is controlled through MSRs
+HV_X64_MSR_SCONTROL..HV_X64_MSR_EOM (0x40000080..0x40000084) and
+HV_X64_MSR_SINT0..HV_X64_MSR_SINT15 (0x40000090..0x4000009F).
+
+Requires: hv-vpindex
+
+3.9. hv-stimer
+===============
+Enables Hyper-V synthetic timers. There are four synthetic timers per virtual
+CPU controlled through HV_X64_MSR_STIMER0_CONFIG..HV_X64_MSR_STIMER3_COUNT
+(0x400000B0..0x400000B7) MSRs. These timers can work either in single-shot or
+periodic mode. It is known that certain Windows versions revert to using HPET
+(or even RTC when HPET is unavailable) extensively when this enlightenment is
+not provided; this can lead to significant CPU consumption, even when the
+virtual CPU is idle.
+
+Requires: hv-vpindex, hv-synic, hv-time
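+
+An illustrative invocation enabling hv-stimer together with its dependencies
+(the flag set is an example, not a recommendation):
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-vpindex,hv-synic,hv-time,hv-stimer, ...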
+
+3.10. hv-tlbflush
+==================
+Enables the paravirtualized TLB shoot-down mechanism. On the x86 architecture,
+a remote TLB flush requires sending IPIs and waiting for other CPUs to perform
+a local TLB flush. In a virtualized environment some virtual CPUs may not even
+be scheduled at the time of the call and may not require flushing (or flushing
+may be postponed until the virtual CPU is scheduled). The hv-tlbflush
+enlightenment implements TLB shoot-down through the hypervisor, enabling this
+optimization.
+
+Requires: hv-vpindex
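+
+An illustrative invocation (flag set chosen purely as an example):
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-vpindex,hv-tlbflush, ...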
+
+3.11. hv-ipi
+=============
+Enables the paravirtualized IPI send mechanism. The
+HvCallSendSyntheticClusterIpi hypercall may target more than 64 virtual CPUs
+simultaneously; doing the same through the APIC requires more than one access
+(and thus more than one exit to the hypervisor).
+
+Requires: hv-vpindex
+
+3.12. hv-vendor-id=xxx
+=======================
+This changes Hyper-V identification in CPUID 0x40000000.EBX-EDX from the default
+"Microsoft Hv". The parameter should be no longer than 12 characters. According
+to the specification, guests shouldn't use this information and it is unknown
+if there is a Windows version which acts differently.
+Note: hv-vendor-id is not an enlightenment and thus doesn't enable Hyper-V
+identification when specified without some other enlightenment.
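+
+An illustrative invocation (the vendor string "QemuHv" here is arbitrary; the
+value must not exceed 12 characters):
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-relaxed,hv-vendor-id=QemuHv, ...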
+
+3.13. hv-reset
+===============
+Provides the HV_X64_MSR_RESET (0x40000003) MSR to the guest, allowing it to
+reset itself by writing to it. Even when this MSR is enabled, it is not the
+recommended way for Windows to perform a system reboot, so the guest may not
+actually use it.
+
+3.14. hv-frequencies
+=====================
+Provides HV_X64_MSR_TSC_FREQUENCY (0x40000022) and HV_X64_MSR_APIC_FREQUENCY
+(0x40000023) allowing the guest to get its TSC/APIC frequencies without doing
+measurements.
+
+3.15. hv-reenlightenment
+=========================
+This enlightenment is nested-specific; it targets Hyper-V on KVM guests. When
+enabled, it provides HV_X64_MSR_REENLIGHTENMENT_CONTROL (0x40000106),
+HV_X64_MSR_TSC_EMULATION_CONTROL (0x40000107) and
+HV_X64_MSR_TSC_EMULATION_STATUS (0x40000108) MSRs allowing the guest to get
+notified when the TSC frequency changes (which only happens on migration) and
+keep using the old frequency (through emulation in the hypervisor) until it is
+ready to switch to the new one. This, in conjunction with hv-frequencies,
+allows Hyper-V on KVM to pass a stable clocksource (Reference TSC page) to its
+own guests.
+
+Recommended: hv-frequencies
+
+3.16. hv-evmcs
+===============
+This enlightenment is nested-specific; it targets Hyper-V on KVM guests. When
+enabled, it provides the Enlightened VMCS feature to the guest. The feature
+implements a paravirtualized protocol between the L0 (KVM) and L1 (Hyper-V)
+hypervisors making L2 exits to the hypervisor faster. The feature is Intel-only.
+Note: some virtualization features (e.g. Posted Interrupts) are disabled when
+hv-evmcs is enabled. It may make sense to measure your nested workload with and
+without the feature to find out if enabling it is beneficial.
+
+Requires: hv-vapic
+
+3.17. hv-stimer-direct
+=======================
+The Hyper-V specification allows synthetic timers to operate in two modes:
+"classic", where the expiration event is delivered as a SynIC message, and
+"direct", where the event is delivered via a normal interrupt. It is known that
+nested Hyper-V can only use synthetic timers in direct mode and thus
+'hv-stimer-direct' needs to be enabled.
+
+Requires: hv-vpindex, hv-synic, hv-time, hv-stimer
+
+
+4. Development features
+========================
+In some cases (e.g. during development) it may make sense to use QEMU in
+'pass-through' mode and give Windows guests all enlightenments currently
+supported by KVM. This pass-through mode is enabled by the "hv-passthrough" CPU
+flag.
+Note: enabling this flag effectively prevents migration as the supported
+features may differ between the source and destination hosts.
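+
+An illustrative invocation (development use only; not suitable for guests that
+need to be migrated):
+
+ qemu-system-x86_64 --enable-kvm --cpu host,hv-passthrough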
+
+
+5. Useful links
+================
+Hyper-V Top Level Functional specification and other information:
+https://github.com/MicrosoftDocs/Virtualization-Documentation
diff --git a/hw/core/machine.c b/hw/core/machine.c
index 84ebb8d247..ea5a01aa49 100644
--- a/hw/core/machine.c
+++ b/hw/core/machine.c
@@ -24,16 +24,13 @@
#include "hw/pci/pci.h"
#include "hw/mem/nvdimm.h"
-GlobalProperty hw_compat_4_0_1[] = {
+GlobalProperty hw_compat_4_0[] = {
{ "VGA", "edid", "false" },
{ "secondary-vga", "edid", "false" },
{ "bochs-display", "edid", "false" },
{ "virtio-vga", "edid", "false" },
{ "virtio-gpu-pci", "edid", "false" },
};
-const size_t hw_compat_4_0_1_len = G_N_ELEMENTS(hw_compat_4_0_1);
-
-GlobalProperty hw_compat_4_0[] = {};
const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0);
GlobalProperty hw_compat_3_1[] = {
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index 2c5446b095..e96360b47a 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -111,9 +111,6 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX};
/* Physical Address of PVH entry point read from kernel ELF NOTE */
static size_t pvh_start_addr;
-GlobalProperty pc_compat_4_0_1[] = {};
-const size_t pc_compat_4_0_1_len = G_N_ELEMENTS(pc_compat_4_0_1);
-
GlobalProperty pc_compat_4_0[] = {};
const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0);
@@ -2386,7 +2383,8 @@ static void pc_cpu_pre_plug(HotplugHandler *hotplug_dev,
}
cpu->thread_id = topo.smt_id;
- if (cpu->hyperv_vpindex && !kvm_hv_vpindex_settable()) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) &&
+ !kvm_hv_vpindex_settable()) {
error_setg(errp, "kernel doesn't allow setting HyperV VP_INDEX");
return;
}
diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c
index dcddc64662..57232aed6b 100644
--- a/hw/i386/pc_q35.c
+++ b/hw/i386/pc_q35.c
@@ -378,8 +378,13 @@ static void pc_q35_4_0_1_machine_options(MachineClass *m)
{
pc_q35_4_1_machine_options(m);
m->alias = NULL;
- compat_props_add(m->compat_props, hw_compat_4_0_1, hw_compat_4_0_1_len);
- compat_props_add(m->compat_props, pc_compat_4_0_1, pc_compat_4_0_1_len);
+ /*
+ * This is the default machine for the 4.0-stable branch. It is basically
+ * a 4.0 that doesn't use split irqchip by default. It MUST hence apply the
+ * 4.0 compat props.
+ */
+ compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len);
+ compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len);
}
DEFINE_Q35_MACHINE(v4_0_1, "pc-q35-4.0.1", NULL,
@@ -390,8 +395,7 @@ static void pc_q35_4_0_machine_options(MachineClass *m)
pc_q35_4_0_1_machine_options(m);
m->default_kernel_irqchip_split = true;
m->alias = NULL;
- compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len);
- compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len);
+ /* Compat props are applied by the 4.0.1 machine */
}
DEFINE_Q35_MACHINE(v4_0, "pc-q35-4.0", NULL,
diff --git a/hw/sd/sd.c b/hw/sd/sd.c
index 60500ec8fe..917195a65b 100644
--- a/hw/sd/sd.c
+++ b/hw/sd/sd.c
@@ -145,7 +145,7 @@ static const char *sd_state_name(enum SDCardStates state)
if (state == sd_inactive_state) {
return "inactive";
}
- assert(state <= ARRAY_SIZE(state_name));
+ assert(state < ARRAY_SIZE(state_name));
return state_name[state];
}
@@ -166,7 +166,7 @@ static const char *sd_response_name(sd_rsp_type_t rsp)
if (rsp == sd_r1b) {
rsp = sd_r1;
}
- assert(rsp <= ARRAY_SIZE(response_name));
+ assert(rsp < ARRAY_SIZE(response_name));
return response_name[rsp];
}
diff --git a/include/hw/boards.h b/include/hw/boards.h
index b7362af3f1..eaa050a7ab 100644
--- a/include/hw/boards.h
+++ b/include/hw/boards.h
@@ -293,9 +293,6 @@ struct MachineState {
} \
type_init(machine_initfn##_register_types)
-extern GlobalProperty hw_compat_4_0_1[];
-extern const size_t hw_compat_4_0_1_len;
-
extern GlobalProperty hw_compat_4_0[];
extern const size_t hw_compat_4_0_len;
diff --git a/include/hw/i386/pc.h b/include/hw/i386/pc.h
index a7d0b87166..c54cc54a47 100644
--- a/include/hw/i386/pc.h
+++ b/include/hw/i386/pc.h
@@ -293,9 +293,6 @@ int e820_add_entry(uint64_t, uint64_t, uint32_t);
int e820_get_num_entries(void);
bool e820_get_entry(int, uint32_t, uint64_t *, uint64_t *);
-extern GlobalProperty pc_compat_4_0_1[];
-extern const size_t pc_compat_4_0_1_len;
-
extern GlobalProperty pc_compat_4_0[];
extern const size_t pc_compat_4_0_len;
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 9224370ed5..ca68584eba 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -797,6 +797,19 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT64_V(_f, _s, _v) \
VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, uint64_t)
+#ifdef CONFIG_LINUX
+
+#define VMSTATE_U8_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint8, __u8)
+#define VMSTATE_U16_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint16, __u16)
+#define VMSTATE_U32_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint32, __u32)
+#define VMSTATE_U64_V(_f, _s, _v) \
+ VMSTATE_SINGLE(_f, _s, _v, vmstate_info_uint64, __u64)
+
+#endif
+
#define VMSTATE_BOOL(_f, _s) \
VMSTATE_BOOL_V(_f, _s, 0)
@@ -818,6 +831,19 @@ extern const VMStateInfo vmstate_info_qtailq;
#define VMSTATE_UINT64(_f, _s) \
VMSTATE_UINT64_V(_f, _s, 0)
+#ifdef CONFIG_LINUX
+
+#define VMSTATE_U8(_f, _s) \
+ VMSTATE_U8_V(_f, _s, 0)
+#define VMSTATE_U16(_f, _s) \
+ VMSTATE_U16_V(_f, _s, 0)
+#define VMSTATE_U32(_f, _s) \
+ VMSTATE_U32_V(_f, _s, 0)
+#define VMSTATE_U64(_f, _s) \
+ VMSTATE_U64_V(_f, _s, 0)
+
+#endif
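+
+/*
+ * Usage sketch: like their VMSTATE_UINT* counterparts, these macros describe
+ * a single field inside a VMStateDescription's .fields array, e.g.:
+ *
+ *     VMSTATE_U64(some_u64_field, SomeState),
+ *
+ * where the field and state names above are placeholders for a __u64 member
+ * of a kernel-defined structure embedded in the device/CPU state.
+ */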
+
#define VMSTATE_UINT8_EQUAL(_f, _s, _err_hint) \
VMSTATE_SINGLE_FULL(_f, _s, 0, 0, \
vmstate_info_uint8_equal, uint8_t, _err_hint)
diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h
index a6d1cd190f..acd90aebb6 100644
--- a/include/sysemu/kvm.h
+++ b/include/sysemu/kvm.h
@@ -210,6 +210,7 @@ bool kvm_has_sync_mmu(void);
int kvm_has_vcpu_events(void);
int kvm_has_robust_singlestep(void);
int kvm_has_debugregs(void);
+int kvm_max_nested_state_length(void);
int kvm_has_pit_state2(void);
int kvm_has_many_ioeventfds(void);
int kvm_has_gsi_routing(void);
@@ -371,6 +372,7 @@ int kvm_arch_put_registers(CPUState *cpu, int level);
int kvm_arch_init(MachineState *ms, KVMState *s);
int kvm_arch_init_vcpu(CPUState *cpu);
+int kvm_arch_destroy_vcpu(CPUState *cpu);
bool kvm_vcpu_id_is_valid(int vcpu_id);
diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h
index 7a0e64ccd6..6e7dd792e4 100644
--- a/linux-headers/asm-x86/kvm.h
+++ b/linux-headers/asm-x86/kvm.h
@@ -383,16 +383,26 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#define KVM_X86_QUIRK_OUT_7E_INC_RIP (1 << 3)
+#define KVM_STATE_NESTED_FORMAT_VMX 0
+#define KVM_STATE_NESTED_FORMAT_SVM 1
+
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
+#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
+
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
-struct kvm_vmx_nested_state {
+struct kvm_vmx_nested_state_data {
+ __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+ __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
+};
+
+struct kvm_vmx_nested_state_hdr {
__u64 vmxon_pa;
- __u64 vmcs_pa;
+ __u64 vmcs12_pa;
struct {
__u16 flags;
@@ -401,24 +411,25 @@ struct kvm_vmx_nested_state {
/* for KVM_CAP_NESTED_STATE */
struct kvm_nested_state {
- /* KVM_STATE_* flags */
__u16 flags;
-
- /* 0 for VMX, 1 for SVM. */
__u16 format;
-
- /* 128 for SVM, 128 + VMCS size for VMX. */
__u32 size;
union {
- /* VMXON, VMCS */
- struct kvm_vmx_nested_state vmx;
+ struct kvm_vmx_nested_state_hdr vmx;
/* Pad the header to 128 bytes. */
__u8 pad[120];
- };
+ } hdr;
- __u8 data[0];
+ /*
+ * Define data region as 0 bytes to preserve backwards-compatibility
+ * to old definition of kvm_nested_state in order to avoid changing
+ * KVM_{GET,PUT}_NESTED_STATE ioctl values.
+ */
+ union {
+ struct kvm_vmx_nested_state_data vmx[0];
+ } data;
};
#endif /* _ASM_X86_KVM_H */
diff --git a/target/arm/kvm32.c b/target/arm/kvm32.c
index 4e54e372a6..51f78f722b 100644
--- a/target/arm/kvm32.c
+++ b/target/arm/kvm32.c
@@ -240,6 +240,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
return kvm_arm_init_cpreg_list(cpu);
}
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
typedef struct Reg {
uint64_t id;
int offset;
diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c
index 998d21f399..22d19c9aec 100644
--- a/target/arm/kvm64.c
+++ b/target/arm/kvm64.c
@@ -654,6 +654,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
return kvm_arm_init_cpreg_list(cpu);
}
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
{
/* Return true if the regidx is a register we should synchronize
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index fbed2eb804..da6eb67cfb 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1085,7 +1085,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
NULL, NULL, NULL, NULL,
NULL, NULL, NULL, NULL,
NULL, NULL, "spec-ctrl", "stibp",
- NULL, "arch-capabilities", NULL, "ssbd",
+ NULL, "arch-capabilities", "core-capability", "ssbd",
},
.cpuid = {
.eax = 7,
@@ -1203,6 +1203,26 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
}
},
},
+ [FEAT_CORE_CAPABILITY] = {
+ .type = MSR_FEATURE_WORD,
+ .feat_names = {
+ NULL, NULL, NULL, NULL,
+ NULL, "split-lock-detect", NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ NULL, NULL, NULL, NULL,
+ },
+ .msr = {
+ .index = MSR_IA32_CORE_CAPABILITY,
+ .cpuid_dep = {
+ FEAT_7_0_EDX,
+ CPUID_7_0_EDX_CORE_CAPABILITY,
+ },
+ },
+ },
};
typedef struct X86RegisterInfo32 {
@@ -4799,7 +4819,11 @@ static void x86_cpu_reset(CPUState *s)
memset(env->mtrr_fixed, 0, sizeof(env->mtrr_fixed));
env->interrupt_injected = -1;
- env->exception_injected = -1;
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
+ env->exception_has_payload = false;
+ env->exception_payload = 0;
env->nmi_injected = false;
#if !defined(CONFIG_USER_ONLY)
/* We hard-wire the BSP to the first CPU. */
@@ -5195,12 +5219,6 @@ static int x86_cpu_filter_features(X86CPU *cpu)
return rv;
}
-#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
- (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
- (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
-#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
- (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
- (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
@@ -5853,21 +5871,40 @@ static Property x86_cpu_properties[] = {
#endif
DEFINE_PROP_INT32("node-id", X86CPU, node_id, CPU_UNSET_NUMA_NODE_ID),
DEFINE_PROP_BOOL("pmu", X86CPU, enable_pmu, false),
+
{ .name = "hv-spinlocks", .info = &qdev_prop_spinlocks },
- DEFINE_PROP_BOOL("hv-relaxed", X86CPU, hyperv_relaxed_timing, false),
- DEFINE_PROP_BOOL("hv-vapic", X86CPU, hyperv_vapic, false),
- DEFINE_PROP_BOOL("hv-time", X86CPU, hyperv_time, false),
- DEFINE_PROP_BOOL("hv-crash", X86CPU, hyperv_crash, false),
- DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
- DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
- DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
- DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
- DEFINE_PROP_BOOL("hv-stimer", X86CPU, hyperv_stimer, false),
- DEFINE_PROP_BOOL("hv-frequencies", X86CPU, hyperv_frequencies, false),
- DEFINE_PROP_BOOL("hv-reenlightenment", X86CPU, hyperv_reenlightenment, false),
- DEFINE_PROP_BOOL("hv-tlbflush", X86CPU, hyperv_tlbflush, false),
- DEFINE_PROP_BOOL("hv-evmcs", X86CPU, hyperv_evmcs, false),
- DEFINE_PROP_BOOL("hv-ipi", X86CPU, hyperv_ipi, false),
+ DEFINE_PROP_BIT64("hv-relaxed", X86CPU, hyperv_features,
+ HYPERV_FEAT_RELAXED, 0),
+ DEFINE_PROP_BIT64("hv-vapic", X86CPU, hyperv_features,
+ HYPERV_FEAT_VAPIC, 0),
+ DEFINE_PROP_BIT64("hv-time", X86CPU, hyperv_features,
+ HYPERV_FEAT_TIME, 0),
+ DEFINE_PROP_BIT64("hv-crash", X86CPU, hyperv_features,
+ HYPERV_FEAT_CRASH, 0),
+ DEFINE_PROP_BIT64("hv-reset", X86CPU, hyperv_features,
+ HYPERV_FEAT_RESET, 0),
+ DEFINE_PROP_BIT64("hv-vpindex", X86CPU, hyperv_features,
+ HYPERV_FEAT_VPINDEX, 0),
+ DEFINE_PROP_BIT64("hv-runtime", X86CPU, hyperv_features,
+ HYPERV_FEAT_RUNTIME, 0),
+ DEFINE_PROP_BIT64("hv-synic", X86CPU, hyperv_features,
+ HYPERV_FEAT_SYNIC, 0),
+ DEFINE_PROP_BIT64("hv-stimer", X86CPU, hyperv_features,
+ HYPERV_FEAT_STIMER, 0),
+ DEFINE_PROP_BIT64("hv-frequencies", X86CPU, hyperv_features,
+ HYPERV_FEAT_FREQUENCIES, 0),
+ DEFINE_PROP_BIT64("hv-reenlightenment", X86CPU, hyperv_features,
+ HYPERV_FEAT_REENLIGHTENMENT, 0),
+ DEFINE_PROP_BIT64("hv-tlbflush", X86CPU, hyperv_features,
+ HYPERV_FEAT_TLBFLUSH, 0),
+ DEFINE_PROP_BIT64("hv-evmcs", X86CPU, hyperv_features,
+ HYPERV_FEAT_EVMCS, 0),
+ DEFINE_PROP_BIT64("hv-ipi", X86CPU, hyperv_features,
+ HYPERV_FEAT_IPI, 0),
+ DEFINE_PROP_BIT64("hv-stimer-direct", X86CPU, hyperv_features,
+ HYPERV_FEAT_STIMER_DIRECT, 0),
+ DEFINE_PROP_BOOL("hv-passthrough", X86CPU, hyperv_passthrough, false),
+
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 0732e059ec..93345792f4 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -345,6 +345,7 @@ typedef enum X86Seg {
#define MSR_IA32_SPEC_CTRL 0x48
#define MSR_VIRT_SSBD 0xc001011f
#define MSR_IA32_PRED_CMD 0x49
+#define MSR_IA32_CORE_CAPABILITY 0xcf
#define MSR_IA32_ARCH_CAPABILITIES 0x10a
#define MSR_IA32_TSCDEADLINE 0x6e0
@@ -496,6 +497,7 @@ typedef enum FeatureWord {
FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */
FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */
FEAT_ARCH_CAPABILITIES,
+ FEAT_CORE_CAPABILITY,
FEATURE_WORDS,
} FeatureWord;
@@ -687,6 +689,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
#define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /*Arch Capabilities*/
+#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) /*Core Capability*/
#define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */
#define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and
@@ -719,6 +722,13 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define CPUID_VENDOR_HYGON "HygonGenuine"
+#define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3)
+#define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \
+ (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \
+ (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3)
+
#define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */
#define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */
@@ -734,6 +744,25 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
#define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3)
#define MSR_ARCH_CAP_SSB_NO (1U << 4)
+#define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5)
+
+/* Supported Hyper-V Enlightenments */
+#define HYPERV_FEAT_RELAXED 0
+#define HYPERV_FEAT_VAPIC 1
+#define HYPERV_FEAT_TIME 2
+#define HYPERV_FEAT_CRASH 3
+#define HYPERV_FEAT_RESET 4
+#define HYPERV_FEAT_VPINDEX 5
+#define HYPERV_FEAT_RUNTIME 6
+#define HYPERV_FEAT_SYNIC 7
+#define HYPERV_FEAT_STIMER 8
+#define HYPERV_FEAT_FREQUENCIES 9
+#define HYPERV_FEAT_REENLIGHTENMENT 10
+#define HYPERV_FEAT_TLBFLUSH 11
+#define HYPERV_FEAT_EVMCS 12
+#define HYPERV_FEAT_IPI 13
+#define HYPERV_FEAT_STIMER_DIRECT 14
+
#ifndef HYPERV_SPINLOCK_NEVER_RETRY
#define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF
#endif
@@ -1319,10 +1348,14 @@ typedef struct CPUX86State {
/* For KVM */
uint32_t mp_state;
- int32_t exception_injected;
+ int32_t exception_nr;
int32_t interrupt_injected;
uint8_t soft_interrupt;
+ uint8_t exception_pending;
+ uint8_t exception_injected;
uint8_t has_error_code;
+ uint8_t exception_has_payload;
+ uint64_t exception_payload;
uint32_t ins_len;
uint32_t sipi_vector;
bool tsc_valid;
@@ -1331,6 +1364,9 @@ typedef struct CPUX86State {
#if defined(CONFIG_KVM) || defined(CONFIG_HVF)
void *xsave_buf;
#endif
+#if defined(CONFIG_KVM)
+ struct kvm_nested_state *nested_state;
+#endif
#if defined(CONFIG_HVF)
HVFX86EmulatorState *hvf_emul;
#endif
@@ -1370,23 +1406,12 @@ struct X86CPU {
CPUNegativeOffsetState neg;
CPUX86State env;
- bool hyperv_vapic;
- bool hyperv_relaxed_timing;
int hyperv_spinlock_attempts;
char *hyperv_vendor_id;
- bool hyperv_time;
- bool hyperv_crash;
- bool hyperv_reset;
- bool hyperv_vpindex;
- bool hyperv_runtime;
- bool hyperv_synic;
bool hyperv_synic_kvm_only;
- bool hyperv_stimer;
- bool hyperv_frequencies;
- bool hyperv_reenlightenment;
- bool hyperv_tlbflush;
- bool hyperv_evmcs;
- bool hyperv_ipi;
+ uint64_t hyperv_features;
+ bool hyperv_passthrough;
+
bool check_cpuid;
bool enforce_cpuid;
bool expose_kvm;
@@ -1837,6 +1862,11 @@ static inline int32_t x86_get_a20_mask(CPUX86State *env)
}
}
+static inline bool cpu_has_vmx(CPUX86State *env)
+{
+ return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
+}
+
/* fpu_helper.c */
void update_fp_status(CPUX86State *env);
void update_mxcsr_status(CPUX86State *env);
@@ -1906,4 +1936,9 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
void x86_update_hflags(CPUX86State* env);
+static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat)
+{
+ return !!(cpu->hyperv_features & BIT(feat));
+}
+
#endif /* I386_CPU_H */
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 64fd51ad4a..9e7b77965d 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -471,13 +471,35 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
return 0;
}
- cpu->halted = 0;
-
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
apic_poll_irq(x86_cpu->apic_state);
}
+ /* After a vcpu is halted (either because it is an AP and has just been
+ * reset, or because it has executed the HLT instruction), it will not be
+ * run (hax_vcpu_run()) until it is unhalted. The next few if blocks check
+ * for events that may change the halted state of this vcpu:
+ * a) Maskable interrupt, when RFLAGS.IF is 1;
+ * Note: env->eflags may not reflect the current RFLAGS state, because
+ * it is not updated after each hax_vcpu_run(). We cannot afford
+ * to fail to recognize any unhalt-by-maskable-interrupt event
+ * (in which case the vcpu will halt forever), and yet we cannot
+ * afford the overhead of hax_vcpu_sync_state(). The current
+ * solution is to err on the side of caution and have the HLT
+ * handler (see case HAX_EXIT_HLT below) unconditionally set the
+ * IF_MASK bit in env->eflags, which, in effect, disables the
+ * RFLAGS.IF check.
+ * b) NMI;
+ * c) INIT signal;
+ * d) SIPI signal.
+ */
+ if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
+ (env->eflags & IF_MASK)) ||
+ (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
+ cpu->halted = 0;
+ }
+
if (cpu->interrupt_request & CPU_INTERRUPT_INIT) {
DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n",
cpu->cpu_index);
@@ -493,6 +515,16 @@ static int hax_vcpu_hax_exec(CPUArchState *env)
hax_vcpu_sync_state(env, 1);
}
+ if (cpu->halted) {
+ /* If this vcpu is halted, we must not ask HAXM to run it. Instead, we
+ * break out of hax_smp_cpu_exec() as if this vcpu had executed HLT.
+ * That way, this vcpu thread will be trapped in qemu_wait_io_event(),
+ * until the vcpu is unhalted.
+ */
+ cpu->exception_index = EXCP_HLT;
+ return 0;
+ }
+
do {
int hax_ret;
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index 2751c8125c..dc4bb63536 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -605,7 +605,9 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in
X86CPU *x86_cpu = X86_CPU(cpu);
CPUX86State *env = &x86_cpu->env;
- env->exception_injected = -1;
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
env->interrupt_injected = -1;
env->nmi_injected = false;
if (idtvec_info & VMCS_IDT_VEC_VALID) {
@@ -619,7 +621,8 @@ static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_in
break;
case VMCS_IDT_VEC_HWEXCEPTION:
case VMCS_IDT_VEC_SWEXCEPTION:
- env->exception_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
+ env->exception_injected = 1;
break;
case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
default:
@@ -912,7 +915,8 @@ int hvf_vcpu_exec(CPUState *cpu)
macvm_set_rip(cpu, rip + ins_len);
break;
case VMX_REASON_VMCALL:
- env->exception_injected = EXCP0D_GPF;
+ env->exception_nr = EXCP0D_GPF;
+ env->exception_injected = 1;
env->has_error_code = true;
env->error_code = 0;
break;
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index df8e946fbc..e0ea02d631 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -362,8 +362,8 @@ bool hvf_inject_interrupts(CPUState *cpu_state)
if (env->interrupt_injected != -1) {
vector = env->interrupt_injected;
intr_type = VMCS_INTR_T_SWINTR;
- } else if (env->exception_injected != -1) {
- vector = env->exception_injected;
+ } else if (env->exception_nr != -1) {
+ vector = env->exception_nr;
if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
intr_type = VMCS_INTR_T_SWEXCEPTION;
} else {
diff --git a/target/i386/hyperv-proto.h b/target/i386/hyperv-proto.h
index c0272b3a01..cffac10b45 100644
--- a/target/i386/hyperv-proto.h
+++ b/target/i386/hyperv-proto.h
@@ -49,6 +49,7 @@
#define HV_GUEST_IDLE_STATE_AVAILABLE (1u << 5)
#define HV_FREQUENCY_MSRS_AVAILABLE (1u << 8)
#define HV_GUEST_CRASH_MSR_AVAILABLE (1u << 10)
+#define HV_STIMER_DIRECT_MODE_AVAILABLE (1u << 19)
/*
* HV_CPUID_ENLIGHTMENT_INFO.EAX bits
diff --git a/target/i386/hyperv.c b/target/i386/hyperv.c
index b264a28620..26efc1e0e6 100644
--- a/target/i386/hyperv.c
+++ b/target/i386/hyperv.c
@@ -52,7 +52,7 @@ int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit)
switch (exit->type) {
case KVM_EXIT_HYPERV_SYNIC:
- if (!cpu->hyperv_synic) {
+ if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
return -1;
}
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 6899061b4e..e4b4f5756a 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -95,6 +95,7 @@ static bool has_msr_spec_ctrl;
static bool has_msr_virt_ssbd;
static bool has_msr_smi_count;
static bool has_msr_arch_capabs;
+static bool has_msr_core_capabs;
static uint32_t has_architectural_pmu_version;
static uint32_t num_architectural_pmu_gp_counters;
@@ -103,6 +104,7 @@ static uint32_t num_architectural_pmu_fixed_counters;
static int has_xsave;
static int has_xcrs;
static int has_pit_state2;
+static int has_exception_payload;
static bool has_msr_mcg_ext_ctl;
@@ -583,15 +585,56 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
/* Hope we are lucky for AO MCE */
}
+static void kvm_reset_exception(CPUX86State *env)
+{
+ env->exception_nr = -1;
+ env->exception_pending = 0;
+ env->exception_injected = 0;
+ env->exception_has_payload = false;
+ env->exception_payload = 0;
+}
+
+static void kvm_queue_exception(CPUX86State *env,
+ int32_t exception_nr,
+ uint8_t exception_has_payload,
+ uint64_t exception_payload)
+{
+ assert(env->exception_nr == -1);
+ assert(!env->exception_pending);
+ assert(!env->exception_injected);
+ assert(!env->exception_has_payload);
+
+ env->exception_nr = exception_nr;
+
+ if (has_exception_payload) {
+ env->exception_pending = 1;
+
+ env->exception_has_payload = exception_has_payload;
+ env->exception_payload = exception_payload;
+ } else {
+ env->exception_injected = 1;
+
+ if (exception_nr == EXCP01_DB) {
+ assert(exception_has_payload);
+ env->dr[6] = exception_payload;
+ } else if (exception_nr == EXCP0E_PAGE) {
+ assert(exception_has_payload);
+ env->cr[2] = exception_payload;
+ } else {
+ assert(!exception_has_payload);
+ }
+ }
+}
+
static int kvm_inject_mce_oldstyle(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
- if (!kvm_has_vcpu_events() && env->exception_injected == EXCP12_MCHK) {
+ if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) {
unsigned int bank, bank_num = env->mcg_cap & 0xff;
struct kvm_x86_mce mce;
- env->exception_injected = -1;
+ kvm_reset_exception(env);
/*
* There must be at least one bank in use if an MCE is pending.
@@ -634,28 +677,12 @@ unsigned long kvm_arch_vcpu_id(CPUState *cs)
#define KVM_CPUID_SIGNATURE_NEXT 0x40000100
#endif
-static bool hyperv_hypercall_available(X86CPU *cpu)
-{
- return cpu->hyperv_vapic ||
- (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY);
-}
-
static bool hyperv_enabled(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 &&
- (hyperv_hypercall_available(cpu) ||
- cpu->hyperv_time ||
- cpu->hyperv_relaxed_timing ||
- cpu->hyperv_crash ||
- cpu->hyperv_reset ||
- cpu->hyperv_vpindex ||
- cpu->hyperv_runtime ||
- cpu->hyperv_synic ||
- cpu->hyperv_stimer ||
- cpu->hyperv_reenlightenment ||
- cpu->hyperv_tlbflush ||
- cpu->hyperv_ipi);
+ ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) ||
+ cpu->hyperv_features || cpu->hyperv_passthrough);
}
static int kvm_arch_set_tsc_khz(CPUState *cs)
@@ -699,164 +726,573 @@ static bool tsc_is_stable_and_known(CPUX86State *env)
|| env->user_tsc_khz;
}
-static int hyperv_handle_properties(CPUState *cs)
+static struct {
+ const char *desc;
+ struct {
+ uint32_t fw;
+ uint32_t bits;
+ } flags[2];
+ uint64_t dependencies;
+} kvm_hyperv_properties[] = {
+ [HYPERV_FEAT_RELAXED] = {
+ .desc = "relaxed timing (hv-relaxed)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_HYPERCALL_AVAILABLE},
+ {.fw = FEAT_HV_RECOMM_EAX,
+ .bits = HV_RELAXED_TIMING_RECOMMENDED}
+ }
+ },
+ [HYPERV_FEAT_VAPIC] = {
+ .desc = "virtual APIC (hv-vapic)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE},
+ {.fw = FEAT_HV_RECOMM_EAX,
+ .bits = HV_APIC_ACCESS_RECOMMENDED}
+ }
+ },
+ [HYPERV_FEAT_TIME] = {
+ .desc = "clocksources (hv-time)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE |
+ HV_REFERENCE_TSC_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_CRASH] = {
+ .desc = "crash MSRs (hv-crash)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EDX,
+ .bits = HV_GUEST_CRASH_MSR_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_RESET] = {
+ .desc = "reset MSR (hv-reset)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_RESET_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_VPINDEX] = {
+ .desc = "VP_INDEX MSR (hv-vpindex)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_VP_INDEX_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_RUNTIME] = {
+ .desc = "VP_RUNTIME MSR (hv-runtime)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_VP_RUNTIME_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_SYNIC] = {
+ .desc = "synthetic interrupt controller (hv-synic)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_SYNIC_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_STIMER] = {
+ .desc = "synthetic timers (hv-stimer)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_SYNTIMERS_AVAILABLE}
+ },
+ .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME)
+ },
+ [HYPERV_FEAT_FREQUENCIES] = {
+ .desc = "frequency MSRs (hv-frequencies)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_ACCESS_FREQUENCY_MSRS},
+ {.fw = FEAT_HYPERV_EDX,
+ .bits = HV_FREQUENCY_MSRS_AVAILABLE}
+ }
+ },
+ [HYPERV_FEAT_REENLIGHTENMENT] = {
+ .desc = "reenlightenment MSRs (hv-reenlightenment)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EAX,
+ .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL}
+ }
+ },
+ [HYPERV_FEAT_TLBFLUSH] = {
+ .desc = "paravirtualized TLB flush (hv-tlbflush)",
+ .flags = {
+ {.fw = FEAT_HV_RECOMM_EAX,
+ .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED |
+ HV_EX_PROCESSOR_MASKS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VPINDEX)
+ },
+ [HYPERV_FEAT_EVMCS] = {
+ .desc = "enlightened VMCS (hv-evmcs)",
+ .flags = {
+ {.fw = FEAT_HV_RECOMM_EAX,
+ .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VAPIC)
+ },
+ [HYPERV_FEAT_IPI] = {
+ .desc = "paravirtualized IPI (hv-ipi)",
+ .flags = {
+ {.fw = FEAT_HV_RECOMM_EAX,
+ .bits = HV_CLUSTER_IPI_RECOMMENDED |
+ HV_EX_PROCESSOR_MASKS_RECOMMENDED}
+ },
+ .dependencies = BIT(HYPERV_FEAT_VPINDEX)
+ },
+ [HYPERV_FEAT_STIMER_DIRECT] = {
+ .desc = "direct mode synthetic timers (hv-stimer-direct)",
+ .flags = {
+ {.fw = FEAT_HYPERV_EDX,
+ .bits = HV_STIMER_DIRECT_MODE_AVAILABLE}
+ },
+ .dependencies = BIT(HYPERV_FEAT_STIMER)
+ },
+};
+
+static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max)
+{
+ struct kvm_cpuid2 *cpuid;
+ int r, size;
+
+ size = sizeof(*cpuid) + max * sizeof(*cpuid->entries);
+ cpuid = g_malloc0(size);
+ cpuid->nent = max;
+
+ r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid);
+ if (r == 0 && cpuid->nent >= max) {
+ r = -E2BIG;
+ }
+ if (r < 0) {
+ if (r == -E2BIG) {
+ g_free(cpuid);
+ return NULL;
+ } else {
+ fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
+ strerror(-r));
+ exit(1);
+ }
+ }
+ return cpuid;
+}
+
+/*
+ * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs)
+{
+ struct kvm_cpuid2 *cpuid;
+ int max = 7; /* 0x40000000..0x40000005, 0x4000000A */
+
+ /*
+ * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with
+ * -E2BIG, however, it doesn't report back the right size. Keep increasing
+ * it and re-trying until we succeed.
+ */
+ while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) {
+ max++;
+ }
+ return cpuid;
+}
+
+/*
+ * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature
+ * leaves from KVM_CAP_HYPERV* and present MSRs data.
+ */
+static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs)
{
X86CPU *cpu = X86_CPU(cs);
- CPUX86State *env = &cpu->env;
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *entry_feat, *entry_recomm;
- if (cpu->hyperv_relaxed_timing) {
- env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
+ /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */
+ cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries));
+ cpuid->nent = 2;
+
+ /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS */
+ entry_feat = &cpuid->entries[0];
+ entry_feat->function = HV_CPUID_FEATURES;
+
+ entry_recomm = &cpuid->entries[1];
+ entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO;
+ entry_recomm->ebx = cpu->hyperv_spinlock_attempts;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) {
+ entry_feat->eax |= HV_HYPERCALL_AVAILABLE;
+ entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE;
+ entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED;
+ entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED;
}
- if (cpu->hyperv_vapic) {
- env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) {
+ entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE;
+ entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE;
}
- if (cpu->hyperv_time) {
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) <= 0) {
- fprintf(stderr, "Hyper-V clocksources "
- "(requested by 'hv-time' cpu flag) "
- "are not supported by kernel\n");
- return -ENOSYS;
- }
- env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
+
+ if (has_msr_hv_frequencies) {
+ entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS;
+ entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE;
}
- if (cpu->hyperv_frequencies) {
- if (!has_msr_hv_frequencies) {
- fprintf(stderr, "Hyper-V frequency MSRs "
- "(requested by 'hv-frequencies' cpu flag) "
- "are not supported by kernel\n");
- return -ENOSYS;
- }
- env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
- env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
+
+ if (has_msr_hv_crash) {
+ entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE;
}
- if (cpu->hyperv_crash) {
- if (!has_msr_hv_crash) {
- fprintf(stderr, "Hyper-V crash MSRs "
- "(requested by 'hv-crash' cpu flag) "
- "are not supported by kernel\n");
- return -ENOSYS;
- }
- env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
+
+ if (has_msr_hv_reenlightenment) {
+ entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
}
- if (cpu->hyperv_reenlightenment) {
- if (!has_msr_hv_reenlightenment) {
- fprintf(stderr,
- "Hyper-V Reenlightenment MSRs "
- "(requested by 'hv-reenlightenment' cpu flag) "
- "are not supported by kernel\n");
- return -ENOSYS;
- }
- env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_REENLIGHTENMENTS_CONTROL;
+
+ if (has_msr_hv_reset) {
+ entry_feat->eax |= HV_RESET_AVAILABLE;
}
- env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
- if (cpu->hyperv_reset) {
- if (!has_msr_hv_reset) {
- fprintf(stderr, "Hyper-V reset MSR "
- "(requested by 'hv-reset' cpu flag) "
- "is not supported by kernel\n");
- return -ENOSYS;
- }
- env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
+
+ if (has_msr_hv_vpindex) {
+ entry_feat->eax |= HV_VP_INDEX_AVAILABLE;
}
- if (cpu->hyperv_vpindex) {
- if (!has_msr_hv_vpindex) {
- fprintf(stderr, "Hyper-V VP_INDEX MSR "
- "(requested by 'hv-vpindex' cpu flag) "
- "is not supported by kernel\n");
- return -ENOSYS;
+
+ if (has_msr_hv_runtime) {
+ entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE;
+ }
+
+ if (has_msr_hv_synic) {
+ unsigned int cap = cpu->hyperv_synic_kvm_only ?
+ KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
+
+ if (kvm_check_extension(cs->kvm_state, cap) > 0) {
+ entry_feat->eax |= HV_SYNIC_AVAILABLE;
}
- env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
}
- if (cpu->hyperv_runtime) {
- if (!has_msr_hv_runtime) {
- fprintf(stderr, "Hyper-V VP_RUNTIME MSR "
- "(requested by 'hv-runtime' cpu flag) "
- "is not supported by kernel\n");
- return -ENOSYS;
+
+ if (has_msr_hv_stimer) {
+ entry_feat->eax |= HV_SYNTIMERS_AVAILABLE;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_TLBFLUSH) > 0) {
+ entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
+ entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) {
+ entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
+ }
+
+ if (kvm_check_extension(cs->kvm_state,
+ KVM_CAP_HYPERV_SEND_IPI) > 0) {
+ entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED;
+ entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
+ }
+
+ return cpuid;
+}
+
+static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r)
+{
+ struct kvm_cpuid_entry2 *entry;
+ uint32_t func;
+ int reg;
+
+ switch (fw) {
+ case FEAT_HYPERV_EAX:
+ reg = R_EAX;
+ func = HV_CPUID_FEATURES;
+ break;
+ case FEAT_HYPERV_EDX:
+ reg = R_EDX;
+ func = HV_CPUID_FEATURES;
+ break;
+ case FEAT_HV_RECOMM_EAX:
+ reg = R_EAX;
+ func = HV_CPUID_ENLIGHTMENT_INFO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ entry = cpuid_find_entry(cpuid, func, 0);
+ if (!entry) {
+ return -ENOENT;
+ }
+
+ switch (reg) {
+ case R_EAX:
+ *r = entry->eax;
+ break;
+ case R_EDX:
+ *r = entry->edx;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid,
+ int feature)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ uint32_t r, fw, bits;
+ uint64_t deps;
+ int i, dep_feat = 0;
+
+ if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) {
+ return 0;
+ }
+
+ deps = kvm_hyperv_properties[feature].dependencies;
+ while ((dep_feat = find_next_bit(&deps, 64, dep_feat)) < 64) {
+ if (!(hyperv_feat_enabled(cpu, dep_feat))) {
+ fprintf(stderr,
+ "Hyper-V %s requires Hyper-V %s\n",
+ kvm_hyperv_properties[feature].desc,
+ kvm_hyperv_properties[dep_feat].desc);
+ return 1;
}
- env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
+ dep_feat++;
}
- if (cpu->hyperv_synic) {
- unsigned int cap = KVM_CAP_HYPERV_SYNIC;
- if (!cpu->hyperv_synic_kvm_only) {
- if (!cpu->hyperv_vpindex) {
- fprintf(stderr, "Hyper-V SynIC "
- "(requested by 'hv-synic' cpu flag) "
- "requires Hyper-V VP_INDEX ('hv-vpindex')\n");
- return -ENOSYS;
- }
- cap = KVM_CAP_HYPERV_SYNIC2;
+
+ for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) {
+ fw = kvm_hyperv_properties[feature].flags[i].fw;
+ bits = kvm_hyperv_properties[feature].flags[i].bits;
+
+ if (!fw) {
+ continue;
}
- if (!has_msr_hv_synic || !kvm_check_extension(cs->kvm_state, cap)) {
- fprintf(stderr, "Hyper-V SynIC (requested by 'hv-synic' cpu flag) "
- "is not supported by kernel\n");
- return -ENOSYS;
+ if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) {
+ if (hyperv_feat_enabled(cpu, feature)) {
+ fprintf(stderr,
+ "Hyper-V %s is not supported by kernel\n",
+ kvm_hyperv_properties[feature].desc);
+ return 1;
+ } else {
+ return 0;
+ }
}
- env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
+ env->features[fw] |= bits;
}
- if (cpu->hyperv_stimer) {
- if (!has_msr_hv_stimer) {
- fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
+
+ if (cpu->hyperv_passthrough) {
+ cpu->hyperv_features |= BIT(feature);
+ }
+
+ return 0;
+}
+
+/*
+ * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in
+ * case of success, errno < 0 in case of failure and 0 when no Hyper-V
+ * extensions are enabled.
+ */
+static int hyperv_handle_properties(CPUState *cs,
+ struct kvm_cpuid_entry2 *cpuid_ent)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+ struct kvm_cpuid2 *cpuid;
+ struct kvm_cpuid_entry2 *c;
+ uint32_t signature[3];
+ uint32_t cpuid_i = 0;
+ int r;
+
+ if (!hyperv_enabled(cpu))
+ return 0;
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ||
+ cpu->hyperv_passthrough) {
+ uint16_t evmcs_version;
+
+ r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
+ (uintptr_t)&evmcs_version);
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) {
+ fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
+ kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc);
return -ENOSYS;
}
- env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
- }
- if (cpu->hyperv_relaxed_timing) {
- env->features[FEAT_HV_RECOMM_EAX] |= HV_RELAXED_TIMING_RECOMMENDED;
+
+ if (!r) {
+ env->features[FEAT_HV_RECOMM_EAX] |=
+ HV_ENLIGHTENED_VMCS_RECOMMENDED;
+ env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
+ }
}
- if (cpu->hyperv_vapic) {
- env->features[FEAT_HV_RECOMM_EAX] |= HV_APIC_ACCESS_RECOMMENDED;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) {
+ cpuid = get_supported_hv_cpuid(cs);
+ } else {
+ cpuid = get_supported_hv_cpuid_legacy(cs);
}
- if (cpu->hyperv_tlbflush) {
- if (kvm_check_extension(cs->kvm_state,
- KVM_CAP_HYPERV_TLBFLUSH) <= 0) {
- fprintf(stderr, "Hyper-V TLB flush support "
- "(requested by 'hv-tlbflush' cpu flag) "
- " is not supported by kernel\n");
- return -ENOSYS;
+
+ if (cpu->hyperv_passthrough) {
+ memcpy(cpuid_ent, &cpuid->entries[0],
+ cpuid->nent * sizeof(cpuid->entries[0]));
+
+ c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0);
+ if (c) {
+ env->features[FEAT_HYPERV_EAX] = c->eax;
+ env->features[FEAT_HYPERV_EBX] = c->ebx;
+ env->features[FEAT_HYPERV_EDX] = c->edx;
}
- env->features[FEAT_HV_RECOMM_EAX] |= HV_REMOTE_TLB_FLUSH_RECOMMENDED;
- env->features[FEAT_HV_RECOMM_EAX] |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
- }
- if (cpu->hyperv_ipi) {
- if (kvm_check_extension(cs->kvm_state,
- KVM_CAP_HYPERV_SEND_IPI) <= 0) {
- fprintf(stderr, "Hyper-V IPI send support "
- "(requested by 'hv-ipi' cpu flag) "
- " is not supported by kernel\n");
- return -ENOSYS;
+ c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
+ if (c) {
+ env->features[FEAT_HV_RECOMM_EAX] = c->eax;
+
+ /* hv-spinlocks may have been overridden */
+ if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) {
+ c->ebx = cpu->hyperv_spinlock_attempts;
+ }
+ }
+ c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0);
+ if (c) {
+ env->features[FEAT_HV_NESTED_EAX] = c->eax;
}
- env->features[FEAT_HV_RECOMM_EAX] |= HV_CLUSTER_IPI_RECOMMENDED;
- env->features[FEAT_HV_RECOMM_EAX] |= HV_EX_PROCESSOR_MASKS_RECOMMENDED;
}
- if (cpu->hyperv_evmcs) {
- uint16_t evmcs_version;
- if (kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0,
- (uintptr_t)&evmcs_version)) {
- fprintf(stderr, "Hyper-V Enlightened VMCS "
- "(requested by 'hv-evmcs' cpu flag) "
- "is not supported by kernel\n");
- return -ENOSYS;
+ /* Features */
+ r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI);
+ r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT);
+
+ /* Additional dependencies not covered by kvm_hyperv_properties[] */
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) &&
+ !cpu->hyperv_synic_kvm_only &&
+ !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) {
+ fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
+ kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc,
+ kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc);
+ r |= 1;
+ }
+
+ /* Not exposed by KVM but needed to make CPU hotplug in Windows work */
+ env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+
+ if (r) {
+ r = -ENOSYS;
+ goto free;
+ }
+
+ if (cpu->hyperv_passthrough) {
+ /* We already copied all feature words from KVM as is */
+ r = cpuid->nent;
+ goto free;
+ }
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+ if (!cpu->hyperv_vendor_id) {
+ memcpy(signature, "Microsoft Hv", 12);
+ } else {
+ size_t len = strlen(cpu->hyperv_vendor_id);
+
+ if (len > 12) {
+ error_report("hv-vendor-id truncated to 12 characters");
+ len = 12;
+ }
+ memset(signature, 0, 12);
+ memcpy(signature, cpu->hyperv_vendor_id, len);
+ }
+ c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ?
+ HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
+ c->ebx = signature[0];
+ c->ecx = signature[1];
+ c->edx = signature[2];
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_INTERFACE;
+ memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
+ c->eax = signature[0];
+ c->ebx = 0;
+ c->ecx = 0;
+ c->edx = 0;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_VERSION;
+ c->eax = 0x00001bbc;
+ c->ebx = 0x00060001;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_FEATURES;
+ c->eax = env->features[FEAT_HYPERV_EAX];
+ c->ebx = env->features[FEAT_HYPERV_EBX];
+ c->edx = env->features[FEAT_HYPERV_EDX];
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_ENLIGHTMENT_INFO;
+ c->eax = env->features[FEAT_HV_RECOMM_EAX];
+ c->ebx = cpu->hyperv_spinlock_attempts;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_IMPLEMENT_LIMITS;
+ c->eax = cpu->hv_max_vps;
+ c->ebx = 0x40;
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) {
+ __u32 function;
+
+ /* Create zeroed 0x40000006..0x40000009 leaves */
+ for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
+ function < HV_CPUID_NESTED_FEATURES; function++) {
+ c = &cpuid_ent[cpuid_i++];
+ c->function = function;
}
- env->features[FEAT_HV_RECOMM_EAX] |= HV_ENLIGHTENED_VMCS_RECOMMENDED;
- env->features[FEAT_HV_NESTED_EAX] = evmcs_version;
+
+ c = &cpuid_ent[cpuid_i++];
+ c->function = HV_CPUID_NESTED_FEATURES;
+ c->eax = env->features[FEAT_HV_NESTED_EAX];
}
+ r = cpuid_i;
- return 0;
+free:
+ g_free(cpuid);
+
+ return r;
}
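
As an aside, the CPUID leaves assembled by hyperv_handle_properties() above are what the guest sees when it probes for Hyper-V. A minimal guest-side sketch of that probe (illustrative only, not part of this patch; it assumes an x86 guest and the standard 0x40000000/0x40000001 leaf numbers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void cpuid(uint32_t leaf, uint32_t *a, uint32_t *b,
                  uint32_t *c, uint32_t *d)
{
    __asm__ volatile("cpuid"
                     : "=a"(*a), "=b"(*b), "=c"(*c), "=d"(*d)
                     : "a"(leaf), "c"(0));
}

int main(void)
{
    uint32_t eax, ebx, ecx, edx;
    char vendor[13] = { 0 };

    /* HV_CPUID_VENDOR_AND_MAX_FUNCTIONS: vendor string in EBX:ECX:EDX */
    cpuid(0x40000000, &eax, &ebx, &ecx, &edx);
    memcpy(vendor, &ebx, 4);
    memcpy(vendor + 4, &ecx, 4);
    memcpy(vendor + 8, &edx, 4);

    /* HV_CPUID_INTERFACE: EAX reads "Hv#1" when Hyper-V is exposed */
    cpuid(0x40000001, &eax, &ebx, &ecx, &edx);

    printf("hypervisor vendor: %s, interface: %.4s\n", vendor, (char *)&eax);
    return 0;
}

With the default hv-vendor-id the vendor string reads "Microsoft Hv", matching the signature written into cpuid_ent above.
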
+static Error *hv_passthrough_mig_blocker;
+
static int hyperv_init_vcpu(X86CPU *cpu)
{
CPUState *cs = CPU(cpu);
+ Error *local_err = NULL;
int ret;
- if (cpu->hyperv_vpindex && !hv_vpindex_settable) {
+ if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) {
+ error_setg(&hv_passthrough_mig_blocker,
+ "'hv-passthrough' CPU flag prevents migration, use an explicit"
+ " set of hv-* flags instead");
+ ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ error_free(hv_passthrough_mig_blocker);
+ return ret;
+ }
+ }
+
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
/*
* the kernel doesn't support setting vp_index; assert that its value
* is in sync
@@ -881,7 +1317,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
}
}
- if (cpu->hyperv_synic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
uint32_t synic_cap = cpu->hyperv_synic_kvm_only ?
KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2;
ret = kvm_vcpu_enable_cap(cs, synic_cap, 0);
@@ -905,7 +1341,7 @@ static int hyperv_init_vcpu(X86CPU *cpu)
}
static Error *invtsc_mig_blocker;
-static Error *vmx_mig_blocker;
+static Error *nested_virt_mig_blocker;
#define KVM_MAX_CPUID_ENTRIES 100
@@ -930,6 +1366,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
int kvm_base = KVM_CPUID_SIGNATURE;
+ int max_nested_state_len;
int r;
Error *local_err = NULL;
@@ -939,7 +1376,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
r = kvm_arch_set_tsc_khz(cs);
if (r < 0) {
- goto fail;
+ return r;
}
/* vcpu's TSC frequency is either specified by user, or following
@@ -957,79 +1394,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
}
/* Paravirtualization CPUIDs */
- if (hyperv_enabled(cpu)) {
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
- if (!cpu->hyperv_vendor_id) {
- memcpy(signature, "Microsoft Hv", 12);
- } else {
- size_t len = strlen(cpu->hyperv_vendor_id);
-
- if (len > 12) {
- error_report("hv-vendor-id truncated to 12 characters");
- len = 12;
- }
- memset(signature, 0, 12);
- memcpy(signature, cpu->hyperv_vendor_id, len);
- }
- c->eax = cpu->hyperv_evmcs ?
- HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS;
- c->ebx = signature[0];
- c->ecx = signature[1];
- c->edx = signature[2];
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_INTERFACE;
- memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
- c->eax = signature[0];
- c->ebx = 0;
- c->ecx = 0;
- c->edx = 0;
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_VERSION;
- c->eax = 0x00001bbc;
- c->ebx = 0x00060001;
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_FEATURES;
- r = hyperv_handle_properties(cs);
- if (r) {
- return r;
- }
- c->eax = env->features[FEAT_HYPERV_EAX];
- c->ebx = env->features[FEAT_HYPERV_EBX];
- c->edx = env->features[FEAT_HYPERV_EDX];
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_ENLIGHTMENT_INFO;
-
- c->eax = env->features[FEAT_HV_RECOMM_EAX];
- c->ebx = cpu->hyperv_spinlock_attempts;
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_IMPLEMENT_LIMITS;
-
- c->eax = cpu->hv_max_vps;
- c->ebx = 0x40;
-
+ r = hyperv_handle_properties(cs, cpuid_data.entries);
+ if (r < 0) {
+ return r;
+ } else if (r > 0) {
+ cpuid_i = r;
kvm_base = KVM_CPUID_SIGNATURE_NEXT;
has_msr_hv_hypercall = true;
-
- if (cpu->hyperv_evmcs) {
- __u32 function;
-
- /* Create zeroed 0x40000006..0x40000009 leaves */
- for (function = HV_CPUID_IMPLEMENT_LIMITS + 1;
- function < HV_CPUID_NESTED_FEATURES; function++) {
- c = &cpuid_data.entries[cpuid_i++];
- c->function = function;
- }
-
- c = &cpuid_data.entries[cpuid_i++];
- c->function = HV_CPUID_NESTED_FEATURES;
- c->eax = env->features[FEAT_HV_NESTED_EAX];
- }
}
if (cpu->expose_kvm) {
@@ -1269,13 +1640,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
!!(c->ecx & CPUID_EXT_SMX);
}
- if ((env->features[FEAT_1_ECX] & CPUID_EXT_VMX) && !vmx_mig_blocker) {
- error_setg(&vmx_mig_blocker,
- "Nested VMX virtualization does not support live migration yet");
- r = migrate_add_blocker(vmx_mig_blocker, &local_err);
+ if (cpu_has_vmx(env) && !nested_virt_mig_blocker &&
+ ((kvm_max_nested_state_length() <= 0) || !has_exception_payload)) {
+ error_setg(&nested_virt_mig_blocker,
+ "Kernel does not provide required capabilities for "
+ "nested virtualization migration. "
+ "(CAP_NESTED_STATE=%d, CAP_EXCEPTION_PAYLOAD=%d)",
+ kvm_max_nested_state_length() > 0,
+ has_exception_payload);
+ r = migrate_add_blocker(nested_virt_mig_blocker, &local_err);
if (local_err) {
error_report_err(local_err);
- error_free(vmx_mig_blocker);
+ error_free(nested_virt_mig_blocker);
return r;
}
}
@@ -1294,7 +1670,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (local_err) {
error_report_err(local_err);
error_free(invtsc_mig_blocker);
- return r;
+ goto fail2;
}
}
}
@@ -1330,6 +1706,24 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (has_xsave) {
env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
}
+
+ max_nested_state_len = kvm_max_nested_state_length();
+ if (max_nested_state_len > 0) {
+ assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
+ env->nested_state = g_malloc0(max_nested_state_len);
+
+ env->nested_state->size = max_nested_state_len;
+
+ if (IS_INTEL_CPU(env)) {
+ struct kvm_vmx_nested_state_hdr *vmx_hdr =
+ &env->nested_state->hdr.vmx;
+
+ env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+ vmx_hdr->vmxon_pa = -1ull;
+ vmx_hdr->vmcs12_pa = -1ull;
+ }
+ }
+
cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) {
@@ -1345,9 +1739,30 @@ int kvm_arch_init_vcpu(CPUState *cs)
fail:
migrate_del_blocker(invtsc_mig_blocker);
+ fail2:
+ migrate_del_blocker(nested_virt_mig_blocker);
+
return r;
}
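
The nested-state buffer allocated in kvm_arch_init_vcpu() is sized from kvm_max_nested_state_length(), which reports the value KVM returns for KVM_CHECK_EXTENSION(KVM_CAP_NESTED_STATE). A minimal stand-alone sketch of that query (illustrative only, assuming Linux with /dev/kvm and recent kvm.h headers; not QEMU code):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm_fd = open("/dev/kvm", O_RDWR);
    if (kvm_fd < 0) {
        perror("open /dev/kvm");
        return 1;
    }

    /*
     * For KVM_CAP_NESTED_STATE the extension check returns 0 when the
     * capability is absent, otherwise the maximum buffer size in bytes
     * that KVM_GET_NESTED_STATE may need on this kernel.
     */
    int len = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NESTED_STATE);
    printf("KVM_CAP_NESTED_STATE max size: %d bytes\n", len);

    close(kvm_fd);
    return 0;
}

The same value bounds the size checks in nested_state_post_load() further down, so a stream saved on a kernel with a larger nested state is rejected cleanly rather than truncated.
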
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ if (cpu->kvm_msr_buf) {
+ g_free(cpu->kvm_msr_buf);
+ cpu->kvm_msr_buf = NULL;
+ }
+
+ if (env->nested_state) {
+ g_free(env->nested_state);
+ env->nested_state = NULL;
+ }
+
+ return 0;
+}
+
void kvm_arch_reset_vcpu(X86CPU *cpu)
{
CPUX86State *env = &cpu->env;
@@ -1360,7 +1775,7 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
env->mp_state = KVM_MP_STATE_RUNNABLE;
}
- if (cpu->hyperv_synic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
int i;
for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
@@ -1515,6 +1930,9 @@ static int kvm_get_supported_msrs(KVMState *s)
case MSR_IA32_ARCH_CAPABILITIES:
has_msr_arch_capabs = true;
break;
+ case MSR_IA32_CORE_CAPABILITY:
+ has_msr_core_capabs = true;
+ break;
}
}
}
@@ -1572,6 +1990,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX);
+ has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD);
+ if (has_exception_payload) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true);
+ if (ret < 0) {
+ error_report("kvm: Failed to enable exception payload cap: %s",
+ strerror(-ret));
+ return ret;
+ }
+ }
+
ret = kvm_get_supported_msrs(s);
if (ret < 0) {
return ret;
@@ -2041,6 +2469,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->features[FEAT_ARCH_CAPABILITIES]);
}
+ if (has_msr_core_capabs) {
+ kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY,
+ env->features[FEAT_CORE_CAPABILITY]);
+ }
+
/*
* The following MSRs have side effects on the guest or are too heavy
* for normal writeback. Limit them to reset or full state updates.
@@ -2100,11 +2533,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
env->msr_hv_hypercall);
}
- if (cpu->hyperv_time) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
env->msr_hv_tsc);
}
- if (cpu->hyperv_reenlightenment) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL,
env->msr_hv_reenlightenment_control);
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL,
@@ -2113,7 +2546,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
env->msr_hv_tsc_emulation_status);
}
}
- if (cpu->hyperv_vapic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
env->msr_hv_vapic);
}
@@ -2129,11 +2562,12 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_hv_runtime) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
}
- if (cpu->hyperv_vpindex && hv_vpindex_settable) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)
+ && hv_vpindex_settable) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX,
hyperv_vp_index(CPU(cpu)));
}
- if (cpu->hyperv_synic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
int j;
kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
@@ -2473,13 +2907,13 @@ static int kvm_get_msrs(X86CPU *cpu)
kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0);
}
- if (cpu->hyperv_vapic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0);
}
- if (cpu->hyperv_time) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0);
}
- if (cpu->hyperv_reenlightenment) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) {
kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0);
kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0);
@@ -2494,7 +2928,7 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_runtime) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0);
}
- if (cpu->hyperv_synic) {
+ if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) {
uint32_t msr;
kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
@@ -2876,8 +3310,16 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
return 0;
}
- events.exception.injected = (env->exception_injected >= 0);
- events.exception.nr = env->exception_injected;
+ events.flags = 0;
+
+ if (has_exception_payload) {
+ events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD;
+ events.exception.pending = env->exception_pending;
+ events.exception_has_payload = env->exception_has_payload;
+ events.exception_payload = env->exception_payload;
+ }
+ events.exception.nr = env->exception_nr;
+ events.exception.injected = env->exception_injected;
events.exception.has_error_code = env->has_error_code;
events.exception.error_code = env->error_code;
@@ -2890,7 +3332,6 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level)
events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK);
events.sipi_vector = env->sipi_vector;
- events.flags = 0;
if (has_msr_smbase) {
events.smi.smm = !!(env->hflags & HF_SMM_MASK);
@@ -2940,8 +3381,19 @@ static int kvm_get_vcpu_events(X86CPU *cpu)
if (ret < 0) {
return ret;
}
- env->exception_injected =
- events.exception.injected ? events.exception.nr : -1;
+
+ if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) {
+ env->exception_pending = events.exception.pending;
+ env->exception_has_payload = events.exception_has_payload;
+ env->exception_payload = events.exception_payload;
+ } else {
+ env->exception_pending = 0;
+ env->exception_has_payload = false;
+ }
+ env->exception_injected = events.exception.injected;
+ env->exception_nr =
+ (env->exception_pending || env->exception_injected) ?
+ events.exception.nr : -1;
env->has_error_code = events.exception.has_error_code;
env->error_code = events.exception.error_code;
@@ -2993,12 +3445,12 @@ static int kvm_guest_debug_workarounds(X86CPU *cpu)
unsigned long reinject_trap = 0;
if (!kvm_has_vcpu_events()) {
- if (env->exception_injected == 1) {
+ if (env->exception_nr == EXCP01_DB) {
reinject_trap = KVM_GUESTDBG_INJECT_DB;
- } else if (env->exception_injected == 3) {
+ } else if (env->exception_nr == EXCP03_INT3) {
reinject_trap = KVM_GUESTDBG_INJECT_BP;
}
- env->exception_injected = -1;
+ kvm_reset_exception(env);
}
/*
@@ -3059,6 +3511,52 @@ static int kvm_get_debugregs(X86CPU *cpu)
return 0;
}
+static int kvm_put_nested_state(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int max_nested_state_len = kvm_max_nested_state_length();
+
+ if (max_nested_state_len <= 0) {
+ return 0;
+ }
+
+ assert(env->nested_state->size <= max_nested_state_len);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
+}
+
+static int kvm_get_nested_state(X86CPU *cpu)
+{
+ CPUX86State *env = &cpu->env;
+ int max_nested_state_len = kvm_max_nested_state_length();
+ int ret;
+
+ if (max_nested_state_len <= 0) {
+ return 0;
+ }
+
+ /*
+ * It is possible that migration restored a smaller size into
+ * nested_state->size than what our kernel supports.
+ * We preserve the migration origin's nested_state->size for the
+ * call to KVM_SET_NESTED_STATE, but want our next call to
+ * KVM_GET_NESTED_STATE to use the max size our kernel supports.
+ */
+ env->nested_state->size = max_nested_state_len;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
+ env->hflags |= HF_GUEST_MASK;
+ } else {
+ env->hflags &= ~HF_GUEST_MASK;
+ }
+
+ return ret;
+}
+
int kvm_arch_put_registers(CPUState *cpu, int level)
{
X86CPU *x86_cpu = X86_CPU(cpu);
@@ -3066,6 +3564,11 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+ ret = kvm_put_nested_state(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_msr_feature_control(x86_cpu);
if (ret < 0) {
@@ -3181,6 +3684,10 @@ int kvm_arch_get_registers(CPUState *cs)
if (ret < 0) {
goto out;
}
+ ret = kvm_get_nested_state(cpu);
+ if (ret < 0) {
+ goto out;
+ }
ret = 0;
out:
cpu_sync_bndcs_hflags(&cpu->env);
@@ -3319,13 +3826,13 @@ int kvm_arch_process_async_events(CPUState *cs)
kvm_cpu_synchronize_state(cs);
- if (env->exception_injected == EXCP08_DBLE) {
+ if (env->exception_nr == EXCP08_DBLE) {
/* this means triple fault */
qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
cs->exit_request = 1;
return 0;
}
- env->exception_injected = EXCP12_MCHK;
+ kvm_queue_exception(env, EXCP12_MCHK, 0, 0);
env->has_error_code = 0;
cs->halted = 0;
@@ -3507,8 +4014,8 @@ static int kvm_handle_debug(X86CPU *cpu,
int ret = 0;
int n;
- if (arch_info->exception == 1) {
- if (arch_info->dr6 & (1 << 14)) {
+ if (arch_info->exception == EXCP01_DB) {
+ if (arch_info->dr6 & DR6_BS) {
if (cs->singlestep_enabled) {
ret = EXCP_DEBUG;
}
@@ -3540,10 +4047,12 @@ static int kvm_handle_debug(X86CPU *cpu,
}
if (ret == 0) {
cpu_synchronize_state(cs);
- assert(env->exception_injected == -1);
+ assert(env->exception_nr == -1);
/* pass to guest */
- env->exception_injected = arch_info->exception;
+ kvm_queue_exception(env, arch_info->exception,
+ arch_info->exception == EXCP01_DB,
+ arch_info->dr6);
env->has_error_code = 0;
}
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 4aff1a763f..851b249d1a 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -231,6 +231,50 @@ static int cpu_pre_save(void *opaque)
env->segs[R_SS].flags &= ~(env->segs[R_SS].flags & DESC_DPL_MASK);
}
+#ifdef CONFIG_KVM
+ /* Verify we have nested virtualization state from kernel if required */
+ if (kvm_enabled() && cpu_has_vmx(env) && !env->nested_state) {
+ error_report("Guest enabled nested virtualization but kernel "
+ "does not support saving of nested state");
+ return -EINVAL;
+ }
+#endif
+
+ /*
+ * While a vCPU is running L2, a pending exception can still be
+ * intercepted by the L1 hypervisor, whereas an injected exception
+ * can no longer be intercepted.
+ *
+ * Furthermore, when an L2 exception is intercepted by the L1
+ * hypervisor, its exception payload (CR2/DR6 on #PF/#DB)
+ * should not yet be set in the respective vCPU register.
+ * Thus, when an exception is pending, it is important to save
+ * the exception payload separately.
+ *
+ * Therefore, if an exception is not pending or the vCPU is not
+ * in guest-mode, there is no need to distinguish between a
+ * pending and an injected exception, and no need to store the
+ * exception payload separately.
+ *
+ * To keep migration backwards-compatible, convert a pending
+ * exception to an injected exception whenever the distinction
+ * does not matter, as described above.
+ */
+ if (env->exception_pending && !(env->hflags & HF_GUEST_MASK)) {
+ env->exception_pending = 0;
+ env->exception_injected = 1;
+
+ if (env->exception_has_payload) {
+ if (env->exception_nr == EXCP01_DB) {
+ env->dr[6] = env->exception_payload;
+ } else if (env->exception_nr == EXCP0E_PAGE) {
+ env->cr[2] = env->exception_payload;
+ }
+ }
+ }
+
return 0;
}
@@ -278,6 +322,33 @@ static int cpu_post_load(void *opaque, int version_id)
env->hflags &= ~HF_CPL_MASK;
env->hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+#ifdef CONFIG_KVM
+ if ((env->hflags & HF_GUEST_MASK) &&
+ (!env->nested_state ||
+ !(env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE))) {
+ error_report("vCPU set in guest-mode inconsistent with "
+ "migrated kernel nested state");
+ return -EINVAL;
+ }
+#endif
+
+ /*
+ * There are cases where a valid exception_nr arrives with both
+ * exception_pending and exception_injected cleared.
+ * This can happen in one of the following scenarios:
+ * 1) Source is an older QEMU without KVM_CAP_EXCEPTION_PAYLOAD support.
+ * 2) Source is running on a kernel without KVM_CAP_EXCEPTION_PAYLOAD support.
+ * 3) The "cpu/exception_info" subsection was not sent because no exception
+ *    was pending or the guest was not running L2 (see the comment in
+ *    cpu_pre_save()).
+ *
+ * In those cases, a valid exception_nr means the exception can be
+ * treated as already injected.
+ */
+ if ((env->exception_nr != -1) &&
+ !env->exception_pending && !env->exception_injected) {
+ env->exception_injected = 1;
+ }
+
env->fpstt = (env->fpus_vmstate >> 11) & 7;
env->fpus = env->fpus_vmstate & ~0x3800;
env->fptag_vmstate ^= 0xff;
@@ -323,6 +394,35 @@ static bool steal_time_msr_needed(void *opaque)
return cpu->env.steal_time_msr != 0;
}
+static bool exception_info_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ /*
+ * Exception info only needs to be saved when we must distinguish
+ * between a pending and an injected exception, which is only the
+ * case when there is a pending exception and the vCPU is running L2.
+ * For more info, refer to the comment in cpu_pre_save().
+ */
+ return env->exception_pending && (env->hflags & HF_GUEST_MASK);
+}
+
+static const VMStateDescription vmstate_exception_info = {
+ .name = "cpu/exception_info",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = exception_info_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8(env.exception_pending, X86CPU),
+ VMSTATE_UINT8(env.exception_injected, X86CPU),
+ VMSTATE_UINT8(env.exception_has_payload, X86CPU),
+ VMSTATE_UINT64(env.exception_payload, X86CPU),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_steal_time_msr = {
.name = "cpu/steal_time_msr",
.version_id = 1,
@@ -634,7 +734,7 @@ static bool hyperv_runtime_enable_needed(void *opaque)
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
- if (!cpu->hyperv_runtime) {
+ if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_RUNTIME)) {
return false;
}
@@ -851,6 +951,182 @@ static const VMStateDescription vmstate_tsc_khz = {
}
};
+#ifdef CONFIG_KVM
+
+static bool vmx_vmcs12_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+ return (nested_state->size >
+ offsetof(struct kvm_nested_state, data.vmx[0].vmcs12));
+}
+
+static const VMStateDescription vmstate_vmx_vmcs12 = {
+ .name = "cpu/kvm_nested_state/vmx/vmcs12",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_vmcs12_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data.vmx[0].vmcs12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_VMX_VMCS_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vmx_shadow_vmcs12_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+ return (nested_state->size >
+ offsetof(struct kvm_nested_state, data.vmx[0].shadow_vmcs12));
+}
+
+static const VMStateDescription vmstate_vmx_shadow_vmcs12 = {
+ .name = "cpu/kvm_nested_state/vmx/shadow_vmcs12",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_shadow_vmcs12_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT8_ARRAY(data.vmx[0].shadow_vmcs12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_VMX_VMCS_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool vmx_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ return ((nested_state->format == KVM_STATE_NESTED_FORMAT_VMX) &&
+ ((nested_state->hdr.vmx.vmxon_pa != -1ull) ||
+ (nested_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON)));
+}
+
+static const VMStateDescription vmstate_vmx_nested_state = {
+ .name = "cpu/kvm_nested_state/vmx",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = vmx_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_U64(hdr.vmx.vmxon_pa, struct kvm_nested_state),
+ VMSTATE_U64(hdr.vmx.vmcs12_pa, struct kvm_nested_state),
+ VMSTATE_U16(hdr.vmx.smm.flags, struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vmx_vmcs12,
+ &vmstate_vmx_shadow_vmcs12,
+ NULL,
+ }
+};
+
+static bool svm_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM);
+}
+
+static const VMStateDescription vmstate_svm_nested_state = {
+ .name = "cpu/kvm_nested_state/svm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+static bool nested_state_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+
+ return (env->nested_state &&
+ (vmx_nested_state_needed(env->nested_state) ||
+ svm_nested_state_needed(env->nested_state)));
+}
+
+static int nested_state_post_load(void *opaque, int version_id)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ struct kvm_nested_state *nested_state = env->nested_state;
+ int min_nested_state_len = offsetof(struct kvm_nested_state, data);
+ int max_nested_state_len = kvm_max_nested_state_length();
+
+ /*
+ * If our kernel doesn't support setting nested state
+ * and we have received nested state from the migration stream,
+ * we need to fail the migration.
+ */
+ if (max_nested_state_len <= 0) {
+ error_report("Received nested state when kernel cannot restore it");
+ return -EINVAL;
+ }
+
+ /*
+ * Verify that the size of the received nested_state struct
+ * at least covers the required header and is not larger
+ * than the max size our kernel supports.
+ */
+ if (nested_state->size < min_nested_state_len) {
+ error_report("Received nested state size less than min: "
+ "len=%d, min=%d",
+ nested_state->size, min_nested_state_len);
+ return -EINVAL;
+ }
+ if (nested_state->size > max_nested_state_len) {
+ error_report("Received unsupported nested state size: "
+ "nested_state->size=%d, max=%d",
+ nested_state->size, max_nested_state_len);
+ return -EINVAL;
+ }
+
+ /* Verify format is valid */
+ if ((nested_state->format != KVM_STATE_NESTED_FORMAT_VMX) &&
+ (nested_state->format != KVM_STATE_NESTED_FORMAT_SVM)) {
+ error_report("Received invalid nested state format: %d",
+ nested_state->format);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const VMStateDescription vmstate_kvm_nested_state = {
+ .name = "cpu/kvm_nested_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_U16(flags, struct kvm_nested_state),
+ VMSTATE_U16(format, struct kvm_nested_state),
+ VMSTATE_U32(size, struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ },
+ .subsections = (const VMStateDescription*[]) {
+ &vmstate_vmx_nested_state,
+ &vmstate_svm_nested_state,
+ NULL
+ }
+};
+
+static const VMStateDescription vmstate_nested_state = {
+ .name = "cpu/nested_state",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = nested_state_needed,
+ .post_load = nested_state_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_STRUCT_POINTER(env.nested_state, X86CPU,
+ vmstate_kvm_nested_state,
+ struct kvm_nested_state),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
+#endif
+
static bool mcg_ext_ctl_needed(void *opaque)
{
X86CPU *cpu = opaque;
@@ -1056,7 +1332,7 @@ VMStateDescription vmstate_x86_cpu = {
VMSTATE_INT32(env.interrupt_injected, X86CPU),
VMSTATE_UINT32(env.mp_state, X86CPU),
VMSTATE_UINT64(env.tsc, X86CPU),
- VMSTATE_INT32(env.exception_injected, X86CPU),
+ VMSTATE_INT32(env.exception_nr, X86CPU),
VMSTATE_UINT8(env.soft_interrupt, X86CPU),
VMSTATE_UINT8(env.nmi_injected, X86CPU),
VMSTATE_UINT8(env.nmi_pending, X86CPU),
@@ -1080,6 +1356,7 @@ VMStateDescription vmstate_x86_cpu = {
/* The above list is not sorted /wrt version numbers, watch out! */
},
.subsections = (const VMStateDescription*[]) {
+ &vmstate_exception_info,
&vmstate_async_pf_msr,
&vmstate_pv_eoi_msr,
&vmstate_steal_time_msr,
@@ -1113,6 +1390,9 @@ VMStateDescription vmstate_x86_cpu = {
#ifndef TARGET_X86_64
&vmstate_efer32,
#endif
+#ifdef CONFIG_KVM
+ &vmstate_nested_state,
+#endif
NULL
}
};
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index 8e72850962..938f8f144b 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -91,6 +91,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret;
}
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
void kvm_mips_reset_vcpu(MIPSCPU *cpu)
{
CPUMIPSState *env = &cpu->env;
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index d4107dd70d..4b4989c0af 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -521,6 +521,11 @@ int kvm_arch_init_vcpu(CPUState *cs)
return ret;
}
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
CPUPPCState *env = &cpu->env;
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index bcec9795ec..0267c6c2f6 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -368,6 +368,16 @@ int kvm_arch_init_vcpu(CPUState *cs)
return 0;
}
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ S390CPU *cpu = S390_CPU(cs);
+
+ g_free(cpu->irqstate);
+ cpu->irqstate = NULL;
+
+ return 0;
+}
+
void kvm_s390_reset_vcpu(S390CPU *cpu)
{
CPUState *cs = CPU(cpu);
diff --git a/util/main-loop.c b/util/main-loop.c
index e1e349ca5c..a9f4e8de75 100644
--- a/util/main-loop.c
+++ b/util/main-loop.c
@@ -422,7 +422,7 @@ static int os_host_main_loop_wait(int64_t timeout)
g_main_context_prepare(context, &max_priority);
n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
poll_fds, ARRAY_SIZE(poll_fds));
- g_assert(n_poll_fds <= ARRAY_SIZE(poll_fds));
+ g_assert(n_poll_fds + w->num <= ARRAY_SIZE(poll_fds));
for (i = 0; i < w->num; i++) {
poll_fds[n_poll_fds + i].fd = (DWORD_PTR)w->events[i];