Diffstat (limited to 'target/i386/kvm.c')
-rw-r--r--  target/i386/kvm.c  62
1 file changed, 30 insertions, 32 deletions
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index ee4e91fa64..b1e32e95d3 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -27,6 +27,7 @@
#include "sysemu/kvm_int.h"
#include "kvm_i386.h"
#include "hyperv.h"
+#include "hyperv-proto.h"
#include "exec/gdbstub.h"
#include "qemu/host-utils.h"
@@ -40,7 +41,6 @@
#include "hw/i386/x86-iommu.h"
#include "exec/ioport.h"
-#include "standard-headers/asm-x86/hyperv.h"
#include "hw/pci/pci.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
@@ -632,35 +632,34 @@ static int hyperv_handle_properties(CPUState *cs)
}
if (cpu->hyperv_relaxed_timing) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
}
if (cpu->hyperv_vapic) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_APIC_ACCESS_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_APIC_ACCESS_AVAILABLE;
}
if (cpu->hyperv_time) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_HYPERCALL_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_TIME_REF_COUNT_AVAILABLE;
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_REFERENCE_TSC_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_HYPERCALL_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_TIME_REF_COUNT_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_REFERENCE_TSC_AVAILABLE;
if (has_msr_hv_frequencies && tsc_is_stable_and_known(env)) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_ACCESS_FREQUENCY_MSRS;
- env->features[FEAT_HYPERV_EDX] |=
- HV_FEATURE_FREQUENCY_MSRS_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_ACCESS_FREQUENCY_MSRS;
+ env->features[FEAT_HYPERV_EDX] |= HV_FREQUENCY_MSRS_AVAILABLE;
}
}
if (cpu->hyperv_crash && has_msr_hv_crash) {
- env->features[FEAT_HYPERV_EDX] |= HV_X64_GUEST_CRASH_MSR_AVAILABLE;
+ env->features[FEAT_HYPERV_EDX] |= HV_GUEST_CRASH_MSR_AVAILABLE;
}
- env->features[FEAT_HYPERV_EDX] |= HV_X64_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
+ env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;
if (cpu->hyperv_reset && has_msr_hv_reset) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_RESET_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_RESET_AVAILABLE;
}
if (cpu->hyperv_vpindex && has_msr_hv_vpindex) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_INDEX_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_VP_INDEX_AVAILABLE;
}
if (cpu->hyperv_runtime && has_msr_hv_runtime) {
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
}
if (cpu->hyperv_synic) {
int sint;
@@ -671,10 +670,10 @@ static int hyperv_handle_properties(CPUState *cs)
return -ENOSYS;
}
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNIC_AVAILABLE;
- env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+ env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
+ env->msr_hv_synic_version = HV_SYNIC_VERSION;
for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
- env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+ env->msr_hv_synic_sint[sint] = HV_SINT_MASKED;
}
}
if (cpu->hyperv_stimer) {
@@ -682,7 +681,7 @@ static int hyperv_handle_properties(CPUState *cs)
fprintf(stderr, "Hyper-V timers aren't supported by kernel\n");
return -ENOSYS;
}
- env->features[FEAT_HYPERV_EAX] |= HV_X64_MSR_SYNTIMER_AVAILABLE;
+ env->features[FEAT_HYPERV_EAX] |= HV_SYNTIMERS_AVAILABLE;
}
return 0;
}
@@ -733,7 +732,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
/* Paravirtualization CPUIDs */
if (hyperv_enabled(cpu)) {
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
+ c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS;
if (!cpu->hyperv_vendor_id) {
memcpy(signature, "Microsoft Hv", 12);
} else {
@@ -746,13 +745,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
memset(signature, 0, 12);
memcpy(signature, cpu->hyperv_vendor_id, len);
}
- c->eax = HYPERV_CPUID_MIN;
+ c->eax = HV_CPUID_MIN;
c->ebx = signature[0];
c->ecx = signature[1];
c->edx = signature[2];
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_INTERFACE;
+ c->function = HV_CPUID_INTERFACE;
memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
c->eax = signature[0];
c->ebx = 0;
@@ -760,12 +759,12 @@ int kvm_arch_init_vcpu(CPUState *cs)
c->edx = 0;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_VERSION;
+ c->function = HV_CPUID_VERSION;
c->eax = 0x00001bbc;
c->ebx = 0x00060001;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_FEATURES;
+ c->function = HV_CPUID_FEATURES;
r = hyperv_handle_properties(cs);
if (r) {
return r;
@@ -775,17 +774,17 @@ int kvm_arch_init_vcpu(CPUState *cs)
c->edx = env->features[FEAT_HYPERV_EDX];
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
+ c->function = HV_CPUID_ENLIGHTMENT_INFO;
if (cpu->hyperv_relaxed_timing) {
- c->eax |= HV_X64_RELAXED_TIMING_RECOMMENDED;
+ c->eax |= HV_RELAXED_TIMING_RECOMMENDED;
}
if (cpu->hyperv_vapic) {
- c->eax |= HV_X64_APIC_ACCESS_RECOMMENDED;
+ c->eax |= HV_APIC_ACCESS_RECOMMENDED;
}
c->ebx = cpu->hyperv_spinlock_attempts;
c = &cpuid_data.entries[cpuid_i++];
- c->function = HYPERV_CPUID_IMPLEMENT_LIMITS;
+ c->function = HV_CPUID_IMPLEMENT_LIMITS;
c->eax = cpu->hv_max_vps;
c->ebx = 0x40;
@@ -1695,12 +1694,11 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
if (has_msr_hv_crash) {
int j;
- for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++)
+ for (j = 0; j < HV_CRASH_PARAMS; j++)
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j,
env->msr_hv_crash_params[j]);
- kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL,
- HV_X64_MSR_CRASH_CTL_NOTIFY);
+ kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY);
}
if (has_msr_hv_runtime) {
kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime);
@@ -2064,7 +2062,7 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_crash) {
int j;
- for (j = 0; j < HV_X64_MSR_CRASH_PARAMS; j++) {
+ for (j = 0; j < HV_CRASH_PARAMS; j++) {
kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0);
}
}
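
For reference, the renamed HV_* constants come from QEMU's own hyperv-proto.h (added by this patch's include) rather than the imported kernel header standard-headers/asm-x86/hyperv.h. The sketch below is not copied from the patch or from hyperv-proto.h; it is a minimal, standalone illustration of how the new names map onto the CPUID 0x40000003 feature words that hyperv_handle_properties() fills in, assuming the bit positions follow the Hyper-V TLFS layout. The demo_feature_words() helper is hypothetical and collapses several of the real checks.

/*
 * Illustrative sketch only -- not taken from hyperv-proto.h.
 * Bit positions are assumed to follow the Hyper-V TLFS layout for
 * CPUID 0x40000003 (EAX = partition privileges, EDX = misc features).
 */
#include <stdint.h>
#include <stdio.h>

/* CPUID 0x40000003.EAX (assumed TLFS bit positions) */
#define HV_VP_RUNTIME_AVAILABLE      (1u << 0)
#define HV_TIME_REF_COUNT_AVAILABLE  (1u << 1)
#define HV_SYNIC_AVAILABLE           (1u << 2)
#define HV_SYNTIMERS_AVAILABLE       (1u << 3)
#define HV_APIC_ACCESS_AVAILABLE     (1u << 4)
#define HV_HYPERCALL_AVAILABLE       (1u << 5)
#define HV_VP_INDEX_AVAILABLE        (1u << 6)
#define HV_RESET_AVAILABLE           (1u << 7)
#define HV_REFERENCE_TSC_AVAILABLE   (1u << 9)
#define HV_ACCESS_FREQUENCY_MSRS     (1u << 11)

/* CPUID 0x40000003.EDX (assumed TLFS bit positions) */
#define HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE (1u << 3)
#define HV_FREQUENCY_MSRS_AVAILABLE           (1u << 8)
#define HV_GUEST_CRASH_MSR_AVAILABLE          (1u << 10)

/*
 * Hypothetical helper mirroring the shape of hyperv_handle_properties():
 * build the EAX/EDX feature words for a few of the hv-* options.
 * The real code additionally gates the frequency bits on
 * has_msr_hv_frequencies and a stable, known TSC.
 */
static void demo_feature_words(int hv_relaxed, int hv_vapic, int hv_time,
                               uint32_t *eax, uint32_t *edx)
{
    *eax = 0;
    *edx = HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE;

    if (hv_relaxed || hv_vapic || hv_time) {
        *eax |= HV_HYPERCALL_AVAILABLE;
    }
    if (hv_vapic) {
        *eax |= HV_APIC_ACCESS_AVAILABLE;
    }
    if (hv_time) {
        *eax |= HV_TIME_REF_COUNT_AVAILABLE | HV_REFERENCE_TSC_AVAILABLE;
        *eax |= HV_ACCESS_FREQUENCY_MSRS;
        *edx |= HV_FREQUENCY_MSRS_AVAILABLE;
    }
}

int main(void)
{
    uint32_t eax, edx;

    /* e.g. guest started with -cpu ...,hv_relaxed,hv_vapic,hv_time */
    demo_feature_words(1, 1, 1, &eax, &edx);
    printf("CPUID 0x40000003: eax=0x%08x edx=0x%08x\n", eax, edx);
    return 0;
}

Because the new names drop the MSR-centric HV_X64_MSR_* prefixes while keeping the same bit meanings, the patch is a pure rename: the feature words programmed into KVM are unchanged.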