author     Andrey Smetanin <asmetanin@virtuozzo.com>    2015-11-11 13:18:38 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>          2015-12-17 15:24:34 +0100
commit     866eea9a130d429d14ca10212a303cffc6af6c15 (patch)
tree       8653fdf46c9154913052d3fe75a7e7646ddddd20
parent     fff02bc00b41bd4f2d1081a31808849be0b275f8 (diff)
target-i386/kvm: Hyper-V SynIC MSR's support
This patch adds support for and migration of the Hyper-V Synthetic Interrupt
Controller (Hyper-V SynIC) MSRs. Hyper-V SynIC is enabled by the CPU's
'hv-synic' option. The patch does not allow CPU creation if 'hv-synic' is
specified but the kernel does not support Hyper-V SynIC.

Changes v3:
* removed 'msr_hv_synic_version' migration because its value is always the same
* moved SynIC MSR initialization into kvm_arch_init_vcpu

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Richard Henderson <rth@twiddle.net>
CC: Eduardo Habkost <ehabkost@redhat.com>
CC: "Andreas Färber" <afaerber@suse.de>
CC: Marcelo Tosatti <mtosatti@redhat.com>
CC: Roman Kagan <rkagan@virtuozzo.com>
CC: Denis V. Lunev <den@openvz.org>
CC: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
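For context (not part of this commit): the new property is set on the command
line like the other Hyper-V enlightenments, e.g. "-cpu <model>,hv-synic" with
KVM enabled, and it only works on host kernels that list the SynIC MSRs and
accept KVM_CAP_HYPERV_SYNIC. Below is a minimal, hypothetical stand-alone
sketch of probing the host for that capability with the generic
KVM_CHECK_EXTENSION ioctl; the file handling and output are illustrative and
not QEMU code.

/* Hypothetical probe: does the running kernel advertise KVM_CAP_HYPERV_SYNIC,
 * i.e. can the 'hv-synic' CPU option added by this patch work at all? */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
    int kvm = open("/dev/kvm", O_RDWR);
    if (kvm < 0) {
        perror("open /dev/kvm");
        return 1;
    }
    /* KVM_CHECK_EXTENSION returns > 0 when the capability is available */
    int ret = ioctl(kvm, KVM_CHECK_EXTENSION, KVM_CAP_HYPERV_SYNIC);
    printf("KVM_CAP_HYPERV_SYNIC: %s\n", ret > 0 ? "supported" : "not supported");
    close(kvm);
    return 0;
}

Inside QEMU the equivalent gating is done in the kvm_arch_init_vcpu() hunk
below: if HV_X64_MSR_SCONTROL is not in the kernel's MSR list or
KVM_CAP_HYPERV_SYNIC cannot be enabled, vcpu creation fails with -ENOSYS.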
-rw-r--r--target-i386/cpu-qom.h1
-rw-r--r--target-i386/cpu.c1
-rw-r--r--target-i386/cpu.h5
-rw-r--r--target-i386/kvm.c66
-rw-r--r--target-i386/machine.c37
5 files changed, 109 insertions(+), 1 deletion(-)
diff --git a/target-i386/cpu-qom.h b/target-i386/cpu-qom.h
index e3bfe9d07e..7ea5b34166 100644
--- a/target-i386/cpu-qom.h
+++ b/target-i386/cpu-qom.h
@@ -94,6 +94,7 @@ typedef struct X86CPU {
bool hyperv_reset;
bool hyperv_vpindex;
bool hyperv_runtime;
+ bool hyperv_synic;
bool check_cpuid;
bool enforce_cpuid;
bool expose_kvm;
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 11e5e39a75..2fdd855158 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -3146,6 +3146,7 @@ static Property x86_cpu_properties[] = {
DEFINE_PROP_BOOL("hv-reset", X86CPU, hyperv_reset, false),
DEFINE_PROP_BOOL("hv-vpindex", X86CPU, hyperv_vpindex, false),
DEFINE_PROP_BOOL("hv-runtime", X86CPU, hyperv_runtime, false),
+ DEFINE_PROP_BOOL("hv-synic", X86CPU, hyperv_synic, false),
DEFINE_PROP_BOOL("check", X86CPU, check_cpuid, true),
DEFINE_PROP_BOOL("enforce", X86CPU, enforce_cpuid, false),
DEFINE_PROP_BOOL("kvm", X86CPU, expose_kvm, true),
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 84edfd0d8a..1168ddf26b 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -920,6 +920,11 @@ typedef struct CPUX86State {
uint64_t msr_hv_tsc;
uint64_t msr_hv_crash_params[HV_X64_MSR_CRASH_PARAMS];
uint64_t msr_hv_runtime;
+ uint64_t msr_hv_synic_control;
+ uint64_t msr_hv_synic_version;
+ uint64_t msr_hv_synic_evt_page;
+ uint64_t msr_hv_synic_msg_page;
+ uint64_t msr_hv_synic_sint[HV_SYNIC_SINT_COUNT];
/* exception/interrupt handling */
int error_code;
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 6dc9846398..ca0708ace5 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -86,6 +86,7 @@ static bool has_msr_hv_crash;
static bool has_msr_hv_reset;
static bool has_msr_hv_vpindex;
static bool has_msr_hv_runtime;
+static bool has_msr_hv_synic;
static bool has_msr_mtrr;
static bool has_msr_xss;
@@ -521,7 +522,8 @@ static bool hyperv_enabled(X86CPU *cpu)
cpu->hyperv_crash ||
cpu->hyperv_reset ||
cpu->hyperv_vpindex ||
- cpu->hyperv_runtime);
+ cpu->hyperv_runtime ||
+ cpu->hyperv_synic);
}
static Error *invtsc_mig_blocker;
@@ -610,6 +612,21 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (cpu->hyperv_runtime && has_msr_hv_runtime) {
c->eax |= HV_X64_MSR_VP_RUNTIME_AVAILABLE;
}
+ if (cpu->hyperv_synic) {
+ int sint;
+
+ if (!has_msr_hv_synic ||
+ kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
+ fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
+ return -ENOSYS;
+ }
+
+ c->eax |= HV_X64_MSR_SYNIC_AVAILABLE;
+ env->msr_hv_synic_version = HV_SYNIC_VERSION_1;
+ for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
+ env->msr_hv_synic_sint[sint] = HV_SYNIC_SINT_MASKED;
+ }
+ }
c = &cpuid_data.entries[cpuid_i++];
c->function = HYPERV_CPUID_ENLIGHTMENT_INFO;
if (cpu->hyperv_relaxed_timing) {
@@ -956,6 +973,10 @@ static int kvm_get_supported_msrs(KVMState *s)
has_msr_hv_runtime = true;
continue;
}
+ if (kvm_msr_list->indices[i] == HV_X64_MSR_SCONTROL) {
+ has_msr_hv_synic = true;
+ continue;
+ }
}
}
@@ -1517,6 +1538,23 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_VP_RUNTIME,
env->msr_hv_runtime);
}
+ if (cpu->hyperv_synic) {
+ int j;
+
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SCONTROL,
+ env->msr_hv_synic_control);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SVERSION,
+ env->msr_hv_synic_version);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIEFP,
+ env->msr_hv_synic_evt_page);
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SIMP,
+ env->msr_hv_synic_msg_page);
+
+ for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) {
+ kvm_msr_entry_set(&msrs[n++], HV_X64_MSR_SINT0 + j,
+ env->msr_hv_synic_sint[j]);
+ }
+ }
if (has_msr_mtrr) {
kvm_msr_entry_set(&msrs[n++], MSR_MTRRdefType, env->mtrr_deftype);
kvm_msr_entry_set(&msrs[n++],
@@ -1885,6 +1923,17 @@ static int kvm_get_msrs(X86CPU *cpu)
if (has_msr_hv_runtime) {
msrs[n++].index = HV_X64_MSR_VP_RUNTIME;
}
+ if (cpu->hyperv_synic) {
+ uint32_t msr;
+
+ msrs[n++].index = HV_X64_MSR_SCONTROL;
+ msrs[n++].index = HV_X64_MSR_SVERSION;
+ msrs[n++].index = HV_X64_MSR_SIEFP;
+ msrs[n++].index = HV_X64_MSR_SIMP;
+ for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
+ msrs[n++].index = msr;
+ }
+ }
if (has_msr_mtrr) {
msrs[n++].index = MSR_MTRRdefType;
msrs[n++].index = MSR_MTRRfix64K_00000;
@@ -2041,6 +2090,21 @@ static int kvm_get_msrs(X86CPU *cpu)
case HV_X64_MSR_VP_RUNTIME:
env->msr_hv_runtime = msrs[i].data;
break;
+ case HV_X64_MSR_SCONTROL:
+ env->msr_hv_synic_control = msrs[i].data;
+ break;
+ case HV_X64_MSR_SVERSION:
+ env->msr_hv_synic_version = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIEFP:
+ env->msr_hv_synic_evt_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SIMP:
+ env->msr_hv_synic_msg_page = msrs[i].data;
+ break;
+ case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15:
+ env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data;
+ break;
case MSR_MTRRdefType:
env->mtrr_deftype = msrs[i].data;
break;
diff --git a/target-i386/machine.c b/target-i386/machine.c
index a18e16e0de..d41e286ba4 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -710,6 +710,42 @@ static const VMStateDescription vmstate_msr_hyperv_runtime = {
}
};
+static bool hyperv_synic_enable_needed(void *opaque)
+{
+ X86CPU *cpu = opaque;
+ CPUX86State *env = &cpu->env;
+ int i;
+
+ if (env->msr_hv_synic_control != 0 ||
+ env->msr_hv_synic_evt_page != 0 ||
+ env->msr_hv_synic_msg_page != 0) {
+ return true;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+ if (env->msr_hv_synic_sint[i] != 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static const VMStateDescription vmstate_msr_hyperv_synic = {
+ .name = "cpu/msr_hyperv_synic",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = hyperv_synic_enable_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(env.msr_hv_synic_control, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_evt_page, X86CPU),
+ VMSTATE_UINT64(env.msr_hv_synic_msg_page, X86CPU),
+ VMSTATE_UINT64_ARRAY(env.msr_hv_synic_sint, X86CPU,
+ HV_SYNIC_SINT_COUNT),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool avx512_needed(void *opaque)
{
X86CPU *cpu = opaque;
@@ -893,6 +929,7 @@ VMStateDescription vmstate_x86_cpu = {
&vmstate_msr_hyperv_time,
&vmstate_msr_hyperv_crash,
&vmstate_msr_hyperv_runtime,
+ &vmstate_msr_hyperv_synic,
&vmstate_avx512,
&vmstate_xss,
NULL