about summary refs log tree commit diff
path: root/target/i386
diff options
context:
space:
mode:
Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/cpu.c      |  1
-rw-r--r--  target/i386/cpu.h      |  5
-rw-r--r--  target/i386/kvm.c      | 42
-rw-r--r--  target/i386/machine.c  | 31
4 files changed, 70 insertions(+), 9 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index e46ab8f774..008fd93ff1 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5968,6 +5968,7 @@ static void x86_cpu_reset(DeviceState *dev)
/* init to reset state */
env->hflags2 |= HF2_GIF_MASK;
+ env->hflags &= ~HF_GUEST_MASK;
cpu_x86_update_cr0(env, 0x60000010);
env->a20_mask = ~0x0;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 06b2e3a5c6..9284f96896 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2118,6 +2118,11 @@ static inline bool cpu_has_vmx(CPUX86State *env)
return env->features[FEAT_1_ECX] & CPUID_EXT_VMX;
}
+static inline bool cpu_has_svm(CPUX86State *env)
+{
+ return env->features[FEAT_8000_0001_ECX] & CPUID_EXT3_SVM;
+}
+
/*
* In order for a vCPU to enter VMX operation it must have CR4.VMXE set.
* Since it was set, CR4.VMXE must remain set as long as vCPU is in
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 6adbff3d74..2b6b7443d2 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1840,16 +1840,18 @@ int kvm_arch_init_vcpu(CPUState *cs)
if (max_nested_state_len > 0) {
assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
- if (cpu_has_vmx(env)) {
+ if (cpu_has_vmx(env) || cpu_has_svm(env)) {
struct kvm_vmx_nested_state_hdr *vmx_hdr;
env->nested_state = g_malloc0(max_nested_state_len);
env->nested_state->size = max_nested_state_len;
env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
- vmx_hdr = &env->nested_state->hdr.vmx;
- vmx_hdr->vmxon_pa = -1ull;
- vmx_hdr->vmcs12_pa = -1ull;
+ if (cpu_has_vmx(env)) {
+ vmx_hdr = &env->nested_state->hdr.vmx;
+ vmx_hdr->vmxon_pa = -1ull;
+ vmx_hdr->vmcs12_pa = -1ull;
+ }
}
}
@@ -3873,6 +3875,20 @@ static int kvm_put_nested_state(X86CPU *cpu)
return 0;
}
+ /*
+ * Copy flags that are affected by reset from env->hflags and env->hflags2.
+ */
+ if (env->hflags & HF_GUEST_MASK) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GUEST_MODE;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GUEST_MODE;
+ }
+ if (env->hflags2 & HF2_GIF_MASK) {
+ env->nested_state->flags |= KVM_STATE_NESTED_GIF_SET;
+ } else {
+ env->nested_state->flags &= ~KVM_STATE_NESTED_GIF_SET;
+ }
+
assert(env->nested_state->size <= max_nested_state_len);
return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state);
}
@@ -3901,11 +3917,19 @@ static int kvm_get_nested_state(X86CPU *cpu)
return ret;
}
+ /*
+ * Copy flags that are affected by reset to env->hflags and env->hflags2.
+ */
if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) {
env->hflags |= HF_GUEST_MASK;
} else {
env->hflags &= ~HF_GUEST_MASK;
}
+ if (env->nested_state->flags & KVM_STATE_NESTED_GIF_SET) {
+ env->hflags2 |= HF2_GIF_MASK;
+ } else {
+ env->hflags2 &= ~HF2_GIF_MASK;
+ }
return ret;
}
@@ -3917,6 +3941,12 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
+ /* must be before kvm_put_nested_state so that EFER.SVME is set */
+ ret = kvm_put_sregs(x86_cpu);
+ if (ret < 0) {
+ return ret;
+ }
+
if (level >= KVM_PUT_RESET_STATE) {
ret = kvm_put_nested_state(x86_cpu);
if (ret < 0) {
@@ -3950,10 +3980,6 @@ int kvm_arch_put_registers(CPUState *cpu, int level)
if (ret < 0) {
return ret;
}
- ret = kvm_put_sregs(x86_cpu);
- if (ret < 0) {
- return ret;
- }
/* must be before kvm_put_msrs */
ret = kvm_inject_mce_oldstyle(x86_cpu);
if (ret < 0) {
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 0c96531a56..b1acf7d0ef 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1071,13 +1071,41 @@ static const VMStateDescription vmstate_vmx_nested_state = {
}
};
+static bool svm_nested_state_needed(void *opaque)
+{
+ struct kvm_nested_state *nested_state = opaque;
+
+ /*
+ * HF_GUEST_MASK and HF2_GIF_MASK are already serialized
+ * via hflags and hflags2, all that's left is the opaque
+ * nested state blob.
+ */
+ return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM &&
+ nested_state->size > offsetof(struct kvm_nested_state, data));
+}
+
+static const VMStateDescription vmstate_svm_nested_state = {
+ .name = "cpu/kvm_nested_state/svm",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = svm_nested_state_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_U64(hdr.svm.vmcb_pa, struct kvm_nested_state),
+ VMSTATE_UINT8_ARRAY(data.svm[0].vmcb12,
+ struct kvm_nested_state,
+ KVM_STATE_NESTED_SVM_VMCB_SIZE),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static bool nested_state_needed(void *opaque)
{
X86CPU *cpu = opaque;
CPUX86State *env = &cpu->env;
return (env->nested_state &&
- vmx_nested_state_needed(env->nested_state));
+ (vmx_nested_state_needed(env->nested_state) ||
+ svm_nested_state_needed(env->nested_state)));
}
static int nested_state_post_load(void *opaque, int version_id)
@@ -1139,6 +1167,7 @@ static const VMStateDescription vmstate_kvm_nested_state = {
},
.subsections = (const VMStateDescription*[]) {
&vmstate_vmx_nested_state,
+ &vmstate_svm_nested_state,
NULL
}
};