author    Paolo Bonzini <pbonzini@redhat.com>  2019-07-11 15:41:48 +0200
committer Paolo Bonzini <pbonzini@redhat.com>  2019-07-19 18:02:22 +0200
commit    1e44f3ab71fb4291d266a264f7c207ae5c6d59b2 (patch)
tree      2ad8b18748abac7f148f05b6faa01d46d322c692 /target
parent    79a197ab180e75838523c58973b1221ad7bf51eb (diff)
target/i386: skip KVM_GET/SET_NESTED_STATE if VMX disabled, or for SVM
Do not allocate env->nested_state unless we later need to migrate the nested
virtualization state. With this change, nested_state_needed() will return
false if the VMX flag is not included in the virtual machine.

KVM_GET/SET_NESTED_STATE is also disabled for SVM, which is safer (we know
that at least the NPT root and paging mode have to be saved/loaded), and thus
the corresponding subsection can go away as well.

Inspired by a patch from Liran Alon.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
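For readers skimming the change, the standalone C sketch below (not QEMU code) illustrates the pattern the patch switches to: the nested-state buffer is allocated only when the vCPU model actually exposes VMX, and the migration "needed" predicate simply checks whether that allocation ever happened, so VMX-less and SVM guests skip the subsection entirely. The names fake_nested_state, fake_env, init_nested_state and nested_state_needed_sketch are hypothetical stand-ins for struct kvm_nested_state, CPUX86State, kvm_arch_init_vcpu() and nested_state_needed(), kept minimal so the example compiles on its own.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct kvm_nested_state; only the size field is modelled. */
struct fake_nested_state {
    size_t size;
};

/* Stand-in for CPUX86State: a VMX capability flag plus the lazily allocated buffer. */
struct fake_env {
    bool has_vmx;
    struct fake_nested_state *nested_state;
};

/*
 * Mirrors the reworked kvm_arch_init_vcpu() logic: allocate the buffer only
 * when the kernel reports a usable length *and* the guest CPU has VMX.
 */
static void init_nested_state(struct fake_env *env, size_t max_len)
{
    if (max_len > 0 && env->has_vmx) {
        env->nested_state = calloc(1, max_len);
        if (env->nested_state) {
            env->nested_state->size = max_len;
        }
    }
}

/* Mirrors the simplified nested_state_needed(): no buffer, no subsection. */
static bool nested_state_needed_sketch(const struct fake_env *env)
{
    return env->nested_state != NULL;
}

int main(void)
{
    struct fake_env svm_guest = { .has_vmx = false };
    struct fake_env vmx_guest = { .has_vmx = true };

    init_nested_state(&svm_guest, 4096);
    init_nested_state(&vmx_guest, 4096);

    printf("SVM guest migrates nested state: %s\n",
           nested_state_needed_sketch(&svm_guest) ? "yes" : "no"); /* no  */
    printf("VMX guest migrates nested state: %s\n",
           nested_state_needed_sketch(&vmx_guest) ? "yes" : "no"); /* yes */

    free(svm_guest.nested_state);
    free(vmx_guest.nested_state);
    return 0;
}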
Diffstat (limited to 'target')
 -rw-r--r--  target/i386/kvm.c      16
 -rw-r--r--  target/i386/machine.c  21
 2 files changed, 9 insertions, 28 deletions
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 4542f0fad0..ada89d27cc 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -1711,15 +1711,15 @@ int kvm_arch_init_vcpu(CPUState *cs)
     max_nested_state_len = kvm_max_nested_state_length();
     if (max_nested_state_len > 0) {
         assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data));
-        env->nested_state = g_malloc0(max_nested_state_len);
-        env->nested_state->size = max_nested_state_len;
-
-        if (IS_INTEL_CPU(env)) {
-            struct kvm_vmx_nested_state_hdr *vmx_hdr =
-                &env->nested_state->hdr.vmx;
+        if (cpu_has_vmx(env)) {
+            struct kvm_vmx_nested_state_hdr *vmx_hdr;
 
+            env->nested_state = g_malloc0(max_nested_state_len);
+            env->nested_state->size = max_nested_state_len;
             env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX;
+
+            vmx_hdr = &env->nested_state->hdr.vmx;
             vmx_hdr->vmxon_pa = -1ull;
             vmx_hdr->vmcs12_pa = -1ull;
         }
@@ -3515,7 +3515,7 @@ static int kvm_put_nested_state(X86CPU *cpu)
     CPUX86State *env = &cpu->env;
     int max_nested_state_len = kvm_max_nested_state_length();
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }
@@ -3529,7 +3529,7 @@ static int kvm_get_nested_state(X86CPU *cpu)
     int max_nested_state_len = kvm_max_nested_state_length();
     int ret;
 
-    if (max_nested_state_len <= 0) {
+    if (!env->nested_state) {
         return 0;
     }
diff --git a/target/i386/machine.c b/target/i386/machine.c
index ac2d1d1d36..b1146093b5 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -1035,31 +1035,13 @@ static const VMStateDescription vmstate_vmx_nested_state = {
     }
 };
 
-static bool svm_nested_state_needed(void *opaque)
-{
-    struct kvm_nested_state *nested_state = opaque;
-
-    return (nested_state->format == KVM_STATE_NESTED_FORMAT_SVM);
-}
-
-static const VMStateDescription vmstate_svm_nested_state = {
-    .name = "cpu/kvm_nested_state/svm",
-    .version_id = 1,
-    .minimum_version_id = 1,
-    .needed = svm_nested_state_needed,
-    .fields = (VMStateField[]) {
-        VMSTATE_END_OF_LIST()
-    }
-};
-
 static bool nested_state_needed(void *opaque)
 {
     X86CPU *cpu = opaque;
     CPUX86State *env = &cpu->env;
 
     return (env->nested_state &&
-            (vmx_nested_state_needed(env->nested_state) ||
-             svm_nested_state_needed(env->nested_state)));
+            vmx_nested_state_needed(env->nested_state));
 }
 
 static int nested_state_post_load(void *opaque, int version_id)
@@ -1121,7 +1103,6 @@ static const VMStateDescription vmstate_kvm_nested_state = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_vmx_nested_state,
-        &vmstate_svm_nested_state,
         NULL
     }
 };