author     David Edmondson <david.edmondson@oracle.com>   2021-07-05 11:46:31 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>            2021-07-06 08:33:48 +0200
commit     fea4500841024195ec701713e05b92ebf667f192
tree       7901cb3679ee3b1177ff80493f874bade24d63ea /target/i386
parent     3568987f78faff90829ea6c885bbdd5b083dc86c
target/i386: Populate x86_ext_save_areas offsets using cpuid where possible
Rather than relying on the X86XSaveArea structure definition,
determine the offset of XSAVE state areas using CPUID leaf 0xd where
possible (KVM and HVF).
Signed-off-by: David Edmondson <david.edmondson@oracle.com>
Message-Id: <20210705104632.2902400-8-david.edmondson@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
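
For reference, the layout data that the KVM and HVF paths now consume comes directly from CPUID leaf 0xd: sub-leaf i reports the size of XSAVE state component i in EAX and its offset from the start of the XSAVE area in EBX. The short host-side sketch below is not part of the patch; the file layout, loop bounds and output format are illustrative only, and it uses the <cpuid.h> helper shipped with GCC and Clang:

/*
 * Standalone illustration (not part of the patch): print the per-component
 * size (EAX) and offset (EBX) that CPUID leaf 0xd reports for the extended
 * XSAVE state components, i.e. the values that kvm_cpu_xsave_init() and
 * hvf_cpu_xsave_init() feed into x86_ext_save_areas[].
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    unsigned int i;

    /*
     * Sub-leaf 0 reports overall XSAVE area sizes and sub-leaf 1 reports
     * XSAVEC/XSAVES capabilities; the per-component descriptions start at
     * sub-leaf 2 (AVX).
     */
    for (i = 2; i <= 9; i++) {
        if (!__get_cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx) || eax == 0) {
            continue; /* component not supported on this host */
        }
        printf("XSAVE component %2u: %4u bytes at offset %4u\n", i, eax, ebx);
    }
    return 0;
}

On hardware that lacks a given feature (MPX, for example) the corresponding sub-leaf reports a size of zero, which is why the new accelerator helpers only overwrite esa->offset when the reported size is non-zero.
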
Diffstat (limited to 'target/i386')
 -rw-r--r--  target/i386/cpu.c          13
 -rw-r--r--  target/i386/cpu.h           2
 -rw-r--r--  target/i386/hvf/hvf-cpu.c  29
 -rw-r--r--  target/i386/hvf/hvf.c       6
 -rw-r--r--  target/i386/kvm/kvm-cpu.c  30
 -rw-r--r--  target/i386/kvm/kvm.c       7
 -rw-r--r--  target/i386/tcg/tcg-cpu.c  20
7 files changed, 94 insertions, 13 deletions
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 13caa0de50..5f595a0d7e 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1304,48 +1304,37 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
 };
 #undef REGISTER
 
-const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
+ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = {
     [XSTATE_FP_BIT] = {
         /* x87 FP state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* x87 state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_SSE_BIT] = {
         /* SSE state component is always enabled if XSAVE is supported */
         .feature = FEAT_1_ECX, .bits = CPUID_EXT_XSAVE,
-        /* SSE state is in the legacy region of the XSAVE area */
-        .offset = 0,
         .size = sizeof(X86LegacyXSaveArea) + sizeof(X86XSaveHeader),
     },
     [XSTATE_YMM_BIT] =
           { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
-            .offset = offsetof(X86XSaveArea, avx_state),
             .size = sizeof(XSaveAVX) },
     [XSTATE_BNDREGS_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndreg_state),
             .size = sizeof(XSaveBNDREG) },
     [XSTATE_BNDCSR_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
-            .offset = offsetof(X86XSaveArea, bndcsr_state),
             .size = sizeof(XSaveBNDCSR) },
     [XSTATE_OPMASK_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, opmask_state),
             .size = sizeof(XSaveOpmask) },
     [XSTATE_ZMM_Hi256_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, zmm_hi256_state),
             .size = sizeof(XSaveZMM_Hi256) },
     [XSTATE_Hi16_ZMM_BIT] =
           { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_AVX512F,
-            .offset = offsetof(X86XSaveArea, hi16_zmm_state),
             .size = sizeof(XSaveHi16_ZMM) },
     [XSTATE_PKRU_BIT] =
           { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU,
-            .offset = offsetof(X86XSaveArea, pkru_state),
             .size = sizeof(XSavePKRU) },
 };
 
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index c9c0a34330..96b672f8bd 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1377,7 +1377,7 @@ typedef struct ExtSaveArea {
 
 #define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1)
 
-extern const ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
+extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT];
 
 typedef enum TPRAccess {
     TPR_ACCESS_READ,
diff --git a/target/i386/hvf/hvf-cpu.c b/target/i386/hvf/hvf-cpu.c
index 8fbc423888..333db59898 100644
--- a/target/i386/hvf/hvf-cpu.c
+++ b/target/i386/hvf/hvf-cpu.c
@@ -30,6 +30,33 @@ static void hvf_cpu_max_instance_init(X86CPU *cpu)
         hvf_get_supported_cpuid(0xC0000000, 0, R_EAX);
 }
 
+static void hvf_cpu_xsave_init(void)
+{
+    static bool first = true;
+    int i;
+
+    if (!first) {
+        return;
+    }
+    first = false;
+
+    /* x87 and SSE states are in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = hvf_get_supported_cpuid(0xd, i, R_EAX);
+            if (sz != 0) {
+                assert(esa->size == sz);
+                esa->offset = hvf_get_supported_cpuid(0xd, i, R_EBX);
+            }
+        }
+    }
+}
+
 static void hvf_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -42,6 +69,8 @@ static void hvf_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         hvf_cpu_max_instance_init(cpu);
     }
+
+    hvf_cpu_xsave_init();
 }
 
 static void hvf_cpu_accel_class_init(ObjectClass *oc, void *data)
diff --git a/target/i386/hvf/hvf.c b/target/i386/hvf/hvf.c
index e62e8df028..79ba4ed93a 100644
--- a/target/i386/hvf/hvf.c
+++ b/target/i386/hvf/hvf.c
@@ -270,6 +270,12 @@ int hvf_arch_init_vcpu(CPUState *cpu)
     x86cpu->env.xsave_buf_len = 4096;
     x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);
 
+    /*
+     * The allocated storage must be large enough for all of the
+     * possible XSAVE state components.
+     */
+    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);
+
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_STAR, 1);
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_LSTAR, 1);
     hv_vcpu_enable_native_msr(cpu->hvf->fd, MSR_CSTAR, 1);
diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c
index 00369c2000..bbe817764d 100644
--- a/target/i386/kvm/kvm-cpu.c
+++ b/target/i386/kvm/kvm-cpu.c
@@ -122,6 +122,34 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu)
         kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
 }
 
+static void kvm_cpu_xsave_init(void)
+{
+    static bool first = true;
+    KVMState *s = kvm_state;
+    int i;
+
+    if (!first) {
+        return;
+    }
+    first = false;
+
+    /* x87 and SSE states are in the legacy region of the XSAVE area. */
+    x86_ext_save_areas[XSTATE_FP_BIT].offset = 0;
+    x86_ext_save_areas[XSTATE_SSE_BIT].offset = 0;
+
+    for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) {
+        ExtSaveArea *esa = &x86_ext_save_areas[i];
+
+        if (esa->size) {
+            int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX);
+            if (sz != 0) {
+                assert(esa->size == sz);
+                esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX);
+            }
+        }
+    }
+}
+
 static void kvm_cpu_instance_init(CPUState *cs)
 {
     X86CPU *cpu = X86_CPU(cs);
@@ -141,6 +169,8 @@ static void kvm_cpu_instance_init(CPUState *cs)
     if (cpu->max_features) {
         kvm_cpu_max_instance_init(cpu);
     }
+
+    kvm_cpu_xsave_init();
 }
 
 static void kvm_cpu_accel_class_init(ObjectClass *oc, void *data)
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index 41b0764ab7..a85035492f 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -1891,6 +1891,13 @@ int kvm_arch_init_vcpu(CPUState *cs)
         env->xsave_buf_len = sizeof(struct kvm_xsave);
         env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len);
         memset(env->xsave_buf, 0, env->xsave_buf_len);
+
+        /*
+         * The allocated storage must be large enough for all of the
+         * possible XSAVE state components.
+         */
+        assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX)
+               <= env->xsave_buf_len);
     }
 
     max_nested_state_len = kvm_max_nested_state_length();
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 014ebea2f6..e96ec9bbcc 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -80,6 +80,24 @@ static void tcg_cpu_class_init(CPUClass *cc)
     cc->init_accel_cpu = tcg_cpu_init_ops;
 }
 
+static void tcg_cpu_xsave_init(void)
+{
+#define XO(bit, field) \
+    x86_ext_save_areas[bit].offset = offsetof(X86XSaveArea, field);
+
+    XO(XSTATE_FP_BIT, legacy);
+    XO(XSTATE_SSE_BIT, legacy);
+    XO(XSTATE_YMM_BIT, avx_state);
+    XO(XSTATE_BNDREGS_BIT, bndreg_state);
+    XO(XSTATE_BNDCSR_BIT, bndcsr_state);
+    XO(XSTATE_OPMASK_BIT, opmask_state);
+    XO(XSTATE_ZMM_Hi256_BIT, zmm_hi256_state);
+    XO(XSTATE_Hi16_ZMM_BIT, hi16_zmm_state);
+    XO(XSTATE_PKRU_BIT, pkru_state);
+
+#undef XO
+}
+
 /*
  * TCG-specific defaults that override all CPU models when using TCG
  */
@@ -93,6 +111,8 @@ static void tcg_cpu_instance_init(CPUState *cs)
     X86CPU *cpu = X86_CPU(cs);
     /* Special cases not set in the X86CPUDefinition structs: */
     x86_cpu_apply_props(cpu, tcg_default_props);
+
+    tcg_cpu_xsave_init();
 }
 
 static void tcg_cpu_accel_class_init(ObjectClass *oc, void *data)
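
The two new asserts in hvf_arch_init_vcpu() and kvm_arch_init_vcpu() reduce to the same host-side check: CPUID leaf 0xd, sub-leaf 0, returns in ECX the maximum XSAVE area size needed if every state component the host supports were enabled, and that value must fit in the fixed-size buffer QEMU allocates. A minimal standalone sketch of the check follows; the 4096-byte length is hard-coded purely for illustration and is not QEMU code:

/*
 * Standalone sketch (not part of the patch): CPUID.(EAX=0xd, ECX=0):ECX is
 * the maximum XSAVE area size required for all supported state components,
 * which must not exceed the save buffer length QEMU reserves
 * (4096 bytes for HVF, sizeof(struct kvm_xsave) for KVM).
 */
#include <assert.h>
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    const unsigned int xsave_buf_len = 4096; /* illustrative stand-in for env->xsave_buf_len */

    if (!__get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx)) {
        printf("CPUID leaf 0xd not available on this host\n");
        return 1;
    }
    printf("max XSAVE area size for all supported components: %u bytes\n", ecx);
    assert(ecx <= xsave_buf_len);
    return 0;
}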