author    | Peter Maydell <peter.maydell@linaro.org> | 2018-03-13 14:02:47 +0000
committer | Peter Maydell <peter.maydell@linaro.org> | 2018-03-13 14:02:47 +0000
commit    | 59667bb167f773965ce6547352f312eff0d4d523 (patch)
tree      | 268e771807b2559d1c45772ed6e33f16a2f3efc5
parent    | 22ef7ba8e8ce7fef297549b3defcac333742b804 (diff)
parent    | b77146e9a129bcdb60edc23639211679ae846a92 (diff)
Merge remote-tracking branch 'remotes/ehabkost/tags/x86-next-pull-request' into staging
x86 queue, 2018-03-12
* Intel Processor Trace support
* KVM_HINTS_DEDICATED
# gpg: Signature made Mon 12 Mar 2018 19:58:39 GMT
# gpg: using RSA key 2807936F984DC5A6
# gpg: Good signature from "Eduardo Habkost <ehabkost@redhat.com>"
# Primary key fingerprint: 5A32 2FD5 ABC4 D3DB ACCF D1AA 2807 936F 984D C5A6
* remotes/ehabkost/tags/x86-next-pull-request:
i386: Add support to get/set/migrate Intel Processor Trace feature
i386: Add Intel Processor Trace feature support
target-i386: add KVM_HINTS_DEDICATED performance hint
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
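
The KVM_HINTS_DEDICATED bit is exposed to the guest through the KVM feature-hints leaf (CPUID[4000_0001].EDX, bit 0, per the cpu.h and kvm.c hunks below); judging by the feature name kvm-hint-dedicated, it tells the guest that its vCPUs run on dedicated host CPUs. As a rough guest-side illustration only (not part of this series; the hypervisor-signature check and the helper macros are assumptions based on the standard KVM CPUID interface), a program inside the VM could read the hint like this:

```c
/*
 * Hypothetical guest-side check, not part of this commit: read the KVM
 * feature-hints leaf and test the bit that the series defines as
 * KVM_HINTS_DEDICATED (CPUID[4000_0001].EDX, bit 0).
 */
#include <cpuid.h>
#include <stdio.h>
#include <string.h>

#define KVM_CPUID_SIGNATURE 0x40000000u
#define KVM_CPUID_FEATURES  0x40000001u
#define KVM_HINTS_DEDICATED (1u << 0)   /* matches the new cpu.h define */

int main(void)
{
    unsigned int eax, ebx, ecx, edx;
    char sig[13] = { 0 };

    /* CPUID 0x40000000 returns the hypervisor signature in EBX/ECX/EDX. */
    __cpuid(KVM_CPUID_SIGNATURE, eax, ebx, ecx, edx);
    memcpy(sig + 0, &ebx, 4);
    memcpy(sig + 4, &ecx, 4);
    memcpy(sig + 8, &edx, 4);
    if (strcmp(sig, "KVMKVMKVM") != 0) {
        printf("not running on KVM\n");
        return 0;
    }

    /* The hints live in EDX of the KVM features leaf. */
    __cpuid(KVM_CPUID_FEATURES, eax, ebx, ecx, edx);
    printf("kvm-hint-dedicated: %s\n",
           (edx & KVM_HINTS_DEDICATED) ? "set" : "clear");
    return 0;
}
```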
-rw-r--r-- | target/i386/cpu.c     | 92
-rw-r--r-- | target/i386/cpu.h     | 26
-rw-r--r-- | target/i386/kvm.c     | 78
-rw-r--r-- | target/i386/machine.c | 38
4 files changed, 232 insertions(+), 2 deletions(-)
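
The cpu.c hunks below make cpu_x86_cpuid() report a fixed, minimal Intel PT capability set in CPUID leaf 0x14 (INTEL_PT_MINIMAL_EBX/ECX for sub-leaf 0; the MTC, cycle-threshold and PSB bitmaps plus two address ranges for sub-leaf 1), and x86_cpu_filter_features() refuses intel-pt unless the host KVM covers at least those bits. Since leaf 0x14 is only filled in when kvm_enabled(), the flag matters only for a guest started under KVM with something like -cpu <model>,+intel-pt. A hypothetical guest-side decode of that leaf (not part of this series; the constants are copied from the cpu.c hunk) might look like:

```c
/*
 * Hypothetical guest-side decode of CPUID leaf 0x14, not part of this
 * commit: it checks for the same minimal capability bits that
 * cpu_x86_cpuid() advertises via INTEL_PT_MINIMAL_EBX/ECX.
 */
#include <cpuid.h>
#include <stdio.h>

#define INTEL_PT_MINIMAL_EBX 0xf   /* CR3 filter, PSB/CYC, IP filter, MTC */
#define INTEL_PT_MINIMAL_ECX 0x7   /* ToPA, multi-entry ToPA, single range */

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Leaf 7 sub-leaf 0, EBX bit 25 is the intel-pt feature flag. */
    if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx) ||
        !(ebx & (1u << 25))) {
        printf("intel-pt not enumerated\n");
        return 0;
    }

    /* Leaf 0x14 sub-leaf 0: maximum sub-leaf and capability bits. */
    if (!__get_cpuid_count(0x14, 0, &eax, &ebx, &ecx, &edx)) {
        printf("CPUID leaf 0x14 not available\n");
        return 0;
    }
    printf("max subleaf %u, ebx 0x%x, ecx 0x%x\n", eax, ebx, ecx);
    printf("minimal caps %s\n",
           ((ebx & INTEL_PT_MINIMAL_EBX) == INTEL_PT_MINIMAL_EBX &&
            (ecx & INTEL_PT_MINIMAL_ECX) == INTEL_PT_MINIMAL_ECX)
               ? "present" : "missing");

    /* Sub-leaf 1: address ranges in EAX[2:0], MTC bitmap in EAX[31:16],
     * cycle-threshold and PSB frequency bitmaps in EBX. */
    __get_cpuid_count(0x14, 1, &eax, &ebx, &ecx, &edx);
    printf("address ranges %u, mtc bitmap 0x%x, cycle/psb bitmaps 0x%x\n",
           eax & 0x7, eax >> 16, ebx);
    return 0;
}
```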
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 2c04645cea..ec1efd3a3c 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -173,7 +173,32 @@
 #define L2_ITLB_4K_ASSOC       4
 #define L2_ITLB_4K_ENTRIES   512
 
-
+/* CPUID Leaf 0x14 constants: */
+#define INTEL_PT_MAX_SUBLEAF     0x1
+/*
+ * bit[00]: IA32_RTIT_CTL.CR3 filter can be set to 1 and IA32_RTIT_CR3_MATCH
+ *          MSR can be accessed;
+ * bit[01]: Support Configurable PSB and Cycle-Accurate Mode;
+ * bit[02]: Support IP Filtering, TraceStop filtering, and preservation
+ *          of Intel PT MSRs across warm reset;
+ * bit[03]: Support MTC timing packet and suppression of COFI-based packets;
+ */
+#define INTEL_PT_MINIMAL_EBX     0xf
+/*
+ * bit[00]: Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1 and
+ *          IA32_RTIT_OUTPUT_BASE and IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be
+ *          accessed;
+ * bit[01]: ToPA tables can hold any number of output entries, up to the
+ *          maximum allowed by the MaskOrTableOffset field of
+ *          IA32_RTIT_OUTPUT_MASK_PTRS;
+ * bit[02]: Support Single-Range Output scheme;
+ */
+#define INTEL_PT_MINIMAL_ECX     0x7
+#define INTEL_PT_ADDR_RANGES_NUM 0x2 /* Number of configurable address ranges */
+#define INTEL_PT_ADDR_RANGES_NUM_MASK 0x3
+#define INTEL_PT_MTC_BITMAP      (0x0249 << 16) /* Support ART(0,3,6,9) */
+#define INTEL_PT_CYCLE_BITMAP    0x1fff         /* Support 0,2^(0~11) */
+#define INTEL_PT_PSB_BITMAP      (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */
 
 static void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1,
                                      uint32_t vendor2, uint32_t vendor3)
@@ -359,6 +384,20 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
         .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EAX,
         .tcg_features = TCG_KVM_FEATURES,
     },
+    [FEAT_KVM_HINTS] = {
+        .feat_names = {
+            "kvm-hint-dedicated", NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+            NULL, NULL, NULL, NULL,
+        },
+        .cpuid_eax = KVM_CPUID_FEATURES, .cpuid_reg = R_EDX,
+        .tcg_features = TCG_KVM_FEATURES,
+    },
     [FEAT_HYPERV_EAX] = {
         .feat_names = {
             NULL /* hv_msr_vp_runtime_access */, NULL /* hv_msr_time_refcount_access */,
@@ -428,7 +467,7 @@
             NULL, NULL, "mpx", NULL,
             "avx512f", "avx512dq", "rdseed", "adx",
             "smap", "avx512ifma", "pcommit", "clflushopt",
-            "clwb", NULL, "avx512pf", "avx512er",
+            "clwb", "intel-pt", "avx512pf", "avx512er",
             "avx512cd", "sha-ni", "avx512bw", "avx512vl",
         },
         .cpuid_eax = 7,
@@ -3453,6 +3492,27 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         }
         break;
     }
+    case 0x14: {
+        /* Intel Processor Trace Enumeration */
+        *eax = 0;
+        *ebx = 0;
+        *ecx = 0;
+        *edx = 0;
+        if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) ||
+            !kvm_enabled()) {
+            break;
+        }
+
+        if (count == 0) {
+            *eax = INTEL_PT_MAX_SUBLEAF;
+            *ebx = INTEL_PT_MINIMAL_EBX;
+            *ecx = INTEL_PT_MINIMAL_ECX;
+        } else if (count == 1) {
+            *eax = INTEL_PT_MTC_BITMAP | INTEL_PT_ADDR_RANGES_NUM;
+            *ebx = INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP;
+        }
+        break;
+    }
     case 0x40000000:
         /*
          * CPUID code in kvm_arch_init_vcpu() ignores stuff
@@ -4083,6 +4143,34 @@ static int x86_cpu_filter_features(X86CPU *cpu)
         }
     }
 
+    if ((env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) &&
+        kvm_enabled()) {
+        KVMState *s = CPU(cpu)->kvm_state;
+        uint32_t eax_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EAX);
+        uint32_t ebx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_EBX);
+        uint32_t ecx_0 = kvm_arch_get_supported_cpuid(s, 0x14, 0, R_ECX);
+        uint32_t eax_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EAX);
+        uint32_t ebx_1 = kvm_arch_get_supported_cpuid(s, 0x14, 1, R_EBX);
+
+        if (!eax_0 ||
+           ((ebx_0 & INTEL_PT_MINIMAL_EBX) != INTEL_PT_MINIMAL_EBX) ||
+           ((ecx_0 & INTEL_PT_MINIMAL_ECX) != INTEL_PT_MINIMAL_ECX) ||
+           ((eax_1 & INTEL_PT_MTC_BITMAP) != INTEL_PT_MTC_BITMAP) ||
+           ((eax_1 & INTEL_PT_ADDR_RANGES_NUM_MASK) <
+                                           INTEL_PT_ADDR_RANGES_NUM) ||
+           ((ebx_1 & (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP)) !=
+                (INTEL_PT_PSB_BITMAP | INTEL_PT_CYCLE_BITMAP))) {
+            /*
+             * Processor Trace capabilities aren't configurable, so if the
+             * host can't emulate the capabilities we report on
+             * cpu_x86_cpuid(), intel-pt can't be enabled on the current host.
+             */
+            env->features[FEAT_7_0_EBX] &= ~CPUID_7_0_EBX_INTEL_PT;
+            cpu->filtered_features[FEAT_7_0_EBX] |= CPUID_7_0_EBX_INTEL_PT;
+            rv = 1;
+        }
+    }
+
     return rv;
 }
 
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index faf39ec1ce..0c3f51445e 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -415,6 +415,21 @@ typedef enum X86Seg {
 #define MSR_MC0_ADDR                    0x402
 #define MSR_MC0_MISC                    0x403
 
+#define MSR_IA32_RTIT_OUTPUT_BASE       0x560
+#define MSR_IA32_RTIT_OUTPUT_MASK       0x561
+#define MSR_IA32_RTIT_CTL               0x570
+#define MSR_IA32_RTIT_STATUS            0x571
+#define MSR_IA32_RTIT_CR3_MATCH         0x572
+#define MSR_IA32_RTIT_ADDR0_A           0x580
+#define MSR_IA32_RTIT_ADDR0_B           0x581
+#define MSR_IA32_RTIT_ADDR1_A           0x582
+#define MSR_IA32_RTIT_ADDR1_B           0x583
+#define MSR_IA32_RTIT_ADDR2_A           0x584
+#define MSR_IA32_RTIT_ADDR2_B           0x585
+#define MSR_IA32_RTIT_ADDR3_A           0x586
+#define MSR_IA32_RTIT_ADDR3_B           0x587
+#define MAX_RTIT_ADDRS                  8
+
 #define MSR_EFER                        0xc0000080
 
 #define MSR_EFER_SCE   (1 << 0)
@@ -471,6 +486,7 @@ typedef enum FeatureWord {
     FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */
     FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */
     FEAT_KVM,           /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */
+    FEAT_KVM_HINTS,     /* CPUID[4000_0001].EDX */
     FEAT_HYPERV_EAX,    /* CPUID[4000_0003].EAX */
     FEAT_HYPERV_EBX,    /* CPUID[4000_0003].EBX */
     FEAT_HYPERV_EDX,    /* CPUID[4000_0003].EDX */
@@ -640,6 +656,7 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_EBX_PCOMMIT    (1U << 22) /* Persistent Commit */
 #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */
 #define CPUID_7_0_EBX_CLWB       (1U << 24) /* Cache Line Write Back */
+#define CPUID_7_0_EBX_INTEL_PT   (1U << 25) /* Intel Processor Trace */
 #define CPUID_7_0_EBX_AVX512PF   (1U << 26) /* AVX-512 Prefetch */
 #define CPUID_7_0_EBX_AVX512ER   (1U << 27) /* AVX-512 Exponential and Reciprocal */
 #define CPUID_7_0_EBX_AVX512CD   (1U << 28) /* AVX-512 Conflict Detection */
@@ -666,6 +683,8 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */
 #define CPUID_7_0_EDX_SPEC_CTRL     (1U << 26) /* Speculation Control */
 
+#define KVM_HINTS_DEDICATED (1U << 0)
+
 #define CPUID_8000_0008_EBX_IBPB    (1U << 12) /* Indirect Branch Prediction Barrier */
 
 #define CPUID_XSAVE_XSAVEOPT   (1U << 0)
@@ -1153,6 +1172,13 @@ typedef struct CPUX86State {
     uint64_t msr_hv_stimer_config[HV_STIMER_COUNT];
     uint64_t msr_hv_stimer_count[HV_STIMER_COUNT];
 
+    uint64_t msr_rtit_ctrl;
+    uint64_t msr_rtit_status;
+    uint64_t msr_rtit_output_base;
+    uint64_t msr_rtit_output_mask;
+    uint64_t msr_rtit_cr3_match;
+    uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS];
+
     /* exception/interrupt handling */
     int error_code;
     int exception_is_int;
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index ad4b159b28..d996cca68b 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -383,6 +383,9 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
         if (!kvm_irqchip_in_kernel()) {
             ret &= ~(1U << KVM_FEATURE_PV_UNHALT);
         }
+    } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) {
+        ret |= KVM_HINTS_DEDICATED;
+        found = 1;
     }
 
     /* fallback for older kernels */
@@ -801,6 +804,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
         c = &cpuid_data.entries[cpuid_i++];
         c->function = KVM_CPUID_FEATURES | kvm_base;
         c->eax = env->features[FEAT_KVM];
+        c->edx = env->features[FEAT_KVM_HINTS];
     }
 
     cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
@@ -865,6 +869,29 @@ int kvm_arch_init_vcpu(CPUState *cs)
                 c = &cpuid_data.entries[cpuid_i++];
             }
             break;
+        case 0x14: {
+            uint32_t times;
+
+            c->function = i;
+            c->index = 0;
+            c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+            cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            times = c->eax;
+
+            for (j = 1; j <= times; ++j) {
+                if (cpuid_i == KVM_MAX_CPUID_ENTRIES) {
+                    fprintf(stderr, "cpuid_data is full, no space for "
+                                "cpuid(eax:0x14,ecx:0x%x)\n", j);
+                    abort();
+                }
+                c = &cpuid_data.entries[cpuid_i++];
+                c->function = i;
+                c->index = j;
+                c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
+                cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
+            }
+            break;
+        }
         default:
             c->function = i;
             c->flags = 0;
@@ -1788,6 +1815,25 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
                 kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask);
             }
         }
+        if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
+            int addr_num = kvm_arch_get_supported_cpuid(kvm_state,
+                                                        0x14, 1, R_EAX) & 0x7;
+
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL,
+                            env->msr_rtit_ctrl);
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS,
+                            env->msr_rtit_status);
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE,
+                            env->msr_rtit_output_base);
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK,
+                            env->msr_rtit_output_mask);
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH,
+                            env->msr_rtit_cr3_match);
+            for (i = 0; i < addr_num; i++) {
+                kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i,
+                            env->msr_rtit_addrs[i]);
+            }
+        }
 
         /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see
          *       kvm_put_msr_feature_control. */
@@ -2101,6 +2147,20 @@ static int kvm_get_msrs(X86CPU *cpu)
         }
     }
 
+    if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) {
+        int addr_num =
+            kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7;
+
+        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0);
+        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0);
+        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0);
+        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0);
+        kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0);
+        for (i = 0; i < addr_num; i++) {
+            kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0);
+        }
+    }
+
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
     if (ret < 0) {
         return ret;
@@ -2341,6 +2401,24 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_SPEC_CTRL:
             env->spec_ctrl = msrs[i].data;
             break;
+        case MSR_IA32_RTIT_CTL:
+            env->msr_rtit_ctrl = msrs[i].data;
+            break;
+        case MSR_IA32_RTIT_STATUS:
+            env->msr_rtit_status = msrs[i].data;
+            break;
+        case MSR_IA32_RTIT_OUTPUT_BASE:
+            env->msr_rtit_output_base = msrs[i].data;
+            break;
+        case MSR_IA32_RTIT_OUTPUT_MASK:
+            env->msr_rtit_output_mask = msrs[i].data;
+            break;
+        case MSR_IA32_RTIT_CR3_MATCH:
+            env->msr_rtit_cr3_match = msrs[i].data;
+            break;
+        case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B:
+            env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data;
+            break;
         }
     }
 
diff --git a/target/i386/machine.c b/target/i386/machine.c
index 361c05aedf..c05fe6fb1a 100644
--- a/target/i386/machine.c
+++ b/target/i386/machine.c
@@ -837,6 +837,43 @@ static const VMStateDescription vmstate_spec_ctrl = {
     }
 };
 
+static bool intel_pt_enable_needed(void *opaque)
+{
+    X86CPU *cpu = opaque;
+    CPUX86State *env = &cpu->env;
+    int i;
+
+    if (env->msr_rtit_ctrl || env->msr_rtit_status ||
+        env->msr_rtit_output_base || env->msr_rtit_output_mask ||
+        env->msr_rtit_cr3_match) {
+        return true;
+    }
+
+    for (i = 0; i < MAX_RTIT_ADDRS; i++) {
+        if (env->msr_rtit_addrs[i]) {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static const VMStateDescription vmstate_msr_intel_pt = {
+    .name = "cpu/intel_pt",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = intel_pt_enable_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(env.msr_rtit_ctrl, X86CPU),
+        VMSTATE_UINT64(env.msr_rtit_status, X86CPU),
+        VMSTATE_UINT64(env.msr_rtit_output_base, X86CPU),
+        VMSTATE_UINT64(env.msr_rtit_output_mask, X86CPU),
+        VMSTATE_UINT64(env.msr_rtit_cr3_match, X86CPU),
+        VMSTATE_UINT64_ARRAY(env.msr_rtit_addrs, X86CPU, MAX_RTIT_ADDRS),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
 VMStateDescription vmstate_x86_cpu = {
     .name = "cpu",
     .version_id = 12,
@@ -957,6 +994,7 @@ VMStateDescription vmstate_x86_cpu = {
 #endif
         &vmstate_spec_ctrl,
         &vmstate_mcg_ext_ctl,
+        &vmstate_msr_intel_pt,
         NULL
     }
 };
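
The kvm.c and machine.c hunks above also make the Intel PT MSR state (IA32_RTIT_CTL/STATUS/OUTPUT_BASE/OUTPUT_MASK/CR3_MATCH plus the address-range MSRs) part of the saved vCPU state; the new cpu/intel_pt vmstate subsection is only sent when intel_pt_enable_needed() finds a non-zero value, so migration to older QEMU versions keeps working as long as the guest never configured tracing. A quick, hypothetical way to look at those MSRs from inside a Linux guest (not part of this series; it assumes root and the msr driver, and reuses the MSR numbers added to cpu.h) is the standard /dev/cpu/N/msr interface:

```c
/*
 * Hypothetical guest-side peek at the Intel PT MSRs saved/restored by the
 * kvm.c and machine.c hunks above (not part of this commit).  Uses the
 * Linux "msr" driver: /dev/cpu/0/msr, needs root and "modprobe msr".
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static const struct { uint32_t addr; const char *name; } pt_msrs[] = {
    { 0x570, "IA32_RTIT_CTL" },
    { 0x571, "IA32_RTIT_STATUS" },
    { 0x560, "IA32_RTIT_OUTPUT_BASE" },
    { 0x561, "IA32_RTIT_OUTPUT_MASK" },
    { 0x572, "IA32_RTIT_CR3_MATCH" },
};

int main(void)
{
    int fd = open("/dev/cpu/0/msr", O_RDONLY);
    if (fd < 0) {
        perror("open /dev/cpu/0/msr");
        return 1;
    }
    for (size_t i = 0; i < sizeof(pt_msrs) / sizeof(pt_msrs[0]); i++) {
        uint64_t val;
        /* The msr driver maps the file offset to the MSR address. */
        if (pread(fd, &val, sizeof(val), pt_msrs[i].addr) == sizeof(val)) {
            printf("%-22s = 0x%016llx\n", pt_msrs[i].name,
                   (unsigned long long)val);
        } else {
            printf("%-22s = <not readable>\n", pt_msrs[i].name);
        }
    }
    close(fd);
    return 0;
}
```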