Diffstat (limited to 'target/i386/kvm.c')
-rw-r--r-- | target/i386/kvm.c | 74
1 files changed, 69 insertions, 5 deletions
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 8c73438c67..bfd09bd441 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -95,6 +95,7 @@ static bool has_msr_hv_stimer;
 static bool has_msr_hv_frequencies;
 static bool has_msr_hv_reenlightenment;
 static bool has_msr_xss;
+static bool has_msr_umwait;
 static bool has_msr_spec_ctrl;
 static bool has_msr_virt_ssbd;
 static bool has_msr_smi_count;
@@ -401,6 +402,12 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
         if (host_tsx_blacklisted()) {
             ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE);
         }
+    } else if (function == 7 && index == 0 && reg == R_ECX) {
+        if (enable_cpu_pm) {
+            ret |= CPUID_7_0_ECX_WAITPKG;
+        } else {
+            ret &= ~CPUID_7_0_ECX_WAITPKG;
+        }
     } else if (function == 7 && index == 0 && reg == R_EDX) {
         /*
          * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts.
@@ -592,9 +599,9 @@ static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
                        (MCM_ADDR_PHYS << 6) | 0xc, flags);
 }
 
-static void hardware_memory_error(void)
+static void hardware_memory_error(void *host_addr)
 {
-    fprintf(stderr, "Hardware memory error!\n");
+    error_report("QEMU got Hardware memory error at addr %p", host_addr);
     exit(1);
 }
 
@@ -618,15 +625,34 @@ void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
             kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
             kvm_hwpoison_page_add(ram_addr);
             kvm_mce_inject(cpu, paddr, code);
+
+            /*
+             * Use different logging severity based on error type.
+             * If there is additional MCE reporting on the hypervisor, QEMU VA
+             * could be another source to identify the PA and MCE details.
+             */
+            if (code == BUS_MCEERR_AR) {
+                error_report("Guest MCE Memory Error at QEMU addr %p and "
+                             "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+                             addr, paddr, "BUS_MCEERR_AR");
+            } else {
+                warn_report("Guest MCE Memory Error at QEMU addr %p and "
+                            "GUEST addr 0x%" HWADDR_PRIx " of type %s injected",
+                            addr, paddr, "BUS_MCEERR_AO");
+            }
+
             return;
         }
 
-        fprintf(stderr, "Hardware memory error for memory used by "
-                "QEMU itself instead of guest system!\n");
+        if (code == BUS_MCEERR_AO) {
+            warn_report("Hardware memory error at addr %p of type %s "
+                        "for memory used by QEMU itself instead of guest system!",
+                        addr, "BUS_MCEERR_AO");
+        }
     }
 
     if (code == BUS_MCEERR_AR) {
-        hardware_memory_error();
+        hardware_memory_error(addr);
     }
 
     /* Hope we are lucky for AO MCE */
@@ -1208,6 +1234,16 @@ static int hyperv_handle_properties(CPUState *cs,
         }
     }
 
+    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_ON) {
+        env->features[FEAT_HV_RECOMM_EAX] |= HV_NO_NONARCH_CORESHARING;
+    } else if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO) {
+        c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0);
+        if (c) {
+            env->features[FEAT_HV_RECOMM_EAX] |=
+                c->eax & HV_NO_NONARCH_CORESHARING;
+        }
+    }
+
     /* Features */
     r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED);
     r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC);
@@ -1321,6 +1357,7 @@ free:
 }
 
 static Error *hv_passthrough_mig_blocker;
+static Error *hv_no_nonarch_cs_mig_blocker;
 
 static int hyperv_init_vcpu(X86CPU *cpu)
 {
@@ -1340,6 +1377,21 @@ static int hyperv_init_vcpu(X86CPU *cpu)
         }
     }
 
+    if (cpu->hyperv_no_nonarch_cs == ON_OFF_AUTO_AUTO &&
+        hv_no_nonarch_cs_mig_blocker == NULL) {
+        error_setg(&hv_no_nonarch_cs_mig_blocker,
+                   "'hv-no-nonarch-coresharing=auto' CPU flag prevents migration"
+                   " use explicit 'hv-no-nonarch-coresharing=on' instead (but"
+                   " make sure SMT is disabled and/or that vCPUs are properly"
+                   " pinned)");
+        ret = migrate_add_blocker(hv_no_nonarch_cs_mig_blocker, &local_err);
+        if (local_err) {
+            error_report_err(local_err);
+            error_free(hv_no_nonarch_cs_mig_blocker);
+            return ret;
+        }
+    }
+
     if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) {
         /*
          * the kernel doesn't support setting vp_index; assert that its value
@@ -1954,6 +2006,9 @@ static int kvm_get_supported_msrs(KVMState *s)
             case MSR_IA32_XSS:
                 has_msr_xss = true;
                 break;
+            case MSR_IA32_UMWAIT_CONTROL:
+                has_msr_umwait = true;
+                break;
             case HV_X64_MSR_CRASH_CTL:
                 has_msr_hv_crash = true;
                 break;
@@ -2633,6 +2688,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (has_msr_xss) {
         kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss);
     }
+    if (has_msr_umwait) {
+        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, env->umwait);
+    }
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl);
     }
@@ -3046,6 +3104,9 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (has_msr_xss) {
         kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0);
     }
+    if (has_msr_umwait) {
+        kvm_msr_entry_add(cpu, MSR_IA32_UMWAIT_CONTROL, 0);
+    }
     if (has_msr_spec_ctrl) {
         kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0);
     }
@@ -3298,6 +3359,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         case MSR_IA32_XSS:
            env->xss = msrs[i].data;
            break;
+        case MSR_IA32_UMWAIT_CONTROL:
+            env->umwait = msrs[i].data;
+            break;
         default:
             if (msrs[i].index >= MSR_MC0_CTL &&
                 msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) {
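
Two of the changes above are driven from the command line: the WAITPKG CPUID bit is only advertised to the guest when VM CPU power management is enabled (enable_cpu_pm, set with '-overcommit cpu-pm=on'), and the new Hyper-V "no non-architectural core sharing" enlightenment is a CPU flag whose 'auto' setting installs a migration blocker. As a rough usage sketch (assuming an otherwise complete machine and disk configuration), a guest exercising both paths might be started with something like:

    qemu-system-x86_64 -accel kvm \
        -overcommit cpu-pm=on \
        -cpu host,hv-no-nonarch-coresharing=on \
        ...

Per the hyperv_init_vcpu() hunk, 'hv-no-nonarch-coresharing=on' avoids the migration blocker, but should only be used when SMT is disabled and/or the vCPUs are properly pinned.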