Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.c    | 161
-rw-r--r--  target-i386/cpu.h    |  10
-rw-r--r--  target-i386/helper.c |  16
-rw-r--r--  target-i386/kvm.c    | 181
4 files changed, 262 insertions, 106 deletions
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index d4f2e65cd9..c46286ab3e 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -37,6 +37,13 @@
 #include <linux/kvm_para.h>
 #endif
 
+#include "sysemu.h"
+#ifndef CONFIG_USER_ONLY
+#include "hw/xen.h"
+#include "hw/sysbus.h"
+#include "hw/apic_internal.h"
+#endif
+
 /* feature flags taken from "Intel Processor Identification and the CPUID
  * Instruction" and AMD's "CPUID Specification". In cases of disagreement
  * between feature naming conventions, aliases may be added.
@@ -88,10 +95,14 @@ static const char *ext3_feature_name[] = {
 };
 
 static const char *kvm_feature_name[] = {
-    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock", "kvm_asyncpf", NULL, "kvm_pv_eoi", NULL,
-    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    "kvmclock", "kvm_nopiodelay", "kvm_mmu", "kvmclock",
+    "kvm_asyncpf", "kvm_steal_time", "kvm_pv_eoi", NULL,
+    NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL,
+    NULL, NULL, NULL, NULL,
 };
 
 static const char *svm_feature_name[] = {
@@ -106,8 +117,8 @@ static const char *svm_feature_name[] = {
 };
 
 static const char *cpuid_7_0_ebx_feature_name[] = {
-    NULL, NULL, NULL, NULL, NULL, NULL, NULL, "smep",
-    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
+    "fsgsbase", NULL, NULL, "bmi1", "hle", "avx2", NULL, "smep",
+    "bmi2", "erms", "invpcid", "rtm", NULL, NULL, NULL, NULL,
     NULL, NULL, NULL, NULL, "smap", NULL, NULL, NULL,
     NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 };
@@ -762,13 +773,20 @@ static int cpu_x86_fill_model_id(char *str)
     return 0;
 }
 
-static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
+/* Fill a x86_def_t struct with information about the host CPU, and
+ * the CPU features supported by the host hardware + host kernel
+ *
+ * This function may be called only if KVM is enabled.
+ */
+static void kvm_cpu_fill_host(x86_def_t *x86_cpu_def)
 {
+    KVMState *s = kvm_state;
     uint32_t eax = 0, ebx = 0, ecx = 0, edx = 0;
 
+    assert(kvm_enabled());
+
     x86_cpu_def->name = "host";
     host_cpuid(0x0, 0, &eax, &ebx, &ecx, &edx);
-    x86_cpu_def->level = eax;
     x86_cpu_def->vendor1 = ebx;
     x86_cpu_def->vendor2 = edx;
     x86_cpu_def->vendor3 = ecx;
@@ -777,21 +795,24 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
     x86_cpu_def->family = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
     x86_cpu_def->model = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
     x86_cpu_def->stepping = eax & 0x0F;
-    x86_cpu_def->ext_features = ecx;
-    x86_cpu_def->features = edx;
-    if (kvm_enabled() && x86_cpu_def->level >= 7) {
-        x86_cpu_def->cpuid_7_0_ebx_features = kvm_arch_get_supported_cpuid(kvm_state, 0x7, 0, R_EBX);
+    x86_cpu_def->level = kvm_arch_get_supported_cpuid(s, 0x0, 0, R_EAX);
+    x86_cpu_def->features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_EDX);
+    x86_cpu_def->ext_features = kvm_arch_get_supported_cpuid(s, 0x1, 0, R_ECX);
+
+    if (x86_cpu_def->level >= 7) {
+        x86_cpu_def->cpuid_7_0_ebx_features =
+                    kvm_arch_get_supported_cpuid(s, 0x7, 0, R_EBX);
     } else {
         x86_cpu_def->cpuid_7_0_ebx_features = 0;
     }
-    host_cpuid(0x80000000, 0, &eax, &ebx, &ecx, &edx);
-    x86_cpu_def->xlevel = eax;
+    x86_cpu_def->xlevel = kvm_arch_get_supported_cpuid(s, 0x80000000, 0, R_EAX);
+    x86_cpu_def->ext2_features =
+            kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+    x86_cpu_def->ext3_features =
+            kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
 
-    host_cpuid(0x80000001, 0, &eax, &ebx, &ecx, &edx);
-    x86_cpu_def->ext2_features = edx;
-    x86_cpu_def->ext3_features = ecx;
     cpu_x86_fill_model_id(x86_cpu_def->model_id);
     x86_cpu_def->vendor_override = 0;
@@ -800,11 +821,13 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
         x86_cpu_def->vendor2 == CPUID_VENDOR_VIA_2 &&
         x86_cpu_def->vendor3 == CPUID_VENDOR_VIA_3) {
         host_cpuid(0xC0000000, 0, &eax, &ebx, &ecx, &edx);
+        eax = kvm_arch_get_supported_cpuid(s, 0xC0000000, 0, R_EAX);
         if (eax >= 0xC0000001) {
             /* Support VIA max extended level */
             x86_cpu_def->xlevel2 = eax;
             host_cpuid(0xC0000001, 0, &eax, &ebx, &ecx, &edx);
-            x86_cpu_def->ext4_features = edx;
+            x86_cpu_def->ext4_features =
+                kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
         }
     }
@@ -815,8 +838,6 @@ static int cpu_x86_fill_host(x86_def_t *x86_cpu_def)
      * unsupported ones later.
      */
     x86_cpu_def->svm_features = -1;
-
-    return 0;
 }
 
 static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
@@ -837,8 +858,10 @@ static int unavailable_host_feature(struct model_features_t *f, uint32_t mask)
 /* best effort attempt to inform user requested cpu flags aren't making
  * their way to the guest. Note: ft[].check_feat ideally should be
  * specified via a guest_def field to suppress report of extraneous flags.
+ *
+ * This function may be called only if KVM is enabled.
  */
-static int check_features_against_host(x86_def_t *guest_def)
+static int kvm_check_features_against_host(x86_def_t *guest_def)
 {
     x86_def_t host_def;
     uint32_t mask;
@@ -853,7 +876,9 @@ static int check_features_against_host(x86_def_t *guest_def)
         {&guest_def->ext3_features, &host_def.ext3_features,
             ~CPUID_EXT3_SVM, ext3_feature_name, 0x80000001}};
 
-    cpu_x86_fill_host(&host_def);
+    assert(kvm_enabled());
+
+    kvm_cpu_fill_host(&host_def);
     for (rv = 0, i = 0; i < ARRAY_SIZE(ft); ++i)
         for (mask = 1; mask; mask <<= 1)
             if (ft[i].check_feat & mask && *ft[i].guest_feat & mask &&
@@ -1140,7 +1165,7 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
         if (name && !strcmp(name, def->name))
             break;
     if (kvm_enabled() && name && strcmp(name, "host") == 0) {
-        cpu_x86_fill_host(x86_cpu_def);
+        kvm_cpu_fill_host(x86_cpu_def);
     } else if (!def) {
         goto error;
     } else {
@@ -1278,8 +1303,8 @@ static int cpu_x86_find_by_name(x86_def_t *x86_cpu_def, const char *cpu_model)
     x86_cpu_def->kvm_features &= ~minus_kvm_features;
     x86_cpu_def->svm_features &= ~minus_svm_features;
     x86_cpu_def->cpuid_7_0_ebx_features &= ~minus_7_0_ebx_features;
-    if (check_cpuid) {
-        if (check_features_against_host(x86_cpu_def) && enforce_cpuid)
+    if (check_cpuid && kvm_enabled()) {
+        if (kvm_check_features_against_host(x86_cpu_def) && enforce_cpuid)
             goto error;
     }
     if (x86_cpu_def->cpuid_7_0_ebx_features && x86_cpu_def->level < 7) {
@@ -1368,6 +1393,32 @@ CpuDefinitionInfoList *arch_query_cpu_definitions(Error **errp)
     return cpu_list;
 }
 
+#ifdef CONFIG_KVM
+static void filter_features_for_kvm(X86CPU *cpu)
+{
+    CPUX86State *env = &cpu->env;
+    KVMState *s = kvm_state;
+
+    env->cpuid_features &=
+        kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+    env->cpuid_ext_features &=
+        kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
+    env->cpuid_ext2_features &=
+        kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_EDX);
+    env->cpuid_ext3_features &=
+        kvm_arch_get_supported_cpuid(s, 0x80000001, 0, R_ECX);
+    env->cpuid_svm_features &=
+        kvm_arch_get_supported_cpuid(s, 0x8000000A, 0, R_EDX);
+    env->cpuid_7_0_ebx_features &=
+        kvm_arch_get_supported_cpuid(s, 7, 0, R_EBX);
+    env->cpuid_kvm_features &=
+        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+    env->cpuid_ext4_features &=
+        kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
+
+}
+#endif
+
 int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
 {
     CPUX86State *env = &cpu->env;
@@ -1425,9 +1476,14 @@ int cpu_x86_register(X86CPU *cpu, const char *cpu_model)
             );
         env->cpuid_ext3_features &= TCG_EXT3_FEATURES;
         env->cpuid_svm_features &= TCG_SVM_FEATURES;
+    } else {
+#ifdef CONFIG_KVM
+        filter_features_for_kvm(cpu);
+#endif
     }
     object_property_set_str(OBJECT(cpu), def->model_id, "model-id", &error);
-    if (error_is_set(&error)) {
+    if (error) {
+        fprintf(stderr, "%s\n", error_get_pretty(error));
         error_free(error);
         return -1;
     }
@@ -1878,12 +1934,65 @@ static void mce_init(X86CPU *cpu)
     }
 }
 
+#define MSI_ADDR_BASE 0xfee00000
+
+#ifndef CONFIG_USER_ONLY
+static void x86_cpu_apic_init(X86CPU *cpu, Error **errp)
+{
+    static int apic_mapped;
+    CPUX86State *env = &cpu->env;
+    APICCommonState *apic;
+    const char *apic_type = "apic";
+
+    if (kvm_irqchip_in_kernel()) {
+        apic_type = "kvm-apic";
+    } else if (xen_enabled()) {
+        apic_type = "xen-apic";
+    }
+
+    env->apic_state = qdev_try_create(NULL, apic_type);
+    if (env->apic_state == NULL) {
+        error_setg(errp, "APIC device '%s' could not be created", apic_type);
+        return;
+    }
+
+    object_property_add_child(OBJECT(cpu), "apic",
+                              OBJECT(env->apic_state), NULL);
+    qdev_prop_set_uint8(env->apic_state, "id", env->cpuid_apic_id);
+    /* TODO: convert to link<> */
+    apic = APIC_COMMON(env->apic_state);
+    apic->cpu = cpu;
+
+    if (qdev_init(env->apic_state)) {
+        error_setg(errp, "APIC device '%s' could not be initialized",
+                   object_get_typename(OBJECT(env->apic_state)));
+        return;
+    }
+
+    /* XXX: mapping more APICs at the same memory location */
+    if (apic_mapped == 0) {
+        /* NOTE: the APIC is directly connected to the CPU - it is not
+           on the global memory bus. */
+        /* XXX: what if the base changes? */
+        sysbus_mmio_map(sysbus_from_qdev(env->apic_state), 0, MSI_ADDR_BASE);
+        apic_mapped = 1;
+    }
+}
+#endif
+
 void x86_cpu_realize(Object *obj, Error **errp)
 {
     X86CPU *cpu = X86_CPU(obj);
 
 #ifndef CONFIG_USER_ONLY
     qemu_register_reset(x86_cpu_machine_reset_cb, cpu);
+
+    if (cpu->env.cpuid_features & CPUID_APIC || smp_cpus > 1) {
+        x86_cpu_apic_init(cpu, errp);
+        if (error_is_set(errp)) {
+            return;
+        }
+    }
 #endif
 
     mce_init(cpu);
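As background for the cpu.c changes above (not part of the patch): kvm_cpu_fill_host() keeps decoding family/model/stepping from CPUID leaf 1 EAX while taking the feature words from KVM. The bit arithmetic follows the standard CPUID signature layout; a minimal standalone sketch of the same decoding, with an illustrative helper name:

    #include <stdint.h>
    #include <stdio.h>

    /* Same arithmetic as kvm_cpu_fill_host(): decode the CPUID leaf 1 EAX
     * signature into family (base + extended family), model (extended
     * model prepended to base model) and stepping (low nibble). */
    static void decode_signature(uint32_t eax)
    {
        unsigned family   = ((eax >> 8) & 0x0F) + ((eax >> 20) & 0xFF);
        unsigned model    = ((eax >> 4) & 0x0F) | ((eax & 0xF0000) >> 12);
        unsigned stepping = eax & 0x0F;

        printf("family %u, model %u, stepping %u\n", family, model, stepping);
    }

    int main(void)
    {
        decode_signature(0x000306a9);  /* prints: family 6, model 58, stepping 9 */
        return 0;
    }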
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index de33303dea..cdc59dc0ca 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -907,9 +907,11 @@ static inline void cpu_x86_load_seg_cache(CPUX86State *env,
     }
 }
 
-static inline void cpu_x86_load_seg_cache_sipi(CPUX86State *env,
+static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu,
                                                int sipi_vector)
 {
+    CPUX86State *env = &cpu->env;
+
     env->eip = 0;
     cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8,
                            sipi_vector << 12,
@@ -1098,8 +1100,10 @@ static inline void cpu_clone_regs(CPUX86State *env, target_ulong newsp)
 #include "hw/apic.h"
 #endif
 
-static inline bool cpu_has_work(CPUX86State *env)
+static inline bool cpu_has_work(CPUState *cpu)
 {
+    CPUX86State *env = &X86_CPU(cpu)->env;
+
     return ((env->interrupt_request & (CPU_INTERRUPT_HARD |
                                        CPU_INTERRUPT_POLL)) &&
             (env->eflags & IF_MASK)) ||
@@ -1131,7 +1135,7 @@ void do_cpu_sipi(X86CPU *cpu);
 
 #define MCE_INJECT_BROADCAST    1
 #define MCE_INJECT_UNCOND_AO    2
 
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags);
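The cpu.h prototype changes are part of the QOM CPU conversion: callers now pass an X86CPU (or CPUState) and the CPUX86State is reached through the embedded env field. A simplified, QEMU-independent sketch of that embedding, with made-up type names rather than the real QEMU ones (QEMU itself uses QOM casts such as X86_CPU() and CPU() instead of offsetof):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-ins for CPUX86State / X86CPU; the real structs carry far
     * more state. */
    typedef struct ToyX86State {
        unsigned long eip;
    } ToyX86State;

    typedef struct ToyX86CPU {
        int cpu_index;       /* stands in for the parent CPUState fields */
        ToyX86State env;     /* embedded, so &cpu->env is always valid */
    } ToyX86CPU;

    /* Equivalent of x86_env_get_cpu(): recover the containing CPU from env */
    static ToyX86CPU *toy_env_get_cpu(ToyX86State *env)
    {
        return (ToyX86CPU *)((char *)env - offsetof(ToyX86CPU, env));
    }

    int main(void)
    {
        ToyX86CPU cpu = { .cpu_index = 0 };
        ToyX86State *env = &cpu.env;

        printf("round trip ok: %d\n", toy_env_get_cpu(env) == &cpu);
        return 0;
    }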
diff --git a/target-i386/helper.c b/target-i386/helper.c
index c5d42c5916..bf206cfa97 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -1141,10 +1141,11 @@ static void do_inject_x86_mce(void *data)
     }
 }
 
-void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
+void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank,
                         uint64_t status, uint64_t mcg_status, uint64_t addr,
                         uint64_t misc, int flags)
 {
+    CPUX86State *cenv = &cpu->env;
     MCEInjectionParams params = {
         .mon = mon,
         .env = cenv,
@@ -1176,7 +1177,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
         return;
     }
 
-    run_on_cpu(cenv, do_inject_x86_mce, &params);
+    run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
     if (flags & MCE_INJECT_BROADCAST) {
         params.bank = 1;
         params.status = MCI_STATUS_VAL | MCI_STATUS_UC;
@@ -1188,7 +1189,7 @@ void cpu_x86_inject_mce(Monitor *mon, CPUX86State *cenv, int bank,
                 continue;
             }
             params.env = env;
-            run_on_cpu(cenv, do_inject_x86_mce, &params);
+            run_on_cpu(CPU(cpu), do_inject_x86_mce, &params);
         }
     }
 }
@@ -1243,6 +1244,7 @@ X86CPU *cpu_x86_init(const char *cpu_model)
 {
     X86CPU *cpu;
     CPUX86State *env;
+    Error *error = NULL;
 
     cpu = X86_CPU(object_new(TYPE_X86_CPU));
     env = &cpu->env;
@@ -1253,8 +1255,12 @@ X86CPU *cpu_x86_init(const char *cpu_model)
         return NULL;
     }
 
-    x86_cpu_realize(OBJECT(cpu), NULL);
-
+    x86_cpu_realize(OBJECT(cpu), &error);
+    if (error) {
+        error_free(error);
+        object_delete(OBJECT(cpu));
+        return NULL;
+    }
     return cpu;
 }
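cpu_x86_init() above now propagates realize failures through an Error object instead of passing NULL. A rough, self-contained sketch of the same "callee fills *errp, caller checks, reports and frees" pattern; the Error type and function names below are toy stand-ins, not the QEMU error API the patch itself uses:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Toy stand-in for QEMU's Error object, just to show the shape. */
    typedef struct ToyError {
        char *msg;
    } ToyError;

    static void toy_error_setg(ToyError **errp, const char *msg)
    {
        ToyError *err = malloc(sizeof(*err));
        err->msg = strdup(msg);
        *errp = err;
    }

    static void toy_error_free(ToyError *err)
    {
        free(err->msg);
        free(err);
    }

    /* Callee: mirrors x86_cpu_realize(obj, errp) setting *errp on failure */
    static void toy_realize(int fail, ToyError **errp)
    {
        if (fail) {
            toy_error_setg(errp, "APIC device could not be created");
        }
    }

    int main(void)
    {
        ToyError *error = NULL;

        toy_realize(1, &error);
        if (error) {                 /* same check cpu_x86_init() now does */
            fprintf(stderr, "%s\n", error->msg);
            toy_error_free(error);
            return 1;
        }
        return 0;
    }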
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 3aa62b20ff..73e2035637 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -98,6 +98,19 @@ static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max)
     return cpuid;
 }
 
+/* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough
+ * for all entries.
+ */
+static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s)
+{
+    struct kvm_cpuid2 *cpuid;
+    int max = 1;
+    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
+        max *= 2;
+    }
+    return cpuid;
+}
+
 struct kvm_para_features {
     int cap;
     int feature;
@@ -123,60 +136,98 @@ static int get_para_features(KVMState *s)
 }
 
+/* Returns the value for a specific register on the cpuid entry
+ */
+static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg)
+{
+    uint32_t ret = 0;
+    switch (reg) {
+    case R_EAX:
+        ret = entry->eax;
+        break;
+    case R_EBX:
+        ret = entry->ebx;
+        break;
+    case R_ECX:
+        ret = entry->ecx;
+        break;
+    case R_EDX:
+        ret = entry->edx;
+        break;
+    }
+    return ret;
+}
+
+/* Find matching entry for function/index on kvm_cpuid2 struct
+ */
+static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid,
+                                                 uint32_t function,
+                                                 uint32_t index)
+{
+    int i;
+    for (i = 0; i < cpuid->nent; ++i) {
+        if (cpuid->entries[i].function == function &&
+            cpuid->entries[i].index == index) {
+            return &cpuid->entries[i];
+        }
+    }
+    /* not found: */
+    return NULL;
+}
+
 uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function,
                                       uint32_t index, int reg)
 {
     struct kvm_cpuid2 *cpuid;
-    int i, max;
     uint32_t ret = 0;
     uint32_t cpuid_1_edx;
-    int has_kvm_features = 0;
+    bool found = false;
 
-    max = 1;
-    while ((cpuid = try_get_cpuid(s, max)) == NULL) {
-        max *= 2;
+    cpuid = get_supported_cpuid(s);
+
+    struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index);
+    if (entry) {
+        found = true;
+        ret = cpuid_entry_get_reg(entry, reg);
     }
 
-    for (i = 0; i < cpuid->nent; ++i) {
-        if (cpuid->entries[i].function == function &&
-            cpuid->entries[i].index == index) {
-            if (cpuid->entries[i].function == KVM_CPUID_FEATURES) {
-                has_kvm_features = 1;
-            }
-            switch (reg) {
-            case R_EAX:
-                ret = cpuid->entries[i].eax;
-                break;
-            case R_EBX:
-                ret = cpuid->entries[i].ebx;
-                break;
-            case R_ECX:
-                ret = cpuid->entries[i].ecx;
-                break;
-            case R_EDX:
-                ret = cpuid->entries[i].edx;
-                switch (function) {
-                case 1:
-                    /* KVM before 2.6.30 misreports the following features */
-                    ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
-                    break;
-                case 0x80000001:
-                    /* On Intel, kvm returns cpuid according to the Intel spec,
-                     * so add missing bits according to the AMD spec:
-                     */
-                    cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
-                    ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
-                    break;
-                }
-                break;
-            }
+    /* Fixups for the data returned by KVM, below */
+
+    if (function == 1 && reg == R_EDX) {
+        /* KVM before 2.6.30 misreports the following features */
+        ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA;
+    } else if (function == 1 && reg == R_ECX) {
+        /* We can set the hypervisor flag, even if KVM does not return it on
+         * GET_SUPPORTED_CPUID
+         */
+        ret |= CPUID_EXT_HYPERVISOR;
+        /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it
+         * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER,
+         * and the irqchip is in the kernel.
+         */
+        if (kvm_irqchip_in_kernel() &&
+            kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
+            ret |= CPUID_EXT_TSC_DEADLINE_TIMER;
+        }
+
+        /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled
+         * without the in-kernel irqchip
+         */
+        if (!kvm_irqchip_in_kernel()) {
+            ret &= ~CPUID_EXT_X2APIC;
         }
+    } else if (function == 0x80000001 && reg == R_EDX) {
+        /* On Intel, kvm returns cpuid according to the Intel spec,
+         * so add missing bits according to the AMD spec:
+         */
+        cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
+        ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES;
     }
 
     g_free(cpuid);
 
     /* fallback for older kernels */
-    if (!has_kvm_features && (function == KVM_CPUID_FEATURES)) {
+    if ((function == KVM_CPUID_FEATURES) && !found) {
         ret = get_para_features(s);
     }
@@ -229,8 +280,9 @@ static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap,
     return -ENOSYS;
 }
 
-static void kvm_mce_inject(CPUX86State *env, hwaddr paddr, int code)
+static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code)
 {
+    CPUX86State *env = &cpu->env;
     uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN |
                       MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S;
     uint64_t mcg_status = MCG_STATUS_MCIP;
@@ -242,7 +294,7 @@ static void kvm_mce_inject(CPUX86State *env, hwaddr paddr, int code)
         status |= 0xc0;
         mcg_status |= MCG_STATUS_RIPV;
     }
-    cpu_x86_inject_mce(NULL, env, 9, status, mcg_status, paddr,
+    cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr,
                        (MCM_ADDR_PHYS << 6) | 0xc,
                        cpu_x86_support_mca_broadcast(env) ?
                        MCE_INJECT_BROADCAST : 0);
@@ -256,6 +308,7 @@ static void hardware_memory_error(void)
 
 int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
 {
+    X86CPU *cpu = x86_env_get_cpu(env);
     ram_addr_t ram_addr;
     hwaddr paddr;
 
@@ -273,7 +326,7 @@ int kvm_arch_on_sigbus_vcpu(CPUX86State *env, int code, void *addr)
             }
         }
         kvm_hwpoison_page_add(ram_addr);
-        kvm_mce_inject(env, paddr, code);
+        kvm_mce_inject(cpu, paddr, code);
     } else {
         if (code == BUS_MCEERR_AO) {
             return 0;
@@ -301,7 +354,7 @@ int kvm_arch_on_sigbus(int code, void *addr)
             return 0;
         }
         kvm_hwpoison_page_add(ram_addr);
-        kvm_mce_inject(first_cpu, paddr, code);
+        kvm_mce_inject(x86_env_get_cpu(first_cpu), paddr, code);
     } else {
        if (code == BUS_MCEERR_AO) {
            return 0;
@@ -359,31 +412,12 @@ int kvm_arch_init_vcpu(CPUX86State *env)
         struct kvm_cpuid2 cpuid;
         struct kvm_cpuid_entry2 entries[100];
     } QEMU_PACKED cpuid_data;
-    KVMState *s = env->kvm_state;
     uint32_t limit, i, j, cpuid_i;
     uint32_t unused;
     struct kvm_cpuid_entry2 *c;
     uint32_t signature[3];
     int r;
 
-    env->cpuid_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX);
-
-    i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
-    j = env->cpuid_ext_features & CPUID_EXT_TSC_DEADLINE_TIMER;
-    env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(s, 1, 0, R_ECX);
-    env->cpuid_ext_features |= i;
-    if (j && kvm_irqchip_in_kernel() &&
-        kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) {
-        env->cpuid_ext_features |= CPUID_EXT_TSC_DEADLINE_TIMER;
-    }
-
-    env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
-                                                             0, R_EDX);
-    env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(s, 0x80000001,
-                                                             0, R_ECX);
-    env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(s, 0x8000000A,
-                                                            0, R_EDX);
-
     cpuid_i = 0;
 
     /* Paravirtualization CPUIDs */
@@ -404,8 +438,7 @@ int kvm_arch_init_vcpu(CPUX86State *env)
     c = &cpuid_data.entries[cpuid_i++];
     memset(c, 0, sizeof(*c));
     c->function = KVM_CPUID_FEATURES;
-    c->eax = env->cpuid_kvm_features &
-        kvm_arch_get_supported_cpuid(s, KVM_CPUID_FEATURES, 0, R_EAX);
+    c->eax = env->cpuid_kvm_features;
 
     if (hyperv_enabled()) {
         memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
@@ -526,8 +559,6 @@ int kvm_arch_init_vcpu(CPUX86State *env)
 
     /* Call Centaur's CPUID instructions they are supported. */
     if (env->cpuid_xlevel2 > 0) {
-        env->cpuid_ext4_features &=
-            kvm_arch_get_supported_cpuid(s, 0xC0000001, 0, R_EDX);
         cpu_x86_cpuid(env, 0xC0000000, 0, &limit,
                       &unused, &unused, &unused);
         for (i = 0xC0000000; i <= limit; i++) {
@@ -1365,8 +1396,9 @@ static int kvm_put_mp_state(CPUX86State *env)
     return kvm_vcpu_ioctl(env, KVM_SET_MP_STATE, &mp_state);
 }
 
-static int kvm_get_mp_state(CPUX86State *env)
+static int kvm_get_mp_state(X86CPU *cpu)
 {
+    CPUX86State *env = &cpu->env;
     struct kvm_mp_state mp_state;
     int ret;
 
@@ -1552,9 +1584,10 @@ static int kvm_get_debugregs(CPUX86State *env)
 
 int kvm_arch_put_registers(CPUX86State *env, int level)
 {
+    CPUState *cpu = ENV_GET_CPU(env);
     int ret;
 
-    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+    assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu));
 
     ret = kvm_getput_regs(env, 1);
     if (ret < 0) {
@@ -1609,9 +1642,10 @@ int kvm_arch_put_registers(CPUX86State *env, int level)
 
 int kvm_arch_get_registers(CPUX86State *env)
 {
+    X86CPU *cpu = x86_env_get_cpu(env);
     int ret;
 
-    assert(cpu_is_stopped(env) || qemu_cpu_is_self(env));
+    assert(cpu_is_stopped(CPU(cpu)) || qemu_cpu_is_self(CPU(cpu)));
 
     ret = kvm_getput_regs(env, 0);
     if (ret < 0) {
@@ -1633,7 +1667,7 @@ int kvm_arch_get_registers(CPUX86State *env)
     if (ret < 0) {
         return ret;
     }
-    ret = kvm_get_mp_state(env);
+    ret = kvm_get_mp_state(cpu);
     if (ret < 0) {
         return ret;
     }
@@ -1781,8 +1815,10 @@ int kvm_arch_process_async_events(CPUX86State *env)
     return env->halted;
 }
 
-static int kvm_handle_halt(CPUX86State *env)
+static int kvm_handle_halt(X86CPU *cpu)
 {
+    CPUX86State *env = &cpu->env;
+
     if (!((env->interrupt_request & CPU_INTERRUPT_HARD) &&
           (env->eflags & IF_MASK)) &&
         !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
@@ -1996,13 +2032,14 @@ static bool host_supports_vmx(void)
 
 int kvm_arch_handle_exit(CPUX86State *env, struct kvm_run *run)
 {
+    X86CPU *cpu = x86_env_get_cpu(env);
     uint64_t code;
     int ret;
 
     switch (run->exit_reason) {
     case KVM_EXIT_HLT:
        DPRINTF("handle_hlt\n");
-        ret = kvm_handle_halt(env);
+        ret = kvm_handle_halt(cpu);
        break;
    case KVM_EXIT_SET_TPR:
        ret = 0;
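For reference (not part of the patch): get_supported_cpuid()/try_get_cpuid() in kvm.c wrap the KVM_GET_SUPPORTED_CPUID ioctl, growing the entry buffer until the kernel stops rejecting it with E2BIG. A standalone user-space sketch of the same loop, simplified relative to QEMU's try_get_cpuid():

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Grow the kvm_cpuid2 buffer until KVM_GET_SUPPORTED_CPUID succeeds. */
    static struct kvm_cpuid2 *query_supported_cpuid(int kvm_fd)
    {
        int nent = 1;

        for (;;) {
            struct kvm_cpuid2 *cpuid =
                calloc(1, sizeof(*cpuid) + nent * sizeof(struct kvm_cpuid_entry2));

            cpuid->nent = nent;
            if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0) {
                return cpuid;
            }
            free(cpuid);
            if (errno != E2BIG) {
                return NULL;    /* unexpected error */
            }
            nent *= 2;          /* buffer too small, retry with more entries */
        }
    }

    int main(void)
    {
        int kvm_fd = open("/dev/kvm", O_RDWR);

        if (kvm_fd < 0) {
            perror("open /dev/kvm");
            return 1;
        }
        struct kvm_cpuid2 *cpuid = query_supported_cpuid(kvm_fd);
        if (cpuid != NULL) {
            printf("%u supported CPUID entries\n", cpuid->nent);
            free(cpuid);
        }
        return 0;
    }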