Diffstat (limited to 'target/i386')
-rw-r--r--  target/i386/cpu.c        | 16
-rw-r--r--  target/i386/cpu.h        | 12
-rw-r--r--  target/i386/kvm.c        | 44
-rw-r--r--  target/i386/translate.c  |  9
4 files changed, 44 insertions, 37 deletions
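Note (not part of the patch): the cpu.c and cpu.h hunks below expose additional CPUID.(EAX=7,ECX=0):ECX feature bits (avx512vbmi2, gfni, vaes, vpclmulqdq, avx512vnni, avx512bitalg). As a minimal sketch of what a guest would see once these are enabled, the standalone program below reads the same leaf with GCC/Clang's cpuid.h and tests the bit positions taken from the cpu.h hunk; it is illustrative only and assumes an x86 host/guest.

    /* Illustrative check of CPUID.(EAX=7,ECX=0):ECX bits named in the diff.
     * Bit positions come from the CPUID_7_0_ECX_* defines added below. */
    #include <stdio.h>
    #include <cpuid.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
            printf("CPUID leaf 7 not supported\n");
            return 1;
        }
        printf("avx512vbmi2:  %d\n", !!(ecx & (1U << 6)));
        printf("gfni:         %d\n", !!(ecx & (1U << 8)));
        printf("vaes:         %d\n", !!(ecx & (1U << 9)));
        printf("vpclmulqdq:   %d\n", !!(ecx & (1U << 10)));
        printf("avx512vnni:   %d\n", !!(ecx & (1U << 11)));
        printf("avx512bitalg: %d\n", !!(ecx & (1U << 12)));
        return 0;
    }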
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 82603e3130..b069eafcc6 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -437,9 +437,9 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
     [FEAT_7_0_ECX] = {
         .feat_names = {
             NULL, "avx512vbmi", "umip", "pku",
-            "ospke", NULL, NULL, NULL,
-            NULL, NULL, NULL, NULL,
-            NULL, NULL, "avx512-vpopcntdq", NULL,
+            "ospke", NULL, "avx512vbmi2", NULL,
+            "gfni", "vaes", "vpclmulqdq", "avx512vnni",
+            "avx512bitalg", NULL, "avx512-vpopcntdq", NULL,
             "la57", NULL, NULL, NULL,
             NULL, NULL, "rdpid", NULL,
             NULL, NULL, NULL, NULL,
@@ -3736,11 +3736,6 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
 
 #ifndef CONFIG_USER_ONLY
     if (tcg_enabled()) {
-        AddressSpace *as_normal = g_new0(AddressSpace, 1);
-        AddressSpace *as_smm = g_new(AddressSpace, 1);
-
-        address_space_init(as_normal, cs->memory, "cpu-memory");
-
         cpu->cpu_as_mem = g_new(MemoryRegion, 1);
         cpu->cpu_as_root = g_new(MemoryRegion, 1);
 
@@ -3755,11 +3750,10 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
                                  get_system_memory(), 0, ~0ull);
         memory_region_add_subregion_overlap(cpu->cpu_as_root, 0, cpu->cpu_as_mem, 0);
         memory_region_set_enabled(cpu->cpu_as_mem, true);
-        address_space_init(as_smm, cpu->cpu_as_root, "CPU");
 
         cs->num_ases = 2;
-        cpu_address_space_init(cs, as_normal, 0);
-        cpu_address_space_init(cs, as_smm, 1);
+        cpu_address_space_init(cs, 0, "cpu-memory", cs->memory);
+        cpu_address_space_init(cs, 1, "cpu-smm", cpu->cpu_as_root);
 
         /* ... SMRAM with higher priority, linked from /machine/smram.  */
         cpu->machine_done.notify = x86_cpu_machine_done;
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b086b1528b..d605cc6ccb 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -635,6 +635,12 @@ typedef uint32_t FeatureWordArray[FEATURE_WORDS];
 #define CPUID_7_0_ECX_UMIP     (1U << 2)
 #define CPUID_7_0_ECX_PKU      (1U << 3)
 #define CPUID_7_0_ECX_OSPKE    (1U << 4)
+#define CPUID_7_0_ECX_VBMI2    (1U << 6) /* Additional VBMI Instrs */
+#define CPUID_7_0_ECX_GFNI     (1U << 8)
+#define CPUID_7_0_ECX_VAES     (1U << 9)
+#define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10)
+#define CPUID_7_0_ECX_AVX512VNNI (1U << 11)
+#define CPUID_7_0_ECX_AVX512BITALG (1U << 12)
 #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */
 #define CPUID_7_0_ECX_LA57     (1U << 16)
 #define CPUID_7_0_ECX_RDPID    (1U << 22)
@@ -1091,14 +1097,16 @@ typedef struct CPUX86State {
     uint64_t async_pf_en_msr;
     uint64_t pv_eoi_en_msr;
 
+    /* Partition-wide HV MSRs, will be updated only on the first vcpu */
     uint64_t msr_hv_hypercall;
     uint64_t msr_hv_guest_os_id;
-    uint64_t msr_hv_vapic;
     uint64_t msr_hv_tsc;
+
+    /* Per-VCPU HV MSRs */
+    uint64_t msr_hv_vapic;
     uint64_t msr_hv_crash_params[HV_CRASH_PARAMS];
     uint64_t msr_hv_runtime;
     uint64_t msr_hv_synic_control;
-    uint64_t msr_hv_synic_version;
     uint64_t msr_hv_synic_evt_page;
     uint64_t msr_hv_synic_msg_page;
     uint64_t msr_hv_synic_sint[HV_SINT_COUNT];
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index d4b2ce2e94..351b64f77c 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -662,8 +662,6 @@ static int hyperv_handle_properties(CPUState *cs)
         env->features[FEAT_HYPERV_EAX] |= HV_VP_RUNTIME_AVAILABLE;
     }
     if (cpu->hyperv_synic) {
-        int sint;
-
         if (!has_msr_hv_synic ||
             kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_SYNIC, 0)) {
             fprintf(stderr, "Hyper-V SynIC is not supported by kernel\n");
@@ -671,10 +669,6 @@ static int hyperv_handle_properties(CPUState *cs)
         }
 
         env->features[FEAT_HYPERV_EAX] |= HV_SYNIC_AVAILABLE;
-        env->msr_hv_synic_version = HV_SYNIC_VERSION;
-        for (sint = 0; sint < ARRAY_SIZE(env->msr_hv_synic_sint); sint++) {
-            env->msr_hv_synic_sint[sint] = HV_SINT_MASKED;
-        }
     }
     if (cpu->hyperv_stimer) {
         if (!has_msr_hv_stimer) {
@@ -1053,6 +1047,13 @@ void kvm_arch_reset_vcpu(X86CPU *cpu)
     } else {
         env->mp_state = KVM_MP_STATE_RUNNABLE;
     }
+
+    if (cpu->hyperv_synic) {
+        int i;
+        for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) {
+            env->msr_hv_synic_sint[i] = HV_SINT_MASKED;
+        }
+    }
 }
 
 void kvm_arch_do_init_vcpu(X86CPU *cpu)
@@ -1678,19 +1679,26 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
                           env->msr_global_ctrl);
     }
-    if (has_msr_hv_hypercall) {
-        kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
-                          env->msr_hv_guest_os_id);
-        kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
-                          env->msr_hv_hypercall);
+    /*
+     * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
+     * only sync them to KVM on the first cpu
+     */
+    if (current_cpu == first_cpu) {
+        if (has_msr_hv_hypercall) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID,
+                              env->msr_hv_guest_os_id);
+            kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL,
+                              env->msr_hv_hypercall);
+        }
+        if (cpu->hyperv_time) {
+            kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC,
+                              env->msr_hv_tsc);
+        }
     }
     if (cpu->hyperv_vapic) {
         kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE,
                           env->msr_hv_vapic);
     }
-    if (cpu->hyperv_time) {
-        kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, env->msr_hv_tsc);
-    }
     if (has_msr_hv_crash) {
         int j;
 
@@ -1706,10 +1714,10 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
     if (cpu->hyperv_synic) {
         int j;
 
+        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION);
+
         kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL,
                           env->msr_hv_synic_control);
-        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION,
-                          env->msr_hv_synic_version);
         kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP,
                           env->msr_hv_synic_evt_page);
         kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP,
@@ -2073,7 +2081,6 @@ static int kvm_get_msrs(X86CPU *cpu)
         uint32_t msr;
 
         kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0);
-        kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, 0);
         kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0);
         kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0);
         for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) {
@@ -2277,9 +2284,6 @@ static int kvm_get_msrs(X86CPU *cpu)
         case HV_X64_MSR_SCONTROL:
             env->msr_hv_synic_control = msrs[i].data;
             break;
-        case HV_X64_MSR_SVERSION:
-            env->msr_hv_synic_version = msrs[i].data;
-            break;
         case HV_X64_MSR_SIEFP:
             env->msr_hv_synic_evt_page = msrs[i].data;
             break;
diff --git a/target/i386/translate.c b/target/i386/translate.c
index 088a9d9766..23d7eec964 100644
--- a/target/i386/translate.c
+++ b/target/i386/translate.c
@@ -4467,10 +4467,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
     target_ulong pc_start = s->base.pc_next;
 
     s->pc_start = s->pc = pc_start;
-    prefixes = 0;
     s->override = -1;
-    rex_w = -1;
-    rex_r = 0;
 #ifdef TARGET_X86_64
     s->rex_x = 0;
     s->rex_b = 0;
@@ -4484,6 +4481,10 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         return s->pc;
     }
 
+    prefixes = 0;
+    rex_w = -1;
+    rex_r = 0;
+
  next_byte:
     b = x86_ldub_code(env, s);
     /* Collect prefixes. */
@@ -4547,9 +4548,9 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu)
         if (!CODE64(s) && (vex2 & 0xc0) != 0xc0) {
             /* 4.1.4.6: In 32-bit mode, bits [7:6] must be 11b,
                otherwise the instruction is LES or LDS.  */
+            s->pc--; /* rewind the advance_pc() x86_ldub_code() did */
             break;
         }
-        s->pc++;
 
         /* 4.1.1-4.1.3: No preceding lock, 66, f2, f3, or rex prefixes. */
         if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ |