Diffstat (limited to 'target')
-rw-r--r--   target/arm/translate-a64.c |   2
-rw-r--r--   target/i386/cpu.c          |  42
-rw-r--r--   target/i386/cpu.h          |   2
-rw-r--r--   target/i386/hax-all.c      |  54
-rw-r--r--   target/i386/hvf/x86hvf.c   |   2
-rw-r--r--   target/i386/kvm.c          | 121
-rw-r--r--   target/mips/msa_helper.c   |  34
-rw-r--r--   target/s390x/kvm.c         |   2
8 files changed, 116 insertions, 143 deletions
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index cf537e9d9b..70c1e08a36 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -400,7 +400,7 @@ static void unallocated_encoding(DisasContext *s)
                       "at pc=%016" PRIx64 "\n",                          \
                       __FILE__, __LINE__, insn, s->pc - 4);              \
         unallocated_encoding(s);                                         \
-    } while (0);
+    } while (0)
 
 static void init_tmp_a64_array(DisasContext *s)
 {
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 3818d72831..ad8196b166 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -4147,6 +4147,48 @@ static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
     info->cap_insn_split = 8;
 }
 
+void x86_update_hflags(CPUX86State *env)
+{
+    uint32_t hflags;
+#define HFLAG_COPY_MASK \
+    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+    hflags = env->hflags & HFLAG_COPY_MASK;
+    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+
+    if (env->cr[4] & CR4_OSFXSR_MASK) {
+        hflags |= HF_OSFXSR_MASK;
+    }
+
+    if (env->efer & MSR_EFER_LMA) {
+        hflags |= HF_LMA_MASK;
+    }
+
+    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+    } else {
+        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+                    (DESC_B_SHIFT - HF_CS32_SHIFT);
+        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+                    (DESC_B_SHIFT - HF_SS32_SHIFT);
+        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
+            !(hflags & HF_CS32_MASK)) {
+            hflags |= HF_ADDSEG_MASK;
+        } else {
+            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
+                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
+        }
+    }
+    env->hflags = hflags;
+}
+
 static Property x86_cpu_properties[] = {
 #ifdef CONFIG_USER_ONLY
     /* apic_id = 0 by default for *-user, see commit 9886e834 */
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 62c4742703..f64e5ed827 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1778,4 +1778,6 @@ bool cpu_is_bsp(X86CPU *cpu);
 void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf);
 void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf);
 
+void x86_update_hflags(CPUX86State* env);
+
 #endif /* I386_CPU_H */
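The new x86_update_hflags() recomputes every derived bit of env->hflags from architectural state, moving each source bit into place by shifting by the difference between its source and destination positions. A minimal standalone sketch of that bit-relocation idiom, using illustrative positions rather than QEMU's real CR0/HF constants:

    /* bit_relocate.c -- sketch of the hflags bit-relocation idiom.
     * SRC_PE_SHIFT/DST_PE_SHIFT are assumed demo values, not QEMU's. */
    #include <stdint.h>
    #include <stdio.h>

    #define SRC_PE_SHIFT 0                     /* PE bit position in CR0 */
    #define SRC_PE_MASK  (1u << SRC_PE_SHIFT)
    #define DST_PE_SHIFT 7                     /* assumed PE position in hflags */

    int main(void)
    {
        uint32_t cr0 = 0x1;                    /* protected mode enabled */
        uint32_t hflags = 0;

        /* Same shape as (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT):
         * mask the source bit, then shift by the position difference. */
        hflags |= (cr0 & SRC_PE_MASK) << (DST_PE_SHIFT - SRC_PE_SHIFT);

        printf("hflags = 0x%02x\n", hflags);   /* prints: hflags = 0x80 */
        return 0;
    }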
diff --git a/target/i386/hax-all.c b/target/i386/hax-all.c
index 3ce6950296..934ec4afd1 100644
--- a/target/i386/hax-all.c
+++ b/target/i386/hax-all.c
@@ -782,56 +782,6 @@ static int hax_set_segments(CPUArchState *env, struct vcpu_state_t *sregs)
     return 0;
 }
 
-/*
- * After get the state from the kernel module, some
- * qemu emulator state need be updated also
- */
-static int hax_setup_qemu_emulator(CPUArchState *env)
-{
-
-#define HFLAG_COPY_MASK (~( \
-    HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
-    HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
-    HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
-    HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK))
-
-    uint32_t hflags;
-
-    hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
-    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
-    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
-        (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
-    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
-    hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
-        (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
-
-    if (env->efer & MSR_EFER_LMA) {
-        hflags |= HF_LMA_MASK;
-    }
-
-    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
-        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
-    } else {
-        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
-            (DESC_B_SHIFT - HF_CS32_SHIFT);
-        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
-            (DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) ||
-            (env->eflags & VM_MASK) || !(hflags & HF_CS32_MASK)) {
-            hflags |= HF_ADDSEG_MASK;
-        } else {
-            hflags |= ((env->segs[R_DS].base |
-                        env->segs[R_ES].base |
-                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
-        }
-    }
-
-    hflags &= ~HF_SMM_MASK;
-
-    env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
-    return 0;
-}
-
 static int hax_sync_vcpu_register(CPUArchState *env, int set)
 {
     struct vcpu_state_t regs;
@@ -887,9 +837,6 @@ static int hax_sync_vcpu_register(CPUArchState *env, int set)
             return -1;
         }
     }
-    if (!set) {
-        hax_setup_qemu_emulator(env);
-    }
     return 0;
 }
 
@@ -1070,6 +1017,7 @@ static int hax_arch_get_registers(CPUArchState *env)
         return ret;
     }
 
+    x86_update_hflags(env);
     return 0;
 }
 
diff --git a/target/i386/hvf/x86hvf.c b/target/i386/hvf/x86hvf.c
index 71c0515073..7803e09a28 100644
--- a/target/i386/hvf/x86hvf.c
+++ b/target/i386/hvf/x86hvf.c
@@ -297,7 +297,6 @@ int hvf_get_registers(CPUState *cpu_state)
     X86CPU *x86cpu = X86_CPU(cpu_state);
     CPUX86State *env = &x86cpu->env;
 
-
     env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
     env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
     env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
@@ -333,6 +332,7 @@ int hvf_get_registers(CPUState *cpu_state)
     env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
     env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);
 
+    x86_update_hflags(env);
     return 0;
 }
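Note that the deleted HAX helper was not a byte-for-byte duplicate: it derived the CPL from CS.DPL and force-cleared HF_SMM_MASK, whereas the shared x86_update_hflags() reads the CPL from SS.DPL (as the KVM path already did) and leaves the SMM bit alone, so HAX now inherits the KVM derivation. A small sketch of the DPL extraction itself; the field position is an assumption for illustration, not QEMU's DESC_DPL_SHIFT:

    /* dpl.c -- pulling a privilege level out of cached segment flags. */
    #include <stdint.h>
    #include <stdio.h>

    #define DPL_SHIFT 13   /* assumed position of the 2-bit DPL field */
    #define CPL_MASK  3u

    int main(void)
    {
        uint32_t ss_flags = 3u << DPL_SHIFT;               /* a ring-3 stack segment */
        uint32_t cpl = (ss_flags >> DPL_SHIFT) & CPL_MASK; /* CPL = SS.DPL */

        printf("CPL = %u\n", cpl);                         /* prints: CPL = 3 */
        return 0;
    }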
diff --git a/target/i386/kvm.c b/target/i386/kvm.c
index 6f69e2fcfd..4912f4d538 100644
--- a/target/i386/kvm.c
+++ b/target/i386/kvm.c
@@ -92,8 +92,9 @@ static bool has_msr_hv_stimer;
 static bool has_msr_hv_frequencies;
 static bool has_msr_xss;
 
-static bool has_msr_architectural_pmu;
-static uint32_t num_architectural_pmu_counters;
+static uint32_t has_architectural_pmu_version;
+static uint32_t num_architectural_pmu_gp_counters;
+static uint32_t num_architectural_pmu_fixed_counters;
 
 static int has_xsave;
 static int has_xcrs;
@@ -872,19 +873,28 @@ int kvm_arch_init_vcpu(CPUState *cs)
     }
 
     if (limit >= 0x0a) {
-        uint32_t ver;
+        uint32_t eax, edx;
 
-        cpu_x86_cpuid(env, 0x0a, 0, &ver, &unused, &unused, &unused);
-        if ((ver & 0xff) > 0) {
-            has_msr_architectural_pmu = true;
-            num_architectural_pmu_counters = (ver & 0xff00) >> 8;
+        cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx);
+
+        has_architectural_pmu_version = eax & 0xff;
+        if (has_architectural_pmu_version > 0) {
+            num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8;
 
             /* Shouldn't be more than 32, since that's the number of bits
              * available in EBX to tell us _which_ counters are available.
              * Play it safe.
              */
-            if (num_architectural_pmu_counters > MAX_GP_COUNTERS) {
-                num_architectural_pmu_counters = MAX_GP_COUNTERS;
+            if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) {
+                num_architectural_pmu_gp_counters = MAX_GP_COUNTERS;
+            }
+
+            if (has_architectural_pmu_version > 1) {
+                num_architectural_pmu_fixed_counters = edx & 0x1f;
+
+                if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) {
+                    num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS;
+                }
             }
         }
     }
@@ -1650,32 +1660,36 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
             kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr);
         }
-        if (has_msr_architectural_pmu) {
-            /* Stop the counter.  */
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+        if (has_architectural_pmu_version > 0) {
+            if (has_architectural_pmu_version > 1) {
+                /* Stop the counter.  */
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+            }
 
             /* Set the counter values.  */
-            for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+            for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
                 kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i,
                                   env->msr_fixed_counters[i]);
             }
-            for (i = 0; i < num_architectural_pmu_counters; i++) {
+            for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
                 kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i,
                                   env->msr_gp_counters[i]);
                 kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i,
                                   env->msr_gp_evtsel[i]);
             }
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
-                              env->msr_global_status);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
-                              env->msr_global_ovf_ctrl);
-
-            /* Now start the PMU.  */
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
-                              env->msr_fixed_ctr_ctrl);
-            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
-                              env->msr_global_ctrl);
+            if (has_architectural_pmu_version > 1) {
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS,
+                                  env->msr_global_status);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+                                  env->msr_global_ovf_ctrl);
+
+                /* Now start the PMU.  */
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL,
+                                  env->msr_fixed_ctr_ctrl);
+                kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL,
+                                  env->msr_global_ctrl);
+            }
         }
         /*
          * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add,
@@ -1877,7 +1891,6 @@ static int kvm_get_sregs(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
     struct kvm_sregs sregs;
-    uint32_t hflags;
     int bit, i, ret;
 
     ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
@@ -1919,44 +1932,7 @@ static int kvm_get_sregs(X86CPU *cpu)
     env->efer = sregs.efer;
 
     /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */
-
-#define HFLAG_COPY_MASK \
-    ~( HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
-       HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
-       HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
-       HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
-
-    hflags = env->hflags & HFLAG_COPY_MASK;
-    hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
-    hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
-    hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
-                (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
-    hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
-
-    if (env->cr[4] & CR4_OSFXSR_MASK) {
-        hflags |= HF_OSFXSR_MASK;
-    }
-
-    if (env->efer & MSR_EFER_LMA) {
-        hflags |= HF_LMA_MASK;
-    }
-
-    if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
-        hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
-    } else {
-        hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
-                    (DESC_B_SHIFT - HF_CS32_SHIFT);
-        hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
-                    (DESC_B_SHIFT - HF_SS32_SHIFT);
-        if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK) ||
-            !(hflags & HF_CS32_MASK)) {
-            hflags |= HF_ADDSEG_MASK;
-        } else {
-            hflags |= ((env->segs[R_DS].base | env->segs[R_ES].base |
-                        env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
-        }
-    }
-    env->hflags = hflags;
+    x86_update_hflags(env);
 
     return 0;
 }
@@ -2030,15 +2006,17 @@ static int kvm_get_msrs(X86CPU *cpu)
     if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) {
         kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0);
     }
-    if (has_msr_architectural_pmu) {
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
-        kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
-        for (i = 0; i < MAX_FIXED_COUNTERS; i++) {
+    if (has_architectural_pmu_version > 0) {
+        if (has_architectural_pmu_version > 1) {
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0);
+            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0);
+        }
+        for (i = 0; i < num_architectural_pmu_fixed_counters; i++) {
             kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0);
         }
-        for (i = 0; i < num_architectural_pmu_counters; i++) {
+        for (i = 0; i < num_architectural_pmu_gp_counters; i++) {
             kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0);
             kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0);
         }
@@ -3492,6 +3470,7 @@ int kvm_arch_release_virq_post(int virq)
         if (entry->virq == virq) {
            trace_kvm_x86_remove_msi_route(virq);
             QLIST_REMOVE(entry, list);
+            g_free(entry);
             break;
         }
     }
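The rewritten PMU probe follows the CPUID leaf 0x0A layout documented in the Intel SDM: the architectural PMU version lives in EAX[7:0], the general-purpose counter count in EAX[15:8], and the fixed-function counter count in EDX[4:0], the latter only meaningful when the version is greater than 1, which is why both MSR paths now gate the fixed and global-control MSRs on version > 1. The same file also plugs a small leak by freeing the MSI route entry removed in kvm_arch_release_virq_post(). A host-side sketch of the same leaf 0x0A decoding, assuming an x86 build with GCC or Clang's <cpuid.h>:

    /* pmu_probe.c -- decode CPUID leaf 0x0a the way the patch does. */
    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx)) {
            fprintf(stderr, "CPUID leaf 0x0a not available\n");
            return 1;
        }

        uint32_t version        = eax & 0xff;                       /* EAX[7:0]  */
        uint32_t gp_counters    = (eax & 0xff00) >> 8;              /* EAX[15:8] */
        uint32_t fixed_counters = (version > 1) ? (edx & 0x1f) : 0; /* EDX[4:0]  */

        printf("PMU version %u: %u GP counters, %u fixed counters\n",
               version, gp_counters, fixed_counters);
        return 0;
    }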
diff --git a/target/mips/msa_helper.c b/target/mips/msa_helper.c
index f167a42655..8fb7a369ca 100644
--- a/target/mips/msa_helper.c
+++ b/target/mips/msa_helper.c
@@ -682,13 +682,13 @@ static inline int64_t msa_mod_u_df(uint32_t df, int64_t arg1, int64_t arg2)
     do {                            \
         e = SIGNED_EVEN(a, df);     \
         o = SIGNED_ODD(a, df);      \
-    } while (0);
+    } while (0)
 
 #define UNSIGNED_EXTRACT(e, o, a, df)   \
     do {                                \
         e = UNSIGNED_EVEN(a, df);       \
         o = UNSIGNED_ODD(a, df);        \
-    } while (0);
+    } while (0)
 
 static inline int64_t msa_dotp_s_df(uint32_t df, int64_t arg1, int64_t arg2)
 {
@@ -1120,9 +1120,11 @@ void helper_msa_splat_df(CPUMIPSState *env, uint32_t df, uint32_t wd,
 #define MSA_LOOP_COND_D MSA_LOOP_COND(DF_DOUBLE)
 
 #define MSA_LOOP(DF) \
+    do { \
         for (i = 0; i < (MSA_LOOP_COND_ ## DF) ; i++) { \
-            MSA_DO_ ## DF \
-        }
+            MSA_DO_ ## DF; \
+        } \
+    } while (0)
 
 #define MSA_FN_DF(FUNC)                                             \
 void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
@@ -1135,17 +1137,17 @@ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
     uint32_t i;                                                     \
     switch (df) {                                                   \
     case DF_BYTE:                                                   \
-        MSA_LOOP_B                                                  \
+        MSA_LOOP_B;                                                 \
         break;                                                      \
     case DF_HALF:                                                   \
-        MSA_LOOP_H                                                  \
+        MSA_LOOP_H;                                                 \
         break;                                                      \
     case DF_WORD:                                                   \
-        MSA_LOOP_W                                                  \
+        MSA_LOOP_W;                                                 \
         break;                                                      \
     case DF_DOUBLE:                                                 \
-        MSA_LOOP_D                                                  \
-        break;                                                      \
+        MSA_LOOP_D;                                                 \
+        break;                                                      \
     default:                                                        \
         assert(0);                                                  \
     }                                                               \
@@ -1168,7 +1170,7 @@ void helper_msa_##FUNC(CPUMIPSState *env, uint32_t df, uint32_t wd, \
     do {                                \
         R##DF(pwx, i) = pwt->DF[2*i];   \
         L##DF(pwx, i) = pws->DF[2*i];   \
-    } while (0);
+    } while (0)
 MSA_FN_DF(pckev_df)
 #undef MSA_DO
 
@@ -1176,7 +1178,7 @@ MSA_FN_DF(pckev_df)
     do {                                    \
         R##DF(pwx, i) = pwt->DF[2*i+1];     \
         L##DF(pwx, i) = pws->DF[2*i+1];     \
-    } while (0);
+    } while (0)
 MSA_FN_DF(pckod_df)
 #undef MSA_DO
 
@@ -1184,7 +1186,7 @@ MSA_FN_DF(pckod_df)
     do {                                \
         pwx->DF[2*i] = L##DF(pwt, i);   \
         pwx->DF[2*i+1] = L##DF(pws, i); \
-    } while (0);
+    } while (0)
 MSA_FN_DF(ilvl_df)
 #undef MSA_DO
 
@@ -1192,7 +1194,7 @@ MSA_FN_DF(ilvl_df)
     do {                                \
         pwx->DF[2*i] = R##DF(pwt, i);   \
         pwx->DF[2*i+1] = R##DF(pws, i); \
-    } while (0);
+    } while (0)
 MSA_FN_DF(ilvr_df)
 #undef MSA_DO
 
@@ -1200,7 +1202,7 @@ MSA_FN_DF(ilvr_df)
     do {                                \
         pwx->DF[2*i] = pwt->DF[2*i];    \
         pwx->DF[2*i+1] = pws->DF[2*i];  \
-    } while (0);
+    } while (0)
 MSA_FN_DF(ilvev_df)
 #undef MSA_DO
 
@@ -1208,7 +1210,7 @@ MSA_FN_DF(ilvev_df)
     do {                                    \
         pwx->DF[2*i] = pwt->DF[2*i+1];      \
         pwx->DF[2*i+1] = pws->DF[2*i+1];    \
-    } while (0);
+    } while (0)
 MSA_FN_DF(ilvod_df)
 #undef MSA_DO
 #undef MSA_LOOP_COND
@@ -1222,7 +1224,7 @@ MSA_FN_DF(ilvod_df)
         uint32_t k = (pwd->DF[i] & 0x3f) % (2 * n);     \
         pwx->DF[i] =                                    \
             (pwd->DF[i] & 0xc0) ? 0 : k < n ? pwt->DF[k] : pws->DF[k - n]; \
-    } while (0);
+    } while (0)
 MSA_FN_DF(vshf_df)
 #undef MSA_DO
 #undef MSA_LOOP_COND
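The MSA_LOOP change applies the standard statement-like-macro pattern: wrapping the expansion in do { ... } while (0) makes "MSA_LOOP_B;" parse as exactly one statement, which is what a switch case, or an if/else, requires. A self-contained illustration of why the bare for-loop form breaks; FILL and the demo program are hypothetical, not QEMU code:

    /* stmt_macro.c -- why compound macro bodies get a do/while (0) wrapper. */
    #include <stdio.h>

    /* Broken variant for comparison (do not use):
     *   #define FILL(arr, n) for (int i = 0; i < (n); i++) { (arr)[i] = i; }
     * "if (use) FILL(a, 4); else ..." then expands to a for-statement plus a
     * stray empty statement, and the 'else' no longer binds to the 'if'. */
    #define FILL(arr, n)                    \
        do {                                \
            for (int i = 0; i < (n); i++) { \
                (arr)[i] = i;               \
            }                               \
        } while (0)

    int main(void)
    {
        int a[4] = { 0 };
        int use = 1;

        if (use)
            FILL(a, 4);      /* expands to one statement, semicolon and all */
        else
            puts("skipped");

        printf("%d %d %d %d\n", a[0], a[1], a[2], a[3]); /* 0 1 2 3 */
        return 0;
    }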
diff --git a/target/s390x/kvm.c b/target/s390x/kvm.c
index 9b8b59f2a2..6a18a413b4 100644
--- a/target/s390x/kvm.c
+++ b/target/s390x/kvm.c
@@ -58,7 +58,7 @@
     if (DEBUG_KVM) {                          \
         fprintf(stderr, fmt, ## __VA_ARGS__); \
     }                                         \
-} while (0);
+} while (0)
 
 #define kvm_vm_check_mem_attr(s, attr) \
     kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
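The s390x DPRINTF fix (like the matching arm and mips hunks above) addresses the dual mistake: a semicolon after while (0) inside the macro definition itself. With it, every invocation written as "DPRINTF(...);" expands to two statements, which again orphans an else branch. A sketch, using GNU-style ## __VA_ARGS__ as the QEMU macro itself does; DPRINTF_OK is an illustrative name:

    /* trailing_semicolon.c -- why "} while (0);" in a macro definition is wrong. */
    #include <stdio.h>

    #define DPRINTF_OK(fmt, ...) \
        do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)

    /* Broken variant for comparison (do not use):
     *   #define DPRINTF_BAD(fmt, ...) \
     *       do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0);
     * "if (debug) DPRINTF_BAD("x\n"); else ..." ends in "while (0);;" and the
     * extra empty statement detaches the 'else', so it fails to compile. */

    int main(void)
    {
        int debug = 1;

        if (debug)
            DPRINTF_OK("debug on\n");  /* one statement plus our ';' */
        else
            puts("quiet");

        return 0;
    }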