-rw-r--r--  target-i386/Makefile.objs  |    2
-rw-r--r--  target-i386/cc_helper.c    |   10
-rw-r--r--  target-i386/cpu.c          |   91
-rw-r--r--  target-i386/cpu.h          |   21
-rw-r--r--  target-i386/fpu_helper.c   |  396
-rw-r--r--  target-i386/helper.c       |   14
-rw-r--r--  target-i386/helper.h       |   19
-rw-r--r--  target-i386/int_helper.c   |   10
-rw-r--r--  target-i386/kvm.c          |   34
-rw-r--r--  target-i386/mem_helper.c   |    6
-rw-r--r--  target-i386/misc_helper.c  |    9
-rw-r--r--  target-i386/mpx_helper.c   |  166
-rw-r--r--  target-i386/smm_helper.c   |    4
-rw-r--r--  target-i386/translate.c    | 1173
14 files changed, 1424 insertions(+), 531 deletions(-)
diff --git a/target-i386/Makefile.objs b/target-i386/Makefile.objs
index 2255f46a9e..b223d7932b 100644
--- a/target-i386/Makefile.objs
+++ b/target-i386/Makefile.objs
@@ -1,6 +1,6 @@
obj-y += translate.o helper.o cpu.o bpt_helper.o
obj-y += excp_helper.o fpu_helper.o cc_helper.o int_helper.o svm_helper.o
-obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o
+obj-y += smm_helper.o misc_helper.o mem_helper.o seg_helper.o mpx_helper.o
obj-y += gdbstub.o
obj-$(CONFIG_SOFTMMU) += machine.o arch_memory_mapping.o arch_dump.o monitor.o
obj-$(CONFIG_KVM) += kvm.o hyperv.o
diff --git a/target-i386/cc_helper.c b/target-i386/cc_helper.c
index 99a3b5496b..83af223c9f 100644
--- a/target-i386/cc_helper.c
+++ b/target-i386/cc_helper.c
@@ -383,13 +383,3 @@ void helper_sti_vm(CPUX86State *env)
}
}
#endif
-
-void helper_set_inhibit_irq(CPUX86State *env)
-{
- env->hflags |= HF_INHIBIT_IRQ_MASK;
-}
-
-void helper_reset_inhibit_irq(CPUX86State *env)
-{
- env->hflags &= ~HF_INHIBIT_IRQ_MASK;
-}
diff --git a/target-i386/cpu.c b/target-i386/cpu.c
index 3fa14bf171..0af43a3ae1 100644
--- a/target-i386/cpu.c
+++ b/target-i386/cpu.c
@@ -331,14 +331,14 @@ static const char *cpuid_6_feature_name[] = {
#define TCG_EXT_FEATURES (CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | \
CPUID_EXT_MONITOR | CPUID_EXT_SSSE3 | CPUID_EXT_CX16 | \
CPUID_EXT_SSE41 | CPUID_EXT_SSE42 | CPUID_EXT_POPCNT | \
+ CPUID_EXT_XSAVE | /* CPUID_EXT_OSXSAVE is dynamic */ \
CPUID_EXT_MOVBE | CPUID_EXT_AES | CPUID_EXT_HYPERVISOR)
/* missing:
CPUID_EXT_DTES64, CPUID_EXT_DSCPL, CPUID_EXT_VMX, CPUID_EXT_SMX,
CPUID_EXT_EST, CPUID_EXT_TM2, CPUID_EXT_CID, CPUID_EXT_FMA,
CPUID_EXT_XTPR, CPUID_EXT_PDCM, CPUID_EXT_PCID, CPUID_EXT_DCA,
- CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_XSAVE,
- CPUID_EXT_OSXSAVE, CPUID_EXT_AVX, CPUID_EXT_F16C,
- CPUID_EXT_RDRAND */
+ CPUID_EXT_X2APIC, CPUID_EXT_TSC_DEADLINE_TIMER, CPUID_EXT_AVX,
+ CPUID_EXT_F16C, CPUID_EXT_RDRAND */
#ifdef TARGET_X86_64
#define TCG_EXT2_X86_64_FEATURES (CPUID_EXT2_SYSCALL | CPUID_EXT2_LM)
@@ -358,15 +358,17 @@ static const char *cpuid_6_feature_name[] = {
#define TCG_7_0_EBX_FEATURES (CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_SMAP | \
CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ADX | \
CPUID_7_0_EBX_PCOMMIT | CPUID_7_0_EBX_CLFLUSHOPT | \
- CPUID_7_0_EBX_CLWB)
+ CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_MPX | CPUID_7_0_EBX_FSGSBASE)
/* missing:
- CPUID_7_0_EBX_FSGSBASE, CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
+ CPUID_7_0_EBX_HLE, CPUID_7_0_EBX_AVX2,
CPUID_7_0_EBX_ERMS, CPUID_7_0_EBX_INVPCID, CPUID_7_0_EBX_RTM,
CPUID_7_0_EBX_RDSEED */
#define TCG_7_0_ECX_FEATURES 0
#define TCG_APM_FEATURES 0
#define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT
-
+#define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1)
+ /* missing:
+ CPUID_XSAVE_XSAVEC, CPUID_XSAVE_XSAVES */
typedef struct FeatureWordInfo {
const char **feat_names;
@@ -440,7 +442,7 @@ static FeatureWordInfo feature_word_info[FEATURE_WORDS] = {
.cpuid_eax = 0xd,
.cpuid_needs_ecx = true, .cpuid_ecx = 1,
.cpuid_reg = R_EAX,
- .tcg_features = 0,
+ .tcg_features = TCG_XSAVE_FEATURES,
},
[FEAT_6_EAX] = {
.feat_names = cpuid_6_feature_name,
@@ -470,12 +472,7 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = {
};
#undef REGISTER
-typedef struct ExtSaveArea {
- uint32_t feature, bits;
- uint32_t offset, size;
-} ExtSaveArea;
-
-static const ExtSaveArea ext_save_areas[] = {
+const ExtSaveArea x86_ext_save_areas[] = {
[2] = { .feature = FEAT_1_ECX, .bits = CPUID_EXT_AVX,
.offset = 0x240, .size = 0x100 },
[3] = { .feature = FEAT_7_0_EBX, .bits = CPUID_7_0_EBX_MPX,
@@ -2323,10 +2320,13 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = (cpu->apic_id << 24) |
8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
*ecx = env->features[FEAT_1_ECX];
+ if ((*ecx & CPUID_EXT_XSAVE) && (env->cr[4] & CR4_OSXSAVE_MASK)) {
+ *ecx |= CPUID_EXT_OSXSAVE;
+ }
*edx = env->features[FEAT_1_EDX];
if (cs->nr_cores * cs->nr_threads > 1) {
*ebx |= (cs->nr_cores * cs->nr_threads) << 16;
- *edx |= 1 << 28; /* HTT bit */
+ *edx |= CPUID_HT;
}
break;
case 2:
@@ -2450,7 +2450,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
break;
case 0xD: {
KVMState *s = cs->kvm_state;
- uint64_t kvm_mask;
+ uint64_t ena_mask;
int i;
/* Processor Extended State */
@@ -2458,35 +2458,39 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
*ebx = 0;
*ecx = 0;
*edx = 0;
- if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) || !kvm_enabled()) {
+ if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) {
break;
}
- kvm_mask =
- kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX) |
- ((uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32);
+ if (kvm_enabled()) {
+ ena_mask = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX);
+ ena_mask <<= 32;
+ ena_mask |= kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX);
+ } else {
+ ena_mask = -1;
+ }
if (count == 0) {
*ecx = 0x240;
- for (i = 2; i < ARRAY_SIZE(ext_save_areas); i++) {
- const ExtSaveArea *esa = &ext_save_areas[i];
- if ((env->features[esa->feature] & esa->bits) == esa->bits &&
- (kvm_mask & (1 << i)) != 0) {
+ for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[i];
+ if ((env->features[esa->feature] & esa->bits) == esa->bits
+ && ((ena_mask >> i) & 1) != 0) {
if (i < 32) {
- *eax |= 1 << i;
+ *eax |= 1u << i;
} else {
- *edx |= 1 << (i - 32);
+ *edx |= 1u << (i - 32);
}
*ecx = MAX(*ecx, esa->offset + esa->size);
}
}
- *eax |= kvm_mask & (XSTATE_FP | XSTATE_SSE);
+ *eax |= ena_mask & (XSTATE_FP | XSTATE_SSE);
*ebx = *ecx;
} else if (count == 1) {
*eax = env->features[FEAT_XSAVE];
- } else if (count < ARRAY_SIZE(ext_save_areas)) {
- const ExtSaveArea *esa = &ext_save_areas[count];
- if ((env->features[esa->feature] & esa->bits) == esa->bits &&
- (kvm_mask & (1 << count)) != 0) {
+ } else if (count < ARRAY_SIZE(x86_ext_save_areas)) {
+ const ExtSaveArea *esa = &x86_ext_save_areas[count];
+ if ((env->features[esa->feature] & esa->bits) == esa->bits
+ && ((ena_mask >> count) & 1) != 0) {
*eax = esa->size;
*ebx = esa->offset;
}
@@ -2639,6 +2643,8 @@ static void x86_cpu_reset(CPUState *s)
X86CPU *cpu = X86_CPU(s);
X86CPUClass *xcc = X86_CPU_GET_CLASS(cpu);
CPUX86State *env = &cpu->env;
+ target_ulong cr4;
+ uint64_t xcr0;
int i;
xcc->parent_reset(s);
@@ -2698,7 +2704,8 @@ static void x86_cpu_reset(CPUState *s)
cpu_set_fpuc(env, 0x37f);
env->mxcsr = 0x1f80;
- env->xstate_bv = XSTATE_FP | XSTATE_SSE;
+ /* All units are in INIT state. */
+ env->xstate_bv = 0;
env->pat = 0x0007040600070406ULL;
env->msr_ia32_misc_enable = MSR_IA32_MISC_ENABLE_DEFAULT;
@@ -2709,7 +2716,27 @@ static void x86_cpu_reset(CPUState *s)
cpu_breakpoint_remove_all(s, BP_CPU);
cpu_watchpoint_remove_all(s, BP_CPU);
- env->xcr0 = 1;
+ cr4 = 0;
+ xcr0 = XSTATE_FP;
+
+#ifdef CONFIG_USER_ONLY
+ /* Enable all the features for user-mode. */
+ if (env->features[FEAT_1_EDX] & CPUID_SSE) {
+ xcr0 |= XSTATE_SSE;
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_MPX) {
+ xcr0 |= XSTATE_BNDREGS | XSTATE_BNDCSR;
+ }
+ if (env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE) {
+ cr4 |= CR4_OSFXSR_MASK | CR4_OSXSAVE_MASK;
+ }
+ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_FSGSBASE) {
+ cr4 |= CR4_FSGSBASE_MASK;
+ }
+#endif
+
+ env->xcr0 = xcr0;
+ cpu_x86_update_cr4(env, cr4);
/*
* SDM 11.11.5 requires:
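
A note on the leaf-0xD handling above: with ena_mask forced to all-ones under TCG, a guest can now enumerate the enabled XSAVE components and the required save-area size straight from CPUID. A minimal guest-side sketch of that enumeration (a hypothetical standalone program, assuming GCC/Clang's <cpuid.h> and its __get_cpuid_count; it is not part of this patch):

    /* Enumerate XSAVE components via CPUID leaf 0xD (guest-side sketch). */
    #include <cpuid.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;
        uint64_t mask;
        int i;

        /* Subleaf 0: EDX:EAX = XCR0 bits supported, EBX = size needed for
           the currently enabled components, ECX = size for all of them.  */
        __get_cpuid_count(0xd, 0, &eax, &ebx, &ecx, &edx);
        mask = ((uint64_t)edx << 32) | eax;
        printf("supported XCR0 %#llx, enabled size %u, max size %u\n",
               (unsigned long long)mask, ebx, ecx);

        /* Subleaf i >= 2: EAX = component size, EBX = standard-form offset,
           matching esa->size and esa->offset in the code above.  */
        for (i = 2; i < 63; i++) {
            if (mask & (1ull << i)) {
                __get_cpuid_count(0xd, i, &eax, &ebx, &ecx, &edx);
                printf("component %d: %u bytes at offset %#x\n", i, eax, ebx);
            }
        }
        return 0;
    }
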
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index a990ea7fef..94cb4db27d 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -156,6 +156,8 @@
#define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */
#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
#define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */
+#define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */
+#define HF_MPX_IU_SHIFT 26 /* BND registers in-use */
#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
#define HF_SOFTMMU_MASK (1 << HF_SOFTMMU_SHIFT)
@@ -180,6 +182,8 @@
#define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT)
#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
#define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT)
+#define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT)
+#define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT)
/* hflags2 */
@@ -188,12 +192,14 @@
#define HF2_NMI_SHIFT 2 /* CPU serving NMI */
#define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */
#define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */
+#define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */
#define HF2_GIF_MASK (1 << HF2_GIF_SHIFT)
#define HF2_HIF_MASK (1 << HF2_HIF_SHIFT)
#define HF2_NMI_MASK (1 << HF2_NMI_SHIFT)
#define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT)
#define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT)
+#define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT)
#define CR0_PE_SHIFT 0
#define CR0_MP_SHIFT 1
@@ -753,6 +759,10 @@ typedef struct BNDCSReg {
uint64_t sts;
} BNDCSReg;
+#define BNDCFG_ENABLE 1ULL
+#define BNDCFG_BNDPRESERVE 2ULL
+#define BNDCFG_BDIR_MASK TARGET_PAGE_MASK
+
#ifdef HOST_WORDS_BIGENDIAN
#define ZMM_B(n) _b_ZMMReg[63 - (n)]
#define ZMM_W(n) _w_ZMMReg[31 - (n)]
@@ -1121,7 +1131,14 @@ void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32);
int cpu_x86_signal_handler(int host_signum, void *pinfo,
void *puc);
-/* cpuid.c */
+/* cpu.c */
+typedef struct ExtSaveArea {
+ uint32_t feature, bits;
+ uint32_t offset, size;
+} ExtSaveArea;
+
+extern const ExtSaveArea x86_ext_save_areas[];
+
void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
uint32_t *eax, uint32_t *ebx,
uint32_t *ecx, uint32_t *edx);
@@ -1342,6 +1359,8 @@ void cpu_report_tpr_access(CPUX86State *env, TPRAccess access);
*/
void x86_cpu_change_kvm_default(const char *prop, const char *value);
+/* mpx_helper.c */
+void cpu_sync_bndcs_hflags(CPUX86State *env);
/* Return name of 32-bit register, from a R_* constant */
const char *get_register_name_32(unsigned int reg);
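
For reference, the standard-format save area that ExtSaveArea describes looks like this. The AVX entry is visible in the cpu.c hunk above; the MPX offsets and sizes are cut off by that hunk boundary, so the values below are the architectural ones from the Intel SDM (Vol. 1, ch. 13) rather than quoted from the patch:

    /* Standard-format XSAVE area (sketch; MPX values per the SDM). */
    struct region { unsigned offset, size; const char *what; };

    static const struct region std_xsave_layout[] = {
        { 0x000, 0x200, "legacy FXSAVE image (x87 at 0x20, XMM at 0xa0)" },
        { 0x200, 0x040, "header: XSTATE_BV, XCOMP_BV, reserved" },
        { 0x240, 0x100, "component 2: AVX, YMM high halves" },
        { 0x3c0, 0x040, "component 3: MPX BNDREGS (BND0-BND3)" },
        { 0x400, 0x040, "component 4: MPX BNDCSR (BNDCFGU, BNDSTATUS)" },
    };

This layout is why do_xsave() in fpu_helper.c updates XSTATE_BV at ptr + 512 and why CPUID.0DH subleaf 0 starts its size computation from ECX = 0x240.
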
diff --git a/target-i386/fpu_helper.c b/target-i386/fpu_helper.c
index 2d54b47ac6..9dfbc4c7a6 100644
--- a/target-i386/fpu_helper.c
+++ b/target-i386/fpu_helper.c
@@ -1115,89 +1115,174 @@ void cpu_x86_frstor(CPUX86State *env, target_ulong ptr, int data32)
}
#endif
-static void do_fxsave(CPUX86State *env, target_ulong ptr, int data64,
- uintptr_t retaddr)
+static void do_xsave_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
{
- int fpus, fptag, i, nb_xmm_regs;
- floatx80 tmp;
+ int fpus, fptag, i;
target_ulong addr;
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception_ra(env, EXCP0D_GPF, retaddr);
- }
-
fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
fptag = 0;
for (i = 0; i < 8; i++) {
fptag |= (env->fptags[i] << i);
}
- cpu_stw_data_ra(env, ptr, env->fpuc, retaddr);
- cpu_stw_data_ra(env, ptr + 2, fpus, retaddr);
- cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, retaddr);
-#ifdef TARGET_X86_64
- if (data64) {
- cpu_stq_data_ra(env, ptr + 0x08, 0, retaddr); /* rip */
- cpu_stq_data_ra(env, ptr + 0x10, 0, retaddr); /* rdp */
- } else
-#endif
- {
- cpu_stl_data_ra(env, ptr + 0x08, 0, retaddr); /* eip */
- cpu_stl_data_ra(env, ptr + 0x0c, 0, retaddr); /* sel */
- cpu_stl_data_ra(env, ptr + 0x10, 0, retaddr); /* dp */
- cpu_stl_data_ra(env, ptr + 0x14, 0, retaddr); /* sel */
- }
+ cpu_stw_data_ra(env, ptr, env->fpuc, ra);
+ cpu_stw_data_ra(env, ptr + 2, fpus, ra);
+ cpu_stw_data_ra(env, ptr + 4, fptag ^ 0xff, ra);
+
+ /* In 32-bit mode this is eip, sel, dp, sel.
+ In 64-bit mode this is rip, rdp.
+ But in either case we don't write actual data, just zeros. */
+ cpu_stq_data_ra(env, ptr + 0x08, 0, ra); /* eip+sel; rip */
+ cpu_stq_data_ra(env, ptr + 0x10, 0, ra); /* edp+sel; rdp */
addr = ptr + 0x20;
for (i = 0; i < 8; i++) {
- tmp = ST(i);
- helper_fstt(env, tmp, addr, retaddr);
+ floatx80 tmp = ST(i);
+ helper_fstt(env, tmp, addr, ra);
+ addr += 16;
+ }
+}
+
+static void do_xsave_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, ra); /* mxcsr */
+ cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, ra); /* mxcsr_mask */
+}
+
+static void do_xsave_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + 0xa0;
+ for (i = 0; i < nb_xmm_regs; i++) {
+ cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), ra);
+ cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), ra);
addr += 16;
}
+}
+
+static void do_xsave_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ cpu_stq_data_ra(env, addr, env->bnd_regs[i].lb, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bnd_regs[i].ub, ra);
+ }
+}
+
+static void do_xsave_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ cpu_stq_data_ra(env, addr, env->bndcs_regs.cfgu, ra);
+ cpu_stq_data_ra(env, addr + 8, env->bndcs_regs.sts, ra);
+}
+
+void helper_fxsave(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xsave_fpu(env, ptr, ra);
if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- cpu_stl_data_ra(env, ptr + 0x18, env->mxcsr, retaddr); /* mxcsr */
- cpu_stl_data_ra(env, ptr + 0x1c, 0x0000ffff, retaddr); /* mxcsr_mask */
- if (env->hflags & HF_CS64_MASK) {
- nb_xmm_regs = 16;
- } else {
- nb_xmm_regs = 8;
- }
- addr = ptr + 0xa0;
+ do_xsave_mxcsr(env, ptr, ra);
/* Fast FXSAVE leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
- for (i = 0; i < nb_xmm_regs; i++) {
- cpu_stq_data_ra(env, addr, env->xmm_regs[i].ZMM_Q(0), retaddr);
- cpu_stq_data_ra(env, addr + 8, env->xmm_regs[i].ZMM_Q(1), retaddr);
- addr += 16;
- }
+ do_xsave_sse(env, ptr, ra);
}
}
}
-void helper_fxsave(CPUX86State *env, target_ulong ptr, int data64)
+static uint64_t get_xinuse(CPUX86State *env)
{
- do_fxsave(env, ptr, data64, GETPC());
+ uint64_t inuse = -1;
+
+ /* For the most part, we don't track XINUSE. We could calculate it
+ here for all components, but it's probably less work to simply
+ indicate in use. That said, the state of BNDREGS is important
+ enough to track in HFLAGS, so we might as well use that here. */
+ if ((env->hflags & HF_MPX_IU_MASK) == 0) {
+ inuse &= ~XSTATE_BNDREGS;
+ }
+ return inuse;
}
-static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64,
- uintptr_t retaddr)
+static void do_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm,
+ uint64_t inuse, uint64_t opt, uintptr_t ra)
{
- int i, fpus, fptag, nb_xmm_regs;
- floatx80 tmp;
- target_ulong addr;
+ uint64_t old_bv, new_bv;
- /* The operand must be 16 byte aligned */
- if (ptr & 0xf) {
- raise_exception_ra(env, EXCP0D_GPF, retaddr);
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Never save anything not enabled by XCR0. */
+ rfbm &= env->xcr0;
+ opt &= rfbm;
+
+ if (opt & XSTATE_FP) {
+ do_xsave_fpu(env, ptr, ra);
+ }
+ if (rfbm & XSTATE_SSE) {
+ /* Note that saving MXCSR is not suppressed by XSAVEOPT. */
+ do_xsave_mxcsr(env, ptr, ra);
+ }
+ if (opt & XSTATE_SSE) {
+ do_xsave_sse(env, ptr, ra);
+ }
+ if (opt & XSTATE_BNDREGS) {
+        target_ulong off = x86_ext_save_areas[3].offset; /* bit 3, BNDREGS */
+ do_xsave_bndregs(env, ptr + off, ra);
+    }
+ if (opt & XSTATE_BNDCSR) {
+        target_ulong off = x86_ext_save_areas[4].offset; /* bit 4, BNDCSR */
+ do_xsave_bndcsr(env, ptr + off, ra);
+ }
+
+ /* Update the XSTATE_BV field. */
+ old_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+ new_bv = (old_bv & ~rfbm) | (inuse & rfbm);
+ cpu_stq_data_ra(env, ptr + 512, new_bv, ra);
+}
+
+void helper_xsave(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ do_xsave(env, ptr, rfbm, get_xinuse(env), -1, GETPC());
+}
- cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, retaddr));
- fpus = cpu_lduw_data_ra(env, ptr + 2, retaddr);
- fptag = cpu_lduw_data_ra(env, ptr + 4, retaddr);
+void helper_xsaveopt(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
+{
+ uint64_t inuse = get_xinuse(env);
+ do_xsave(env, ptr, rfbm, inuse, inuse, GETPC());
+}
+
+static void do_xrstor_fpu(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, fpus, fptag;
+ target_ulong addr;
+
+ cpu_set_fpuc(env, cpu_lduw_data_ra(env, ptr, ra));
+ fpus = cpu_lduw_data_ra(env, ptr + 2, ra);
+ fptag = cpu_lduw_data_ra(env, ptr + 4, ra);
env->fpstt = (fpus >> 11) & 7;
env->fpus = fpus & ~0x3800;
fptag ^= 0xff;
@@ -1207,37 +1292,206 @@ static void do_fxrstor(CPUX86State *env, target_ulong ptr, int data64,
addr = ptr + 0x20;
for (i = 0; i < 8; i++) {
- tmp = helper_fldt(env, addr, retaddr);
+ floatx80 tmp = helper_fldt(env, addr, ra);
ST(i) = tmp;
addr += 16;
}
+}
+
+static void do_xrstor_mxcsr(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, ra));
+}
+
+static void do_xrstor_sse(CPUX86State *env, target_ulong ptr, uintptr_t ra)
+{
+ int i, nb_xmm_regs;
+ target_ulong addr;
+
+ if (env->hflags & HF_CS64_MASK) {
+ nb_xmm_regs = 16;
+ } else {
+ nb_xmm_regs = 8;
+ }
+
+ addr = ptr + 0xa0;
+ for (i = 0; i < nb_xmm_regs; i++) {
+ env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, ra);
+ env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, ra);
+ addr += 16;
+ }
+}
+
+static void do_xrstor_bndregs(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ int i;
+
+ for (i = 0; i < 4; i++, addr += 16) {
+ env->bnd_regs[i].lb = cpu_ldq_data_ra(env, addr, ra);
+ env->bnd_regs[i].ub = cpu_ldq_data_ra(env, addr + 8, ra);
+ }
+}
+
+static void do_xrstor_bndcsr(CPUX86State *env, target_ulong addr, uintptr_t ra)
+{
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->bndcs_regs.cfgu = cpu_ldq_data_ra(env, addr, ra);
+ env->bndcs_regs.sts = cpu_ldq_data_ra(env, addr + 8, ra);
+}
+
+void helper_fxrstor(CPUX86State *env, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+
+ /* The operand must be 16 byte aligned */
+ if (ptr & 0xf) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ do_xrstor_fpu(env, ptr, ra);
if (env->cr[4] & CR4_OSFXSR_MASK) {
- /* XXX: finish it */
- cpu_set_mxcsr(env, cpu_ldl_data_ra(env, ptr + 0x18, retaddr));
- /* cpu_ldl_data_ra(env, ptr + 0x1c, retaddr); */
- if (env->hflags & HF_CS64_MASK) {
- nb_xmm_regs = 16;
- } else {
- nb_xmm_regs = 8;
- }
- addr = ptr + 0xa0;
- /* Fast FXRESTORE leaves out the XMM registers */
+ do_xrstor_mxcsr(env, ptr, ra);
+ /* Fast FXRSTOR leaves out the XMM registers */
if (!(env->efer & MSR_EFER_FFXSR)
|| (env->hflags & HF_CPL_MASK)
|| !(env->hflags & HF_LMA_MASK)) {
- for (i = 0; i < nb_xmm_regs; i++) {
- env->xmm_regs[i].ZMM_Q(0) = cpu_ldq_data_ra(env, addr, retaddr);
- env->xmm_regs[i].ZMM_Q(1) = cpu_ldq_data_ra(env, addr + 8, retaddr);
- addr += 16;
- }
+ do_xrstor_sse(env, ptr, ra);
}
}
}
-void helper_fxrstor(CPUX86State *env, target_ulong ptr, int data64)
+void helper_xrstor(CPUX86State *env, target_ulong ptr, uint64_t rfbm)
{
- do_fxrstor(env, ptr, data64, GETPC());
+ uintptr_t ra = GETPC();
+ uint64_t xstate_bv, xcomp_bv0, xcomp_bv1;
+
+ rfbm &= env->xcr0;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, ra);
+ }
+
+ /* The operand must be 64 byte aligned. */
+ if (ptr & 63) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ xstate_bv = cpu_ldq_data_ra(env, ptr + 512, ra);
+
+ if ((int64_t)xstate_bv < 0) {
+ /* FIXME: Compact form. */
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* Standard form. */
+
+ /* The XSTATE field must not set bits not present in XCR0. */
+ if (xstate_bv & ~env->xcr0) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ /* The XCOMP field must be zero. */
+ xcomp_bv0 = cpu_ldq_data_ra(env, ptr + 520, ra);
+ xcomp_bv1 = cpu_ldq_data_ra(env, ptr + 528, ra);
+ if (xcomp_bv0 || xcomp_bv1) {
+ raise_exception_ra(env, EXCP0D_GPF, ra);
+ }
+
+ if (rfbm & XSTATE_FP) {
+ if (xstate_bv & XSTATE_FP) {
+ do_xrstor_fpu(env, ptr, ra);
+ } else {
+ helper_fninit(env);
+ memset(env->fpregs, 0, sizeof(env->fpregs));
+ }
+ }
+ if (rfbm & XSTATE_SSE) {
+ /* Note that the standard form of XRSTOR loads MXCSR from memory
+ whether or not the XSTATE_BV bit is set. */
+ do_xrstor_mxcsr(env, ptr, ra);
+ if (xstate_bv & XSTATE_SSE) {
+ do_xrstor_sse(env, ptr, ra);
+ } else {
+ /* ??? When AVX is implemented, we may have to be more
+ selective in the clearing. */
+ memset(env->xmm_regs, 0, sizeof(env->xmm_regs));
+ }
+ }
+ if (rfbm & XSTATE_BNDREGS) {
+ if (xstate_bv & XSTATE_BNDREGS) {
+            target_ulong off = x86_ext_save_areas[3].offset; /* bit 3, BNDREGS */
+ do_xrstor_bndregs(env, ptr + off, ra);
+ env->hflags |= HF_MPX_IU_MASK;
+ } else {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+ }
+ if (rfbm & XSTATE_BNDCSR) {
+ if (xstate_bv & XSTATE_BNDCSR) {
+            target_ulong off = x86_ext_save_areas[4].offset; /* bit 4, BNDCSR */
+ do_xrstor_bndcsr(env, ptr + off, ra);
+ } else {
+ memset(&env->bndcs_regs, 0, sizeof(env->bndcs_regs));
+ }
+ cpu_sync_bndcs_hflags(env);
+ }
+}
+
+uint64_t helper_xgetbv(CPUX86State *env, uint32_t ecx)
+{
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ switch (ecx) {
+ case 0:
+ return env->xcr0;
+ case 1:
+ if (env->features[FEAT_XSAVE] & CPUID_XSAVE_XGETBV1) {
+ return env->xcr0 & get_xinuse(env);
+ }
+ break;
+ }
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
+}
+
+void helper_xsetbv(CPUX86State *env, uint32_t ecx, uint64_t mask)
+{
+ uint32_t dummy, ena_lo, ena_hi;
+ uint64_t ena;
+
+ /* The OS must have enabled XSAVE. */
+ if (!(env->cr[4] & CR4_OSXSAVE_MASK)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+
+ /* Only XCR0 is defined at present; the FPU may not be disabled. */
+ if (ecx != 0 || (mask & XSTATE_FP) == 0) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling unimplemented features. */
+ cpu_x86_cpuid(env, 0x0d, 0, &ena_lo, &dummy, &dummy, &ena_hi);
+ ena = ((uint64_t)ena_hi << 32) | ena_lo;
+ if (mask & ~ena) {
+ goto do_gpf;
+ }
+
+ /* Disallow enabling only half of MPX. */
+ if ((mask ^ (mask * (XSTATE_BNDCSR / XSTATE_BNDREGS))) & XSTATE_BNDCSR) {
+ goto do_gpf;
+ }
+
+ env->xcr0 = mask;
+ cpu_sync_bndcs_hflags(env);
+ return;
+
+ do_gpf:
+ raise_exception_ra(env, EXCP0D_GPF, GETPC());
}
void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
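
The "only half of MPX" test in helper_xsetbv() above is worth unpacking. XSTATE_BNDREGS is bit 3 and XSTATE_BNDCSR is bit 4, so XSTATE_BNDCSR / XSTATE_BNDREGS == 2 and the multiply is just mask << 1: it moves the BNDREGS bit into the BNDCSR position. The XOR is then non-zero in that position exactly when the two bits disagree. A standalone sketch with the mask values written out (local defines, not quoting the QEMU headers):

    #include <assert.h>
    #include <stdint.h>

    #define XSTATE_BNDREGS  (1u << 3)
    #define XSTATE_BNDCSR   (1u << 4)

    /* Non-zero iff exactly one of the two MPX components is enabled. */
    static int mpx_half_enabled(uint64_t mask)
    {
        return ((mask ^ (mask * (XSTATE_BNDCSR / XSTATE_BNDREGS)))
                & XSTATE_BNDCSR) != 0;
    }

    int main(void)
    {
        assert(!mpx_half_enabled(0));                              /* neither */
        assert(!mpx_half_enabled(XSTATE_BNDREGS | XSTATE_BNDCSR)); /* both */
        assert(mpx_half_enabled(XSTATE_BNDREGS));                  /* regs only */
        assert(mpx_half_enabled(XSTATE_BNDCSR));                   /* csr only */
        return 0;
    }
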
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 3802ed9359..3f60ec6122 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -647,6 +647,7 @@ void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3)
void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
{
X86CPU *cpu = x86_env_get_cpu(env);
+ uint32_t hflags;
#if defined(DEBUG_MMU)
printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
@@ -656,24 +657,29 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
CR4_SMEP_MASK | CR4_SMAP_MASK)) {
tlb_flush(CPU(cpu), 1);
}
+
+ /* Clear bits we're going to recompute. */
+ hflags = env->hflags & ~(HF_OSFXSR_MASK | HF_SMAP_MASK);
+
/* SSE handling */
if (!(env->features[FEAT_1_EDX] & CPUID_SSE)) {
new_cr4 &= ~CR4_OSFXSR_MASK;
}
- env->hflags &= ~HF_OSFXSR_MASK;
if (new_cr4 & CR4_OSFXSR_MASK) {
- env->hflags |= HF_OSFXSR_MASK;
+ hflags |= HF_OSFXSR_MASK;
}
if (!(env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SMAP)) {
new_cr4 &= ~CR4_SMAP_MASK;
}
- env->hflags &= ~HF_SMAP_MASK;
if (new_cr4 & CR4_SMAP_MASK) {
- env->hflags |= HF_SMAP_MASK;
+ hflags |= HF_SMAP_MASK;
}
env->cr[4] = new_cr4;
+ env->hflags = hflags;
+
+ cpu_sync_bndcs_hflags(env);
}
#if defined(CONFIG_USER_ONLY)
diff --git a/target-i386/helper.h b/target-i386/helper.h
index 3a25c3b392..e33451aea9 100644
--- a/target-i386/helper.h
+++ b/target-i386/helper.h
@@ -15,6 +15,14 @@ DEF_HELPER_2(idivl_EAX, void, env, tl)
DEF_HELPER_2(divq_EAX, void, env, tl)
DEF_HELPER_2(idivq_EAX, void, env, tl)
#endif
+DEF_HELPER_FLAGS_2(cr4_testbit, TCG_CALL_NO_WG, void, env, i32)
+
+DEF_HELPER_FLAGS_2(bndck, TCG_CALL_NO_WG, void, env, i32)
+DEF_HELPER_FLAGS_3(bndldx32, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_3(bndldx64, TCG_CALL_NO_WG, i64, env, tl, tl)
+DEF_HELPER_FLAGS_5(bndstx32, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_FLAGS_5(bndstx64, TCG_CALL_NO_WG, void, env, tl, tl, i64, i64)
+DEF_HELPER_1(bnd_jmp, void, env)
DEF_HELPER_2(aam, void, env, int)
DEF_HELPER_2(aad, void, env, int)
@@ -62,8 +70,6 @@ DEF_HELPER_1(cli, void, env)
DEF_HELPER_1(sti, void, env)
DEF_HELPER_1(clac, void, env)
DEF_HELPER_1(stac, void, env)
-DEF_HELPER_1(set_inhibit_irq, void, env)
-DEF_HELPER_1(reset_inhibit_irq, void, env)
DEF_HELPER_3(boundw, void, env, tl, int)
DEF_HELPER_3(boundl, void, env, tl, int)
DEF_HELPER_1(rsm, void, env)
@@ -185,8 +191,13 @@ DEF_HELPER_3(fstenv, void, env, tl, int)
DEF_HELPER_3(fldenv, void, env, tl, int)
DEF_HELPER_3(fsave, void, env, tl, int)
DEF_HELPER_3(frstor, void, env, tl, int)
-DEF_HELPER_3(fxsave, void, env, tl, int)
-DEF_HELPER_3(fxrstor, void, env, tl, int)
+DEF_HELPER_FLAGS_2(fxsave, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_2(fxrstor, TCG_CALL_NO_WG, void, env, tl)
+DEF_HELPER_FLAGS_3(xsave, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xsaveopt, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_3(xrstor, TCG_CALL_NO_WG, void, env, tl, i64)
+DEF_HELPER_FLAGS_2(xgetbv, TCG_CALL_NO_WG, i64, env, i32)
+DEF_HELPER_FLAGS_3(xsetbv, TCG_CALL_NO_WG, void, env, i32, i64)
DEF_HELPER_FLAGS_1(clz, TCG_CALL_NO_RWG_SE, tl, tl)
DEF_HELPER_FLAGS_1(ctz, TCG_CALL_NO_RWG_SE, tl, tl)
diff --git a/target-i386/int_helper.c b/target-i386/int_helper.c
index 9d0d21e786..cf5bbb0481 100644
--- a/target-i386/int_helper.c
+++ b/target-i386/int_helper.c
@@ -470,3 +470,13 @@ target_ulong helper_pext(target_ulong src, target_ulong mask)
#include "shift_helper_template.h"
#undef SHIFT
#endif
+
+/* Test that BIT is enabled in CR4. If not, raise an illegal opcode
+ exception. This reduces the requirements for rare CR4 bits being
+ mapped into HFLAGS. */
+void helper_cr4_testbit(CPUX86State *env, uint32_t bit)
+{
+ if (unlikely((env->cr[4] & bit) == 0)) {
+ raise_exception_ra(env, EXCP06_ILLOP, GETPC());
+ }
+}
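
helper_cr4_testbit is presumably consumed from the translator roughly as below, for instructions gated on a rarely-toggled CR4 bit such as CR4.FSGSBASE; this particular call site is not in the hunks shown, so treat it as a hedged sketch (the function name gen_check_cr4_fsgsbase is made up for illustration):

    /* Hypothetical translator-side use of the new helper. */
    static void gen_check_cr4_fsgsbase(void)
    {
        TCGv_i32 t = tcg_const_i32(CR4_FSGSBASE_MASK);
        gen_helper_cr4_testbit(cpu_env, t);   /* raises #UD if bit is clear */
        tcg_temp_free_i32(t);
    }
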
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 94024bc1b1..7974acb399 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -1855,13 +1855,16 @@ static int kvm_get_sregs(X86CPU *cpu)
HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
- hflags = (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags = env->hflags & HFLAG_COPY_MASK;
+ hflags |= (env->segs[R_SS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
(HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
- hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
- (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->cr[4] & CR4_OSFXSR_MASK) {
+ hflags |= HF_OSFXSR_MASK;
+ }
if (env->efer & MSR_EFER_LMA) {
hflags |= HF_LMA_MASK;
@@ -1882,7 +1885,7 @@ static int kvm_get_sregs(X86CPU *cpu)
env->segs[R_SS].base) != 0) << HF_ADDSEG_SHIFT;
}
}
- env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ env->hflags = hflags;
return 0;
}
@@ -2585,41 +2588,44 @@ int kvm_arch_get_registers(CPUState *cs)
ret = kvm_getput_regs(cpu, 0);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xsave(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_xcrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_sregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_msrs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_mp_state(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_apic(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_vcpu_events(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
ret = kvm_get_debugregs(cpu);
if (ret < 0) {
- return ret;
+ goto out;
}
- return 0;
+ ret = 0;
+ out:
+ cpu_sync_bndcs_hflags(&cpu->env);
+ return ret;
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
diff --git a/target-i386/mem_helper.c b/target-i386/mem_helper.c
index 7de775259d..85e75161bc 100644
--- a/target-i386/mem_helper.c
+++ b/target-i386/mem_helper.c
@@ -112,6 +112,9 @@ void helper_boundw(CPUX86State *env, target_ulong a0, int v)
high = cpu_ldsw_data_ra(env, a0 + 2, GETPC());
v = (int16_t)v;
if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
@@ -123,6 +126,9 @@ void helper_boundl(CPUX86State *env, target_ulong a0, int v)
low = cpu_ldl_data_ra(env, a0, GETPC());
high = cpu_ldl_data_ra(env, a0 + 4, GETPC());
if (v < low || v > high) {
+ if (env->hflags & HF_MPX_EN_MASK) {
+ env->bndcs_regs.sts = 0;
+ }
raise_exception_ra(env, EXCP05_BOUND, GETPC());
}
}
diff --git a/target-i386/misc_helper.c b/target-i386/misc_helper.c
index 460257f6bc..5fbab8fd0c 100644
--- a/target-i386/misc_helper.c
+++ b/target-i386/misc_helper.c
@@ -361,6 +361,12 @@ void helper_wrmsr(CPUX86State *env)
case MSR_IA32_MISC_ENABLE:
env->msr_ia32_misc_enable = val;
break;
+ case MSR_IA32_BNDCFGS:
+ /* FIXME: #GP if reserved bits are set. */
+ /* FIXME: Extend highest implemented bit of linear address. */
+ env->msr_bndcfgs = val;
+ cpu_sync_bndcs_hflags(env);
+ break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
@@ -506,6 +512,9 @@ void helper_rdmsr(CPUX86State *env)
case MSR_IA32_MISC_ENABLE:
val = env->msr_ia32_misc_enable;
break;
+ case MSR_IA32_BNDCFGS:
+ val = env->msr_bndcfgs;
+ break;
default:
if ((uint32_t)env->regs[R_ECX] >= MSR_MC0_CTL
&& (uint32_t)env->regs[R_ECX] < MSR_MC0_CTL +
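
The value written to MSR_IA32_BNDCFGS (and its user-mode twin, BNDCFGU) uses the layout that the BNDCFG_* masks added to cpu.h describe: bit 0 enables MPX for the privilege level, bit 1 is BNDPRESERVE, and the page-aligned upper bits hold the bound-directory base. A small standalone decoding sketch (assuming a 4K TARGET_PAGE_MASK; not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    #define BNDCFG_ENABLE        1ULL
    #define BNDCFG_BNDPRESERVE   2ULL
    #define BNDCFG_BDIR_MASK     (~0xfffULL)   /* TARGET_PAGE_MASK, 4K pages */

    static void dump_bndcfg(uint64_t cfg)
    {
        printf("enable=%d preserve=%d bound-directory base=%#llx\n",
               (int)(cfg & BNDCFG_ENABLE),
               (int)!!(cfg & BNDCFG_BNDPRESERVE),
               (unsigned long long)(cfg & BNDCFG_BDIR_MASK));
    }

    int main(void)
    {
        dump_bndcfg(0x7f0000000000ULL | 3);   /* hypothetical value */
        return 0;
    }
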
diff --git a/target-i386/mpx_helper.c b/target-i386/mpx_helper.c
new file mode 100644
index 0000000000..1bf717af05
--- /dev/null
+++ b/target-i386/mpx_helper.c
@@ -0,0 +1,166 @@
+/*
+ * x86 MPX helpers
+ *
+ * Copyright (c) 2015 Red Hat, Inc.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "cpu.h"
+#include "exec/helper-proto.h"
+#include "exec/cpu_ldst.h"
+
+
+void cpu_sync_bndcs_hflags(CPUX86State *env)
+{
+ uint32_t hflags = env->hflags;
+ uint32_t hflags2 = env->hflags2;
+ uint32_t bndcsr;
+
+ if ((hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ if ((env->cr[4] & CR4_OSXSAVE_MASK)
+ && (env->xcr0 & XSTATE_BNDCSR)
+ && (bndcsr & BNDCFG_ENABLE)) {
+ hflags |= HF_MPX_EN_MASK;
+ } else {
+ hflags &= ~HF_MPX_EN_MASK;
+ }
+
+ if (bndcsr & BNDCFG_BNDPRESERVE) {
+ hflags2 |= HF2_MPX_PR_MASK;
+ } else {
+ hflags2 &= ~HF2_MPX_PR_MASK;
+ }
+
+ env->hflags = hflags;
+ env->hflags2 = hflags2;
+}
+
+void helper_bndck(CPUX86State *env, uint32_t fail)
+{
+ if (unlikely(fail)) {
+ env->bndcs_regs.sts = 1;
+ raise_exception_ra(env, EXCP05_BOUND, GETPC());
+ }
+}
+
+static uint64_t lookup_bte64(CPUX86State *env, uint64_t base, uintptr_t ra)
+{
+ uint64_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract64(base, 20, 28) << 3) + (extract64(bndcsr, 20, 44) << 12);
+ bt = cpu_ldq_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract64(base, 3, 17) << 5) + (bt & ~7);
+}
+
+static uint32_t lookup_bte32(CPUX86State *env, uint32_t base, uintptr_t ra)
+{
+ uint32_t bndcsr, bde, bt;
+
+ if ((env->hflags & HF_CPL_MASK) == 3) {
+ bndcsr = env->bndcs_regs.cfgu;
+ } else {
+ bndcsr = env->msr_bndcfgs;
+ }
+
+ bde = (extract32(base, 12, 20) << 2) + (bndcsr & TARGET_PAGE_MASK);
+ bt = cpu_ldl_data_ra(env, bde, ra);
+ if ((bt & 1) == 0) {
+ env->bndcs_regs.sts = bde | 2;
+ raise_exception_ra(env, EXCP05_BOUND, ra);
+ }
+
+ return (extract32(base, 2, 10) << 4) + (bt & ~3);
+}
+
+uint64_t helper_bndldx64(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte, lb, ub, pt;
+
+ bte = lookup_bte64(env, base, ra);
+ lb = cpu_ldq_data_ra(env, bte, ra);
+ ub = cpu_ldq_data_ra(env, bte + 8, ra);
+ pt = cpu_ldq_data_ra(env, bte + 16, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ env->mmx_t0.MMX_Q(0) = ub;
+ return lb;
+}
+
+uint64_t helper_bndldx32(CPUX86State *env, target_ulong base, target_ulong ptr)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte, lb, ub, pt;
+
+ bte = lookup_bte32(env, base, ra);
+ lb = cpu_ldl_data_ra(env, bte, ra);
+ ub = cpu_ldl_data_ra(env, bte + 4, ra);
+ pt = cpu_ldl_data_ra(env, bte + 8, ra);
+
+ if (pt != ptr) {
+ lb = ub = 0;
+ }
+ return ((uint64_t)ub << 32) | lb;
+}
+
+void helper_bndstx64(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint64_t bte;
+
+ bte = lookup_bte64(env, base, ra);
+ cpu_stq_data_ra(env, bte, lb, ra);
+ cpu_stq_data_ra(env, bte + 8, ub, ra);
+ cpu_stq_data_ra(env, bte + 16, ptr, ra);
+}
+
+void helper_bndstx32(CPUX86State *env, target_ulong base, target_ulong ptr,
+ uint64_t lb, uint64_t ub)
+{
+ uintptr_t ra = GETPC();
+ uint32_t bte;
+
+ bte = lookup_bte32(env, base, ra);
+ cpu_stl_data_ra(env, bte, lb, ra);
+ cpu_stl_data_ra(env, bte + 4, ub, ra);
+ cpu_stl_data_ra(env, bte + 8, ptr, ra);
+}
+
+void helper_bnd_jmp(CPUX86State *env)
+{
+ if (!(env->hflags2 & HF2_MPX_PR_MASK)) {
+ memset(env->bnd_regs, 0, sizeof(env->bnd_regs));
+ env->hflags &= ~HF_MPX_IU_MASK;
+ }
+}
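
The arithmetic in lookup_bte64() above encodes MPX's two-level table: pointer bits [47:20] select an 8-byte bound-directory entry, and bits [19:3] select a 32-byte bound-table entry holding the lower bound, upper bound, the stored pointer, and a reserved slot. A standalone sketch of the same walk, with hypothetical base and pointer values (the loads from guest memory are stubbed out):

    #include <stdint.h>
    #include <stdio.h>

    /* Same semantics as QEMU's extract64(). */
    static inline uint64_t ext64(uint64_t v, int pos, int len)
    {
        return (v >> pos) & ((1ull << len) - 1);
    }

    int main(void)
    {
        uint64_t bndcfg = 0x7f0000000000ULL | 1;   /* hypothetical base + EN */
        uint64_t ptr    = 0x00007fffdeadbeefULL;   /* hypothetical pointer */

        uint64_t bd_base = ext64(bndcfg, 20, 44) << 12;    /* page aligned */
        uint64_t bde     = (ext64(ptr, 20, 28) << 3) + bd_base;
        /* The directory entry holds the table base, bit 0 = valid; here a
           made-up value stands in for cpu_ldq_data_ra(env, bde) & ~7.  */
        uint64_t bt_base = 0x7f1200000000ULL;
        uint64_t bte     = (ext64(ptr, 3, 17) << 5) + bt_base;

        printf("directory entry at %#llx, table entry at %#llx\n",
               (unsigned long long)bde, (unsigned long long)bte);
        return 0;
    }
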
diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c
index e7bb5be521..4dd6a2c544 100644
--- a/target-i386/smm_helper.c
+++ b/target-i386/smm_helper.c
@@ -99,6 +99,10 @@ void do_smm_enter(X86CPU *cpu)
x86_stl_phys(cs, sm_state + 0x7e94, env->tr.limit);
x86_stw_phys(cs, sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
+ /* ??? Vol 1, 16.5.6 Intel MPX and SMM says that IA32_BNDCFGS
+ is saved at offset 7ED0. Vol 3, 34.4.1.1, Table 32-2, has
+ 7EA0-7ED7 as "reserved". What's this, and what's really
+ supposed to happen? */
x86_stq_phys(cs, sm_state + 0x7ed0, env->efer);
x86_stq_phys(cs, sm_state + 0x7ff8, env->regs[R_EAX]);
diff --git a/target-i386/translate.c b/target-i386/translate.c
index c8e2799269..9171929fc7 100644
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -56,6 +56,12 @@
# define clztl clz32
#endif
+/* For a switch indexed by MODRM, match all memory operands for a given OP. */
+#define CASE_MEM_OP(OP) \
+ case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
+ case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
+ case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7
+
//#define MACRO_TEST 1
/* global register indexes */
@@ -65,6 +71,8 @@ static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2, cpu_cc_srcT;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
+static TCGv_i64 cpu_bndl[4];
+static TCGv_i64 cpu_bndu[4];
/* local temps */
static TCGv cpu_T0, cpu_T1;
/* local register indexes (only used inside old micro ops) */
@@ -121,6 +129,7 @@ typedef struct DisasContext {
int cpuid_ext2_features;
int cpuid_ext3_features;
int cpuid_7_0_ebx_features;
+ int cpuid_xsave_features;
} DisasContext;
static void gen_eob(DisasContext *s);
@@ -1800,37 +1809,52 @@ static void gen_shifti(DisasContext *s1, int op, TCGMemOp ot, int d, int c)
}
}
-static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+/* Decompose an address. */
+
+typedef struct AddressParts {
+ int def_seg;
+ int base;
+ int index;
+ int scale;
+ target_long disp;
+} AddressParts;
+
+static AddressParts gen_lea_modrm_0(CPUX86State *env, DisasContext *s,
+ int modrm)
{
+ int def_seg, base, index, scale, mod, rm;
target_long disp;
- int havesib, base, index, scale;
- int mod, rm, code, def_seg, ovr_seg;
- TCGv sum;
+ bool havesib;
def_seg = R_DS;
- ovr_seg = s->override;
+ index = -1;
+ scale = 0;
+ disp = 0;
+
mod = (modrm >> 6) & 3;
rm = modrm & 7;
+ base = rm | REX_B(s);
+
+ if (mod == 3) {
+ /* Normally filtered out earlier, but including this path
+ simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
+ goto done;
+ }
switch (s->aflag) {
case MO_64:
case MO_32:
havesib = 0;
- base = rm;
- index = -1;
- scale = 0;
-
- if (base == 4) {
- havesib = 1;
- code = cpu_ldub_code(env, s->pc++);
+ if (rm == 4) {
+ int code = cpu_ldub_code(env, s->pc++);
scale = (code >> 6) & 3;
index = ((code >> 3) & 7) | REX_X(s);
if (index == 4) {
index = -1; /* no index */
}
- base = (code & 7);
+ base = (code & 7) | REX_B(s);
+ havesib = 1;
}
- base |= REX_B(s);
switch (mod) {
case 0:
@@ -1839,10 +1863,9 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
disp = (int32_t)cpu_ldl_code(env, s->pc);
s->pc += 4;
if (CODE64(s) && !havesib) {
+ base = -2;
disp += s->pc + s->rip_offset;
}
- } else {
- disp = 0;
}
break;
case 1:
@@ -1859,46 +1882,19 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
if (base == R_ESP && s->popl_esp_hack) {
disp += s->popl_esp_hack;
}
-
- /* Compute the address, with a minimum number of TCG ops. */
- TCGV_UNUSED(sum);
- if (index >= 0) {
- if (scale == 0) {
- sum = cpu_regs[index];
- } else {
- tcg_gen_shli_tl(cpu_A0, cpu_regs[index], scale);
- sum = cpu_A0;
- }
- if (base >= 0) {
- tcg_gen_add_tl(cpu_A0, sum, cpu_regs[base]);
- sum = cpu_A0;
- }
- } else if (base >= 0) {
- sum = cpu_regs[base];
- }
- if (TCGV_IS_UNUSED(sum)) {
- tcg_gen_movi_tl(cpu_A0, disp);
- sum = cpu_A0;
- } else if (disp != 0) {
- tcg_gen_addi_tl(cpu_A0, sum, disp);
- sum = cpu_A0;
- }
-
if (base == R_EBP || base == R_ESP) {
def_seg = R_SS;
}
break;
case MO_16:
- sum = cpu_A0;
if (mod == 0) {
if (rm == 6) {
+ base = -1;
disp = cpu_lduw_code(env, s->pc);
s->pc += 2;
- tcg_gen_movi_tl(cpu_A0, disp);
break;
}
- disp = 0;
} else if (mod == 1) {
disp = (int8_t)cpu_ldub_code(env, s->pc++);
} else {
@@ -1908,102 +1904,104 @@ static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
switch (rm) {
case 0:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_ESI]);
+ base = R_EBX;
+ index = R_ESI;
break;
case 1:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBX], cpu_regs[R_EDI]);
+ base = R_EBX;
+ index = R_EDI;
break;
case 2:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_ESI]);
+ base = R_EBP;
+ index = R_ESI;
def_seg = R_SS;
break;
case 3:
- tcg_gen_add_tl(cpu_A0, cpu_regs[R_EBP], cpu_regs[R_EDI]);
+ base = R_EBP;
+ index = R_EDI;
def_seg = R_SS;
break;
case 4:
- sum = cpu_regs[R_ESI];
+ base = R_ESI;
break;
case 5:
- sum = cpu_regs[R_EDI];
+ base = R_EDI;
break;
case 6:
- sum = cpu_regs[R_EBP];
+ base = R_EBP;
def_seg = R_SS;
break;
default:
case 7:
- sum = cpu_regs[R_EBX];
+ base = R_EBX;
break;
}
- if (disp != 0) {
- tcg_gen_addi_tl(cpu_A0, sum, disp);
- sum = cpu_A0;
- }
break;
default:
tcg_abort();
}
- gen_lea_v_seg(s, s->aflag, sum, def_seg, ovr_seg);
+ done:
+ return (AddressParts){ def_seg, base, index, scale, disp };
}
-static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+/* Compute the address, with a minimum number of TCG ops. */
+static TCGv gen_lea_modrm_1(AddressParts a)
{
- int mod, rm, base, code;
+ TCGv ea;
- mod = (modrm >> 6) & 3;
- if (mod == 3)
- return;
- rm = modrm & 7;
+ TCGV_UNUSED(ea);
+ if (a.index >= 0) {
+ if (a.scale == 0) {
+ ea = cpu_regs[a.index];
+ } else {
+ tcg_gen_shli_tl(cpu_A0, cpu_regs[a.index], a.scale);
+ ea = cpu_A0;
+ }
+ if (a.base >= 0) {
+ tcg_gen_add_tl(cpu_A0, ea, cpu_regs[a.base]);
+ ea = cpu_A0;
+ }
+ } else if (a.base >= 0) {
+ ea = cpu_regs[a.base];
+ }
+ if (TCGV_IS_UNUSED(ea)) {
+ tcg_gen_movi_tl(cpu_A0, a.disp);
+ ea = cpu_A0;
+ } else if (a.disp != 0) {
+ tcg_gen_addi_tl(cpu_A0, ea, a.disp);
+ ea = cpu_A0;
+ }
- switch (s->aflag) {
- case MO_64:
- case MO_32:
- base = rm;
+ return ea;
+}
- if (base == 4) {
- code = cpu_ldub_code(env, s->pc++);
- base = (code & 7);
- }
+static void gen_lea_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
+ gen_lea_v_seg(s, s->aflag, ea, a.def_seg, s->override);
+}
- switch (mod) {
- case 0:
- if (base == 5) {
- s->pc += 4;
- }
- break;
- case 1:
- s->pc++;
- break;
- default:
- case 2:
- s->pc += 4;
- break;
- }
- break;
+static void gen_nop_modrm(CPUX86State *env, DisasContext *s, int modrm)
+{
+ (void)gen_lea_modrm_0(env, s, modrm);
+}
- case MO_16:
- switch (mod) {
- case 0:
- if (rm == 6) {
- s->pc += 2;
- }
- break;
- case 1:
- s->pc++;
- break;
- default:
- case 2:
- s->pc += 2;
- break;
- }
- break;
+/* Used for BNDCL, BNDCU, BNDCN. */
+static void gen_bndck(CPUX86State *env, DisasContext *s, int modrm,
+ TCGCond cond, TCGv_i64 bndv)
+{
+ TCGv ea = gen_lea_modrm_1(gen_lea_modrm_0(env, s, modrm));
- default:
- tcg_abort();
+ tcg_gen_extu_tl_i64(cpu_tmp1_i64, ea);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_tmp1_i64, cpu_tmp1_i64);
+    }
+ tcg_gen_setcond_i64(cond, cpu_tmp1_i64, cpu_tmp1_i64, bndv);
+ tcg_gen_extrl_i64_i32(cpu_tmp2_i32, cpu_tmp1_i64);
+ gen_helper_bndck(cpu_env, cpu_tmp2_i32);
}
/* used for LEA and MOV AX, mem */
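
The payoff of splitting the decode into gen_lea_modrm_0()/gen_lea_modrm_1() is that LEA, the multi-byte nop decoder, and the BND instructions all share one modrm walk and only emit address arithmetic when they need it. A standalone sketch of what the 32-bit decode yields (a hypothetical cut-down decoder: no REX, no 16-bit paths, and it reads a byte array rather than the real guest instruction stream):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { R_EAX, R_ECX, R_EDX, R_EBX, R_ESP, R_EBP, R_ESI, R_EDI };
    enum { SEG_DS, SEG_SS };            /* stand-ins for R_DS / R_SS */

    typedef struct AddressParts {
        int def_seg, base, index, scale;
        int32_t disp;
    } AddressParts;

    static int32_t read_s32(const uint8_t *p)
    {
        int32_t v;
        memcpy(&v, p, sizeof(v));       /* avoid unaligned dereference */
        return v;
    }

    static AddressParts decode32(const uint8_t *p)
    {
        AddressParts a = { SEG_DS, -1, -1, 0, 0 };
        int modrm = *p++, mod = modrm >> 6, rm = modrm & 7;

        a.base = rm;
        if (rm == 4) {                  /* SIB byte follows */
            int sib = *p++;
            a.scale = sib >> 6;
            a.index = (sib >> 3) & 7;
            if (a.index == 4) {
                a.index = -1;           /* ESP cannot be an index */
            }
            a.base = sib & 7;
        }
        if (mod == 0 && a.base == 5) {
            a.base = -1;                /* disp32 with no base register */
            a.disp = read_s32(p);
        } else if (mod == 1) {
            a.disp = (int8_t)*p;        /* sign-extended disp8 */
        } else if (mod == 2) {
            a.disp = read_s32(p);
        }
        if (a.base == R_EBP || a.base == R_ESP) {
            a.def_seg = SEG_SS;         /* stack-relative defaults to SS */
        }
        return a;
    }

    int main(void)
    {
        /* modrm 0x44 (mod=1, rm=4), sib 0x98 (ss=2 -> x4, index=EBX,
           base=EAX), disp8 0x10  ==>  DS:[eax + ebx*4 + 0x10] */
        const uint8_t bytes[] = { 0x44, 0x98, 0x10 };
        AddressParts a = decode32(bytes);
        printf("seg=%s base=%d index=%d scale=%d disp=%#x\n",
               a.def_seg == SEG_SS ? "SS" : "DS",
               a.base, a.index, a.scale, a.disp);
        return 0;
    }
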
@@ -2384,14 +2382,49 @@ static void gen_debug(DisasContext *s, target_ulong cur_eip)
s->is_jmp = DISAS_TB_JUMP;
}
+static void gen_set_hflag(DisasContext *s, uint32_t mask)
+{
+ if ((s->flags & mask) == 0) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_ori_i32(t, t, mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags |= mask;
+ }
+}
+
+static void gen_reset_hflag(DisasContext *s, uint32_t mask)
+{
+ if (s->flags & mask) {
+ TCGv_i32 t = tcg_temp_new_i32();
+ tcg_gen_ld_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_gen_andi_i32(t, t, ~mask);
+ tcg_gen_st_i32(t, cpu_env, offsetof(CPUX86State, hflags));
+ tcg_temp_free_i32(t);
+ s->flags &= ~mask;
+ }
+}
+
+/* Clear BND registers during legacy branches. */
+static void gen_bnd_jmp(DisasContext *s)
+{
+ /* Do nothing if BND prefix present, MPX is disabled, or if the
+ BNDREGs are known to be in INIT state already. The helper
+ itself will check BNDPRESERVE at runtime. */
+ if ((s->prefix & PREFIX_REPNZ) == 0
+        && (s->flags & HF_MPX_EN_MASK) != 0
+        && (s->flags & HF_MPX_IU_MASK) != 0) {
+ gen_helper_bnd_jmp(cpu_env);
+ }
+}
+
/* generate a generic end of block. Trace exception is also generated
if needed */
static void gen_eob(DisasContext *s)
{
gen_update_cc_op(s);
- if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
- gen_helper_reset_inhibit_irq(cpu_env);
- }
+ gen_reset_hflag(s, HF_INHIBIT_IRQ_MASK);
if (s->tb->flags & HF_RF_MASK) {
gen_helper_reset_rf(cpu_env);
}
@@ -4775,6 +4808,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_movi_tl(cpu_T1, next_eip);
gen_push_v(s, cpu_T1);
gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 3: /* lcall Ev */
@@ -4800,6 +4834,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
tcg_gen_ext16u_tl(cpu_T0, cpu_T0);
}
gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 5: /* ljmp Ev */
@@ -5140,8 +5175,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* if reg == SS, inhibit interrupts/trace. */
/* If several instructions disable interrupts, only the
_first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
s->tf = 0;
}
if (s->is_jmp) {
@@ -5208,8 +5242,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* if reg == SS, inhibit interrupts/trace */
/* If several instructions disable interrupts, only the
_first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
s->tf = 0;
}
if (s->is_jmp) {
@@ -5275,19 +5308,16 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
case 0x8d: /* lea */
- ot = dflag;
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
if (mod == 3)
goto illegal_op;
reg = ((modrm >> 3) & 7) | rex_r;
- /* we must ensure that no segment is added */
- s->override = -1;
- val = s->addseg;
- s->addseg = 0;
- gen_lea_modrm(env, s, modrm);
- s->addseg = val;
- gen_op_mov_reg_v(ot, reg, cpu_A0);
+ {
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ TCGv ea = gen_lea_modrm_1(a);
+ gen_op_mov_reg_v(dflag, reg, ea);
+ }
break;
case 0xa0: /* mov EAX, Ov */
@@ -6186,6 +6216,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_stack_update(s, val + (1 << ot));
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xc3: /* ret */
@@ -6193,6 +6224,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
gen_pop_update(s, ot);
/* Note that gen_pop_T0 uses a zero-extending load. */
gen_op_jmp_v(cpu_T0);
+ gen_bnd_jmp(s);
gen_eob(s);
break;
case 0xca: /* lret im */
@@ -6259,6 +6291,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
tcg_gen_movi_tl(cpu_T0, next_eip);
gen_push_v(s, cpu_T0);
+ gen_bnd_jmp(s);
gen_jmp(s, tval);
}
break;
@@ -6288,6 +6321,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
} else if (!CODE64(s)) {
tval &= 0xffffffff;
}
+ gen_bnd_jmp(s);
gen_jmp(s, tval);
break;
case 0xea: /* ljmp im */
@@ -6327,6 +6361,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
if (dflag == MO_16) {
tval &= 0xffff;
}
+ gen_bnd_jmp(s);
gen_jcc(s, b, tval, next_eip);
break;
@@ -6745,8 +6780,7 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
/* interruptions are enabled only the first insn after sti */
/* If several instructions disable interrupts, only the
_first_ does it */
- if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
- gen_helper_set_inhibit_irq(cpu_env);
+ gen_set_hflag(s, HF_INHIBIT_IRQ_MASK);
/* give a chance to handle pending irqs */
gen_jmp_im(s->pc - s->cs_base);
gen_eob(s);
@@ -7000,15 +7034,11 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
goto illegal_op;
}
break;
+
case 0x101:
modrm = cpu_ldub_code(env, s->pc++);
- mod = (modrm >> 6) & 3;
- op = (modrm >> 3) & 7;
- rm = modrm & 7;
- switch(op) {
- case 0: /* sgdt */
- if (mod == 3)
- goto illegal_op;
+ switch (modrm) {
+ CASE_MEM_OP(0): /* sgdt */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
gen_lea_modrm(env, s, modrm);
tcg_gen_ld32u_tl(cpu_T0,
@@ -7021,178 +7051,230 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
}
gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
break;
- case 1:
- if (mod == 3) {
- switch (rm) {
- case 0: /* monitor */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
- s->cpl != 0)
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
- gen_extu(s->aflag, cpu_A0);
- gen_add_A0_ds_seg(s);
- gen_helper_monitor(cpu_env, cpu_A0);
- break;
- case 1: /* mwait */
- if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
- s->cpl != 0)
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
- gen_eob(s);
- break;
- case 2: /* clac */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
- s->cpl != 0) {
- goto illegal_op;
- }
- gen_helper_clac(cpu_env);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
- break;
- case 3: /* stac */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP) ||
- s->cpl != 0) {
- goto illegal_op;
- }
- gen_helper_stac(cpu_env);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
- break;
- default:
- goto illegal_op;
- }
- } else { /* sidt */
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
- gen_lea_modrm(env, s, modrm);
- tcg_gen_ld32u_tl(cpu_T0,
- cpu_env, offsetof(CPUX86State, idt.limit));
- gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
- gen_add_A0_im(s, 2);
- tcg_gen_ld_tl(cpu_T0,
- cpu_env, offsetof(CPUX86State, idt.base));
- if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
- }
- gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+
+ case 0xc8: /* monitor */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
}
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ tcg_gen_mov_tl(cpu_A0, cpu_regs[R_EAX]);
+ gen_extu(s->aflag, cpu_A0);
+ gen_add_A0_ds_seg(s);
+ gen_helper_monitor(cpu_env, cpu_A0);
break;
- case 2: /* lgdt */
- case 3: /* lidt */
- if (mod == 3) {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- switch(rm) {
- case 0: /* VMRUN */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
- tcg_const_i32(s->pc - pc_start));
- tcg_gen_exit_tb(0);
- s->is_jmp = DISAS_TB_JUMP;
- }
- break;
- case 1: /* VMMCALL */
- if (!(s->flags & HF_SVME_MASK))
- goto illegal_op;
- gen_helper_vmmcall(cpu_env);
- break;
- case 2: /* VMLOAD */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
- }
- break;
- case 3: /* VMSAVE */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
- }
- break;
- case 4: /* STGI */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
- !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_stgi(cpu_env);
- }
- break;
- case 5: /* CLGI */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_clgi(cpu_env);
- }
- break;
- case 6: /* SKINIT */
- if ((!(s->flags & HF_SVME_MASK) &&
- !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
- !s->pe)
- goto illegal_op;
- gen_helper_skinit(cpu_env);
- break;
- case 7: /* INVLPGA */
- if (!(s->flags & HF_SVME_MASK) || !s->pe)
- goto illegal_op;
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- break;
- } else {
- gen_helper_invlpga(cpu_env,
- tcg_const_i32(s->aflag - 1));
- }
- break;
- default:
- goto illegal_op;
- }
- } else if (s->cpl != 0) {
+
+ case 0xc9: /* mwait */
+ if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_mwait(cpu_env, tcg_const_i32(s->pc - pc_start));
+ gen_eob(s);
+ break;
+
+ case 0xca: /* clac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_clac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xcb: /* stac */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
+ || s->cpl != 0) {
+ goto illegal_op;
+ }
+ gen_helper_stac(cpu_env);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ CASE_MEM_OP(1): /* sidt */
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.limit));
+ gen_op_st_v(s, MO_16, cpu_T0, cpu_A0);
+ gen_add_A0_im(s, 2);
+ tcg_gen_ld_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ gen_op_st_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ break;
+
+ case 0xd0: /* xgetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xgetbv(cpu_tmp1_i64, cpu_env, cpu_tmp2_i32);
+ tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], cpu_tmp1_i64);
+ break;
+
+ case 0xd1: /* xsetbv */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start,
- op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
- gen_lea_modrm(env, s, modrm);
- gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
- gen_add_A0_im(s, 2);
- gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
- if (dflag == MO_16) {
- tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
- }
- if (op == 2) {
- tcg_gen_st_tl(cpu_T0, cpu_env,
- offsetof(CPUX86State, gdt.base));
- tcg_gen_st32_tl(cpu_T1, cpu_env,
- offsetof(CPUX86State, gdt.limit));
- } else {
- tcg_gen_st_tl(cpu_T0, cpu_env,
- offsetof(CPUX86State, idt.base));
- tcg_gen_st32_tl(cpu_T1, cpu_env,
- offsetof(CPUX86State, idt.limit));
- }
+ break;
+ }
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_regs[R_ECX]);
+ gen_helper_xsetbv(cpu_env, cpu_tmp2_i32, cpu_tmp1_i64);
+ /* End TB because translation flags may change. */
+        gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xd8: /* VMRUN */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmrun(cpu_env, tcg_const_i32(s->aflag - 1),
+ tcg_const_i32(s->pc - pc_start));
+ tcg_gen_exit_tb(0);
+ s->is_jmp = DISAS_TB_JUMP;
+ break;
+
+ case 0xd9: /* VMMCALL */
+ if (!(s->flags & HF_SVME_MASK)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmmcall(cpu_env);
+ break;
+
+ case 0xda: /* VMLOAD */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmload(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ case 0xdb: /* VMSAVE */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+        }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_vmsave(cpu_env, tcg_const_i32(s->aflag - 1));
break;
- case 4: /* smsw */
+
+ case 0xdc: /* STGI */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_stgi(cpu_env);
+ break;
+
+ case 0xdd: /* CLGI */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_clgi(cpu_env);
+ break;
+
+ case 0xde: /* SKINIT */
+ if ((!(s->flags & HF_SVME_MASK)
+ && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
+ || !s->pe) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_skinit(cpu_env);
+ break;
+
+ case 0xdf: /* INVLPGA */
+ if (!(s->flags & HF_SVME_MASK) || !s->pe) {
+ goto illegal_op;
+ }
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_helper_invlpga(cpu_env, tcg_const_i32(s->aflag - 1));
+ break;
+
+ CASE_MEM_OP(2): /* lgdt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, gdt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, gdt.limit));
+ break;
+
+ CASE_MEM_OP(3): /* lidt */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_WRITE);
+ gen_lea_modrm(env, s, modrm);
+ gen_op_ld_v(s, MO_16, cpu_T1, cpu_A0);
+ gen_add_A0_im(s, 2);
+ gen_op_ld_v(s, CODE64(s) + MO_32, cpu_T0, cpu_A0);
+ if (dflag == MO_16) {
+ tcg_gen_andi_tl(cpu_T0, cpu_T0, 0xffffff);
+ }
+ tcg_gen_st_tl(cpu_T0, cpu_env, offsetof(CPUX86State, idt.base));
+ tcg_gen_st32_tl(cpu_T1, cpu_env, offsetof(CPUX86State, idt.limit));
+ break;
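/* Editor's sketch of the memory operand decoded by the two loads in
 * lgdt/lidt above: a pseudo-descriptor with a 16-bit limit followed at
 * offset 2 by the base -- 32-bit, or 64-bit in long mode, hence the
 * CODE64(s) + MO_32 memop. Illustrative layout only. */
struct pseudo_descriptor_sketch {
    uint16_t limit;         /* first load, MO_16 */
    target_ulong base;      /* second load; masked to 24 bits when the
                               operand size is 16 */
} QEMU_PACKED;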
+
+ CASE_MEM_OP(4): /* smsw */
gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
#if defined TARGET_X86_64 && defined HOST_WORDS_BIGENDIAN
tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, cr[0]) + 4);
@@ -7201,70 +7283,70 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
#endif
gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 1);
break;
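/* Editor's note: the HOST_WORDS_BIGENDIAN branch above exists because
 * a 32-bit load of the low half of the 64-bit cr[0] must use offset +4
 * on big-endian hosts. A hypothetical helper stating the same rule: */
static inline size_t low32_of_u64(size_t offset_of_u64)
{
#ifdef HOST_WORDS_BIGENDIAN
    return offset_of_u64 + 4;   /* low word is stored second on BE */
#else
    return offset_of_u64;
#endif
}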
- case 6: /* lmsw */
+
+ CASE_MEM_OP(6): /* lmsw */
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
- gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
- gen_helper_lmsw(cpu_env, cpu_T0);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
+ break;
}
+ gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
+ gen_ldst_modrm(env, s, modrm, MO_16, OR_TMP0, 0);
+ gen_helper_lmsw(cpu_env, cpu_T0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
break;
- case 7:
- if (mod != 3) { /* invlpg */
+
+ CASE_MEM_OP(7): /* invlpg */
+ if (s->cpl != 0) {
+ gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
+ break;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ gen_lea_modrm(env, s, modrm);
+ gen_helper_invlpg(cpu_env, cpu_A0);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
+ break;
+
+ case 0xf8: /* swapgs */
+#ifdef TARGET_X86_64
+ if (CODE64(s)) {
if (s->cpl != 0) {
gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
} else {
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- gen_lea_modrm(env, s, modrm);
- gen_helper_invlpg(cpu_env, cpu_A0);
- gen_jmp_im(s->pc - s->cs_base);
- gen_eob(s);
+ tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
+ tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
+ tcg_gen_st_tl(cpu_T0, cpu_env,
+ offsetof(CPUX86State, kernelgsbase));
}
- } else {
- switch (rm) {
- case 0: /* swapgs */
-#ifdef TARGET_X86_64
- if (CODE64(s)) {
- if (s->cpl != 0) {
- gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
- } else {
- tcg_gen_mov_tl(cpu_T0, cpu_seg_base[R_GS]);
- tcg_gen_ld_tl(cpu_seg_base[R_GS], cpu_env,
- offsetof(CPUX86State, kernelgsbase));
- tcg_gen_st_tl(cpu_T0, cpu_env,
- offsetof(CPUX86State, kernelgsbase));
- }
- break;
- }
+ break;
+ }
#endif
- goto illegal_op;
- case 1: /* rdtscp */
- if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP))
- goto illegal_op;
- gen_update_cc_op(s);
- gen_jmp_im(pc_start - s->cs_base);
- if (s->tb->cflags & CF_USE_ICOUNT) {
- gen_io_start();
- }
- gen_helper_rdtscp(cpu_env);
- if (s->tb->cflags & CF_USE_ICOUNT) {
- gen_io_end();
- gen_jmp(s, s->pc - s->cs_base);
- }
- break;
- default:
- goto illegal_op;
- }
+ goto illegal_op;
+
+ case 0xf9: /* rdtscp */
+ if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
+ goto illegal_op;
+ }
+ gen_update_cc_op(s);
+ gen_jmp_im(pc_start - s->cs_base);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_start();
+ }
+ gen_helper_rdtscp(cpu_env);
+ if (s->tb->cflags & CF_USE_ICOUNT) {
+ gen_io_end();
+ gen_jmp(s, s->pc - s->cs_base);
}
break;
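/* Editor's note: RDTSCP, emulated by the helper above, returns the TSC
 * in EDX:EAX and IA32_TSC_AUX in ECX. A hedged user-space sketch: */
#include <stdint.h>

static inline uint64_t rdtscp_sketch(uint32_t *aux)
{
    uint32_t lo, hi;
    __asm__ volatile("rdtscp" : "=a"(lo), "=d"(hi), "=c"(*aux));
    return ((uint64_t)hi << 32) | lo;
}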
+
default:
goto illegal_op;
}
break;
+
case 0x108: /* invd */
case 0x109: /* wbinvd */
if (s->cpl != 0) {
@@ -7393,7 +7475,199 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
}
break;
- case 0x119 ... 0x11f: /* nop (multi byte) */
+    case 0x11a: /* bndcl, bndcu, bndmov (load), bndldx */
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (prefixes & PREFIX_REPZ) {
+ /* bndcl */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcu */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ TCGv_i64 notu = tcg_temp_new_i64();
+ tcg_gen_not_i64(notu, cpu_bndu[reg]);
+ gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
+ tcg_temp_free_i64(notu);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- from reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
+ tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_ld_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_ld_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ } else if (mod != 3) {
+ /* bndldx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndldx64(cpu_bndl[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ld_i64(cpu_bndu[reg], cpu_env,
+ offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
+ } else {
+ gen_helper_bndldx32(cpu_bndu[reg], cpu_env, cpu_A0, cpu_T0);
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
+ tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
+ }
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
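/* Editor's sketch of the BNDMOV memory image read above: two adjacent
 * little-endian fields, 2 x 8 bytes in 64-bit mode (MO_LEQ) or
 * 2 x 4 bytes otherwise (MO_LEUL). Illustration only. */
typedef struct {
    uint64_t lb;   /* lower bound, as held in cpu_bndl[reg] */
    uint64_t ub;   /* upper-bound field, as held in cpu_bndu[reg];
                      BNDMK writes it one's-complemented and BNDMOV
                      copies it verbatim */
} BNDImageSketch;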
+    case 0x11b: /* bndcn, bndmov (store), bndmk, bndstx */
+ modrm = cpu_ldub_code(env, s->pc++);
+ if (s->flags & HF_MPX_EN_MASK) {
+ mod = (modrm >> 6) & 3;
+ reg = ((modrm >> 3) & 7) | rex_r;
+ if (mod != 3 && (prefixes & PREFIX_REPZ)) {
+ /* bndmk */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (a.base >= 0) {
+ tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
+ }
+ } else if (a.base == -1) {
+                    /* no base register: lower bound is 0 */
+ tcg_gen_movi_i64(cpu_bndl[reg], 0);
+ } else {
+ /* rip-relative generates #ud */
+ goto illegal_op;
+ }
+ tcg_gen_not_tl(cpu_A0, gen_lea_modrm_1(a));
+ if (!CODE64(s)) {
+ tcg_gen_ext32u_tl(cpu_A0, cpu_A0);
+ }
+ tcg_gen_extu_tl_i64(cpu_bndu[reg], cpu_A0);
+ /* bnd registers are now in-use */
+ gen_set_hflag(s, HF_MPX_IU_MASK);
+ break;
+ } else if (prefixes & PREFIX_REPNZ) {
+ /* bndcn */
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
+ } else if (prefixes & PREFIX_DATA) {
+ /* bndmov -- to reg/mem */
+ if (reg >= 4 || s->aflag == MO_16) {
+ goto illegal_op;
+ }
+ if (mod == 3) {
+ int reg2 = (modrm & 7) | REX_B(s);
+ if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ if (s->flags & HF_MPX_IU_MASK) {
+ tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
+ tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
+ }
+ } else {
+ gen_lea_modrm(env, s, modrm);
+ if (CODE64(s)) {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 8);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEQ);
+ } else {
+ tcg_gen_qemu_st_i64(cpu_bndl[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ tcg_gen_addi_tl(cpu_A0, cpu_A0, 4);
+ tcg_gen_qemu_st_i64(cpu_bndu[reg], cpu_A0,
+ s->mem_index, MO_LEUL);
+ }
+ }
+ } else if (mod != 3) {
+ /* bndstx */
+ AddressParts a = gen_lea_modrm_0(env, s, modrm);
+ if (reg >= 4
+ || (prefixes & PREFIX_LOCK)
+ || s->aflag == MO_16
+ || a.base < -1) {
+ goto illegal_op;
+ }
+ if (a.base >= 0) {
+ tcg_gen_addi_tl(cpu_A0, cpu_regs[a.base], a.disp);
+ } else {
+ tcg_gen_movi_tl(cpu_A0, 0);
+ }
+ gen_lea_v_seg(s, s->aflag, cpu_A0, a.def_seg, s->override);
+ if (a.index >= 0) {
+ tcg_gen_mov_tl(cpu_T0, cpu_regs[a.index]);
+ } else {
+ tcg_gen_movi_tl(cpu_T0, 0);
+ }
+ if (CODE64(s)) {
+ gen_helper_bndstx64(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ } else {
+ gen_helper_bndstx32(cpu_env, cpu_A0, cpu_T0,
+ cpu_bndl[reg], cpu_bndu[reg]);
+ }
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
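/* Editor's note: a software model of the checks the BND instructions
 * above establish -- BNDMK stores the lower bound plain and the upper
 * bound one's-complemented, BNDCL faults below lb, BNDCU above the
 * complemented ub. Hypothetical helper, illustration only. */
static bool mpx_would_fault(uint64_t lb, uint64_t ub_field, uint64_t addr)
{
    return addr < lb            /* BNDCL: #BR if addr < BND.LB  */
        || addr > ~ub_field;    /* BNDCU: #BR if addr > ~BND.UB */
}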
+ case 0x119: case 0x11c ... 0x11f: /* nop (multi byte) */
modrm = cpu_ldub_code(env, s->pc++);
gen_nop_modrm(env, s, modrm);
break;
@@ -7503,96 +7777,189 @@ static target_ulong disas_insn(CPUX86State *env, DisasContext *s,
break;
case 0x1ae:
modrm = cpu_ldub_code(env, s->pc++);
- mod = (modrm >> 6) & 3;
- op = (modrm >> 3) & 7;
- switch(op) {
- case 0: /* fxsave */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
- (s->prefix & PREFIX_LOCK))
+ switch (modrm) {
+ CASE_MEM_OP(0): /* fxsave */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
+ }
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
- gen_helper_fxsave(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+ gen_helper_fxsave(cpu_env, cpu_A0);
break;
- case 1: /* fxrstor */
- if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
- (s->prefix & PREFIX_LOCK))
+
+ CASE_MEM_OP(1): /* fxrstor */
+ if (!(s->cpuid_features & CPUID_FXSR)
+ || (prefixes & PREFIX_LOCK)) {
goto illegal_op;
+ }
if ((s->flags & HF_EM_MASK) || (s->flags & HF_TS_MASK)) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
gen_lea_modrm(env, s, modrm);
- gen_helper_fxrstor(cpu_env, cpu_A0, tcg_const_i32(dflag == MO_64));
+ gen_helper_fxrstor(cpu_env, cpu_A0);
break;
- case 2: /* ldmxcsr */
- case 3: /* stmxcsr */
+
+ CASE_MEM_OP(2): /* ldmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
+ goto illegal_op;
+ }
if (s->flags & HF_TS_MASK) {
gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
break;
}
- if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
- mod == 3)
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0, s->mem_index, MO_LEUL);
+ gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
+ break;
+
+ CASE_MEM_OP(3): /* stmxcsr */
+ if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK)) {
goto illegal_op;
+ }
+ if (s->flags & HF_TS_MASK) {
+ gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
+ break;
+ }
gen_lea_modrm(env, s, modrm);
- if (op == 2) {
- tcg_gen_qemu_ld_i32(cpu_tmp2_i32, cpu_A0,
- s->mem_index, MO_LEUL);
- gen_helper_ldmxcsr(cpu_env, cpu_tmp2_i32);
- } else {
- tcg_gen_ld32u_tl(cpu_T0,
- cpu_env, offsetof(CPUX86State, mxcsr));
- gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ tcg_gen_ld32u_tl(cpu_T0, cpu_env, offsetof(CPUX86State, mxcsr));
+ gen_op_st_v(s, MO_32, cpu_T0, cpu_A0);
+ break;
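/* Editor's usage note for the LDMXCSR/STMXCSR pair decoded above: the
 * equivalent user-level intrinsics from <xmmintrin.h>; 0x8000 is FTZ
 * and 0x0040 is DAZ. */
#include <xmmintrin.h>

static void mxcsr_roundtrip(void)
{
    unsigned int csr = _mm_getcsr();   /* stmxcsr */
    _mm_setcsr(csr | 0x8040);          /* ldmxcsr with FTZ|DAZ set */
}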
+
+ CASE_MEM_OP(4): /* xsave */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
+ goto illegal_op;
}
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsave(cpu_env, cpu_A0, cpu_tmp1_i64);
break;
- case 5: /* lfence */
- if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
+
+ CASE_MEM_OP(5): /* xrstor */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (prefixes & (PREFIX_LOCK | PREFIX_DATA
+ | PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xrstor(cpu_env, cpu_A0, cpu_tmp1_i64);
+ /* XRSTOR is how MPX is enabled, which changes how
+ we translate. Thus we need to end the TB. */
+ gen_update_cc_op(s);
+ gen_jmp_im(s->pc - s->cs_base);
+ gen_eob(s);
break;
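/* Editor's sketch: XSAVE/XRSTOR take a 64-bit requested-feature bitmap
 * in EDX:EAX, rebuilt above with tcg_gen_concat_tl_i64 (bit 0 x87,
 * bit 1 SSE, bit 2 AVX, bits 3-4 the MPX BNDREGS/BNDCSR components): */
static inline uint64_t xsave_rfbm(uint32_t eax, uint32_t edx)
{
    return ((uint64_t)edx << 32) | eax;   /* mirrors the concat above */
}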
- case 6: /* mfence/clwb */
- if (s->prefix & PREFIX_DATA) {
+
+ CASE_MEM_OP(6): /* xsaveopt / clwb */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
/* clwb */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB))
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLWB)) {
goto illegal_op;
+ }
gen_nop_modrm(env, s, modrm);
} else {
- /* mfence */
- if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE2))
+ /* xsaveopt */
+ if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
+ || (s->cpuid_xsave_features & CPUID_XSAVE_XSAVEOPT) == 0
+ || (prefixes & (PREFIX_REPZ | PREFIX_REPNZ))) {
goto illegal_op;
+ }
+ gen_lea_modrm(env, s, modrm);
+ tcg_gen_concat_tl_i64(cpu_tmp1_i64, cpu_regs[R_EAX],
+ cpu_regs[R_EDX]);
+ gen_helper_xsaveopt(cpu_env, cpu_A0, cpu_tmp1_i64);
}
break;
- case 7: /* sfence / clflush / clflushopt / pcommit */
- if ((modrm & 0xc7) == 0xc0) {
- if (s->prefix & PREFIX_DATA) {
- /* pcommit */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT))
- goto illegal_op;
- } else {
- /* sfence */
- /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
- if (!(s->cpuid_features & CPUID_SSE))
- goto illegal_op;
+
+ CASE_MEM_OP(7): /* clflush / clflushopt */
+ if (prefixes & PREFIX_LOCK) {
+ goto illegal_op;
+ }
+ if (prefixes & PREFIX_DATA) {
+ /* clflushopt */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT)) {
+ goto illegal_op;
}
} else {
- if (s->prefix & PREFIX_DATA) {
- /* clflushopt */
- if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_CLFLUSHOPT))
- goto illegal_op;
+ /* clflush */
+ if ((s->prefix & (PREFIX_REPZ | PREFIX_REPNZ))
+ || !(s->cpuid_features & CPUID_CLFLUSH)) {
+ goto illegal_op;
+ }
+ }
+ gen_nop_modrm(env, s, modrm);
+ break;
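/* Editor's note: TCG models clflush/clflushopt as no-ops (the
 * gen_nop_modrm above) because there is no cache to maintain. A hedged
 * user-space equivalent of the plain clflush form: */
static inline void clflush_sketch(const volatile void *p)
{
    __asm__ volatile("clflush (%0)" : : "r"(p) : "memory");
}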
+
+        case 0xc0 ... 0xc7: /* rdfsbase (f3 0f ae /0) */
+        case 0xc8 ... 0xcf: /* rdgsbase (f3 0f ae /1) */
+        case 0xd0 ... 0xd7: /* wrfsbase (f3 0f ae /2) */
+        case 0xd8 ... 0xdf: /* wrgsbase (f3 0f ae /3) */
+ if (CODE64(s)
+ && (prefixes & PREFIX_REPZ)
+ && !(prefixes & PREFIX_LOCK)
+ && (s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_FSGSBASE)) {
+ TCGv base, treg, src, dst;
+
+ /* Preserve hflags bits by testing CR4 at runtime. */
+ tcg_gen_movi_i32(cpu_tmp2_i32, CR4_FSGSBASE_MASK);
+ gen_helper_cr4_testbit(cpu_env, cpu_tmp2_i32);
+
+ base = cpu_seg_base[modrm & 8 ? R_GS : R_FS];
+ treg = cpu_regs[(modrm & 7) | REX_B(s)];
+
+ if (modrm & 0x10) {
+ /* wr*base */
+ dst = base, src = treg;
} else {
- /* clflush */
- if (!(s->cpuid_features & CPUID_CLFLUSH))
- goto illegal_op;
+ /* rd*base */
+ dst = treg, src = base;
}
- gen_lea_modrm(env, s, modrm);
+
+ if (s->dflag == MO_32) {
+ tcg_gen_ext32u_tl(dst, src);
+ } else {
+ tcg_gen_mov_tl(dst, src);
+ }
+ break;
+ }
+ goto illegal_op;
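/* Editor's usage note: once the OS sets CR4.FSGSBASE (tested at run
 * time above), user code can reach these instructions through compiler
 * intrinsics -- assumes GCC/Clang with -mfsgsbase: */
#include <immintrin.h>

static unsigned long long read_gs_base_sketch(void)
{
    return _readgsbase_u64();   /* rdgsbase */
}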
+
+ case 0xf8: /* sfence / pcommit */
+ if (prefixes & PREFIX_DATA) {
+ /* pcommit */
+ if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_PCOMMIT)
+ || (prefixes & PREFIX_LOCK)) {
+ goto illegal_op;
+ }
+ break;
+ }
+ /* fallthru */
+        case 0xf9 ... 0xff: /* sfence */
+            if (!(s->cpuid_features & CPUID_SSE)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
+            break;
+        case 0xe8 ... 0xef: /* lfence */
+        case 0xf0 ... 0xf7: /* mfence */
+            if (!(s->cpuid_features & CPUID_SSE2)
+                || (prefixes & PREFIX_LOCK)) {
+                goto illegal_op;
+            }
break;
+
default:
goto illegal_op;
}
break;
+
case 0x10d: /* 3DNow! prefetch(w) */
modrm = cpu_ldub_code(env, s->pc++);
mod = (modrm >> 6) & 3;
@@ -7699,6 +8066,12 @@ void tcg_x86_init(void)
[R_GS] = "gs_base",
[R_SS] = "ss_base",
};
+ static const char bnd_regl_names[4][8] = {
+ "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
+ };
+ static const char bnd_regu_names[4][8] = {
+ "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
+ };
int i;
cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
@@ -7724,6 +8097,17 @@ void tcg_x86_init(void)
seg_base_names[i]);
}
+ for (i = 0; i < 4; ++i) {
+ cpu_bndl[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].lb),
+ bnd_regl_names[i]);
+ cpu_bndu[i]
+ = tcg_global_mem_new_i64(cpu_env,
+ offsetof(CPUX86State, bnd_regs[i].ub),
+ bnd_regu_names[i]);
+ }
+
helper_lock_init();
}
@@ -7770,6 +8154,7 @@ void gen_intermediate_code(CPUX86State *env, TranslationBlock *tb)
dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
+ dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
#ifdef TARGET_X86_64
dc->lma = (flags >> HF_LMA_SHIFT) & 1;
dc->code64 = (flags >> HF_CS64_SHIFT) & 1;