Diffstat (limited to 'target-i386/helper.c')
-rw-r--r-- | target-i386/helper.c | 150
1 file changed, 121 insertions, 29 deletions
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 8a5da3d7c0..c635667d60 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -443,17 +443,27 @@ void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4)
 #if defined(DEBUG_MMU)
     printf("CR4 update: CR4=%08x\n", (uint32_t)env->cr[4]);
 #endif
-    if ((new_cr4 & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK)) !=
-        (env->cr[4] & (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK))) {
+    if ((new_cr4 ^ env->cr[4]) &
+        (CR4_PGE_MASK | CR4_PAE_MASK | CR4_PSE_MASK |
+         CR4_SMEP_MASK | CR4_SMAP_MASK)) {
         tlb_flush(env, 1);
     }
     /* SSE handling */
-    if (!(env->cpuid_features & CPUID_SSE))
+    if (!(env->cpuid_features & CPUID_SSE)) {
         new_cr4 &= ~CR4_OSFXSR_MASK;
-    if (new_cr4 & CR4_OSFXSR_MASK)
+    }
+    env->hflags &= ~HF_OSFXSR_MASK;
+    if (new_cr4 & CR4_OSFXSR_MASK) {
         env->hflags |= HF_OSFXSR_MASK;
-    else
-        env->hflags &= ~HF_OSFXSR_MASK;
+    }
+
+    if (!(env->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)) {
+        new_cr4 &= ~CR4_SMAP_MASK;
+    }
+    env->hflags &= ~HF_SMAP_MASK;
+    if (new_cr4 & CR4_SMAP_MASK) {
+        env->hflags |= HF_SMAP_MASK;
+    }
 
     env->cr[4] = new_cr4;
 }
@@ -591,17 +601,38 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             /* 2 MB page */
             page_size = 2048 * 1024;
             ptep ^= PG_NX_MASK;
-            if ((ptep & PG_NX_MASK) && is_write1 == 2)
+            if ((ptep & PG_NX_MASK) && is_write1 == 2) {
                 goto do_fault_protect;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            }
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -635,15 +666,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             ptep ^= PG_NX_MASK;
             if ((ptep & PG_NX_MASK) && is_write1 == 2)
                 goto do_fault_protect;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -670,15 +721,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
         /* if PSE bit is set, then we use a 4MB page */
         if ((pde & PG_PSE_MASK) && (env->cr[4] & CR4_PSE_MASK)) {
            page_size = 4096 * 1024;
-            if (is_user) {
-                if (!(pde & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(pde & PG_USER_MASK)) {
                    goto do_fault_protect;
-                if (is_write && !(pde & PG_RW_MASK))
+                }
+                if (is_write && !(pde & PG_RW_MASK)) {
                    goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (pde & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (pde & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(pde & PG_RW_MASK))
+                    is_write && !(pde & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pde & PG_DIRTY_MASK);
             if (!(pde & PG_ACCESSED_MASK) || is_dirty) {
@@ -707,15 +778,35 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
             }
             /* combine pde and pte user and rw protections */
             ptep = pte & pde;
-            if (is_user) {
-                if (!(ptep & PG_USER_MASK))
+            switch (mmu_idx) {
+            case MMU_USER_IDX:
+                if (!(ptep & PG_USER_MASK)) {
                     goto do_fault_protect;
-                if (is_write && !(ptep & PG_RW_MASK))
+                }
+                if (is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
-            } else {
+                }
+                break;
+
+            case MMU_KERNEL_IDX:
+                if (is_write1 != 2 && (env->cr[4] & CR4_SMAP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
+                /* fall through */
+            case MMU_KSMAP_IDX:
+                if (is_write1 == 2 && (env->cr[4] & CR4_SMEP_MASK) &&
+                    (ptep & PG_USER_MASK)) {
+                    goto do_fault_protect;
+                }
                 if ((env->cr[0] & CR0_WP_MASK) &&
-                    is_write && !(ptep & PG_RW_MASK))
+                    is_write && !(ptep & PG_RW_MASK)) {
                     goto do_fault_protect;
+                }
+                break;
+
+            default: /* cannot happen */
+                break;
             }
             is_dirty = is_write && !(pte & PG_DIRTY_MASK);
             if (!(pte & PG_ACCESSED_MASK) || is_dirty) {
@@ -762,8 +853,9 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
     if (is_user)
         error_code |= PG_ERROR_U_MASK;
     if (is_write1 == 2 &&
-        (env->efer & MSR_EFER_NXE) &&
-        (env->cr[4] & CR4_PAE_MASK))
+        (((env->efer & MSR_EFER_NXE) &&
+          (env->cr[4] & CR4_PAE_MASK)) ||
+         (env->cr[4] & CR4_SMEP_MASK)))
         error_code |= PG_ERROR_I_D_MASK;
     if (env->intercept_exceptions & (1 << EXCP0E_PAGE)) {
         /* cr2 is not modified in case of exceptions */
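
For readability: the patch repeats the same switch (mmu_idx) permission check at every paging level (2 MB/4 KB PAE entries, 4 MB/4 KB non-PAE entries). The sketch below restates that check as a single standalone helper, purely for illustration; the function name mmu_access_allowed, its bool parameters, and the factoring into a separate function are invented here and are not part of the patch or of QEMU's code. It assumes the constants (PG_*_MASK, CR0/CR4 masks, MMU_*_IDX) and CPUX86State from target-i386/cpu.h, and 'fetch' stands for the patch's is_write1 == 2 (instruction fetch) case.

/* Illustration only -- a hypothetical restatement of the per-level check. */
#include <stdbool.h>
#include "cpu.h"

static bool mmu_access_allowed(CPUX86State *env, int mmu_idx,
                               uint64_t ptep, bool is_write, bool fetch)
{
    switch (mmu_idx) {
    case MMU_USER_IDX:
        /* User mode: the page must be user-accessible, and writable
         * if the access is a write. */
        if (!(ptep & PG_USER_MASK)) {
            return false;
        }
        if (is_write && !(ptep & PG_RW_MASK)) {
            return false;
        }
        return true;

    case MMU_KERNEL_IDX:
        /* SMAP: a supervisor data access to a user page faults.
         * (The KSMAP index is used when this data check is to be skipped.) */
        if (!fetch && (env->cr[4] & CR4_SMAP_MASK) &&
            (ptep & PG_USER_MASK)) {
            return false;
        }
        /* fall through */
    case MMU_KSMAP_IDX:
        /* SMEP: a supervisor instruction fetch from a user page faults. */
        if (fetch && (env->cr[4] & CR4_SMEP_MASK) &&
            (ptep & PG_USER_MASK)) {
            return false;
        }
        /* CR0.WP: supervisor writes respect read-only pages. */
        if ((env->cr[0] & CR0_WP_MASK) &&
            is_write && !(ptep & PG_RW_MASK)) {
            return false;
        }
        return true;

    default: /* cannot happen */
        return true;
    }
}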