author     Paolo Bonzini <pbonzini@redhat.com>  2024-01-02 15:40:18 +0100
committer  Paolo Bonzini <pbonzini@redhat.com>  2024-02-28 00:23:39 +0100
commit     90f641531c782c873a05895f411c05fbbbef3c49 (patch)
tree       d2cd3320f85604ac5ef9f03fad94edddccaacb24 /target
parent     5f97afe2543f09160a8d123ab6e2e8c6d98fa9ce (diff)
target/i386: use separate MMU indexes for 32-bit accesses
Accesses from a 32-bit environment (32-bit code segment for instruction
accesses, EFER.LMA==0 for processor accesses) have to mask away the upper
32 bits of the address.  While a bit wasteful, the easiest way to do so is
to use separate MMU indexes.  These days, QEMU anyway is compiled with a
fixed value for NB_MMU_MODES.

Split MMU_USER_IDX, MMU_KSMAP_IDX and MMU_KNOSMAP_IDX in two.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
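To make the pairing concrete, here is a small standalone sketch (hypothetical names, not QEMU code) of the idea: every 64-bit index gets a 32-bit twin at base + 1, so the low bit of the MMU index is enough to decide whether the upper 32 bits of a linear address are masked away.

#include <assert.h>
#include <stdint.h>

/* Hypothetical mirror of the new index layout: even = 64-bit, odd = 32-bit. */
enum {
    KSMAP64_IDX   = 0, KSMAP32_IDX   = 1,
    USER64_IDX    = 2, USER32_IDX    = 3,
    KNOSMAP64_IDX = 4, KNOSMAP32_IDX = 5,
};

/* The low bit marks the 32-bit member of each pair. */
static int is_32bit_index(int mmu_index)
{
    return mmu_index & 1;
}

/* Truncate the linear address only for the 32-bit indexes. */
static uint64_t apply_address_width(int mmu_index, uint64_t linear_addr)
{
    return is_32bit_index(mmu_index) ? (uint32_t)linear_addr : linear_addr;
}

int main(void)
{
    assert(apply_address_width(USER32_IDX, 0x1234567890ULL) == 0x34567890ULL);
    assert(apply_address_width(USER64_IDX, 0x1234567890ULL) == 0x1234567890ULL);
    return 0;
}

The cost hinted at by "a bit wasteful" is that each extra MMU index carries its own softmmu TLB, but in exchange no separate address-masking flag has to be threaded through the translation code.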
Diffstat (limited to 'target')
-rw-r--r--  target/i386/cpu.c                     | 11
-rw-r--r--  target/i386/cpu.h                     | 34
-rw-r--r--  target/i386/tcg/sysemu/excp_helper.c  |  3
3 files changed, 33 insertions(+), 15 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 7f90823676..647371198c 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -7732,13 +7732,16 @@ static bool x86_cpu_has_work(CPUState *cs)
     return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
 }
 
 static int x86_cpu_mmu_index(CPUState *cs, bool ifetch)
 {
     CPUX86State *env = cpu_env(cs);
+    int mmu_index_32 = (env->hflags & HF_CS64_MASK) ? 1 : 0;
+    int mmu_index_base =
+        (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER64_IDX :
+        !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+        (env->eflags & AC_MASK) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
 
-    return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
-        (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
-        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+    return mmu_index_base + mmu_index_32;
 }
 
 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
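For reference, the selection above can be read as a two-step choice: a privilege/SMAP/AC priority picks the 64-bit base index, and the code-segment width then supplies the low bit. A standalone sketch of the base-index priority (hypothetical parameters, not the QEMU implementation):

#include <assert.h>
#include <stdbool.h>

/* Hypothetical mirror of the 64-bit base indexes from the new layout. */
enum { KSMAP64_IDX = 0, USER64_IDX = 2, KNOSMAP64_IDX = 4 };

/* User mode wins first; otherwise SMAP being disabled or EFLAGS.AC=1
 * relaxes the check, and only SMAP-on with AC=0 lands on the KSMAP pair. */
static int mmu_index_base(int cpl, bool smap_enabled, bool eflags_ac)
{
    return cpl == 3      ? USER64_IDX :
           !smap_enabled ? KNOSMAP64_IDX :
           eflags_ac     ? KNOSMAP64_IDX : KSMAP64_IDX;
}

int main(void)
{
    assert(mmu_index_base(3, true,  false) == USER64_IDX);
    assert(mmu_index_base(0, false, false) == KNOSMAP64_IDX);
    assert(mmu_index_base(0, true,  true)  == KNOSMAP64_IDX);
    assert(mmu_index_base(0, true,  false) == KSMAP64_IDX);
    return 0;
}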
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index 8c271ca62e..ee4ad37202 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -2299,27 +2299,41 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define cpu_list x86_cpu_list
 
 /* MMU modes definitions */
-#define MMU_KSMAP_IDX   0
-#define MMU_USER_IDX    1
-#define MMU_KNOSMAP_IDX 2
-#define MMU_NESTED_IDX  3
-#define MMU_PHYS_IDX    4
+#define MMU_KSMAP64_IDX    0
+#define MMU_KSMAP32_IDX    1
+#define MMU_USER64_IDX     2
+#define MMU_USER32_IDX     3
+#define MMU_KNOSMAP64_IDX  4
+#define MMU_KNOSMAP32_IDX  5
+#define MMU_PHYS_IDX       6
+#define MMU_NESTED_IDX     7
+
+#ifdef CONFIG_USER_ONLY
+#ifdef TARGET_X86_64
+#define MMU_USER_IDX MMU_USER64_IDX
+#else
+#define MMU_USER_IDX MMU_USER32_IDX
+#endif
+#endif
 
 static inline bool is_mmu_index_smap(int mmu_index)
 {
-    return mmu_index == MMU_KSMAP_IDX;
+    return (mmu_index & ~1) == MMU_KSMAP64_IDX;
 }
 
 static inline bool is_mmu_index_user(int mmu_index)
 {
-    return mmu_index == MMU_USER_IDX;
+    return (mmu_index & ~1) == MMU_USER64_IDX;
 }
 
 static inline int cpu_mmu_index_kernel(CPUX86State *env)
 {
-    return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX :
-        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK))
-        ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX;
+    int mmu_index_32 = (env->hflags & HF_LMA_MASK) ? 1 : 0;
+    int mmu_index_base =
+        !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP64_IDX :
+        ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) ? MMU_KNOSMAP64_IDX : MMU_KSMAP64_IDX;
+
+    return mmu_index_base + mmu_index_32;
 }
 
 #define CC_DST (env->cc_dst)
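The rewritten is_mmu_index_smap() and is_mmu_index_user() helpers depend on each 64-bit index being even with its 32-bit twin at the next odd value, so clearing the low bit folds both members of a pair onto the 64-bit base and a single comparison classifies them. A standalone check of that trick (hypothetical mirror, not QEMU code):

#include <assert.h>

/* Same even/odd pairing as the new MMU_*_IDX layout. */
enum {
    KSMAP64_IDX = 0, KSMAP32_IDX = 1,
    USER64_IDX  = 2, USER32_IDX  = 3,
};

/* Clearing the low bit maps USER32_IDX back onto USER64_IDX. */
static int is_user_pair(int mmu_index)
{
    return (mmu_index & ~1) == USER64_IDX;
}

int main(void)
{
    assert(is_user_pair(USER64_IDX));
    assert(is_user_pair(USER32_IDX));
    assert(!is_user_pair(KSMAP64_IDX));
    assert(!is_user_pair(KSMAP32_IDX));
    return 0;
}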
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index a0d5ce3930..b2c525e1a9 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -545,7 +545,8 @@ static bool get_physical_address(CPUX86State *env, vaddr addr,
         if (likely(use_stage2)) {
             in.cr3 = env->nested_cr3;
             in.pg_mode = env->nested_pg_mode;
-            in.mmu_idx = MMU_USER_IDX;
+            in.mmu_idx =
+                env->nested_pg_mode & PG_MODE_LMA ? MMU_USER64_IDX : MMU_USER32_IDX;
             in.ptw_idx = MMU_PHYS_IDX;
 
             if (!mmu_translate(env, &in, out, err)) {
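The excp_helper.c change keeps treating the nested page-table walk's translation as a user access, but now chooses between the two user indexes based on whether the nested paging mode has long mode enabled. A minimal sketch of that selection, with hypothetical names:

#include <assert.h>
#include <stdbool.h>

enum { USER64_IDX = 2, USER32_IDX = 3 };

/* Long mode in the (hypothetical) nested paging mode selects the 64-bit
 * user index; otherwise the 32-bit twin is used so addresses get masked. */
static int nested_walk_mmu_index(bool nested_long_mode)
{
    return nested_long_mode ? USER64_IDX : USER32_IDX;
}

int main(void)
{
    assert(nested_walk_mmu_index(true)  == USER64_IDX);
    assert(nested_walk_mmu_index(false) == USER32_IDX);
    return 0;
}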