-rw-r--r--  cpu-defs.h                  |  2
-rw-r--r--  exec-all.h                  | 14
-rw-r--r--  exec.c                      | 55
-rw-r--r--  hw/alpha_palcode.c          |  7
-rw-r--r--  target-arm/helper.c         | 48
-rw-r--r--  target-cris/helper.c        |  5
-rw-r--r--  target-i386/helper.c        |  7
-rw-r--r--  target-m68k/helper.c        |  5
-rw-r--r--  target-microblaze/helper.c  |  7
-rw-r--r--  target-mips/helper.c        |  7
-rw-r--r--  target-ppc/helper.c         |  7
-rw-r--r--  target-s390x/helper.c       |  9
-rw-r--r--  target-sh4/helper.c         |  3
-rw-r--r--  target-sparc/helper.c       | 40
14 files changed, 136 insertions, 80 deletions
diff --git a/cpu-defs.h b/cpu-defs.h
index 47c1d85968..2e94585ae1 100644
--- a/cpu-defs.h
+++ b/cpu-defs.h
@@ -111,6 +111,8 @@ typedef struct CPUTLBEntry {
/* The meaning of the MMU modes is defined in the target code. */ \
CPUTLBEntry tlb_table[NB_MMU_MODES][CPU_TLB_SIZE]; \
target_phys_addr_t iotlb[NB_MMU_MODES][CPU_TLB_SIZE]; \
+ target_ulong tlb_flush_addr; \
+ target_ulong tlb_flush_mask;
#else
diff --git a/exec-all.h b/exec-all.h
index 56e79771a4..37da2f57c3 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -96,17 +96,9 @@ void tb_invalidate_page_range(target_ulong start, target_ulong end);
void tlb_flush_page(CPUState *env, target_ulong addr);
void tlb_flush(CPUState *env, int flush_global);
#if !defined(CONFIG_USER_ONLY)
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu);
-static inline int tlb_set_page(CPUState *env1, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
-{
- if (prot & PAGE_READ)
- prot |= PAGE_EXEC;
- return tlb_set_page_exec(env1, vaddr, paddr, prot, mmu_idx, is_softmmu);
-}
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size);
#endif
#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
diff --git a/exec.c b/exec.c
index b0b6056d52..14767b7672 100644
--- a/exec.c
+++ b/exec.c
@@ -1918,6 +1918,8 @@ void tlb_flush(CPUState *env, int flush_global)
memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
+ env->tlb_flush_addr = -1;
+ env->tlb_flush_mask = 0;
tlb_flush_count++;
}
@@ -1941,6 +1943,16 @@ void tlb_flush_page(CPUState *env, target_ulong addr)
#if defined(DEBUG_TLB)
printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
+ /* Check if we need to flush due to large pages. */
+ if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
+#if defined(DEBUG_TLB)
+ printf("tlb_flush_page: forced full flush ("
+ TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
+ env->tlb_flush_addr, env->tlb_flush_mask);
+#endif
+ tlb_flush(env, 1);
+ return;
+ }
/* must reset current TB so that interrupts cannot modify the
links while we are modifying them */
env->current_tb = NULL;
@@ -2090,13 +2102,35 @@ static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}
-/* add a new TLB entry. At most one entry for a given virtual address
- is permitted. Return 0 if OK or 2 if the page could not be mapped
- (can only happen in non SOFTMMU mode for I/O pages or pages
- conflicting with the host address space). */
-int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
- target_phys_addr_t paddr, int prot,
- int mmu_idx, int is_softmmu)
+/* Our TLB does not support large pages, so remember the area covered by
+ large pages and trigger a full TLB flush if these are invalidated. */
+static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
+ target_ulong size)
+{
+ target_ulong mask = ~(size - 1);
+
+ if (env->tlb_flush_addr == (target_ulong)-1) {
+ env->tlb_flush_addr = vaddr & mask;
+ env->tlb_flush_mask = mask;
+ return;
+ }
+ /* Extend the existing region to include the new page.
+ This is a compromise between unnecessary flushes and the cost
+ of maintaining a full variable size TLB. */
+ mask &= env->tlb_flush_mask;
+ while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
+ mask <<= 1;
+ }
+ env->tlb_flush_addr &= mask;
+ env->tlb_flush_mask = mask;
+}
+
+/* Add a new TLB entry. At most one entry for a given virtual address
+ is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
+ supplied size is only used by tlb_flush_page. */
+void tlb_set_page(CPUState *env, target_ulong vaddr,
+ target_phys_addr_t paddr, int prot,
+ int mmu_idx, target_ulong size)
{
PhysPageDesc *p;
unsigned long pd;
@@ -2104,11 +2138,14 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
target_ulong address;
target_ulong code_address;
target_phys_addr_t addend;
- int ret;
CPUTLBEntry *te;
CPUWatchpoint *wp;
target_phys_addr_t iotlb;
+ assert(size >= TARGET_PAGE_SIZE);
+ if (size != TARGET_PAGE_SIZE) {
+ tlb_add_large_page(env, vaddr, size);
+ }
p = phys_page_find(paddr >> TARGET_PAGE_BITS);
if (!p) {
pd = IO_MEM_UNASSIGNED;
@@ -2120,7 +2157,6 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
vaddr, (int)paddr, prot, mmu_idx, is_softmmu, pd);
#endif
- ret = 0;
address = vaddr;
if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
/* IO memory case (romd handled later) */
@@ -2190,7 +2226,6 @@ int tlb_set_page_exec(CPUState *env, target_ulong vaddr,
} else {
te->addr_write = -1;
}
- return ret;
}
#else
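(Editor's aside, not part of the patch.) As an illustration of the large-page tracking that the exec.c hunks above introduce, here is a minimal standalone sketch. add_large_page(), needs_full_flush(), flush_addr and flush_mask are hypothetical stand-ins for tlb_add_large_page(), the check at the top of tlb_flush_page(), and env->tlb_flush_addr / env->tlb_flush_mask; the 32-bit address width, the 1MB section size and the main() driver are assumptions made only for this example.

    /* Hypothetical standalone mirror of the tlb_add_large_page() /
     * tlb_flush_page() interaction from the patch above.  Assumes
     * 32-bit guest addresses; the real code uses target_ulong and
     * keeps the state in CPUState. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t flush_addr = (uint32_t)-1;  /* mirrors the reset done by tlb_flush() */
    static uint32_t flush_mask;

    static void add_large_page(uint32_t vaddr, uint32_t size)
    {
        uint32_t mask = ~(size - 1);

        if (flush_addr == (uint32_t)-1) {
            /* First large page: remember exactly its region. */
            flush_addr = vaddr & mask;
            flush_mask = mask;
            return;
        }
        /* Widen the region until it covers both the old area and the new page. */
        mask &= flush_mask;
        while (((flush_addr ^ vaddr) & mask) != 0) {
            mask <<= 1;
        }
        flush_addr &= mask;
        flush_mask = mask;
    }

    static int needs_full_flush(uint32_t addr)
    {
        /* The test tlb_flush_page() performs before falling back to tlb_flush(). */
        return (addr & flush_mask) == flush_addr;
    }

    int main(void)
    {
        add_large_page(0x00100000, 0x00100000);   /* 1MB section at 1MB */
        add_large_page(0x00400000, 0x00100000);   /* 1MB section at 4MB */
        /* The tracked region is now 0x00000000-0x007fffff, so a flush inside
           it forces a full TLB flush while one outside stays a single-page
           flush.  Prints "1 0". */
        printf("%d %d\n", needs_full_flush(0x00180000), needs_full_flush(0x00900000));
        return 0;
    }

The compromise described in the patch comment is visible here: merging the sections at 1MB and 4MB into a single 8MB region avoids tracking every large page individually, at the cost of occasional unnecessary full flushes.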
diff --git a/hw/alpha_palcode.c b/hw/alpha_palcode.c
index c1220ad93e..6293d10936 100644
--- a/hw/alpha_palcode.c
+++ b/hw/alpha_palcode.c
@@ -1003,11 +1003,14 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, uint32_t address, int rw,
/* No fault */
page_size = 1ULL << zbits;
address &= ~(page_size - 1);
+ /* FIXME: page_size should probably be passed to tlb_set_page,
+ and this loop removed. */
for (end = physical + page_size; physical < end; physical += 0x1000) {
- ret = tlb_set_page(env, address, physical, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address, physical, prot, mmu_idx,
+ TARGET_PAGE_SIZE);
address += 0x1000;
}
+ ret = 0;
break;
#if 0
case 1:
diff --git a/target-arm/helper.c b/target-arm/helper.c
index 1a181acce3..18e22b1afc 100644
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -894,7 +894,8 @@ static uint32_t get_level1_table_address(CPUState *env, uint32_t address)
}
static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+ int is_user, uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
int code;
uint32_t table;
@@ -927,6 +928,7 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
ap = (desc >> 10) & 3;
code = 13;
+ *page_size = 1024 * 1024;
} else {
/* Lookup l2 entry. */
if (type == 1) {
@@ -944,10 +946,12 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x10000;
break;
case 2: /* 4k page. */
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
+ *page_size = 0x1000;
break;
case 3: /* 1k page. */
if (type == 1) {
@@ -962,6 +966,7 @@ static int get_phys_addr_v5(CPUState *env, uint32_t address, int access_type,
phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
}
ap = (desc >> 4) & 3;
+ *page_size = 0x400;
break;
default:
/* Never happens, but compiler isn't smart enough to tell. */
@@ -981,7 +986,8 @@ do_fault:
}
static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
- int is_user, uint32_t *phys_ptr, int *prot)
+ int is_user, uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
int code;
uint32_t table;
@@ -1021,9 +1027,11 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
if (desc & (1 << 18)) {
/* Supersection. */
phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+ *page_size = 0x1000000;
} else {
/* Section. */
phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
+ *page_size = 0x100000;
}
ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
xn = desc & (1 << 4);
@@ -1040,10 +1048,12 @@ static int get_phys_addr_v6(CPUState *env, uint32_t address, int access_type,
case 1: /* 64k page. */
phys_addr = (desc & 0xffff0000) | (address & 0xffff);
xn = desc & (1 << 15);
+ *page_size = 0x10000;
break;
case 2: case 3: /* 4k page. */
phys_addr = (desc & 0xfffff000) | (address & 0xfff);
xn = desc & 1;
+ *page_size = 0x1000;
break;
default:
/* Never happens, but compiler isn't smart enough to tell. */
@@ -1132,7 +1142,8 @@ static int get_phys_addr_mpu(CPUState *env, uint32_t address, int access_type,
static inline int get_phys_addr(CPUState *env, uint32_t address,
int access_type, int is_user,
- uint32_t *phys_ptr, int *prot)
+ uint32_t *phys_ptr, int *prot,
+ target_ulong *page_size)
{
/* Fast Context Switch Extension. */
if (address < 0x02000000)
@@ -1142,16 +1153,18 @@ static inline int get_phys_addr(CPUState *env, uint32_t address,
/* MMU/MPU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE;
+ *page_size = TARGET_PAGE_SIZE;
return 0;
} else if (arm_feature(env, ARM_FEATURE_MPU)) {
+ *page_size = TARGET_PAGE_SIZE;
return get_phys_addr_mpu(env, address, access_type, is_user, phys_ptr,
prot);
} else if (env->cp15.c1_sys & (1 << 23)) {
return get_phys_addr_v6(env, address, access_type, is_user, phys_ptr,
- prot);
+ prot, page_size);
} else {
return get_phys_addr_v5(env, address, access_type, is_user, phys_ptr,
- prot);
+ prot, page_size);
}
}
@@ -1159,17 +1172,20 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
int access_type, int mmu_idx, int is_softmmu)
{
uint32_t phys_addr;
+ target_ulong page_size;
int prot;
int ret, is_user;
is_user = mmu_idx == MMU_USER_IDX;
- ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot);
+ ret = get_phys_addr(env, address, access_type, is_user, &phys_addr, &prot,
+ &page_size);
if (ret == 0) {
/* Map a single [sub]page. */
phys_addr &= ~(uint32_t)0x3ff;
address &= ~(uint32_t)0x3ff;
- return tlb_set_page (env, address, phys_addr, prot, mmu_idx,
- is_softmmu);
+ tlb_set_page (env, address, phys_addr, prot | PAGE_EXEC, mmu_idx,
+ page_size);
+ return 0;
}
if (access_type == 2) {
@@ -1189,10 +1205,11 @@ int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address,
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
uint32_t phys_addr;
+ target_ulong page_size;
int prot;
int ret;
- ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot);
+ ret = get_phys_addr(env, addr, 0, 0, &phys_addr, &prot, &page_size);
if (ret != 0)
return -1;
@@ -1406,18 +1423,7 @@ void HELPER(set_cp15)(CPUState *env, uint32_t insn, uint32_t val)
tlb_flush(env, 0);
break;
case 1: /* Invalidate single TLB entry. */
-#if 0
- /* ??? This is wrong for large pages and sections. */
- /* As an ugly hack to make linux work we always flush a 4K
- pages. */
- val &= 0xfffff000;
- tlb_flush_page(env, val);
- tlb_flush_page(env, val + 0x400);
- tlb_flush_page(env, val + 0x800);
- tlb_flush_page(env, val + 0xc00);
-#else
- tlb_flush(env, 1);
-#endif
+ tlb_flush_page(env, val & TARGET_PAGE_MASK);
break;
case 2: /* Invalidate on ASID. */
tlb_flush(env, val == 0);
diff --git a/target-cris/helper.c b/target-cris/helper.c
index b101dc5ad0..fcdcf977fe 100644
--- a/target-cris/helper.c
+++ b/target-cris/helper.c
@@ -100,8 +100,9 @@ int cpu_cris_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
*/
phy = res.phy & ~0x80000000;
prot = res.prot;
- r = tlb_set_page(env, address & TARGET_PAGE_MASK,
- phy, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK, phy,
+ prot | PAGE_EXEC, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
}
if (r > 0)
D_LOG("%s returns %d irqreq=%x addr=%x"
diff --git a/target-i386/helper.c b/target-i386/helper.c
index 05758f9bfe..35ab72090a 100644
--- a/target-i386/helper.c
+++ b/target-i386/helper.c
@@ -531,14 +531,13 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
-1 = cannot handle fault
0 = nothing more to do
1 = generate PF fault
- 2 = soft MMU activation required for this block
*/
int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
int is_write1, int mmu_idx, int is_softmmu)
{
uint64_t ptep, pte;
target_ulong pde_addr, pte_addr;
- int error_code, is_dirty, prot, page_size, ret, is_write, is_user;
+ int error_code, is_dirty, prot, page_size, is_write, is_user;
target_phys_addr_t paddr;
uint32_t page_offset;
target_ulong vaddr, virt_addr;
@@ -799,8 +798,8 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr,
paddr = (pte & TARGET_PAGE_MASK) + page_offset;
vaddr = virt_addr + page_offset;
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
do_fault_protect:
error_code = PG_ERROR_P_MASK;
do_fault:
diff --git a/target-m68k/helper.c b/target-m68k/helper.c
index 5d52bd34ea..2dfd48f7bf 100644
--- a/target-m68k/helper.c
+++ b/target-m68k/helper.c
@@ -368,8 +368,9 @@ int cpu_m68k_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
int prot;
address &= TARGET_PAGE_MASK;
- prot = PAGE_READ | PAGE_WRITE;
- return tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
/* Notify CPU of a pending interrupt. Prioritization and vectoring should
diff --git a/target-microblaze/helper.c b/target-microblaze/helper.c
index d99840c735..5230b52c18 100644
--- a/target-microblaze/helper.c
+++ b/target-microblaze/helper.c
@@ -76,8 +76,8 @@ int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
DMMU(qemu_log("MMU map mmu=%d v=%x p=%x prot=%x\n",
mmu_idx, vaddr, paddr, lu.prot));
- r = tlb_set_page(env, vaddr,
- paddr, lu.prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, vaddr, paddr, lu.prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
} else {
env->sregs[SR_EAR] = address;
DMMU(qemu_log("mmu=%d miss v=%x\n", mmu_idx, address));
@@ -107,7 +107,8 @@ int cpu_mb_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
/* MMU disabled or not available. */
address &= TARGET_PAGE_MASK;
prot = PAGE_BITS;
- r = tlb_set_page(env, address, address, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address, address, prot, mmu_idx, TARGET_PAGE_SIZE);
+ r = 0;
}
return r;
}
diff --git a/target-mips/helper.c b/target-mips/helper.c
index 933c7bc1df..8102f03c26 100644
--- a/target-mips/helper.c
+++ b/target-mips/helper.c
@@ -296,9 +296,10 @@ int cpu_mips_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
qemu_log("%s address=" TARGET_FMT_lx " ret %d physical " TARGET_FMT_plx " prot %d\n",
__func__, address, ret, physical, prot);
if (ret == TLBRET_MATCH) {
- ret = tlb_set_page(env, address & TARGET_PAGE_MASK,
- physical & TARGET_PAGE_MASK, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
} else if (ret < 0)
#endif
{
diff --git a/target-ppc/helper.c b/target-ppc/helper.c
index fc54d71802..c28223bd29 100644
--- a/target-ppc/helper.c
+++ b/target-ppc/helper.c
@@ -1410,9 +1410,10 @@ int cpu_ppc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
}
ret = get_physical_address(env, &ctx, address, rw, access_type);
if (ret == 0) {
- ret = tlb_set_page_exec(env, address & TARGET_PAGE_MASK,
- ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ ctx.raddr & TARGET_PAGE_MASK, ctx.prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ ret = 0;
} else if (ret < 0) {
LOG_MMU_STATE(env);
if (access_type == ACCESS_CODE) {
diff --git a/target-s390x/helper.c b/target-s390x/helper.c
index b97d1ff8a4..ba0c0521d3 100644
--- a/target-s390x/helper.c
+++ b/target-s390x/helper.c
@@ -69,10 +69,11 @@ int cpu_s390x_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
/* XXX: implement mmu */
phys = address;
- prot = PAGE_READ | PAGE_WRITE;
+ prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- return tlb_set_page(env, address & TARGET_PAGE_MASK,
- phys & TARGET_PAGE_MASK, prot,
- mmu_idx, is_softmmu);
+ tlb_set_page(env, address & TARGET_PAGE_MASK,
+ phys & TARGET_PAGE_MASK, prot,
+ mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
#endif /* CONFIG_USER_ONLY */
diff --git a/target-sh4/helper.c b/target-sh4/helper.c
index 486be5d511..6c3f896a2d 100644
--- a/target-sh4/helper.c
+++ b/target-sh4/helper.c
@@ -470,7 +470,8 @@ int cpu_sh4_handle_mmu_fault(CPUState * env, target_ulong address, int rw,
address &= TARGET_PAGE_MASK;
physical &= TARGET_PAGE_MASK;
- return tlb_set_page(env, address, physical, prot, mmu_idx, is_softmmu);
+ tlb_set_page(env, address, physical, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
}
target_phys_addr_t cpu_get_phys_page_debug(CPUState * env, target_ulong addr)
diff --git a/target-sparc/helper.c b/target-sparc/helper.c
index 976eb43d33..1f0f7d49d4 100644
--- a/target-sparc/helper.c
+++ b/target-sparc/helper.c
@@ -102,7 +102,8 @@ static const int perm_table[2][8] = {
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
int *prot, int *access_index,
- target_ulong address, int rw, int mmu_idx)
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
{
int access_perms = 0;
target_phys_addr_t pde_ptr;
@@ -113,6 +114,7 @@ static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
is_user = mmu_idx == MMU_USER_IDX;
if ((env->mmuregs[0] & MMU_E) == 0) { /* MMU disabled */
+ *page_size = TARGET_PAGE_SIZE;
// Boot mode: instruction fetches are taken from PROM
if (rw == 2 && (env->mmuregs[0] & env->def->mmu_bm)) {
*physical = env->prom_addr | (address & 0x7ffffULL);
@@ -175,13 +177,16 @@ static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
page_offset = (address & TARGET_PAGE_MASK) &
(TARGET_PAGE_SIZE - 1);
}
+ *page_size = TARGET_PAGE_SIZE;
break;
case 2: /* L2 PTE */
page_offset = address & 0x3ffff;
+ *page_size = 0x40000;
}
break;
case 2: /* L1 PTE */
page_offset = address & 0xffffff;
+ *page_size = 0x1000000;
}
}
@@ -220,10 +225,11 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
{
target_phys_addr_t paddr;
target_ulong vaddr;
- int error_code = 0, prot, ret = 0, access_index;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx);
+ address, rw, mmu_idx, &page_size);
if (error_code == 0) {
vaddr = address & TARGET_PAGE_MASK;
paddr &= TARGET_PAGE_MASK;
@@ -231,8 +237,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
printf("Translate at " TARGET_FMT_lx " -> " TARGET_FMT_plx ", vaddr "
TARGET_FMT_lx "\n", address, paddr, vaddr);
#endif
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
}
if (env->mmuregs[3]) /* Fault status register */
@@ -247,8 +253,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
// switching to normal mode.
vaddr = address & TARGET_PAGE_MASK;
prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, TARGET_PAGE_SIZE);
+ return 0;
} else {
if (rw & 2)
env->exception_index = TT_TFAULT;
@@ -531,10 +537,14 @@ static int get_physical_address_code(CPUState *env,
static int get_physical_address(CPUState *env, target_phys_addr_t *physical,
int *prot, int *access_index,
- target_ulong address, int rw, int mmu_idx)
+ target_ulong address, int rw, int mmu_idx,
+ target_ulong *page_size)
{
int is_user = mmu_idx == MMU_USER_IDX;
+ /* ??? We treat everything as a small page, then explicitly flush
+ everything when an entry is evicted. */
+ *page_size = TARGET_PAGE_SIZE;
if (rw == 2)
return get_physical_address_code(env, physical, prot, address,
is_user);
@@ -549,10 +559,11 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
{
target_ulong virt_addr, vaddr;
target_phys_addr_t paddr;
- int error_code = 0, prot, ret = 0, access_index;
+ target_ulong page_size;
+ int error_code = 0, prot, access_index;
error_code = get_physical_address(env, &paddr, &prot, &access_index,
- address, rw, mmu_idx);
+ address, rw, mmu_idx, &page_size);
if (error_code == 0) {
virt_addr = address & TARGET_PAGE_MASK;
vaddr = virt_addr + ((address & TARGET_PAGE_MASK) &
@@ -561,8 +572,8 @@ int cpu_sparc_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
printf("Translate at 0x%" PRIx64 " -> 0x%" PRIx64 ", vaddr 0x%" PRIx64
"\n", address, paddr, vaddr);
#endif
- ret = tlb_set_page_exec(env, vaddr, paddr, prot, mmu_idx, is_softmmu);
- return ret;
+ tlb_set_page(env, vaddr, paddr, prot, mmu_idx, page_size);
+ return 0;
}
// XXX
return 1;
@@ -656,12 +667,13 @@ void dump_mmu(CPUState *env)
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
{
target_phys_addr_t phys_addr;
+ target_ulong page_size;
int prot, access_index;
if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2,
- MMU_KERNEL_IDX) != 0)
+ MMU_KERNEL_IDX, &page_size) != 0)
if (get_physical_address(env, &phys_addr, &prot, &access_index, addr,
- 0, MMU_KERNEL_IDX) != 0)
+ 0, MMU_KERNEL_IDX, &page_size) != 0)
return -1;
if (cpu_get_physical_page_desc(phys_addr) == IO_MEM_UNASSIGNED)
return -1;