author     Benjamin Herrenschmidt <benh@kernel.crashing.org>  2016-06-28 08:48:34 +0200
committer  David Gibson <david@gibson.dropbear.id.au>  2016-07-01 09:57:01 +1000
commit     4322e8ced5aaac7191958f09622d199fe61e2d87 (patch)
tree       a5391ba478bd020001332b556dc06ce06dc5e5f9 /target-ppc/mmu-hash64.c
parent     a36848ff7ca54d9181ec6c2202ce7563a2c5cfdc (diff)
ppc: Fix 64K pages support in full emulation
We were always advertising only 4K & 16M. Additionally the code wasn't
properly matching the page size with the PTE content, which meant we
could potentially hit an incorrect PTE if the guest used multiple sizes.

Finally, honor the CPU capabilities when decoding the size from the SLB
so we don't try to use 64K pages on 970.

This still doesn't add support for MPSS (Multiple Page Sizes per Segment).

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
[clg: fixed checkpatch.pl errors
      commits 61a36c9b5a12 and 1114e712c998 reworked the hpte code doing
      insertion/removal in hw/ppc/spapr_hcall.c. The hunks modifying these
      areas were removed. ]
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
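
To make the new matching rule concrete, here is a minimal standalone sketch that mirrors the ppc_hash64_pte_size_decode() helper added by this patch; the sample pte1 values are illustrative only, not taken from a real guest:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the helper introduced by this patch: given the base page shift
 * implied by the SLB entry, accept a PTE only when its low bits encode the
 * same actual page size. With MPSS unsupported, the result is either
 * slb_pshift or 0 (no match). */
static uint32_t pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
{
    switch (slb_pshift) {
    case 12:                    /* 4K segment: every PTE matches */
        return 12;
    case 16:                    /* 64K segment: PTE low bits must encode 64K */
        return ((pte1 & 0xf000) == 0x1000) ? 16 : 0;
    case 24:                    /* 16M segment: PTE low bits must be clear */
        return ((pte1 & 0xff000) == 0) ? 24 : 0;
    }
    return 0;
}

int main(void)
{
    printf("%u\n", pte_size_decode(0x1000, 16)); /* 16 -> 64K PTE accepted */
    printf("%u\n", pte_size_decode(0x0000, 16)); /*  0 -> wrong size, skip */
    printf("%u\n", pte_size_decode(0x0000, 24)); /* 24 -> 16M PTE accepted */
    return 0;
}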
Diffstat (limited to 'target-ppc/mmu-hash64.c')
-rw-r--r--  target-ppc/mmu-hash64.c  39
1 file changed, 35 insertions(+), 4 deletions(-)
diff --git a/target-ppc/mmu-hash64.c b/target-ppc/mmu-hash64.c
index 6d6f26c929..3b1357a648 100644
--- a/target-ppc/mmu-hash64.c
+++ b/target-ppc/mmu-hash64.c
@@ -450,9 +450,31 @@ void ppc_hash64_stop_access(PowerPCCPU *cpu, uint64_t token)
     }
 }
 
+/* Returns the effective page shift or 0. MPSS isn't supported yet so
+ * this will always be the slb_pshift or 0
+ */
+static uint32_t ppc_hash64_pte_size_decode(uint64_t pte1, uint32_t slb_pshift)
+{
+    switch (slb_pshift) {
+    case 12:
+        return 12;
+    case 16:
+        if ((pte1 & 0xf000) == 0x1000) {
+            return 16;
+        }
+        return 0;
+    case 24:
+        if ((pte1 & 0xff000) == 0) {
+            return 24;
+        }
+        return 0;
+    }
+    return 0;
+}
+
 static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
-                                     bool secondary, target_ulong ptem,
-                                     ppc_hash_pte64_t *pte)
+                                     uint32_t slb_pshift, bool secondary,
+                                     target_ulong ptem, ppc_hash_pte64_t *pte)
 {
     CPUPPCState *env = &cpu->env;
     int i;
@@ -472,6 +494,13 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
         if ((pte0 & HPTE64_V_VALID)
             && (secondary == !!(pte0 & HPTE64_V_SECONDARY))
             && HPTE64_V_COMPARE(pte0, ptem)) {
+            uint32_t pshift = ppc_hash64_pte_size_decode(pte1, slb_pshift);
+            if (pshift == 0) {
+                continue;
+            }
+            /* We don't do anything with pshift yet as qemu TLB only deals
+             * with 4K pages anyway
+             */
             pte->pte0 = pte0;
             pte->pte1 = pte1;
             ppc_hash64_stop_access(cpu, token);
@@ -525,7 +554,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
             " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
             " hash=" TARGET_FMT_plx "\n",
             env->htab_base, env->htab_mask, vsid, ptem, hash);
-    pte_offset = ppc_hash64_pteg_search(cpu, hash, 0, ptem, pte);
+    pte_offset = ppc_hash64_pteg_search(cpu, hash, slb->sps->page_shift,
+                                        0, ptem, pte);
 
     if (pte_offset == -1) {
         /* Secondary PTEG lookup */
@@ -535,7 +565,8 @@ static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                 " hash=" TARGET_FMT_plx "\n", env->htab_base,
                 env->htab_mask, vsid, ptem, ~hash);
 
-        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, 1, ptem, pte);
+        pte_offset = ppc_hash64_pteg_search(cpu, ~hash, slb->sps->page_shift, 1,
+                                            ptem, pte);
     }
 
     return pte_offset;
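
As the two hunks above show, the base page shift handed to ppc_hash64_pteg_search() comes from the segment's page-size descriptor (slb->sps->page_shift); a shift of 12, 16 or 24 corresponds to the 4K, 64K and 16M sizes named in the commit message. A throwaway sanity check of that mapping (standalone sketch, not QEMU code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* Page shift -> page size for the three sizes this patch handles. */
    assert((UINT64_C(1) << 12) == 4 * 1024);            /* 4K  */
    assert((UINT64_C(1) << 16) == 64 * 1024);           /* 64K */
    assert((UINT64_C(1) << 24) == 16 * 1024 * 1024);    /* 16M */
    return 0;
}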