author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2019-02-15 18:00:23 +0100
committer David Gibson <david@gibson.dropbear.id.au>         2019-02-26 09:21:25 +1100
commit    3054b0ca4bdd83e0780bd76805ca17aa733031c8 (patch)
tree      9f53c5e901a52ca745dfc2ae28d9de8668188122
parent    2819282dae6c38b2c9d5499ad748b1471f26af1a (diff)
target/ppc: Fix ordering of hash MMU accesses
With mttcg, we can have MMU lookups happening at the same time as the
guest modifying the page tables.

Since the HPTEs of the hash table MMU contain two words (or double
words on 64-bit), we need to make sure we read them in the right
order, with the correct memory barrier.

Additionally, when using emulated SPAPR mode, the hypercalls writing
to the hash table must also perform the updates in the right order.

Note: This part is still not entirely correct.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20190215170029.15641-7-clg@kaod.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
-rw-r--r--  hw/ppc/spapr.c           21
-rw-r--r--  target/ppc/mmu-hash32.c   6
-rw-r--r--  target/ppc/mmu-hash64.c   6
3 files changed, 31 insertions, 2 deletions
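For reference, the following is a minimal, self-contained sketch (not QEMU code) of the publish/consume ordering the patch establishes for a two-word HPTE. It uses C11 fences in place of QEMU's smp_wmb()/smp_rmb(), and the names (struct hpte, HPTE_VALID, store_hpte, load_hpte) are illustrative stand-ins rather than the actual QEMU identifiers:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define HPTE_VALID (1ULL << 0)          /* stand-in for HPTE64_V_VALID */

/* One hash PTE slot: pte0 carries the valid bit, pte1 the payload. */
struct hpte {
    _Atomic uint64_t pte0;
    _Atomic uint64_t pte1;
};

static struct hpte slot;

/*
 * Writer side (cf. spapr_store_hpte): when setting the valid bit,
 * publish pte1 before pte0; when clearing it, store pte0 first.
 * The release fence plays the role of smp_wmb().
 */
static void store_hpte(uint64_t pte0, uint64_t pte1)
{
    if (pte0 & HPTE_VALID) {
        atomic_store_explicit(&slot.pte1, pte1, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* ~ smp_wmb() */
        atomic_store_explicit(&slot.pte0, pte0, memory_order_relaxed);
    } else {
        atomic_store_explicit(&slot.pte0, pte0, memory_order_relaxed);
        atomic_thread_fence(memory_order_release);   /* ~ smp_wmb() */
        atomic_store_explicit(&slot.pte1, pte1, memory_order_relaxed);
    }
}

/*
 * Reader side (cf. ppc_hash64_pteg_search): read pte0 (the valid bit)
 * before pte1, with an acquire fence playing the role of smp_rmb(),
 * so a stale pte1 is never paired with a freshly set valid bit.
 */
static void load_hpte(uint64_t *pte0, uint64_t *pte1)
{
    *pte0 = atomic_load_explicit(&slot.pte0, memory_order_relaxed);
    atomic_thread_fence(memory_order_acquire);       /* ~ smp_rmb() */
    *pte1 = atomic_load_explicit(&slot.pte1, memory_order_relaxed);
}

int main(void)
{
    uint64_t pte0, pte1;

    store_hpte(HPTE_VALID | 0x1230, 0x4567);
    load_hpte(&pte0, &pte1);
    printf("pte0=%#llx pte1=%#llx\n",
           (unsigned long long)pte0, (unsigned long long)pte1);
    return 0;
}

Either ordering on its own would not help; it is the pairing of the write-side barrier with the read-side barrier that guarantees a reader which observes the new valid bit also observes the matching pte1.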
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 84f6e9d9a8..d2520bc662 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1524,8 +1524,25 @@ static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
     if (!spapr->htab) {
         kvmppc_write_hpte(ptex, pte0, pte1);
     } else {
-        stq_p(spapr->htab + offset, pte0);
-        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+        if (pte0 & HPTE64_V_VALID) {
+            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+            /*
+             * When setting valid, we write PTE1 first. This ensures
+             * proper synchronization with the reading code in
+             * ppc_hash64_pteg_search()
+             */
+            smp_wmb();
+            stq_p(spapr->htab + offset, pte0);
+        } else {
+            stq_p(spapr->htab + offset, pte0);
+            /*
+             * When clearing it we set PTE0 first. This ensures proper
+             * synchronization with the reading code in
+             * ppc_hash64_pteg_search()
+             */
+            smp_wmb();
+            stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+        }
     }
 }
diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c
index 03ae3c1279..e8562a7c87 100644
--- a/target/ppc/mmu-hash32.c
+++ b/target/ppc/mmu-hash32.c
@@ -319,6 +319,12 @@ static hwaddr ppc_hash32_pteg_search(PowerPCCPU *cpu, hwaddr pteg_off,
     for (i = 0; i < HPTES_PER_GROUP; i++) {
         pte0 = ppc_hash32_load_hpte0(cpu, pte_offset);
+        /*
+         * pte0 contains the valid bit and must be read before pte1,
+         * otherwise we might see an old pte1 with a new valid bit and
+         * thus an inconsistent hpte value
+         */
+        smp_rmb();
         pte1 = ppc_hash32_load_hpte1(cpu, pte_offset);
         if ((pte0 & HPTE32_V_VALID)
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index 1175b991d9..fbefe5b5aa 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -507,6 +507,12 @@ static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
     }
     for (i = 0; i < HPTES_PER_GROUP; i++) {
         pte0 = ppc_hash64_hpte0(cpu, pteg, i);
+        /*
+         * pte0 contains the valid bit and must be read before pte1,
+         * otherwise we might see an old pte1 with a new valid bit and
+         * thus an inconsistent hpte value
+         */
+        smp_rmb();
         pte1 = ppc_hash64_hpte1(cpu, pteg, i);
         /* This compares V, B, H (secondary) and the AVPN */