author     David Gibson <david@gibson.dropbear.id.au>  2017-02-27 16:03:41 +1100
committer  David Gibson <david@gibson.dropbear.id.au>  2017-03-01 11:23:39 +1100
commit     7222b94a834e9f6ed99e55eb700cf492d61ba184 (patch)
tree       32b2595c2a00034068476a04fd5d8a2e5d01da56 /hw
parent     7d6250e3d1a145e5427f21f5664995e0056b34a6 (diff)
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
   acceleration. Accesses to the HPT are mostly handled by KVM, but
   there are a few cases where qemu needs to access it via a special
   fd for the purpose (a sketch of this three-way dispatch follows).
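The new map helper concentrates this three-way choice in one place. Below is a simplified sketch, not the literal QEMU code: the guard kvm_managed_hpt() is a made-up placeholder, and the other names (env.external_htab, env.htab_base, kvmppc_read_hptes(), address_space_map()) are recalled from the QEMU tree of this era and should be checked against target/ppc/mmu-hash64.c:

    /* Simplified, illustrative sketch of the three-way dispatch. */
    const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                                 hwaddr ptex, int n)
    {
        hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
        ppc_hash_pte64_t *hptes;

        if (kvm_managed_hpt(cpu)) {                /* case (3): KVM-HV */
            /* Read the HPTEs from the kernel, via the special fd,
             * into a temporary buffer. */
            hptes = g_malloc(n * HASH_PTE_SIZE_64);
            kvmppc_read_hptes(hptes, ptex, n);
        } else if (cpu->env.external_htab) {       /* case (2): qemu's own HPT */
            hptes = (ppc_hash_pte64_t *)(cpu->env.external_htab + pte_offset);
        } else {                                   /* case (1): guest memory */
            hwaddr plen = n * HASH_PTE_SIZE_64;
            hptes = address_space_map(CPU(cpu)->as,
                                      cpu->env.htab_base + pte_offset,
                                      &plen, false);
        }
        return hptes;
    }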
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
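Concretely, the pattern being replaced looks like this on the '-' lines of the diff below (an excerpt, not a self-contained program):

    token = ppc_hash64_start_access(cpu, ptex);
    v = ppc_hash64_load_hpte0(cpu, token, 0);   /* token decoded per-case */
    r = ppc_hash64_load_hpte1(cpu, token, 0);
    ppc_hash64_stop_access(cpu, token);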
This patch keeps the same basic structure, but simplifies the details.
First, start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map(),
in case (2) directly from qemu's HPT buffer, and in case (3) from a
temporary buffer read from the KVM fd.
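The same read then becomes, per the '+' lines of the diff below:

    const ppc_hash_pte64_t *hptes;

    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);   /* always a qemu pointer */
    v = ppc_hash64_hpte0(cpu, hptes, 0);
    r = ppc_hash64_hpte1(cpu, hptes, 0);
    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);

Since the pointer is a real qemu pointer in all three cases, the hpte0() / hpte1() accessors need no per-case decoding; unmap_hptes() releases the address_space_map() mapping or frees the temporary KVM buffer as appropriate.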
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3), which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, and use ptex throughout the call stack
instead of pte_offset in some places (we still need the offset at the
bottom layer, but nowhere else).
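On the index(3) hazard: a toy example (not from this patch) of how a variable named 'index' shadows the legacy libc function declared in <strings.h> on platforms that still expose it. If the local declaration is later lost in a refactor, the code can keep compiling and behave confusingly:

    #include <stdio.h>
    #include <strings.h>  /* may declare: char *index(const char *, int) */

    int main(void)
    {
        /* Suppose a refactor removes a local 'unsigned index' declaration.
         * The code may still compile, because 'index' now names the libc
         * function; the comparison below tests a function pointer against
         * NULL and is always false (gcc merely warns with -Waddress). */
        if (index == 0) {
            printf("unreachable\n");
        }
        return 0;
    }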
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'hw')
-rw-r--r--   hw/ppc/spapr_hcall.c   36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 3298a142c8..fd961b58d1 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -84,7 +84,7 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     unsigned apshift;
     target_ulong raddr;
     target_ulong slot;
-    uint64_t token;
+    const ppc_hash_pte64_t *hptes;
 
     apshift = ppc_hash64_hpte_page_shift_noslb(cpu, pteh, ptel);
     if (!apshift) {
@@ -123,23 +123,23 @@ static target_ulong h_enter(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     ptex = ptex & ~7ULL;
 
     if (likely((flags & H_EXACT) == 0)) {
-        token = ppc_hash64_start_access(cpu, ptex);
+        hptes = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
         for (slot = 0; slot < 8; slot++) {
-            if (!(ppc_hash64_load_hpte0(cpu, token, slot) & HPTE64_V_VALID)) {
+            if (!(ppc_hash64_hpte0(cpu, hptes, slot) & HPTE64_V_VALID)) {
                 break;
             }
         }
-        ppc_hash64_stop_access(cpu, token);
+        ppc_hash64_unmap_hptes(cpu, hptes, ptex, HPTES_PER_GROUP);
         if (slot == 8) {
             return H_PTEG_FULL;
         }
     } else {
-        token = ppc_hash64_start_access(cpu, ptex);
-        if (ppc_hash64_load_hpte0(cpu, token, 0) & HPTE64_V_VALID) {
-            ppc_hash64_stop_access(cpu, token);
+        hptes = ppc_hash64_map_hptes(cpu, ptex + slot, 1);
+        if (ppc_hash64_hpte0(cpu, hptes, 0) & HPTE64_V_VALID) {
+            ppc_hash64_unmap_hptes(cpu, hptes, ptex + slot, 1);
             return H_PTEG_FULL;
         }
-        ppc_hash64_stop_access(cpu, token);
+        ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
     }
 
     ppc_hash64_store_hpte(cpu, ptex + slot, pteh | HPTE64_V_HPTE_DIRTY, ptel);
@@ -160,17 +160,17 @@ static RemoveResult remove_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                 target_ulong flags,
                                 target_ulong *vp, target_ulong *rp)
 {
-    uint64_t token;
+    const ppc_hash_pte64_t *hptes;
     target_ulong v, r;
 
     if (!valid_ptex(cpu, ptex)) {
         return REMOVE_PARM;
     }
 
-    token = ppc_hash64_start_access(cpu, ptex);
-    v = ppc_hash64_load_hpte0(cpu, token, 0);
-    r = ppc_hash64_load_hpte1(cpu, token, 0);
-    ppc_hash64_stop_access(cpu, token);
+    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
+    v = ppc_hash64_hpte0(cpu, hptes, 0);
+    r = ppc_hash64_hpte1(cpu, hptes, 0);
+    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
 
     if ((v & HPTE64_V_VALID) == 0 ||
         ((flags & H_AVPN) && (v & ~0x7fULL) != avpn) ||
@@ -291,17 +291,17 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
     target_ulong flags = args[0];
     target_ulong ptex = args[1];
     target_ulong avpn = args[2];
-    uint64_t token;
+    const ppc_hash_pte64_t *hptes;
     target_ulong v, r;
 
     if (!valid_ptex(cpu, ptex)) {
         return H_PARAMETER;
    }
 
-    token = ppc_hash64_start_access(cpu, ptex);
-    v = ppc_hash64_load_hpte0(cpu, token, 0);
-    r = ppc_hash64_load_hpte1(cpu, token, 0);
-    ppc_hash64_stop_access(cpu, token);
+    hptes = ppc_hash64_map_hptes(cpu, ptex, 1);
+    v = ppc_hash64_hpte0(cpu, hptes, 0);
+    r = ppc_hash64_hpte1(cpu, hptes, 0);
+    ppc_hash64_unmap_hptes(cpu, hptes, ptex, 1);
 
     if ((v & HPTE64_V_VALID) == 0 ||
         ((flags & H_AVPN) && (v & ~0x7fULL) != avpn)) {