author     David Gibson <david@gibson.dropbear.id.au>  2017-02-23 11:39:18 +1100
committer  David Gibson <david@gibson.dropbear.id.au>  2017-03-01 11:23:39 +1100
commit     e57ca75ce3b2bd33102573a8c0555d62e1bcfceb (patch)
tree       4bd3de614de344aff41a51060a67f0beb4ba912b /hw
parent     36778660d7fd0748a6129916e47ecedd67bdb758 (diff)
target/ppc: Manage external HPT via virtual hypervisor
The pseries machine type implements the behaviour of a PAPR compliant hypervisor, without actually executing such a hypervisor on the virtual CPU. To do this we need some hooks in the CPU code to make hypervisor facilities get redirected to the machine instead of emulated internally. For hypercalls this is managed through the cpu->vhyp field, which points to a QOM interface with a method implementing the hypercall.

For the hashed page table (HPT) - also a hypervisor resource - we use an older hack. CPUPPCState has an 'external_htab' field which when non-NULL indicates that the HPT is stored in qemu memory, rather than within the guest's address space.

For consistency - and to make some future extensions easier - this merges the external HPT mechanism into the vhyp mechanism. Methods are added to vhyp for the basic operations the core hash MMU code needs: map_hptes() and unmap_hptes() for reading the HPT, store_hpte() for updating it and hpt_mask() to retrieve its size.

To match this, the pseries machine now sets these vhyp fields in its existing vhyp class, rather than reaching into the cpu object to set the external_htab field.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Reviewed-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
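For context, the four hooks this patch adds to the virtual hypervisor interface have roughly the following shape. This is a sketch reconstructed from the spapr_* implementations in the diff below; the actual PPCVirtualHypervisorClass declaration lives under target/ppc/ (outside the hw/ portion shown here) and also carries the pre-existing hypercall member and the usual QOM boilerplate.

/*
 * Sketch only: signatures inferred from the spapr_* hook
 * implementations added in hw/ppc/spapr.c by this patch.
 */
struct PPCVirtualHypervisorClass {
    InterfaceClass parent;

    /* ... pre-existing members, e.g. the hypercall hook ... */

    /* Number of PTEGs in the HPT minus one, used as a hash mask */
    hwaddr (*hpt_mask)(PPCVirtualHypervisor *vhyp);
    /* Give the core hash MMU code read access to n HPTEs starting at ptex */
    const ppc_hash_pte64_t *(*map_hptes)(PPCVirtualHypervisor *vhyp,
                                         hwaddr ptex, int n);
    /* Release whatever map_hptes() handed out */
    void (*unmap_hptes)(PPCVirtualHypervisor *vhyp,
                        const ppc_hash_pte64_t *hptes,
                        hwaddr ptex, int n);
    /* Update a single HPTE */
    void (*store_hpte)(PPCVirtualHypervisor *vhyp, hwaddr ptex,
                       uint64_t pte0, uint64_t pte1);
};

The pseries machine wires these up in spapr_machine_class_init() (second hunk of hw/ppc/spapr.c below), replacing the ppc_hash64_set_external_hpt() call that spapr_cpu_reset() previously used to hand the HPT to the CPU.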
Diffstat (limited to 'hw')
-rw-r--r--  hw/ppc/spapr.c           | 60
-rw-r--r--  hw/ppc/spapr_cpu_core.c  | 17
-rw-r--r--  hw/ppc/spapr_hcall.c     |  3
3 files changed, 76 insertions(+), 4 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index 0b57aade3f..e0bb9bcb85 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1053,6 +1053,62 @@ static void close_htab_fd(sPAPRMachineState *spapr)
spapr->htab_fd = -1;
}
+static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+ return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
+}
+
+static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
+ hwaddr ptex, int n)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+ hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
+
+ if (!spapr->htab) {
+ /*
+ * HTAB is controlled by KVM. Fetch into temporary buffer
+ */
+ ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
+ kvmppc_read_hptes(hptes, ptex, n);
+ return hptes;
+ }
+
+ /*
+ * HTAB is controlled by QEMU. Just point to the internally
+ * accessible PTEG.
+ */
+ return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
+}
+
+static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
+ const ppc_hash_pte64_t *hptes,
+ hwaddr ptex, int n)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+ if (!spapr->htab) {
+ g_free((void *)hptes);
+ }
+
+ /* Nothing to do for qemu managed HPT */
+}
+
+static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
+ uint64_t pte0, uint64_t pte1)
+{
+ sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+ hwaddr offset = ptex * HASH_PTE_SIZE_64;
+
+ if (!spapr->htab) {
+ kvmppc_write_hpte(ptex, pte0, pte1);
+ } else {
+ stq_p(spapr->htab + offset, pte0);
+ stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+ }
+}
+
static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
{
int shift;
@@ -2913,6 +2969,10 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
nc->nmi_monitor_handler = spapr_nmi;
smc->phb_placement = spapr_phb_placement;
vhc->hypercall = emulate_spapr_hypercall;
+ vhc->hpt_mask = spapr_hpt_mask;
+ vhc->map_hptes = spapr_map_hptes;
+ vhc->unmap_hptes = spapr_unmap_hptes;
+ vhc->store_hpte = spapr_store_hpte;
}
static const TypeInfo spapr_machine_info = {
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 76563c4182..ddb130f030 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -13,10 +13,12 @@
#include "hw/boards.h"
#include "qapi/error.h"
#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
#include "target/ppc/kvm_ppc.h"
#include "hw/ppc/ppc.h"
#include "target/ppc/mmu-hash64.h"
#include "sysemu/numa.h"
+#include "qemu/error-report.h"
static void spapr_cpu_reset(void *opaque)
{
@@ -34,8 +36,19 @@ static void spapr_cpu_reset(void *opaque)
env->spr[SPR_HIOR] = 0;
- ppc_hash64_set_external_hpt(cpu, spapr->htab, spapr->htab_shift,
- &error_fatal);
+ /*
+ * This is a hack for the benefit of KVM PR - it abuses the SDR1
+ * slot in kvm_sregs to communicate the userspace address of the
+ * HPT
+ */
+ if (kvm_enabled()) {
+ env->spr[SPR_SDR1] = (target_ulong)(uintptr_t)spapr->htab
+ | (spapr->htab_shift - 18);
+ if (kvmppc_put_books_sregs(cpu) < 0) {
+ error_report("Unable to update SDR1 in KVM");
+ exit(1);
+ }
+ }
}
static void spapr_cpu_destroy(PowerPCCPU *cpu)
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 85d96f6dc6..f05a90ed2c 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -326,7 +326,6 @@ static target_ulong h_protect(PowerPCCPU *cpu, sPAPRMachineState *spapr,
static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
target_ulong opcode, target_ulong *args)
{
- CPUPPCState *env = &cpu->env;
target_ulong flags = args[0];
target_ulong ptex = args[1];
uint8_t *hpte;
@@ -342,7 +341,7 @@ static target_ulong h_read(PowerPCCPU *cpu, sPAPRMachineState *spapr,
n_entries = 4;
}
- hpte = env->external_htab + (ptex * HASH_PTE_SIZE_64);
+ hpte = spapr->htab + (ptex * HASH_PTE_SIZE_64);
for (i = 0, ridx = 0; i < n_entries; i++) {
args[ridx++] = ldq_p(hpte);