author     Peter Maydell <peter.maydell@linaro.org>    2020-03-18 15:07:57 +0000
committer  Peter Maydell <peter.maydell@linaro.org>    2020-03-18 15:07:57 +0000
commit     b319df553707a3d44c7d027a5d5562f672a768a9 (patch)
tree       6a23d058509b70cf64d7c87ca96a252d3409e5dc /target
parent     d649689a8ecb2e276cc20d3af6d416e3c299cb17 (diff)
parent     6961eae79f58385482775dc0a6c3d553f633662d (diff)
Merge remote-tracking branch 'remotes/dgibson/tags/ppc-for-5.0-20200317' into staging
ppc patch queue 2020-03-17

Here's my final pull request for the qemu-5.0 soft freeze. Sorry this is
just under the wire - I hit some last minute problems that took a while
to fix up and retest.

Highlights are:
 * Numerous fixes for the FWNMI feature
 * A handful of cleanups to the device tree construction code
 * Numerous fixes for the spapr-vscsi device
 * A number of fixes and cleanups for real mode (MMU off) softmmu handling
 * Fixes for handling of the PAPR RMA
 * Better handling of hotplug/unplug events during boot
 * Assorted other fixes

# gpg: Signature made Tue 17 Mar 2020 09:55:07 GMT
# gpg:                using RSA key 75F46586AE61A66CC44E87DC6C38CACA20D9B392
# gpg: Good signature from "David Gibson <david@gibson.dropbear.id.au>" [full]
# gpg:                 aka "David Gibson (Red Hat) <dgibson@redhat.com>" [full]
# gpg:                 aka "David Gibson (ozlabs.org) <dgibson@ozlabs.org>" [full]
# gpg:                 aka "David Gibson (kernel.org) <dwg@kernel.org>" [unknown]
# Primary key fingerprint: 75F4 6586 AE61 A66C C44E 87DC 6C38 CACA 20D9 B392

* remotes/dgibson/tags/ppc-for-5.0-20200317: (45 commits)
  pseries: Update SLOF firmware image
  ppc/spapr: Ignore common "ibm,nmi-interlock" Linux bug
  ppc/spapr: Implement FWNMI System Reset delivery
  target/ppc: allow ppc_cpu_do_system_reset to take an alternate vector
  ppc/spapr: Allow FWNMI on TCG
  ppc/spapr: Fix FWNMI machine check interrupt delivery
  ppc/spapr: Add FWNMI System Reset state
  ppc/spapr: Change FWNMI names
  ppc/spapr: Fix FWNMI machine check failure handling
  spapr: Rename DT functions to newer naming convention
  spapr: Move creation of ibm,architecture-vec-5 property
  spapr: Move creation of ibm,dynamic-reconfiguration-memory dt node
  spapr/rtas: Reserve space for RTAS blob and log
  pseries: Update SLOF firmware image
  ppc/spapr: Move GPRs setup to one place
  target/ppc: Fix rlwinm on ppc64
  spapr/xive: use SPAPR_IRQ_IPI to define IPI ranges exposed to the guest
  hw/scsi/spapr_vscsi: Convert debug fprintf() to trace event
  hw/scsi/spapr_vscsi: Prevent buffer overflow
  hw/scsi/spapr_vscsi: Do not mix SRP IU size with DMA buffer size
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'target')
-rw-r--r--  target/ppc/cpu-qom.h               1
-rw-r--r--  target/ppc/cpu.h                  28
-rw-r--r--  target/ppc/excp_helper.c          79
-rw-r--r--  target/ppc/kvm.c                   5
-rw-r--r--  target/ppc/kvm_ppc.h               7
-rw-r--r--  target/ppc/mmu-hash64.c          319
-rw-r--r--  target/ppc/translate.c            20
-rw-r--r--  target/ppc/translate_init.inc.c  116
8 files changed, 251 insertions, 324 deletions
diff --git a/target/ppc/cpu-qom.h b/target/ppc/cpu-qom.h
index e499575dc8..15d6b54a7d 100644
--- a/target/ppc/cpu-qom.h
+++ b/target/ppc/cpu-qom.h
@@ -177,6 +177,7 @@ typedef struct PowerPCCPUClass {
uint64_t insns_flags;
uint64_t insns_flags2;
uint64_t msr_mask;
+ uint64_t lpcr_mask; /* Available bits in the LPCR */
uint64_t lpcr_pm; /* Power-saving mode Exit Cause Enable bits */
powerpc_mmu_t mmu_model;
powerpc_excp_t excp_model;
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index b283042515..f8c7d6f19c 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -24,8 +24,6 @@
#include "exec/cpu-defs.h"
#include "cpu-qom.h"
-/* #define PPC_EMULATE_32BITS_HYPV */
-
#define TCG_GUEST_DEFAULT_MO 0
#define TARGET_PAGE_BITS_64K 16
@@ -300,13 +298,12 @@ typedef struct ppc_v3_pate_t {
#define MSR_SF 63 /* Sixty-four-bit mode hflags */
#define MSR_TAG 62 /* Tag-active mode (POWERx ?) */
#define MSR_ISF 61 /* Sixty-four-bit interrupt mode on 630 */
-#define MSR_SHV 60 /* hypervisor state hflags */
+#define MSR_HV 60 /* hypervisor state hflags */
#define MSR_TS0 34 /* Transactional state, 2 bits (Book3s) */
#define MSR_TS1 33
#define MSR_TM 32 /* Transactional Memory Available (Book3s) */
#define MSR_CM 31 /* Computation mode for BookE hflags */
#define MSR_ICM 30 /* Interrupt computation mode for BookE */
-#define MSR_THV 29 /* hypervisor state for 32 bits PowerPC hflags */
#define MSR_GS 28 /* guest state for BookE */
#define MSR_UCLE 26 /* User-mode cache lock enable for BookE */
#define MSR_VR 25 /* altivec available x hflags */
@@ -401,10 +398,13 @@ typedef struct ppc_v3_pate_t {
#define msr_sf ((env->msr >> MSR_SF) & 1)
#define msr_isf ((env->msr >> MSR_ISF) & 1)
-#define msr_shv ((env->msr >> MSR_SHV) & 1)
+#if defined(TARGET_PPC64)
+#define msr_hv ((env->msr >> MSR_HV) & 1)
+#else
+#define msr_hv (0)
+#endif
#define msr_cm ((env->msr >> MSR_CM) & 1)
#define msr_icm ((env->msr >> MSR_ICM) & 1)
-#define msr_thv ((env->msr >> MSR_THV) & 1)
#define msr_gs ((env->msr >> MSR_GS) & 1)
#define msr_ucle ((env->msr >> MSR_UCLE) & 1)
#define msr_vr ((env->msr >> MSR_VR) & 1)
@@ -449,16 +449,9 @@ typedef struct ppc_v3_pate_t {
/* Hypervisor bit is more specific */
#if defined(TARGET_PPC64)
-#define MSR_HVB (1ULL << MSR_SHV)
-#define msr_hv msr_shv
-#else
-#if defined(PPC_EMULATE_32BITS_HYPV)
-#define MSR_HVB (1ULL << MSR_THV)
-#define msr_hv msr_thv
+#define MSR_HVB (1ULL << MSR_HV)
#else
#define MSR_HVB (0ULL)
-#define msr_hv (0)
-#endif
#endif
/* DSISR */
@@ -1051,10 +1044,6 @@ struct CPUPPCState {
uint32_t flags;
uint64_t insns_flags;
uint64_t insns_flags2;
-#if defined(TARGET_PPC64)
- ppc_slb_t vrma_slb;
- target_ulong rmls;
-#endif
int error_code;
uint32_t pending_interrupts;
@@ -1231,7 +1220,8 @@ int ppc64_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int ppc32_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
#ifndef CONFIG_USER_ONLY
-void ppc_cpu_do_system_reset(CPUState *cs);
+void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector);
+void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector);
extern const VMStateDescription vmstate_ppc_cpu;
#endif
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index 027f54c0ed..08bc885ca6 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -128,6 +128,37 @@ static uint64_t ppc_excp_vector_offset(CPUState *cs, int ail)
return offset;
}
+static inline void powerpc_set_excp_state(PowerPCCPU *cpu,
+ target_ulong vector, target_ulong msr)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+
+ /*
+ * We don't use hreg_store_msr here as already have treated any
+ * special case that could occur. Just store MSR and update hflags
+ *
+ * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
+ * will prevent setting of the HV bit which some exceptions might need
+ * to do.
+ */
+ env->msr = msr & env->msr_mask;
+ hreg_compute_hflags(env);
+ env->nip = vector;
+ /* Reset exception state */
+ cs->exception_index = POWERPC_EXCP_NONE;
+ env->error_code = 0;
+
+ /* Reset the reservation */
+ env->reserve_addr = -1;
+
+ /*
+ * Any interrupt is context synchronizing, check if TCG TLB needs
+ * a delayed flush on ppc64
+ */
+ check_tlb_flush(env, false);
+}
+
/*
* Note that this function should be greatly optimized when called
* with a constant excp, from ppc_hw_interrupt
@@ -768,29 +799,8 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
}
}
#endif
- /*
- * We don't use hreg_store_msr here as already have treated any
- * special case that could occur. Just store MSR and update hflags
- *
- * Note: We *MUST* not use hreg_store_msr() as-is anyway because it
- * will prevent setting of the HV bit which some exceptions might need
- * to do.
- */
- env->msr = new_msr & env->msr_mask;
- hreg_compute_hflags(env);
- env->nip = vector;
- /* Reset exception state */
- cs->exception_index = POWERPC_EXCP_NONE;
- env->error_code = 0;
- /* Reset the reservation */
- env->reserve_addr = -1;
-
- /*
- * Any interrupt is context synchronizing, check if TCG TLB needs
- * a delayed flush on ppc64
- */
- check_tlb_flush(env, false);
+ powerpc_set_excp_state(cpu, vector, new_msr);
}
void ppc_cpu_do_interrupt(CPUState *cs)
@@ -951,12 +961,35 @@ static void ppc_hw_interrupt(CPUPPCState *env)
}
}
-void ppc_cpu_do_system_reset(CPUState *cs)
+void ppc_cpu_do_system_reset(CPUState *cs, target_ulong vector)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_RESET);
+ if (vector != -1) {
+ env->nip = vector;
+ }
+}
+
+void ppc_cpu_do_fwnmi_machine_check(CPUState *cs, target_ulong vector)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ target_ulong msr = 0;
+
+ /*
+ * Set MSR and NIP for the handler, SRR0/1, DAR and DSISR have already
+ * been set by KVM.
+ */
+ msr = (1ULL << MSR_ME);
+ msr |= env->msr & (1ULL << MSR_SF);
+ if (!(*pcc->interrupts_big_endian)(cpu)) {
+ msr |= (1ULL << MSR_LE);
+ }
+
+ powerpc_set_excp_state(cpu, vector, msr);
}
#endif /* !CONFIG_USER_ONLY */
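
As a rough, stand-alone illustration of the handler MSR that the new
ppc_cpu_do_fwnmi_machine_check() composes above -- this is not part of the
patch; MSR_SF appears in this diff, while the MSR_ME and MSR_LE bit numbers
are assumptions taken from target/ppc/cpu.h:

#include <stdint.h>
#include <stdio.h>

#define MSR_SF 63   /* from the cpu.h hunk in this series */
#define MSR_ME 12   /* assumed to match target/ppc/cpu.h */
#define MSR_LE  0   /* assumed to match target/ppc/cpu.h */

/* Sketch of the FWNMI machine check MSR: ME always set, SF carried over
 * from the current MSR, LE set when the handler is little-endian. */
static uint64_t fwnmi_mc_msr(uint64_t cur_msr, int big_endian_handler)
{
    uint64_t msr = 1ULL << MSR_ME;

    msr |= cur_msr & (1ULL << MSR_SF);
    if (!big_endian_handler) {
        msr |= 1ULL << MSR_LE;
    }
    return msr;
}

int main(void)
{
    /* 64-bit little-endian guest: expect SF | ME | LE */
    printf("msr = 0x%016llx\n",
           (unsigned long long)fwnmi_mc_msr(1ULL << MSR_SF, 0));
    return 0;
}
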
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 7f44b1aa1a..597f72be1b 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2113,7 +2113,7 @@ void kvmppc_error_append_smt_possible_hint(Error *const *errp)
#ifdef TARGET_PPC64
-uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
struct kvm_ppc_smmu_info info;
long rampagesize, best_page_shift;
@@ -2140,8 +2140,7 @@ uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
}
}
- return MIN(current_size,
- 1ULL << (best_page_shift + hash_shift - 7));
+ return 1ULL << (best_page_shift + hash_shift - 7);
}
#endif
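
A worked example of the kvmppc_vrma_limit() arithmetic above, as a
stand-alone sketch; the 16 MiB page size and 2^28-byte HPT are hypothetical
inputs chosen for illustration, not values from the patch:

#include <stdint.h>
#include <stdio.h>

/* Same formula as kvmppc_vrma_limit():
 * limit = 2^(best_page_shift + hash_shift - 7). */
static uint64_t vrma_limit(unsigned best_page_shift, unsigned hash_shift)
{
    return 1ULL << (best_page_shift + hash_shift - 7);
}

int main(void)
{
    /* e.g. 16 MiB backing pages (shift 24) and a 256 MiB HPT (shift 28)
     * give 1ULL << 45, i.e. a 32 TiB VRMA limit. */
    printf("limit = 0x%llx\n", (unsigned long long)vrma_limit(24, 28));
    return 0;
}
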
diff --git a/target/ppc/kvm_ppc.h b/target/ppc/kvm_ppc.h
index 9e4f2357cc..332fa0aa1c 100644
--- a/target/ppc/kvm_ppc.h
+++ b/target/ppc/kvm_ppc.h
@@ -47,7 +47,7 @@ void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
-uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
+uint64_t kvmppc_vrma_limit(unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
@@ -255,10 +255,9 @@ static inline int kvmppc_reset_htab(int shift_hint)
return 0;
}
-static inline uint64_t kvmppc_rma_size(uint64_t current_size,
- unsigned int hash_shift)
+static inline uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
{
- return ram_size;
+ g_assert_not_reached();
}
static inline bool kvmppc_hpt_needs_host_contiguous_pages(void)
diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c
index da8966ccf5..34f6009b1e 100644
--- a/target/ppc/mmu-hash64.c
+++ b/target/ppc/mmu-hash64.c
@@ -18,6 +18,7 @@
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
+#include "qemu/units.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
@@ -668,6 +669,21 @@ unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
return 0;
}
+static bool ppc_hash64_use_vrma(CPUPPCState *env)
+{
+ switch (env->mmu_model) {
+ case POWERPC_MMU_3_00:
+ /*
+ * ISAv3.0 (POWER9) always uses VRMA, the VPM0 field and RMOR
+ * register no longer exist
+ */
+ return true;
+
+ default:
+ return !!(env->spr[SPR_LPCR] & LPCR_VPM0);
+ }
+}
+
static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
CPUPPCState *env = &POWERPC_CPU(cs)->env;
@@ -676,15 +692,7 @@ static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
if (msr_ir) {
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
} else {
- switch (env->mmu_model) {
- case POWERPC_MMU_3_00:
- /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
- vpm = true;
- break;
- default:
- vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
- break;
- }
+ vpm = ppc_hash64_use_vrma(env);
}
if (vpm && !msr_hv) {
cs->exception_index = POWERPC_EXCP_HISI;
@@ -702,15 +710,7 @@ static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
if (msr_dr) {
vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
} else {
- switch (env->mmu_model) {
- case POWERPC_MMU_3_00:
- /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
- vpm = true;
- break;
- default:
- vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
- break;
- }
+ vpm = ppc_hash64_use_vrma(env);
}
if (vpm && !msr_hv) {
cs->exception_index = POWERPC_EXCP_HDSI;
@@ -758,11 +758,67 @@ static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
}
+static target_ulong rmls_limit(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ /*
+ * In theory the meanings of RMLS values are implementation
+ * dependent. In practice, this seems to have been the set from
+ * POWER4+..POWER8, and RMLS is no longer supported in POWER9.
+ *
+ * Unsupported values mean the OS has shot itself in the
+ * foot. Return a 0-sized RMA in this case, which we expect
+ * to trigger an immediate DSI or ISI
+ */
+ static const target_ulong rma_sizes[16] = {
+ [0] = 256 * GiB,
+ [1] = 16 * GiB,
+ [2] = 1 * GiB,
+ [3] = 64 * MiB,
+ [4] = 256 * MiB,
+ [7] = 128 * MiB,
+ [8] = 32 * MiB,
+ };
+ target_ulong rmls = (env->spr[SPR_LPCR] & LPCR_RMLS) >> LPCR_RMLS_SHIFT;
+
+ return rma_sizes[rmls];
+}
+
+static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
+{
+ CPUPPCState *env = &cpu->env;
+ target_ulong lpcr = env->spr[SPR_LPCR];
+ uint32_t vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
+ target_ulong vsid = SLB_VSID_VRMA | ((vrmasd << 4) & SLB_VSID_LLP_MASK);
+ int i;
+
+ for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
+ const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
+
+ if (!sps->page_shift) {
+ break;
+ }
+
+ if ((vsid & SLB_VSID_LLP_MASK) == sps->slb_enc) {
+ slb->esid = SLB_ESID_V;
+ slb->vsid = vsid;
+ slb->sps = sps;
+ return 0;
+ }
+ }
+
+ error_report("Bad page size encoding in LPCR[VRMASD]; LPCR=0x"
+ TARGET_FMT_lx"\n", lpcr);
+
+ return -1;
+}
+
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
int rwx, int mmu_idx)
{
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
+ ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
unsigned apshift;
hwaddr ptex;
@@ -789,27 +845,32 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
*/
raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
- /* In HV mode, add HRMOR if top EA bit is clear */
- if (msr_hv || !env->has_hv_mode) {
+ if (cpu->vhyp) {
+ /*
+ * In virtual hypervisor mode, there's nothing to do:
+ * EA == GPA == qemu guest address
+ */
+ } else if (msr_hv || !env->has_hv_mode) {
+ /* In HV mode, add HRMOR if top EA bit is clear */
if (!(eaddr >> 63)) {
raddr |= env->spr[SPR_HRMOR];
}
- } else {
- /* Otherwise, check VPM for RMA vs VRMA */
- if (env->spr[SPR_LPCR] & LPCR_VPM0) {
- slb = &env->vrma_slb;
- if (slb->sps) {
- goto skip_slb_search;
- }
- /* Not much else to do here */
+ } else if (ppc_hash64_use_vrma(env)) {
+ /* Emulated VRMA mode */
+ slb = &vrma_slbe;
+ if (build_vrma_slbe(cpu, slb) != 0) {
+ /* Invalid VRMA setup, machine check */
cs->exception_index = POWERPC_EXCP_MCHECK;
env->error_code = 0;
return 1;
- } else if (raddr < env->rmls) {
- /* RMA. Check bounds in RMLS */
- raddr |= env->spr[SPR_RMOR];
- } else {
- /* The access failed, generate the approriate interrupt */
+ }
+
+ goto skip_slb_search;
+ } else {
+ target_ulong limit = rmls_limit(cpu);
+
+ /* Emulated old-style RMO mode, bounds check against RMLS */
+ if (raddr >= limit) {
if (rwx == 2) {
ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
} else {
@@ -821,6 +882,8 @@ int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
}
return 1;
}
+
+ raddr |= env->spr[SPR_RMOR];
}
tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
@@ -943,6 +1006,7 @@ skip_slb_search:
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
CPUPPCState *env = &cpu->env;
+ ppc_slb_t vrma_slbe;
ppc_slb_t *slb;
hwaddr ptex, raddr;
ppc_hash_pte64_t pte;
@@ -953,22 +1017,29 @@ hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
/* In real mode the top 4 effective address bits are ignored */
raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
- /* In HV mode, add HRMOR if top EA bit is clear */
- if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+ if (cpu->vhyp) {
+ /*
+ * In virtual hypervisor mode, there's nothing to do:
+ * EA == GPA == qemu guest address
+ */
+ return raddr;
+ } else if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
+ /* In HV mode, add HRMOR if top EA bit is clear */
return raddr | env->spr[SPR_HRMOR];
- }
+ } else if (ppc_hash64_use_vrma(env)) {
+ /* Emulated VRMA mode */
+ slb = &vrma_slbe;
+ if (build_vrma_slbe(cpu, slb) != 0) {
+ return -1;
+ }
+ } else {
+ target_ulong limit = rmls_limit(cpu);
- /* Otherwise, check VPM for RMA vs VRMA */
- if (env->spr[SPR_LPCR] & LPCR_VPM0) {
- slb = &env->vrma_slb;
- if (!slb->sps) {
+ /* Emulated old-style RMO mode, bounds check against RMLS */
+ if (raddr >= limit) {
return -1;
}
- } else if (raddr < env->rmls) {
- /* RMA. Check bounds in RMLS */
return raddr | env->spr[SPR_RMOR];
- } else {
- return -1;
}
} else {
slb = slb_lookup(cpu, addr);
@@ -997,168 +1068,12 @@ void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}
-static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
-{
- CPUPPCState *env = &cpu->env;
- uint64_t lpcr = env->spr[SPR_LPCR];
-
- /*
- * This is the full 4 bits encoding of POWER8. Previous
- * CPUs only support a subset of these but the filtering
- * is done when writing LPCR
- */
- switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
- case 0x8: /* 32MB */
- env->rmls = 0x2000000ull;
- break;
- case 0x3: /* 64MB */
- env->rmls = 0x4000000ull;
- break;
- case 0x7: /* 128MB */
- env->rmls = 0x8000000ull;
- break;
- case 0x4: /* 256MB */
- env->rmls = 0x10000000ull;
- break;
- case 0x2: /* 1GB */
- env->rmls = 0x40000000ull;
- break;
- case 0x1: /* 16GB */
- env->rmls = 0x400000000ull;
- break;
- default:
- /* What to do here ??? */
- env->rmls = 0;
- }
-}
-
-static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
-{
- CPUPPCState *env = &cpu->env;
- const PPCHash64SegmentPageSizes *sps = NULL;
- target_ulong esid, vsid, lpcr;
- ppc_slb_t *slb = &env->vrma_slb;
- uint32_t vrmasd;
- int i;
-
- /* First clear it */
- slb->esid = slb->vsid = 0;
- slb->sps = NULL;
-
- /* Is VRMA enabled ? */
- lpcr = env->spr[SPR_LPCR];
- if (!(lpcr & LPCR_VPM0)) {
- return;
- }
-
- /*
- * Make one up. Mostly ignore the ESID which will not be needed
- * for translation
- */
- vsid = SLB_VSID_VRMA;
- vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
- vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
- esid = SLB_ESID_V;
-
- for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
- const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
-
- if (!sps1->page_shift) {
- break;
- }
-
- if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
- sps = sps1;
- break;
- }
- }
-
- if (!sps) {
- error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
- " vsid 0x"TARGET_FMT_lx, esid, vsid);
- return;
- }
-
- slb->vsid = vsid;
- slb->esid = esid;
- slb->sps = sps;
-}
-
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
CPUPPCState *env = &cpu->env;
- uint64_t lpcr = 0;
-
- /* Filter out bits */
- switch (env->mmu_model) {
- case POWERPC_MMU_64B: /* 970 */
- if (val & 0x40) {
- lpcr |= LPCR_LPES0;
- }
- if (val & 0x8000000000000000ull) {
- lpcr |= LPCR_LPES1;
- }
- if (val & 0x20) {
- lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
- }
- if (val & 0x4000000000000000ull) {
- lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
- }
- if (val & 0x2000000000000000ull) {
- lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
- }
- env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;
- /*
- * XXX We could also write LPID from HID4 here
- * but since we don't tag any translation on it
- * it doesn't actually matter
- *
- * XXX For proper emulation of 970 we also need
- * to dig HRMOR out of HID5
- */
- break;
- case POWERPC_MMU_2_03: /* P5p */
- lpcr = val & (LPCR_RMLS | LPCR_ILE |
- LPCR_LPES0 | LPCR_LPES1 |
- LPCR_RMI | LPCR_HDICE);
- break;
- case POWERPC_MMU_2_06: /* P7 */
- lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
- LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
- LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
- LPCR_MER | LPCR_TC |
- LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
- break;
- case POWERPC_MMU_2_07: /* P8 */
- lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
- LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
- LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
- LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
- LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
- break;
- case POWERPC_MMU_3_00: /* P9 */
- lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
- (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
- LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
- (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
- LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
- LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
- /*
- * If we have a virtual hypervisor, we need to bring back RMLS. It
- * doesn't exist on an actual P9 but that's all we know how to
- * configure with softmmu at the moment
- */
- if (cpu->vhyp) {
- lpcr |= (val & LPCR_RMLS);
- }
- break;
- default:
- ;
- }
- env->spr[SPR_LPCR] = lpcr;
- ppc_hash64_update_rmls(cpu);
- ppc_hash64_update_vrma(cpu);
+ env->spr[SPR_LPCR] = val & pcc->lpcr_mask;
}
void helper_store_lpcr(CPUPPCState *env, target_ulong val)
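
The rma_sizes[] table introduced in rmls_limit() above can be exercised in
isolation. The sketch below restates that lookup outside QEMU; the helper
name and the example RMLS encoding 0x8 are only for illustration:

#include <stdint.h>
#include <stdio.h>

#define MiB (1024ULL * 1024)
#define GiB (1024 * MiB)

/* Restatement of the rma_sizes[] table from rmls_limit(), taking the
 * already-extracted 4-bit RMLS field value.  Unlisted encodings return 0,
 * i.e. a zero-sized RMA that should fault immediately. */
static uint64_t rmls_to_rma_size(unsigned rmls)
{
    static const uint64_t rma_sizes[16] = {
        [0] = 256 * GiB,
        [1] = 16 * GiB,
        [2] = 1 * GiB,
        [3] = 64 * MiB,
        [4] = 256 * MiB,
        [7] = 128 * MiB,
        [8] = 32 * MiB,
    };

    return rmls < 16 ? rma_sizes[rmls] : 0;
}

int main(void)
{
    /* e.g. the 0x8 encoding, a 32 MiB real mode area */
    printf("RMLS=0x8 -> %llu MiB\n",
           (unsigned long long)(rmls_to_rma_size(0x8) / MiB));
    return 0;
}
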
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index 36fa27367c..127c82a24e 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -1938,15 +1938,17 @@ static void gen_rlwinm(DisasContext *ctx)
me += 32;
#endif
mask = MASK(mb, me);
- if (sh == 0) {
- tcg_gen_andi_tl(t_ra, t_rs, mask);
- } else if (mask <= 0xffffffffu) {
- TCGv_i32 t0 = tcg_temp_new_i32();
- tcg_gen_trunc_tl_i32(t0, t_rs);
- tcg_gen_rotli_i32(t0, t0, sh);
- tcg_gen_andi_i32(t0, t0, mask);
- tcg_gen_extu_i32_tl(t_ra, t0);
- tcg_temp_free_i32(t0);
+ if (mask <= 0xffffffffu) {
+ if (sh == 0) {
+ tcg_gen_andi_tl(t_ra, t_rs, mask);
+ } else {
+ TCGv_i32 t0 = tcg_temp_new_i32();
+ tcg_gen_trunc_tl_i32(t0, t_rs);
+ tcg_gen_rotli_i32(t0, t0, sh);
+ tcg_gen_andi_i32(t0, t0, mask);
+ tcg_gen_extu_i32_tl(t_ra, t0);
+ tcg_temp_free_i32(t0);
+ }
} else {
#if defined(TARGET_PPC64)
tcg_gen_deposit_i64(t_ra, t_rs, t_rs, 32, 32);
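
The rlwinm change above narrows the sh == 0 shortcut to masks that fit in
32 bits. A stand-alone reference model (not QEMU code) of rlwinm semantics
shows why: the rotated low word is replicated into both halves before
masking, so a wrapping mask (mb > me) must pick its upper 32 bits from that
replicated copy, which a plain rs & mask misses. The example mask is meant
to correspond to mb=24, me=7, assuming the usual MASK() convention:

#include <stdint.h>
#include <stdio.h>

/* Reference model of rlwinm: rotate the low 32 bits of rs left by sh,
 * replicate the result into both halves, then apply the mask. */
static uint64_t rlwinm_model(uint64_t rs, unsigned sh, uint64_t mask)
{
    uint32_t lo = (uint32_t)rs;
    uint32_t rot = (lo << sh) | (sh ? lo >> (32 - sh) : 0);
    uint64_t dup = ((uint64_t)rot << 32) | rot;

    return dup & mask;
}

int main(void)
{
    /* Wrapping mask (mb > me): the old sh == 0 path computed rs & mask,
     * losing the replicated low word in bits 63..32. */
    uint64_t rs = 0x00000000deadbeefULL;
    uint64_t wrap_mask = 0xffffffffff0000ffULL;   /* illustrative, mb=24 me=7 */

    printf("model: 0x%016llx  plain AND: 0x%016llx\n",
           (unsigned long long)rlwinm_model(rs, 0, wrap_mask),
           (unsigned long long)(rs & wrap_mask));
    return 0;
}
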
diff --git a/target/ppc/translate_init.inc.c b/target/ppc/translate_init.inc.c
index 53995f62ea..0ae145e18d 100644
--- a/target/ppc/translate_init.inc.c
+++ b/target/ppc/translate_init.inc.c
@@ -7895,25 +7895,21 @@ static void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn)
{
gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
}
-
-static void spr_write_970_hid4(DisasContext *ctx, int sprn, int gprn)
-{
-#if defined(TARGET_PPC64)
- spr_write_generic(ctx, sprn, gprn);
- gen_helper_store_lpcr(cpu_env, cpu_gpr[gprn]);
-#endif
-}
-
#endif /* !defined(CONFIG_USER_ONLY) */
static void gen_spr_970_lpar(CPUPPCState *env)
{
#if !defined(CONFIG_USER_ONLY)
- /* Logical partitionning */
- /* PPC970: HID4 is effectively the LPCR */
+ /*
+ * PPC970: HID4 covers things later controlled by the LPCR and
+ * RMOR in later CPUs, but with a different encoding. We only
+ * support the 970 in "Apple mode" which has all hypervisor
+ * facilities disabled by strapping, so we can basically just
+ * ignore it
+ */
spr_register(env, SPR_970_HID4, "HID4",
SPR_NOACCESS, SPR_NOACCESS,
- &spr_read_generic, &spr_write_970_hid4,
+ &spr_read_generic, &spr_write_generic,
0x00000000);
#endif
}
@@ -8019,12 +8015,16 @@ static void gen_spr_book3s_ids(CPUPPCState *env)
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register_hv(env, SPR_RMOR, "RMOR",
+ spr_register_hv(env, SPR_HRMOR, "HRMOR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
0x00000000);
- spr_register_hv(env, SPR_HRMOR, "HRMOR",
+}
+
+static void gen_spr_rmor(CPUPPCState *env)
+{
+ spr_register_hv(env, SPR_RMOR, "RMOR",
SPR_NOACCESS, SPR_NOACCESS,
SPR_NOACCESS, SPR_NOACCESS,
&spr_read_generic, &spr_write_generic,
@@ -8476,6 +8476,8 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
(1ull << MSR_DR) |
(1ull << MSR_PMM) |
(1ull << MSR_RI);
+ pcc->lpcr_mask = LPCR_RMLS | LPCR_ILE | LPCR_LPES0 | LPCR_LPES1 |
+ LPCR_RMI | LPCR_HDICE;
pcc->mmu_model = POWERPC_MMU_2_03;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -8492,44 +8494,6 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data)
pcc->l1_icache_size = 0x10000;
}
-/*
- * The CPU used to have a "compat" property which set the
- * compatibility mode PVR. However, this was conceptually broken - it
- * only makes sense on the pseries machine type (otherwise the guest
- * owns the PCR and can control the compatibility mode itself). It's
- * been replaced with the 'max-cpu-compat' property on the pseries
- * machine type. For backwards compatibility, pseries specially
- * parses the -cpu parameter and converts old compat= parameters into
- * the appropriate machine parameters. This stub implementation of
- * the parameter catches any uses on explicitly created CPUs.
- */
-static void getset_compat_deprecated(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
-{
- QNull *null = NULL;
-
- if (!qtest_enabled()) {
- warn_report("CPU 'compat' property is deprecated and has no effect; "
- "use max-cpu-compat machine property instead");
- }
- visit_type_null(v, name, &null, NULL);
- qobject_unref(null);
-}
-
-static const PropertyInfo ppc_compat_deprecated_propinfo = {
- .name = "str",
- .description = "compatibility mode (deprecated)",
- .get = getset_compat_deprecated,
- .set = getset_compat_deprecated,
-};
-static Property powerpc_servercpu_properties[] = {
- {
- .name = "compat",
- .info = &ppc_compat_deprecated_propinfo,
- },
- DEFINE_PROP_END_OF_LIST(),
-};
-
static void init_proc_POWER7(CPUPPCState *env)
{
/* Common Registers */
@@ -8539,6 +8503,7 @@ static void init_proc_POWER7(CPUPPCState *env)
/* POWER7 Specific Registers */
gen_spr_book3s_ids(env);
+ gen_spr_rmor(env);
gen_spr_amr(env);
gen_spr_book3s_purr(env);
gen_spr_power5p_common(env);
@@ -8611,7 +8576,6 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER7";
dc->desc = "POWER7";
- device_class_set_props(dc, powerpc_servercpu_properties);
pcc->pvr_match = ppc_pvr_match_power7;
pcc->pcr_mask = PCR_VEC_DIS | PCR_VSX_DIS | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@@ -8652,6 +8616,12 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
+ LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
+ LPCR_MER | LPCR_TC |
+ LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -8668,7 +8638,6 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
- pcc->lpcr_pm = LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2;
}
static void init_proc_POWER8(CPUPPCState *env)
@@ -8680,6 +8649,7 @@ static void init_proc_POWER8(CPUPPCState *env)
/* POWER8 Specific Registers */
gen_spr_book3s_ids(env);
+ gen_spr_rmor(env);
gen_spr_amr(env);
gen_spr_iamr(env);
gen_spr_book3s_purr(env);
@@ -8776,7 +8746,6 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
- device_class_set_props(dc, powerpc_servercpu_properties);
pcc->pvr_match = ppc_pvr_match_power8;
pcc->pcr_mask = PCR_TM_DIS | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05;
@@ -8804,7 +8773,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
PPC2_TM | PPC2_PM_ISA206;
pcc->msr_mask = (1ull << MSR_SF) |
- (1ull << MSR_SHV) |
+ (1ull << MSR_HV) |
(1ull << MSR_TM) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
@@ -8823,6 +8792,13 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
(1ull << MSR_TS0) |
(1ull << MSR_TS1) |
(1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
+ LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
+ LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
+ LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
+ LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
+ LPCR_P8_PECE3 | LPCR_P8_PECE4;
pcc->mmu_model = POWERPC_MMU_2_07;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
@@ -8840,8 +8816,6 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
- pcc->lpcr_pm = LPCR_P8_PECE0 | LPCR_P8_PECE1 | LPCR_P8_PECE2 |
- LPCR_P8_PECE3 | LPCR_P8_PECE4;
}
#ifdef CONFIG_SOFTMMU
@@ -8988,7 +8962,6 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER9";
dc->desc = "POWER9";
- device_class_set_props(dc, powerpc_servercpu_properties);
pcc->pvr_match = ppc_pvr_match_power9;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07;
pcc->pcr_supported = PCR_COMPAT_3_00 | PCR_COMPAT_2_07 | PCR_COMPAT_2_06 |
@@ -9017,7 +8990,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL;
pcc->msr_mask = (1ull << MSR_SF) |
- (1ull << MSR_SHV) |
+ (1ull << MSR_HV) |
(1ull << MSR_TM) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
@@ -9034,6 +9007,14 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+ LPCR_DEE | LPCR_OEE))
+ | LPCR_MER | LPCR_GTSE | LPCR_TC |
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault;
@@ -9053,7 +9034,6 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
- pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
}
#ifdef CONFIG_SOFTMMU
@@ -9198,7 +9178,6 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
dc->fw_name = "PowerPC,POWER10";
dc->desc = "POWER10";
- device_class_set_props(dc, powerpc_servercpu_properties);
pcc->pvr_match = ppc_pvr_match_power10;
pcc->pcr_mask = PCR_COMPAT_2_05 | PCR_COMPAT_2_06 | PCR_COMPAT_2_07 |
PCR_COMPAT_3_00;
@@ -9228,7 +9207,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
PPC2_ISA205 | PPC2_ISA207S | PPC2_FP_CVT_S64 |
PPC2_TM | PPC2_ISA300 | PPC2_PRCNTL;
pcc->msr_mask = (1ull << MSR_SF) |
- (1ull << MSR_SHV) |
+ (1ull << MSR_HV) |
(1ull << MSR_TM) |
(1ull << MSR_VR) |
(1ull << MSR_VSX) |
@@ -9245,6 +9224,14 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
(1ull << MSR_PMM) |
(1ull << MSR_RI) |
(1ull << MSR_LE);
+ pcc->lpcr_mask = LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
+ (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
+ LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
+ (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
+ LPCR_DEE | LPCR_OEE))
+ | LPCR_MER | LPCR_GTSE | LPCR_TC |
+ LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE;
+ pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
pcc->mmu_model = POWERPC_MMU_3_00;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc64_v3_handle_mmu_fault;
@@ -9263,7 +9250,6 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data)
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
pcc->interrupts_big_endian = ppc_cpu_interrupts_big_endian_lpcr;
- pcc->lpcr_pm = LPCR_PDEE | LPCR_HDEE | LPCR_EEE | LPCR_DEE | LPCR_OEE;
}
#if !defined(CONFIG_USER_ONLY)
@@ -10486,6 +10472,8 @@ static void ppc_cpu_parse_featurestr(const char *type, char *features,
*s = '\0';
for (i = 0; inpieces[i]; i++) {
if (g_str_has_prefix(inpieces[i], "compat=")) {
+ warn_report_once("CPU 'compat' property is deprecated; "
+ "use max-cpu-compat machine property instead");
compat_str = inpieces[i];
continue;
}