Diffstat (limited to 'target/ppc')
-rw-r--r--   target/ppc/Makefile.objs     |   1
-rw-r--r--   target/ppc/cpu-models.h      |   1
-rw-r--r--   target/ppc/cpu.h             |   5
-rw-r--r--   target/ppc/excp_helper.c     |   3
-rw-r--r--   target/ppc/kvm.c             |  23
-rw-r--r--   target/ppc/mmu-book3s-v3.c   |   6
-rw-r--r--   target/ppc/mmu-book3s-v3.h   |   5
-rw-r--r--   target/ppc/mmu-radix64.c     | 259
-rw-r--r--   target/ppc/mmu-radix64.h     |  72
-rw-r--r--   target/ppc/translate.c       |  48
-rw-r--r--   target/ppc/translate_init.c  |  38
11 files changed, 426 insertions(+), 35 deletions(-)
diff --git a/target/ppc/Makefile.objs b/target/ppc/Makefile.objs
index f963777277..f92ba67ebd 100644
--- a/target/ppc/Makefile.objs
+++ b/target/ppc/Makefile.objs
@@ -4,6 +4,7 @@ obj-y += translate.o
 ifeq ($(CONFIG_SOFTMMU),y)
 obj-y += machine.o mmu_helper.o mmu-hash32.o monitor.o arch_dump.o
 obj-$(TARGET_PPC64) += mmu-hash64.o mmu-book3s-v3.o compat.o
+obj-$(TARGET_PPC64) += mmu-radix64.o
 endif
 obj-$(CONFIG_KVM) += kvm.o
 obj-$(call lnot,$(CONFIG_KVM)) += kvm-stub.o
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index d587e69bbc..b563c45b68 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -561,6 +561,7 @@ enum {
     CPU_POWERPC_POWER8NVL_BASE     = 0x004C0000,
     CPU_POWERPC_POWER8NVL_v10      = 0x004C0100,
     CPU_POWERPC_POWER9_BASE        = 0x004E0000,
+    CPU_POWERPC_POWER9_DD1         = 0x004E0100,
     CPU_POWERPC_970_v22            = 0x00390202,
     CPU_POWERPC_970FX_v10          = 0x00391100,
     CPU_POWERPC_970FX_v20          = 0x003C0200,
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index e0ff0412d6..401e10e7da 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -30,6 +30,8 @@
 #define TARGET_LONG_BITS 64
 #define TARGET_PAGE_BITS 12
 
+#define TCG_GUEST_DEFAULT_MO 0
+
 /* Note that the official physical address space bits is 62-M where M
    is implementation dependent.  I've not looked up M for the set of
    cpus we emulate at the system level.  */
@@ -480,6 +482,8 @@ struct ppc_slb_t {
 #define DSISR_ISSTORE            0x02000000
 /* Not permitted by virtual page class key protection */
 #define DSISR_AMR                0x00200000
+/* Unsupported Radix Tree Configuration */
+#define DSISR_R_BADCONFIG        0x00080000
 
 /* SRR1 error code fields */
 
@@ -1221,6 +1225,7 @@ static inline PowerPCCPU *ppc_env_get_cpu(CPUPPCState *env)
 
 PowerPCCPUClass *ppc_cpu_class_by_pvr(uint32_t pvr);
 PowerPCCPUClass *ppc_cpu_class_by_pvr_mask(uint32_t pvr);
+PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc);
 
 struct PPCVirtualHypervisor {
     Object parent;
diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c
index f4ee7aacd2..a6bcb47aa2 100644
--- a/target/ppc/excp_helper.c
+++ b/target/ppc/excp_helper.c
@@ -728,6 +728,9 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp)
     cs->exception_index = POWERPC_EXCP_NONE;
     env->error_code = 0;
 
+    /* Reset the reservation */
+    env->reserve_addr = -1;
+
     /* Any interrupt is context synchronizing, check if TCG TLB
      * needs a delayed flush on ppc64
      */
diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c
index 8574c369e6..51249ce79e 100644
--- a/target/ppc/kvm.c
+++ b/target/ppc/kvm.c
@@ -2380,6 +2380,17 @@ static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
 
 #if defined(TARGET_PPC64)
     pcc->radix_page_info = kvm_get_radix_page_info();
+
+    if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
+        /*
+         * POWER9 DD1 has some bugs which make it not really ISA 3.00
+         * compliant.  More importantly, advertising ISA 3.00
+         * architected mode may prevent guests from activating
+         * necessary DD1 workarounds.
+         */
+        pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
+                                | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
+    }
 #endif /* defined(TARGET_PPC64) */
 }
 
@@ -2413,18 +2424,6 @@ bool kvmppc_has_cap_mmu_hash_v3(void)
     return cap_mmu_hash_v3;
 }
 
-static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
-{
-    ObjectClass *oc = OBJECT_CLASS(pcc);
-
-    while (oc && !object_class_is_abstract(oc)) {
-        oc = object_class_get_parent(oc);
-    }
-    assert(oc);
-
-    return POWERPC_CPU_CLASS(oc);
-}
-
 PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
 {
     uint32_t host_pvr = mfpvr();
diff --git a/target/ppc/mmu-book3s-v3.c b/target/ppc/mmu-book3s-v3.c
index 005c96340a..e7798b3582 100644
--- a/target/ppc/mmu-book3s-v3.c
+++ b/target/ppc/mmu-book3s-v3.c
@@ -22,15 +22,13 @@
 #include "cpu.h"
 #include "mmu-hash64.h"
 #include "mmu-book3s-v3.h"
-#include "qemu/error-report.h"
+#include "mmu-radix64.h"
 
 int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                               int mmu_idx)
 {
     if (ppc64_radix_guest(cpu)) { /* Guest uses radix */
-        /* TODO - Unsupported */
-        error_report("Guest Radix Support Unimplemented");
-        exit(1);
+        return ppc_radix64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
     } else { /* Guest uses hash */
         return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
     }
 }
diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h
index 636f6ab95f..56095dab52 100644
--- a/target/ppc/mmu-book3s-v3.h
+++ b/target/ppc/mmu-book3s-v3.h
@@ -25,6 +25,11 @@
 /* Partition Table Entry Fields */
 #define PATBE1_GR 0x8000000000000000
 
+/* Process Table Entry */
+struct prtb_entry {
+    uint64_t prtbe0, prtbe1;
+};
+
 #ifdef TARGET_PPC64
 
 static inline bool ppc64_use_proc_tbl(PowerPCCPU *cpu)
diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c
new file mode 100644
index 0000000000..de18c0b69e
--- /dev/null
+++ b/target/ppc/mmu-radix64.c
@@ -0,0 +1,259 @@
+/*
+ *  PowerPC Radix MMU emulation helpers for QEMU.
+ *
+ *  Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "exec/helper-proto.h"
+#include "qemu/error-report.h"
+#include "sysemu/kvm.h"
+#include "kvm_ppc.h"
+#include "exec/log.h"
+#include "mmu-radix64.h"
+#include "mmu-book3s-v3.h"
+
+static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
+                                                 uint64_t *lpid, uint64_t *pid)
+{
+    /* We don't have HV support yet and shouldn't get here with it set anyway */
+    assert(!msr_hv);
+
+    if (!msr_hv) { /* !MSR[HV] -> Guest */
+        switch (eaddr & R_EADDR_QUADRANT) {
+        case R_EADDR_QUADRANT0: /* Guest application */
+            *lpid = env->spr[SPR_LPIDR];
+            *pid = env->spr[SPR_BOOKS_PID];
+            break;
+        case R_EADDR_QUADRANT1: /* Illegal */
+        case R_EADDR_QUADRANT2:
+            return false;
+        case R_EADDR_QUADRANT3: /* Guest OS */
+            *lpid = env->spr[SPR_LPIDR];
+            *pid = 0; /* pid set to 0 -> addresses guest operating system */
+            break;
+        }
+    }
+
+    return true;
+}
+
+static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+
+    if (rwx == 2) { /* Instruction Segment Interrupt */
+        cs->exception_index = POWERPC_EXCP_ISEG;
+    } else { /* Data Segment Interrupt */
+        cs->exception_index = POWERPC_EXCP_DSEG;
+        env->spr[SPR_DAR] = eaddr;
+    }
+    env->error_code = 0;
+}
+
+static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
+                                 uint32_t cause)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+
+    if (rwx == 2) { /* Instruction Storage Interrupt */
+        cs->exception_index = POWERPC_EXCP_ISI;
+        env->error_code = cause;
+    } else { /* Data Storage Interrupt */
+        cs->exception_index = POWERPC_EXCP_DSI;
+        if (rwx == 1) { /* Write -> Store */
+            cause |= DSISR_ISSTORE;
+        }
+        env->spr[SPR_DSISR] = cause;
+        env->spr[SPR_DAR] = eaddr;
+        env->error_code = 0;
+    }
+}
+
+
+static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
+                                   int *fault_cause, int *prot)
+{
+    CPUPPCState *env = &cpu->env;
+    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };
+
+    /* Check Page Attributes (pte58:59) */
+    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
+        /*
+         * Radix PTE entries with the non-idempotent I/O attribute are treated
+         * as guarded storage
+         */
+        *fault_cause |= SRR1_NOEXEC_GUARD;
+        return true;
+    }
+
+    /* Determine permissions allowed by Encoded Access Authority */
+    if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */
+        *prot = 0;
+    } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) {
+        *prot = ppc_radix64_get_prot_eaa(pte);
+    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */
+        *prot = ppc_radix64_get_prot_eaa(pte);
+        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
+    }
+
+    /* Check if requested access type is allowed */
+    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
+        *fault_cause |= DSISR_PROTFAULT;
+        return true;
+    }
+
+    return false;
+}
+
+static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
+                               hwaddr pte_addr, int *prot)
+{
+    CPUState *cs = CPU(cpu);
+    uint64_t npte;
+
+    npte = pte | R_PTE_R; /* Always set reference bit */
+
+    if (rwx == 1) { /* Store/Write */
+        npte |= R_PTE_C; /* Set change bit */
+    } else {
+        /*
+         * Treat the page as read-only for now, so that a later write
+         * will pass through this function again to set the C bit.
+         */
+        *prot &= ~PAGE_WRITE;
+    }
+
+    if (pte ^ npte) { /* If pte has changed then write it back */
+        stq_phys(cs->as, pte_addr, npte);
+    }
+}
+
+static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, int rwx, vaddr eaddr,
+                                      uint64_t base_addr, uint64_t nls,
+                                      hwaddr *raddr, int *psize,
+                                      int *fault_cause, int *prot,
+                                      hwaddr *pte_addr)
+{
+    CPUState *cs = CPU(cpu);
+    uint64_t index, pde;
+
+    if (nls < 5) { /* Directory maps less than 2**5 entries */
+        *fault_cause |= DSISR_R_BADCONFIG;
+        return 0;
+    }
+
+    /* Read page <directory/table> entry from guest address space */
+    index = eaddr >> (*psize - nls);    /* Shift */
+    index &= ((1UL << nls) - 1);        /* Mask */
+    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
+    if (!(pde & R_PTE_VALID)) {         /* Invalid Entry */
+        *fault_cause |= DSISR_NOPTE;
+        return 0;
+    }
+
+    *psize -= nls;
+
+    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
+    if (pde & R_PTE_LEAF) {
+        uint64_t rpn = pde & R_PTE_RPN;
+        uint64_t mask = (1UL << *psize) - 1;
+
+        if (ppc_radix64_check_prot(cpu, rwx, pde, fault_cause, prot)) {
+            return 0; /* Protection Denied Access */
+        }
+
+        /* Or high bits of rpn and low bits to ea to form whole real addr */
+        *raddr = (rpn & ~mask) | (eaddr & mask);
+        *pte_addr = base_addr + (index * sizeof(pde));
+        return pde;
+    }
+
+    /* Next Level of Radix Tree */
+    return ppc_radix64_walk_tree(cpu, rwx, eaddr, pde & R_PDE_NLB,
+                                 pde & R_PDE_NLS, raddr, psize,
+                                 fault_cause, prot, pte_addr);
+}
+
+int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+                                 int mmu_idx)
+{
+    CPUState *cs = CPU(cpu);
+    CPUPPCState *env = &cpu->env;
+    PPCVirtualHypervisorClass *vhc =
+        PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
+    hwaddr raddr, pte_addr;
+    uint64_t lpid = 0, pid = 0, offset, size, patbe, prtbe0, pte;
+    int page_size, prot, fault_cause = 0;
+
+    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
+    assert(!msr_hv); /* For now there is no Radix PowerNV Support */
+    assert(cpu->vhyp);
+    assert(ppc64_use_proc_tbl(cpu));
+
+    /* Real Mode Access */
+    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
+        /* In real mode top 4 effective addr bits (mostly) ignored */
+        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;
+
+        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
+                     TARGET_PAGE_SIZE);
+        return 0;
+    }
+
+    /* Virtual Mode Access - get the fully qualified address */
+    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
+        ppc_radix64_raise_segi(cpu, rwx, eaddr);
+        return 1;
+    }
+
+    /* Get Process Table */
+    patbe = vhc->get_patbe(cpu->vhyp);
+
+    /* Index Process Table by PID to Find Corresponding Process Table Entry */
+    offset = pid * sizeof(struct prtb_entry);
+    size = 1ULL << ((patbe & PATBE1_R_PRTS) + 12);
+    if (offset >= size) {
+        /* offset exceeds size of the process table */
+        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
+        return 1;
+    }
+    prtbe0 = ldq_phys(cs->as, (patbe & PATBE1_R_PRTB) + offset);
+
+    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
+    page_size = PRTBE_R_GET_RTS(prtbe0);
+    pte = ppc_radix64_walk_tree(cpu, rwx, eaddr & R_EADDR_MASK,
+                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
+                                &raddr, &page_size, &fault_cause, &prot,
+                                &pte_addr);
+    if (!pte) {
+        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
+        return 1;
+    }
+
+    /* Update Reference and Change Bits */
+    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);
+
+    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+                 prot, mmu_idx, 1UL << page_size);
+    return 1;
+}
diff --git a/target/ppc/mmu-radix64.h b/target/ppc/mmu-radix64.h
new file mode 100644
index 0000000000..1d5c7cfea5
--- /dev/null
+++ b/target/ppc/mmu-radix64.h
@@ -0,0 +1,72 @@
+#ifndef MMU_RADIX64_H
+#define MMU_RADIX64_H
+
+#ifndef CONFIG_USER_ONLY
+
+/* Radix Quadrants */
+#define R_EADDR_MASK            0x3FFFFFFFFFFFFFFF
+#define R_EADDR_QUADRANT        0xC000000000000000
+#define R_EADDR_QUADRANT0       0x0000000000000000
+#define R_EADDR_QUADRANT1       0x4000000000000000
+#define R_EADDR_QUADRANT2       0x8000000000000000
+#define R_EADDR_QUADRANT3       0xC000000000000000
+
+/* Radix Partition Table Entry Fields */
+#define PATBE1_R_PRTB           0x0FFFFFFFFFFFF000
+#define PATBE1_R_PRTS           0x000000000000001F
+
+/* Radix Process Table Entry Fields */
+#define PRTBE_R_GET_RTS(rts) \
+    ((((rts >> 58) & 0x18) | ((rts >> 5) & 0x7)) + 31)
+#define PRTBE_R_RPDB            0x0FFFFFFFFFFFFF00
+#define PRTBE_R_RPDS            0x000000000000001F
+
+/* Radix Page Directory/Table Entry Fields */
+#define R_PTE_VALID             0x8000000000000000
+#define R_PTE_LEAF              0x4000000000000000
+#define R_PTE_SW0               0x2000000000000000
+#define R_PTE_RPN               0x01FFFFFFFFFFF000
+#define R_PTE_SW1               0x0000000000000E00
+#define R_GET_SW(sw)            (((sw >> 58) & 0x8) | ((sw >> 9) & 0x7))
+#define R_PTE_R                 0x0000000000000100
+#define R_PTE_C                 0x0000000000000080
+#define R_PTE_ATT               0x0000000000000030
+#define R_PTE_ATT_NORMAL        0x0000000000000000
+#define R_PTE_ATT_SAO           0x0000000000000010
+#define R_PTE_ATT_NI_IO         0x0000000000000020
+#define R_PTE_ATT_TOLERANT_IO   0x0000000000000030
+#define R_PTE_EAA_PRIV          0x0000000000000008
+#define R_PTE_EAA_R             0x0000000000000004
+#define R_PTE_EAA_RW            0x0000000000000002
+#define R_PTE_EAA_X             0x0000000000000001
+#define R_PDE_NLB               PRTBE_R_RPDB
+#define R_PDE_NLS               PRTBE_R_RPDS
+
+#ifdef TARGET_PPC64
+
+int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
+                                 int mmu_idx);
+
+static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
+{
+    return (pte & R_PTE_EAA_R ? PAGE_READ : 0) |
+           (pte & R_PTE_EAA_RW ? PAGE_READ | PAGE_WRITE : 0) |
+           (pte & R_PTE_EAA_X ? PAGE_EXEC : 0);
+}
+
+static inline int ppc_radix64_get_prot_amr(PowerPCCPU *cpu)
+{
+    CPUPPCState *env = &cpu->env;
+    int amr = env->spr[SPR_AMR] >> 62;   /* We only care about key0 AMR63:62 */
+    int iamr = env->spr[SPR_IAMR] >> 62; /* We only care about key0 IAMR63:62 */
+
+    return (amr & 0x2 ? 0 : PAGE_WRITE) | /* Access denied if bit is set */
+           (amr & 0x1 ? 0 : PAGE_READ) |
+           (iamr & 0x1 ? 0 : PAGE_EXEC);
+}
+
+#endif /* TARGET_PPC64 */
+
+#endif /* CONFIG_USER_ONLY */
+
+#endif /* MMU_RADIX64_H */
diff --git a/target/ppc/translate.c b/target/ppc/translate.c
index f40b5a1abf..c0cd64d927 100644
--- a/target/ppc/translate.c
+++ b/target/ppc/translate.c
@@ -73,6 +73,7 @@ static TCGv cpu_cfar;
 #endif
 static TCGv cpu_xer, cpu_so, cpu_ov, cpu_ca, cpu_ov32, cpu_ca32;
 static TCGv cpu_reserve;
+static TCGv cpu_reserve_val;
 static TCGv cpu_fpscr;
 static TCGv_i32 cpu_access_type;
 
@@ -181,6 +182,9 @@ void ppc_translate_init(void)
     cpu_reserve = tcg_global_mem_new(cpu_env,
                                      offsetof(CPUPPCState, reserve_addr),
                                      "reserve_addr");
+    cpu_reserve_val = tcg_global_mem_new(cpu_env,
+                                         offsetof(CPUPPCState, reserve_val),
+                                         "reserve_val");
 
     cpu_fpscr = tcg_global_mem_new(cpu_env,
                                    offsetof(CPUPPCState, fpscr), "fpscr");
@@ -214,6 +218,7 @@ struct DisasContext {
     bool vsx_enabled;
    bool spe_enabled;
     bool tm_enabled;
+    bool gtse;
     ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */
     int singlestep_enabled;
     uint64_t insns_flags;
@@ -2967,6 +2972,7 @@ static void gen_stswx(DisasContext *ctx)
 /* eieio */
 static void gen_eieio(DisasContext *ctx)
 {
+    tcg_gen_mb(TCG_MO_LD_ST | TCG_BAR_SC);
 }
 
 #if !defined(CONFIG_USER_ONLY)
@@ -3004,6 +3010,7 @@ static void gen_isync(DisasContext *ctx)
     if (!ctx->pr) {
         gen_check_tlb_flush(ctx, false);
     }
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
     gen_stop_exception(ctx);
 }
 
@@ -3023,7 +3030,8 @@ static void gen_##name(DisasContext *ctx)                            \
     }                                                                \
     tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop);                \
     tcg_gen_mov_tl(cpu_reserve, t0);                                 \
-    tcg_gen_st_tl(gpr, cpu_env, offsetof(CPUPPCState, reserve_val)); \
+    tcg_gen_mov_tl(cpu_reserve_val, gpr);                            \
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);                           \
     tcg_temp_free(t0);                                               \
 }
@@ -3155,14 +3163,31 @@ static void gen_conditional_store(DisasContext *ctx, TCGv EA,
 static void gen_conditional_store(DisasContext *ctx, TCGv EA,
                                   int reg, int memop)
 {
-    TCGLabel *l1;
+    TCGLabel *l1 = gen_new_label();
+    TCGLabel *l2 = gen_new_label();
+    TCGv t0;
 
-    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
-    l1 = gen_new_label();
     tcg_gen_brcond_tl(TCG_COND_NE, EA, cpu_reserve, l1);
-    tcg_gen_ori_i32(cpu_crf[0], cpu_crf[0], CRF_EQ);
-    tcg_gen_qemu_st_tl(cpu_gpr[reg], EA, ctx->mem_idx, memop);
+
+    t0 = tcg_temp_new();
+    tcg_gen_atomic_cmpxchg_tl(t0, cpu_reserve, cpu_reserve_val,
+                              cpu_gpr[reg], ctx->mem_idx,
+                              DEF_MEMOP(memop) | MO_ALIGN);
+    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_reserve_val);
+    tcg_gen_shli_tl(t0, t0, CRF_EQ_BIT);
+    tcg_gen_or_tl(t0, t0, cpu_so);
+    tcg_gen_trunc_tl_i32(cpu_crf[0], t0);
+    tcg_temp_free(t0);
+    tcg_gen_br(l2);
+
     gen_set_label(l1);
+
+    /* Address mismatch implies failure.  But we still need to provide the
+       memory barrier semantics of the instruction.  */
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
+    tcg_gen_trunc_tl_i32(cpu_crf[0], cpu_so);
+
+    gen_set_label(l2);
     tcg_gen_movi_tl(cpu_reserve, -1);
 }
 #endif
@@ -3291,6 +3316,7 @@ static void gen_sync(DisasContext *ctx)
     if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) {
         gen_check_tlb_flush(ctx, true);
     }
+    tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
 }
 
 /* wait */
@@ -4513,7 +4539,12 @@ static void gen_tlbie(DisasContext *ctx)
     GEN_PRIV;
 #else
     TCGv_i32 t1;
-    CHK_HV;
+
+    if (ctx->gtse) {
+        CHK_SV; /* If gtse is set then tlbie is supervisor privileged */
+    } else {
+        CHK_HV; /* Else hypervisor privileged */
+    }
 
     if (NARROW_MODE(ctx)) {
         TCGv t0 = tcg_temp_new();
@@ -6547,6 +6578,8 @@ GEN_HANDLER(tlbia, 0x1F, 0x12, 0x0B, 0x03FFFC01, PPC_MEM_TLBIA),
  * different ISA versions */
 GEN_HANDLER(tlbiel, 0x1F, 0x12, 0x08, 0x001F0001, PPC_MEM_TLBIE),
 GEN_HANDLER(tlbie, 0x1F, 0x12, 0x09, 0x001F0001, PPC_MEM_TLBIE),
+GEN_HANDLER_E(tlbiel, 0x1F, 0x12, 0x08, 0x00100001, PPC_NONE, PPC2_ISA300),
+GEN_HANDLER_E(tlbie, 0x1F, 0x12, 0x09, 0x00100001, PPC_NONE, PPC2_ISA300),
 GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM_TLBSYNC),
 #if defined(TARGET_PPC64)
 GEN_HANDLER(slbia, 0x1F, 0x12, 0x0F, 0x031FFC01, PPC_SLBI),
@@ -7227,6 +7260,7 @@ void gen_intermediate_code(CPUPPCState *env, struct TranslationBlock *tb)
         ctx.tm_enabled = false;
     }
 #endif
+    ctx.gtse = !!(env->spr[SPR_LPCR] & LPCR_GTSE);
     if ((env->flags & POWERPC_FLAG_SE) && msr_se)
         ctx.singlestep_enabled = CPU_SINGLE_STEP;
     else
diff --git a/target/ppc/translate_init.c b/target/ppc/translate_init.c
index e82e3e65e1..56a0ab22cf 100644
--- a/target/ppc/translate_init.c
+++ b/target/ppc/translate_init.c
@@ -8960,7 +8960,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data)
                        PPC_FLOAT_EXT |
                        PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
                        PPC_MEM_SYNC | PPC_MEM_EIEIO |
-                       PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
+                       PPC_MEM_TLBSYNC |
                        PPC_64B | PPC_64BX | PPC_ALTIVEC |
                        PPC_SEGMENT_64B | PPC_SLBI |
                        PPC_POPCNTB | PPC_POPCNTWD |
@@ -10285,6 +10285,18 @@ PowerPCCPU *cpu_ppc_init(const char *cpu_model)
     return POWERPC_CPU(cpu_generic_init(TYPE_POWERPC_CPU, cpu_model));
 }
 
+PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
+{
+    ObjectClass *oc = OBJECT_CLASS(pcc);
+
+    while (oc && !object_class_is_abstract(oc)) {
+        oc = object_class_get_parent(oc);
+    }
+    assert(oc);
+
+    return POWERPC_CPU_CLASS(oc);
+}
+
 /* Sort by PVR, ordering special case "host" last. */
 static gint ppc_cpu_list_compare(gconstpointer a, gconstpointer b)
 {
@@ -10316,6 +10328,7 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
     ObjectClass *oc = data;
     CPUListState *s = user_data;
     PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
+    DeviceClass *family = DEVICE_CLASS(ppc_cpu_get_family_class(pcc));
     const char *typename = object_class_get_name(oc);
     char *name;
     int i;
@@ -10338,8 +10351,18 @@ static void ppc_cpu_list_entry(gpointer data, gpointer user_data)
         if (alias_oc != oc) {
             continue;
         }
-        (*s->cpu_fprintf)(s->file, "PowerPC %-16s (alias for %s)\n",
-                          alias->alias, name);
+        /*
+         * If running with KVM, we might update the family alias later, so
+         * avoid printing the wrong alias here and use "preferred" instead
+         */
+        if (strcmp(alias->alias, family->desc) == 0) {
+            (*s->cpu_fprintf)(s->file,
+                              "PowerPC %-16s (alias for preferred %s CPU)\n",
+                              alias->alias, family->desc);
+        } else {
+            (*s->cpu_fprintf)(s->file, "PowerPC %-16s (alias for %s)\n",
+                              alias->alias, name);
+        }
     }
     g_free(name);
 }
@@ -10436,14 +10459,6 @@ static bool ppc_cpu_has_work(CPUState *cs)
     return msr_ee && (cs->interrupt_request & CPU_INTERRUPT_HARD);
 }
 
-static void ppc_cpu_exec_enter(CPUState *cs)
-{
-    PowerPCCPU *cpu = POWERPC_CPU(cs);
-    CPUPPCState *env = &cpu->env;
-
-    env->reserve_addr = -1;
-}
-
 /* CPUClass::reset() */
 static void ppc_cpu_reset(CPUState *s)
 {
@@ -10660,7 +10675,6 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data)
     cc->get_phys_page_debug = ppc_cpu_get_phys_page_debug;
     cc->vmsd = &vmstate_ppc_cpu;
 #endif
-    cc->cpu_exec_enter = ppc_cpu_exec_enter;
 #if defined(CONFIG_SOFTMMU)
     cc->write_elf64_note = ppc64_cpu_write_elf64_note;
     cc->write_elf32_note = ppc32_cpu_write_elf32_note;
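The new ppc_radix64_walk_tree() resolves an effective address level by level: at each step it takes the next `nls` bits of the remaining address as an index into the current directory, then recurses on the next-level base until it reaches a leaf PTE. The standalone sketch below (not part of the patch) walks through that shift-and-mask arithmetic for an assumed configuration with a 52-bit radix tree and 13/9/9/9 bits per level; the effective address and per-level sizes are made-up example values.

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t eaddr = 0x00007fffdeadbeefULL;  /* example effective address */
    int psize = 52;                          /* RTS-derived tree size, assumed */
    const int nls[] = { 13, 9, 9, 9 };       /* bits consumed per level, assumed */

    for (int level = 0; level < 4; level++) {
        /* Same shift-and-mask as ppc_radix64_walk_tree() */
        uint64_t index = (eaddr >> (psize - nls[level]))
                         & ((1ULL << nls[level]) - 1);

        psize -= nls[level];
        printf("level %d: directory index 0x%" PRIx64 ", %d bits left\n",
               level, index, psize);
    }

    /*
     * At the leaf, psize is the page-size exponent (12 -> 4 KiB here); the
     * low psize bits of the EA are combined with the PTE's RPN, just as the
     * walker does with (rpn & ~mask) | (eaddr & mask).
     */
    printf("leaf reached: 2^%d-byte page\n", psize);
    return 0;
}
```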
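gen_conditional_store() now implements stcx. as a real atomic compare-and-swap: the store succeeds only if the reserved address still holds the value captured by the matching load-and-reserve, which is kept in the new cpu_reserve_val global. The sketch below is an illustrative C analogue of that logic, not QEMU code; the struct and function names are invented for the example, and it ignores details such as spurious failure and the barrier the real translation emits.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct reservation {
    uintptr_t addr;   /* like cpu_reserve: address of the last larx, -1 if none */
    uint64_t  val;    /* like cpu_reserve_val: value returned by that larx */
};

static bool store_conditional(struct reservation *r, _Atomic uint64_t *mem,
                              uintptr_t ea, uint64_t new_val)
{
    bool ok = false;

    if (ea == r->addr) {
        uint64_t expected = r->val;
        /* Succeed only if memory still contains the reserved value */
        ok = atomic_compare_exchange_strong(mem, &expected, new_val);
    }
    r->addr = (uintptr_t)-1;  /* the reservation is always cleared */
    return ok;                /* maps to CR0.EQ in the real instruction */
}

int main(void)
{
    _Atomic uint64_t word = 42;
    struct reservation r = { (uintptr_t)&word, 42 };  /* as left by a larx */

    return store_conditional(&r, &word, (uintptr_t)&word, 99) ? 0 : 1;
}
```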
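Before walking the tree, ppc_radix64_handle_mmu_fault() indexes the process table by PID and faults if the PID falls outside the table, whose size is 2^(PRTS + 12) bytes with 16-byte entries. A small worked example under assumed values (PRTS = 0, i.e. the minimum 4 KiB table holding 256 entries):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t prts = 0;                    /* assumed PATBE1_R_PRTS field value */
    uint64_t size = 1ULL << (prts + 12);  /* 4096-byte process table */
    uint64_t pids[] = { 5, 300 };         /* example PIDs: one in range, one not */

    for (int i = 0; i < 2; i++) {
        uint64_t offset = pids[i] * 16;   /* sizeof(struct prtb_entry) */

        if (offset >= size) {
            /* The real code raises a storage interrupt with DSISR_NOPTE here */
            printf("PID %" PRIu64 ": offset %" PRIu64 " outside %" PRIu64
                   "-byte table -> fault\n", pids[i], offset, size);
        } else {
            printf("PID %" PRIu64 ": entry read from table offset %" PRIu64 "\n",
                   pids[i], offset);
        }
    }
    return 0;
}
```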