author     Richard Henderson <richard.henderson@linaro.org>   2024-05-23 22:09:59 -0700
committer  Richard Henderson <richard.henderson@linaro.org>   2024-05-23 22:09:59 -0700
commit     ffdd099a782556b9ead26551a6f1d070a595306d
tree       e5a84eb7a1c9ec0f6ee8957c4c076e2b05297c51
parent     70581940cabcc51b329652becddfbc6a261b1b83
parent     e48fb4c590a23d81ee1d2f09ee9bcf5dd5f98e43
Merge tag 'pull-ppc-for-9.1-1-20240524-1' of https://gitlab.com/npiggin/qemu into staging
*** NOTE ***
This replaces the previous PR for tags/pull-ppc-for-9.1-1-20240524
* Fix an interesting TLB invalidate race
* Implement more instructions with decodetree
* Add the POWER8/9/10 BHRB facility
* Add missing instructions, registers, SMT support
* First round of a big MMU xlate cleanup
# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCgAdFiEETkN92lZhb0MpsKeVZ7MCdqhiHK4FAmZP1bsACgkQZ7MCdqhi
# HK7TuQ/7BQugpF2yOYroQmo0Yl4RPfFp6ACqfYQgehcGegg3SWpEselTeOJla3G9
# UyVd0mlWf7DciYi61qit/WyLOeuRXMtRjrnFLV2wz9o7D/Ey5/aLQfUL4oCDt/i2
# hmmq3ZAcr7WWxaz338pLJx9gIVjaNiqSoRz9HgHNkQq0pxkbEo1eSjZ6QLSvqYC2
# dwtJHywFrHNo14aq1Nc7PZ5MFxNN6t7hm7KRHKFrt8Obar15n64MSHyRvMzHI9EO
# RgNzz9/qe5yvJ4kmaNiZjntxojXCBUhhlCTtaDIG1LDBc2yNG5VWQUnwThvyNxxX
# h+Ia4Pv7blXikQ6RuqsvFyrLCgUvwXwBiQwiQCJyITk0asLyJVwhkUpiI/jJvOun
# AujSA/6e2pbSe4RUZytkzygx2KVODrVtcSoOvo8kRw+2aTOWMv7DbfBalmWJQWgx
# 0xSeuUz22eNKEL2XbZWNM5v0OgXUXIs9BVeCqn7RB4lC2RNi72v111UPuKYq6Ijx
# SHWQMGPGu9FNBsIdriclRWXVXHpVHz/s/l8AJT8ad6E57UHVk5zCPrbFZFImvQkL
# E7xlctijeST8V5qGyBPG3M4aPoER9+6J32ORSx7KwDwr+fzkbNUXC8UUC4OjAZ+d
# 2vhie9Vs5xWq/E8gGovTymeQ4yHArobDz/j7+rrr0qeppnKLWjM=
# =jHL7
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 23 May 2024 04:48:11 PM PDT
# gpg: using RSA key 4E437DDA56616F4329B0A79567B30276A8621CAE
# gpg: Good signature from "Nicholas Piggin <npiggin@gmail.com>" [unknown]
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg: There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 4E43 7DDA 5661 6F43 29B0 A795 67B3 0276 A862 1CAE
* tag 'pull-ppc-for-9.1-1-20240524-1' of https://gitlab.com/npiggin/qemu: (72 commits)
target/ppc: Remove pp_check() and reuse ppc_hash32_pp_prot()
target/ppc: Move out BookE and related MMU functions from mmu_common.c
target/ppc: Add a function to check for page protection bit
target/ppc/mmu-radix64.c: Drop a local variable
target/ppc/mmu-hash32.c: Drop a local variable
target/ppc: Split off common embedded TLB init
target/ppc: Remove id_tlbs flag from CPU env
target/ppc: Move mmu_ctx_t type to mmu_common.c
target/ppc: Transform ppc_jumbo_xlate() into ppc_6xx_xlate()
target/ppc: Split off 40x cases from ppc_jumbo_xlate()
target/ppc: Split off real mode handling from get_physical_address_wtlb()
target/ppc: Simplify ppc_booke_xlate() part 2
target/ppc: Simplify ppc_booke_xlate() part 1
target/ppc: Split off BookE handling from ppc_jumbo_xlate()
target/ppc: Remove BookE from direct store handling
target/ppc: Don't use mmu_ctx_t in mmubooke206_get_physical_address()
target/ppc: Don't use mmu_ctx_t in mmubooke_get_physical_address()
target/ppc: Don't use mmu_ctx_t for mmu40x_get_physical_address()
target/ppc: Replace hard coded constants in ppc_jumbo_xlate()
target/ppc: Deindent ppc_jumbo_xlate()
...
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
43 files changed, 3267 insertions, 2992 deletions
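[Editor's note] A recurring theme in the cputlb.c and exec-all.h hunks below is that the non-synced broadcast flush variants (tlb_flush_*_all_cpus) are removed and the remaining per-CPU flush entry points now assert they run on the owning vCPU thread. The following minimal sketch shows the resulting calling convention; the helper name demo_flush_page is hypothetical and only illustrates the two surviving APIs, it is not code from this series.

```c
#include "qemu/osdep.h"
#include "exec/exec-all.h"

/*
 * Illustrative sketch only: how a target issues TLB page flushes
 * after this series. Assumes it is called from a vCPU thread.
 */
static void demo_flush_page(CPUState *cs, vaddr addr)
{
    /*
     * Per-CPU flush: must be issued from the CPU's own thread;
     * cputlb.c now asserts this instead of deferring via
     * async_run_on_cpu().
     */
    tlb_flush_page(cs, addr);

    /*
     * Cross-CPU flush: only the _synced variant remains. When it
     * returns, no CPU will perform translations through the
     * flushed entries.
     */
    tlb_flush_page_all_cpus_synced(cs, addr);
}
```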
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index cdb3e12dfb..117b516739 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -418,12 +418,9 @@ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) { tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); - if (cpu->created && !qemu_cpu_is_self(cpu)) { - async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work, - RUN_ON_CPU_HOST_INT(idxmap)); - } else { - tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); - } + assert_cpu_is_self(cpu); + + tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); } void tlb_flush(CPUState *cpu) @@ -431,21 +428,6 @@ void tlb_flush(CPUState *cpu) tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); } -void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) -{ - const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; - - tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); - - flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); - fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); -} - -void tlb_flush_all_cpus(CPUState *src_cpu) -{ - tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); -} - void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; @@ -627,28 +609,12 @@ void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap) { tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%" PRIx16 "\n", addr, idxmap); + assert_cpu_is_self(cpu); + /* This should already be page aligned */ addr &= TARGET_PAGE_MASK; - if (qemu_cpu_is_self(cpu)) { - tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); - } else if (idxmap < TARGET_PAGE_SIZE) { - /* - * Most targets have only a few mmu_idx. In the case where - * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid - * allocating memory for this operation. - */ - async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1, - RUN_ON_CPU_TARGET_PTR(addr | idxmap)); - } else { - TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1); - - /* Otherwise allocate a structure, freed by the worker. */ - d->addr = addr; - d->idxmap = idxmap; - async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2, - RUN_ON_CPU_HOST_PTR(d)); - } + tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap); } void tlb_flush_page(CPUState *cpu, vaddr addr) @@ -656,46 +622,6 @@ void tlb_flush_page(CPUState *cpu, vaddr addr) tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); } -void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, vaddr addr, - uint16_t idxmap) -{ - tlb_debug("addr: %016" VADDR_PRIx " mmu_idx:%"PRIx16"\n", addr, idxmap); - - /* This should already be page aligned */ - addr &= TARGET_PAGE_MASK; - - /* - * Allocate memory to hold addr+idxmap only when needed. - * See tlb_flush_page_by_mmuidx for details. - */ - if (idxmap < TARGET_PAGE_SIZE) { - flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1, - RUN_ON_CPU_TARGET_PTR(addr | idxmap)); - } else { - CPUState *dst_cpu; - - /* Allocate a separate data block for each destination cpu. 
*/ - CPU_FOREACH(dst_cpu) { - if (dst_cpu != src_cpu) { - TLBFlushPageByMMUIdxData *d - = g_new(TLBFlushPageByMMUIdxData, 1); - - d->addr = addr; - d->idxmap = idxmap; - async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2, - RUN_ON_CPU_HOST_PTR(d)); - } - } - } - - tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap); -} - -void tlb_flush_page_all_cpus(CPUState *src, vaddr addr) -{ - tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); -} - void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, vaddr addr, uint16_t idxmap) @@ -851,6 +777,8 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, { TLBFlushRangeData d; + assert_cpu_is_self(cpu); + /* * If all bits are significant, and len is small, * this devolves to tlb_flush_page. @@ -871,14 +799,7 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, d.idxmap = idxmap; d.bits = bits; - if (qemu_cpu_is_self(cpu)) { - tlb_flush_range_by_mmuidx_async_0(cpu, d); - } else { - /* Otherwise allocate a structure, freed by the worker. */ - TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); - async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, - RUN_ON_CPU_HOST_PTR(p)); - } + tlb_flush_range_by_mmuidx_async_0(cpu, d); } void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, @@ -887,54 +808,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); } -void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, - vaddr addr, vaddr len, - uint16_t idxmap, unsigned bits) -{ - TLBFlushRangeData d; - CPUState *dst_cpu; - - /* - * If all bits are significant, and len is small, - * this devolves to tlb_flush_page. - */ - if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { - tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); - return; - } - /* If no page bits are significant, this devolves to tlb_flush. */ - if (bits < TARGET_PAGE_BITS) { - tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); - return; - } - - /* This should already be page aligned */ - d.addr = addr & TARGET_PAGE_MASK; - d.len = len; - d.idxmap = idxmap; - d.bits = bits; - - /* Allocate a separate data block for each destination cpu. */ - CPU_FOREACH(dst_cpu) { - if (dst_cpu != src_cpu) { - TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); - async_run_on_cpu(dst_cpu, - tlb_flush_range_by_mmuidx_async_1, - RUN_ON_CPU_HOST_PTR(p)); - } - } - - tlb_flush_range_by_mmuidx_async_0(src_cpu, d); -} - -void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, - vaddr addr, uint16_t idxmap, - unsigned bits) -{ - tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, - idxmap, bits); -} - void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, vaddr addr, vaddr len, diff --git a/docs/devel/multi-thread-tcg.rst b/docs/devel/multi-thread-tcg.rst index 1420789fff..d706c27ea7 100644 --- a/docs/devel/multi-thread-tcg.rst +++ b/docs/devel/multi-thread-tcg.rst @@ -205,15 +205,10 @@ DESIGN REQUIREMENTS: (Current solution) -We have updated cputlb.c to defer operations when a cross-vCPU -operation with async_run_on_cpu() which ensures each vCPU sees a -coherent state when it next runs its work (in a few instructions -time). - -A new set up operations (tlb_flush_*_all_cpus) take an additional flag -which when set will force synchronisation by setting the source vCPUs -work as "safe work" and exiting the cpu run loop. This ensure by the -time execution restarts all flush operations have completed. 
+A new set of tlb flush operations (tlb_flush_*_all_cpus_synced) force +synchronisation by setting the source vCPUs work as "safe work" and +exiting the cpu run loop. This ensures that by the time execution +restarts all flush operations have completed. TLB flag updates are all done atomically and are also protected by the corresponding page lock. diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 04d6decb2b..c1bd8dfa21 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -400,6 +400,7 @@ static void pegasos2_machine_reset(MachineState *machine, ShutdownCause reason) machine->fdt = fdt; pm->cpu->vhyp = PPC_VIRTUAL_HYPERVISOR(machine); + pm->cpu->vhyp_class = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(pm->cpu->vhyp); } enum pegasos2_rtas_tokens { @@ -984,7 +985,7 @@ static void *build_fdt(MachineState *machine, int *fdt_size) cpu->env.icache_line_size); qemu_fdt_setprop_cell(fdt, cp, "i-cache-line-size", cpu->env.icache_line_size); - if (cpu->env.id_tlbs) { + if (ppc_is_split_tlb(cpu)) { qemu_fdt_setprop_cell(fdt, cp, "i-tlb-sets", cpu->env.nb_ways); qemu_fdt_setprop_cell(fdt, cp, "i-tlb-size", cpu->env.tlb_per_way); qemu_fdt_setprop_cell(fdt, cp, "d-tlb-sets", cpu->env.nb_ways); diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index d2d1e310a3..4345764bce 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -353,6 +353,32 @@ static void spapr_dt_pa_features(SpaprMachineState *spapr, _FDT((fdt_setprop(fdt, offset, "ibm,pa-features", pa_features, pa_size))); } +static void spapr_dt_pi_features(SpaprMachineState *spapr, + PowerPCCPU *cpu, + void *fdt, int offset) +{ + uint8_t pi_features[] = { 1, 0, + 0x00 }; + + if (kvm_enabled() && ppc_check_compat(cpu, CPU_POWERPC_LOGICAL_3_00, + 0, cpu->compat_pvr)) { + /* + * POWER9 and later CPUs with KVM run in LPAR-per-thread mode where + * all threads are essentially independent CPUs, and msgsndp does not + * work (because it is physically-addressed) and therefore is + * emulated by KVM, so disable it here to ensure XIVE will be used. + * This is both KVM and CPU implementation-specific behaviour so a KVM + * cap would be cleanest, but for now this works. If KVM ever permits + * native msgsndp execution by guests, a cap could be added at that + * time. + */ + pi_features[2] |= 0x08; /* 4: No msgsndp */ + } + + _FDT((fdt_setprop(fdt, offset, "ibm,pi-features", pi_features, + sizeof(pi_features)))); +} + static hwaddr spapr_node0_size(MachineState *machine) { if (machine->numa_state->num_nodes) { @@ -815,6 +841,8 @@ static void spapr_dt_cpu(CPUState *cs, void *fdt, int offset, spapr_dt_pa_features(spapr, cpu, fdt, offset); + spapr_dt_pi_features(spapr, cpu, fdt, offset); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", cs->cpu_index / vcpus_per_socket))); diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h index 2cd7b8f61b..b6b46ad13c 100644 --- a/include/exec/exec-all.h +++ b/include/exec/exec-all.h @@ -68,24 +68,15 @@ void tlb_destroy(CPUState *cpu); */ void tlb_flush_page(CPUState *cpu, vaddr addr); /** - * tlb_flush_page_all_cpus: + * tlb_flush_page_all_cpus_synced: * @cpu: src CPU of the flush * @addr: virtual address of page to be flushed * - * Flush one page from the TLB of the specified CPU, for all + * Flush one page from the TLB of all CPUs, for all * MMU indexes. 
- */ -void tlb_flush_page_all_cpus(CPUState *src, vaddr addr); -/** - * tlb_flush_page_all_cpus_synced: - * @cpu: src CPU of the flush - * @addr: virtual address of page to be flushed * - * Flush one page from the TLB of the specified CPU, for all MMU - * indexes like tlb_flush_page_all_cpus except the source vCPUs work - * is scheduled as safe work meaning all flushes will be complete once - * the source vCPUs safe work is complete. This will depend on when - * the guests translation ends the TB. + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. */ void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr); /** @@ -99,18 +90,13 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr); */ void tlb_flush(CPUState *cpu); /** - * tlb_flush_all_cpus: - * @cpu: src CPU of the flush - */ -void tlb_flush_all_cpus(CPUState *src_cpu); -/** * tlb_flush_all_cpus_synced: * @cpu: src CPU of the flush * - * Like tlb_flush_all_cpus except this except the source vCPUs work is - * scheduled as safe work meaning all flushes will be complete once - * the source vCPUs safe work is complete. This will depend on when - * the guests translation ends the TB. + * Flush the entire TLB for all CPUs, for all MMU indexes. + * + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. */ void tlb_flush_all_cpus_synced(CPUState *src_cpu); /** @@ -125,27 +111,16 @@ void tlb_flush_all_cpus_synced(CPUState *src_cpu); void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap); /** - * tlb_flush_page_by_mmuidx_all_cpus: + * tlb_flush_page_by_mmuidx_all_cpus_synced: * @cpu: Originating CPU of the flush * @addr: virtual address of page to be flushed * @idxmap: bitmap of MMU indexes to flush * * Flush one page from the TLB of all CPUs, for the specified * MMU indexes. - */ -void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr, - uint16_t idxmap); -/** - * tlb_flush_page_by_mmuidx_all_cpus_synced: - * @cpu: Originating CPU of the flush - * @addr: virtual address of page to be flushed - * @idxmap: bitmap of MMU indexes to flush * - * Flush one page from the TLB of all CPUs, for the specified MMU - * indexes like tlb_flush_page_by_mmuidx_all_cpus except the source - * vCPUs work is scheduled as safe work meaning all flushes will be - * complete once the source vCPUs safe work is complete. This will - * depend on when the guests translation ends the TB. + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. */ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, uint16_t idxmap); @@ -160,24 +135,15 @@ void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, */ void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap); /** - * tlb_flush_by_mmuidx_all_cpus: + * tlb_flush_by_mmuidx_all_cpus_synced: * @cpu: Originating CPU of the flush * @idxmap: bitmap of MMU indexes to flush * - * Flush all entries from all TLBs of all CPUs, for the specified + * Flush all entries from the TLB of all CPUs, for the specified * MMU indexes. 
- */ -void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap); -/** - * tlb_flush_by_mmuidx_all_cpus_synced: - * @cpu: Originating CPU of the flush - * @idxmap: bitmap of MMU indexes to flush * - * Flush all entries from all TLBs of all CPUs, for the specified - * MMU indexes like tlb_flush_by_mmuidx_all_cpus except except the source - * vCPUs work is scheduled as safe work meaning all flushes will be - * complete once the source vCPUs safe work is complete. This will - * depend on when the guests translation ends the TB. + * When this function returns, no CPUs will subsequently perform + * translations using the flushed TLBs. */ void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap); @@ -194,8 +160,6 @@ void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits); /* Similarly, with broadcast and syncing. */ -void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr, - uint16_t idxmap, unsigned bits); void tlb_flush_page_bits_by_mmuidx_all_cpus_synced (CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits); @@ -215,9 +179,6 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, unsigned bits); /* Similarly, with broadcast and syncing. */ -void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, vaddr addr, - vaddr len, uint16_t idxmap, - unsigned bits); void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, vaddr len, @@ -290,18 +251,12 @@ static inline void tlb_destroy(CPUState *cpu) static inline void tlb_flush_page(CPUState *cpu, vaddr addr) { } -static inline void tlb_flush_page_all_cpus(CPUState *src, vaddr addr) -{ -} static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr) { } static inline void tlb_flush(CPUState *cpu) { } -static inline void tlb_flush_all_cpus(CPUState *src_cpu) -{ -} static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu) { } @@ -313,20 +268,11 @@ static inline void tlb_flush_page_by_mmuidx(CPUState *cpu, static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) { } -static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, - vaddr addr, - uint16_t idxmap) -{ -} static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, uint16_t idxmap) { } -static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap) -{ -} - static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap) { @@ -337,12 +283,6 @@ static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, unsigned bits) { } -static inline void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *cpu, - vaddr addr, - uint16_t idxmap, - unsigned bits) -{ -} static inline void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, uint16_t idxmap, unsigned bits) @@ -353,13 +293,6 @@ static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr, unsigned bits) { } -static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, - vaddr addr, - vaddr len, - uint16_t idxmap, - unsigned bits) -{ -} static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr, vaddr len, diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h index 0ac55d6b25..2015e603d4 100644 --- a/target/ppc/cpu.h +++ b/target/ppc/cpu.h @@ -533,6 +533,9 @@ FIELD(MSR, LE, MSR_LE, 1) #define MMCR0_FC56 PPC_BIT(59) /* PMC Freeze Counters 5-6 bit */ #define MMCR0_PMC1CE PPC_BIT(48) /* MMCR0 PMC1 Condition Enabled */ #define MMCR0_PMCjCE PPC_BIT(49) /* MMCR0 PMCj Condition Enabled */ 
+#define MMCR0_FCP PPC_BIT(34) /* Freeze Counters/BHRB if PR=1 */ +#define MMCR0_FCPC PPC_BIT(51) /* Condition for FCP bit */ +#define MMCR0_BHRBA_NR PPC_BIT_NR(42) /* BHRB Available */ /* MMCR0 userspace r/w mask */ #define MMCR0_UREG_MASK (MMCR0_FC | MMCR0_PMAO | MMCR0_PMAE) /* MMCR2 userspace r/w mask */ @@ -545,6 +548,10 @@ FIELD(MSR, LE, MSR_LE, 1) #define MMCR2_UREG_MASK (MMCR2_FC1P0 | MMCR2_FC2P0 | MMCR2_FC3P0 | \ MMCR2_FC4P0 | MMCR2_FC5P0 | MMCR2_FC6P0) +#define MMCRA_BHRBRD PPC_BIT(26) /* BHRB Recording Disable */ +#define MMCRA_IFM_MASK PPC_BITMASK(32, 33) /* BHRB Instruction Filtering */ +#define MMCRA_IFM_SHIFT PPC_BIT_NR(33) + #define MMCR1_EVT_SIZE 8 /* extract64() does a right shift before extracting */ #define MMCR1_PMC1SEL_START 32 @@ -628,6 +635,7 @@ FIELD(MSR, LE, MSR_LE, 1) /* HFSCR bits */ #define HFSCR_MSGP PPC_BIT(53) /* Privileged Message Send Facilities */ +#define HFSCR_BHRB PPC_BIT(59) /* BHRB Instructions */ #define HFSCR_IC_MSGP 0xA #define DBCR0_ICMP (1 << 27) @@ -770,6 +778,8 @@ enum { POWERPC_FLAG_SMT = 0x00400000, /* Using "LPAR per core" mode (as opposed to per-thread) */ POWERPC_FLAG_SMT_1LPAR = 0x00800000, + /* Has BHRB */ + POWERPC_FLAG_BHRB = 0x01000000, }; /* @@ -797,6 +807,7 @@ enum { HFLAGS_PMCJCE = 17, /* MMCR0 PMCjCE bit */ HFLAGS_PMC_OTHER = 18, /* PMC other than PMC5-6 is enabled */ HFLAGS_INSN_CNT = 19, /* PMU instruction count enabled */ + HFLAGS_BHRB_ENABLE = 20, /* Summary flag for enabling BHRB */ HFLAGS_VSX = 23, /* MSR_VSX if cpu has VSX */ HFLAGS_VR = 25, /* MSR_VR if cpu has VRE */ @@ -1152,7 +1163,11 @@ FIELD(FPSCR, FI, FPSCR_FI, 1) #define DBELL_TYPE_DBELL_SERVER (0x05 << DBELL_TYPE_SHIFT) -#define DBELL_BRDCAST PPC_BIT(37) +#define DBELL_BRDCAST_MASK PPC_BITMASK(37, 38) +#define DBELL_BRDCAST_SHIFT 25 +#define DBELL_BRDCAST_SUBPROC (0x1 << DBELL_BRDCAST_SHIFT) +#define DBELL_BRDCAST_CORE (0x2 << DBELL_BRDCAST_SHIFT) + #define DBELL_LPIDTAG_SHIFT 14 #define DBELL_LPIDTAG_MASK (0xfff << DBELL_LPIDTAG_SHIFT) #define DBELL_PIRTAG_MASK 0x3fff @@ -1210,6 +1225,9 @@ struct pnv_tod_tbst { #define PPC_CPU_OPCODES_LEN 0x40 #define PPC_CPU_INDIRECT_OPCODES_LEN 0x20 +#define BHRB_MAX_NUM_ENTRIES_LOG2 (5) +#define BHRB_MAX_NUM_ENTRIES (1 << BHRB_MAX_NUM_ENTRIES_LOG2) + struct CPUArchState { /* Most commonly used resources during translated code execution first */ target_ulong gpr[32]; /* general purpose registers */ @@ -1250,6 +1268,9 @@ struct CPUArchState { ppc_slb_t slb[MAX_SLB_ENTRIES]; /* PowerPC 64 SLB area */ struct CPUBreakpoint *ciabr_breakpoint; struct CPUWatchpoint *dawr0_watchpoint; + + /* POWER CPU regs/state */ + target_ulong scratch[8]; /* SCRATCH registers (shared across core) */ #endif target_ulong sr[32]; /* segment registers */ uint32_t nb_BATs; /* number of BATs */ @@ -1260,7 +1281,6 @@ struct CPUArchState { int tlb_per_way; /* Speed-up helper: used to avoid divisions at run time */ int nb_ways; /* Number of ways in the TLB set */ int last_way; /* Last used way used to allocate TLB in a LRU way */ - int id_tlbs; /* If 1, MMU has separated TLBs for instructions & data */ int nb_pids; /* Number of available PID registers */ int tlb_type; /* Type of TLB we're dealing with */ ppc_tlb_t tlb; /* TLB is optional. 
Allocate them only if needed */ @@ -1306,6 +1326,16 @@ struct CPUArchState { int dcache_line_size; int icache_line_size; +#ifdef TARGET_PPC64 + /* Branch History Rolling Buffer (BHRB) resources */ + target_ulong bhrb_num_entries; + intptr_t bhrb_base; + target_ulong bhrb_filter; + target_ulong bhrb_offset; + target_ulong bhrb_offset_mask; + uint64_t bhrb[BHRB_MAX_NUM_ENTRIES]; +#endif + /* These resources are used during exception processing */ /* CPU model definition */ target_ulong msr_mask; @@ -1351,6 +1381,9 @@ struct CPUArchState { /* Power management */ int (*check_pow)(CPUPPCState *env); + /* attn instruction enable */ + int (*check_attn)(CPUPPCState *env); + #if !defined(CONFIG_USER_ONLY) void *load_info; /* holds boot loading state */ #endif @@ -1435,6 +1468,7 @@ struct ArchCPU { int vcpu_id; uint32_t compat_pvr; PPCVirtualHypervisor *vhyp; + PPCVirtualHypervisorClass *vhyp_class; void *machine_data; int32_t node_id; /* NUMA node this CPU belongs to */ PPCHash64Options *hash64_opts; @@ -1498,6 +1532,7 @@ struct PowerPCCPUClass { int n_host_threads; void (*init_proc)(CPUPPCState *env); int (*check_pow)(CPUPPCState *env); + int (*check_attn)(CPUPPCState *env); }; ObjectClass *ppc_cpu_class_by_name(const char *name); @@ -1532,7 +1567,7 @@ DECLARE_OBJ_CHECKERS(PPCVirtualHypervisor, PPCVirtualHypervisorClass, static inline bool vhyp_cpu_in_nested(PowerPCCPU *cpu) { - return PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp)->cpu_in_nested(cpu); + return cpu->vhyp_class->cpu_in_nested(cpu); } #endif /* CONFIG_USER_ONLY */ @@ -1607,10 +1642,6 @@ void ppc_tlb_invalidate_all(CPUPPCState *env); void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr); void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp); void cpu_ppc_set_1lpar(PowerPCCPU *cpu); -int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, - target_ulong address, uint32_t pid); -int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid); -hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb); #endif void ppc_store_fpscr(CPUPPCState *env, target_ulong val); @@ -1777,9 +1808,9 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_SPRG2 (0x112) #define SPR_SPRG3 (0x113) #define SPR_SPRG4 (0x114) -#define SPR_SCOMC (0x114) +#define SPR_POWER_SPRC (0x114) #define SPR_SPRG5 (0x115) -#define SPR_SCOMD (0x115) +#define SPR_POWER_SPRD (0x115) #define SPR_SPRG6 (0x116) #define SPR_SPRG7 (0x117) #define SPR_ASR (0x118) @@ -2069,6 +2100,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_DEXCR (0x33C) #define SPR_IC (0x350) #define SPR_VTB (0x351) +#define SPR_LDBAR (0x352) #define SPR_MMCRC (0x353) #define SPR_PSSCR (0x357) #define SPR_440_INV0 (0x370) @@ -2091,6 +2123,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_POWER_MMCRS (0x37E) #define SPR_WORT (0x37F) #define SPR_PPR (0x380) +#define SPR_PPR32 (0x382) #define SPR_750_GQR0 (0x390) #define SPR_440_DNV0 (0x390) #define SPR_750_GQR1 (0x391) @@ -2114,6 +2147,7 @@ void ppc_compat_add_property(Object *obj, const char *name, #define SPR_440_IVLIM (0x399) #define SPR_TSCR (0x399) #define SPR_750_DMAU (0x39A) +#define SPR_POWER_TTR (0x39A) #define SPR_750_DMAL (0x39B) #define SPR_440_RSTCFG (0x39B) #define SPR_BOOKE_DCDBTRL (0x39C) @@ -2295,6 +2329,8 @@ void ppc_compat_add_property(Object *obj, const char *name, #define HID0_NAP (1 << 22) /* pre-2.06 */ #define HID0_HILE PPC_BIT(19) /* POWER8 */ #define HID0_POWER9_HILE PPC_BIT(4) +#define 
HID0_ENABLE_ATTN PPC_BIT(31) /* POWER8 */ +#define HID0_POWER9_ENABLE_ATTN PPC_BIT(3) /*****************************************************************************/ /* PowerPC Instructions types definitions */ @@ -2856,6 +2892,10 @@ static inline void booke206_fixed_size_tlbn(CPUPPCState *env, const int tlbn, tlb->mas1 |= ((uint32_t)tsize) << MAS1_TSIZE_SHIFT; } +static inline bool ppc_is_split_tlb(PowerPCCPU *cpu) +{ + return cpu->env.tlb_type == TLB_6XX; +} #endif static inline bool msr_is_64bit(CPUPPCState *env, target_ulong msr) @@ -3000,6 +3040,12 @@ static inline int check_pow_nocheck(CPUPPCState *env) return 1; } +/* attn enable check */ +static inline int check_attn_none(CPUPPCState *env) +{ + return 0; +} + /*****************************************************************************/ /* PowerPC implementations definitions */ diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c index c11a69fd90..01e358a4a5 100644 --- a/target/ppc/cpu_init.c +++ b/target/ppc/cpu_init.c @@ -246,7 +246,7 @@ static void register_amr_sprs(CPUPPCState *env) spr_register_hv(env, SPR_AMOR, "AMOR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_core_lpar_write_generic, 0); #endif /* !CONFIG_USER_ONLY */ } @@ -792,7 +792,7 @@ static void register_BookE_sprs(CPUPPCState *env, uint64_t ivor_mask) 0x00000000); spr_register(env, SPR_BOOKE_DECAR, "DECAR", SPR_NOACCESS, SPR_NOACCESS, - SPR_NOACCESS, &spr_write_generic, + SPR_NOACCESS, &spr_write_generic32, 0x00000000); /* SPRGs */ spr_register(env, SPR_USPRG0, "USPRG0", @@ -2107,19 +2107,42 @@ static int check_pow_hid0_74xx(CPUPPCState *env) return 0; } -static void init_proc_405(CPUPPCState *env) +#if defined(TARGET_PPC64) +static int check_attn_hid0(CPUPPCState *env) { - register_40x_sprs(env); - register_405_sprs(env); - register_usprgh_sprs(env); + if (env->spr[SPR_HID0] & HID0_ENABLE_ATTN) { + return 1; + } - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) + return 0; +} + +static int check_attn_hid0_power9(CPUPPCState *env) +{ + if (env->spr[SPR_HID0] & HID0_POWER9_ENABLE_ATTN) { + return 1; + } + + return 0; +} +#endif + +static void init_tlbs_emb(CPUPPCState *env) +{ +#ifndef CONFIG_USER_ONLY env->nb_tlb = 64; env->nb_ways = 1; - env->id_tlbs = 0; env->tlb_type = TLB_EMB; #endif +} + +static void init_proc_405(CPUPPCState *env) +{ + register_40x_sprs(env); + register_405_sprs(env); + register_usprgh_sprs(env); + + init_tlbs_emb(env); init_excp_4xx(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -2138,6 +2161,7 @@ POWERPC_FAMILY(405)(ObjectClass *oc, void *data) dc->desc = "PowerPC 405"; pcc->init_proc = init_proc_405; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_DCR | PPC_WRTEE | PPC_CACHE | PPC_CACHE_ICBI | PPC_40x_ICBT | @@ -2186,13 +2210,8 @@ static void init_proc_440EP(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -2210,6 +2229,7 @@ POWERPC_FAMILY(440EP)(ObjectClass *oc, void *data) dc->desc = "PowerPC 440 EP"; pcc->init_proc = init_proc_440EP; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE 
| PPC_STRING | PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -2248,6 +2268,7 @@ POWERPC_FAMILY(460EX)(ObjectClass *oc, void *data) dc->desc = "PowerPC 460 EX"; pcc->init_proc = init_proc_440EP; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_FLOAT | PPC_FLOAT_FRES | PPC_FLOAT_FSEL | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -2284,13 +2305,7 @@ static void init_proc_440GP(CPUPPCState *env) register_440_sprs(env); register_usprgh_sprs(env); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -2308,6 +2323,7 @@ POWERPC_FAMILY(440GP)(ObjectClass *oc, void *data) dc->desc = "PowerPC 440 GP"; pcc->init_proc = init_proc_440GP; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_DCRX | PPC_WRTEE | PPC_MFAPIDI | PPC_CACHE | PPC_CACHE_ICBI | @@ -2358,13 +2374,8 @@ static void init_proc_440x5(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -2382,6 +2393,7 @@ POWERPC_FAMILY(440x5)(ObjectClass *oc, void *data) dc->desc = "PowerPC 440x5"; pcc->init_proc = init_proc_440x5; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_DCR | PPC_WRTEE | PPC_RFMCI | PPC_CACHE | PPC_CACHE_ICBI | @@ -2417,6 +2429,7 @@ POWERPC_FAMILY(440x5wDFPU)(ObjectClass *oc, void *data) dc->desc = "PowerPC 440x5 with double precision FPU"; pcc->init_proc = init_proc_440x5; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_FLOAT | PPC_FLOAT_FSQRT | PPC_FLOAT_STFIWX | @@ -2465,6 +2478,7 @@ POWERPC_FAMILY(MPC5xx)(ObjectClass *oc, void *data) dc->desc = "Freescale 5xx cores (aka RCPU)"; pcc->init_proc = init_proc_MPC5xx; pcc->check_pow = check_pow_none; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MEM_EIEIO | PPC_MEM_SYNC | PPC_CACHE_ICBI | PPC_FLOAT | PPC_FLOAT_STFIWX | @@ -2507,6 +2521,7 @@ POWERPC_FAMILY(MPC8xx)(ObjectClass *oc, void *data) dc->desc = "Freescale 8xx cores (aka PowerQUICC)"; pcc->init_proc = init_proc_MPC8xx; pcc->check_pow = check_pow_none; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MEM_EIEIO | PPC_MEM_SYNC | PPC_CACHE_ICBI | PPC_MFTB; @@ -2557,6 +2572,7 @@ POWERPC_FAMILY(G2)(ObjectClass *oc, void *data) dc->desc = "PowerPC G2"; pcc->init_proc = init_proc_G2; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX | @@ -2595,6 +2611,7 @@ POWERPC_FAMILY(G2LE)(ObjectClass *oc, void *data) dc->desc = "PowerPC G2LE"; pcc->init_proc = init_proc_G2; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX | @@ 
-2721,12 +2738,8 @@ static void init_proc_e200(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + + init_tlbs_emb(env); init_excp_e200(env, 0xFFFF0000UL); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -2741,6 +2754,7 @@ POWERPC_FAMILY(e200)(ObjectClass *oc, void *data) dc->desc = "e200 core"; pcc->init_proc = init_proc_e200; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; /* * XXX: unimplemented instructions: * dcblc @@ -2843,7 +2857,6 @@ static void init_proc_e500(CPUPPCState *env, int version) /* Memory management */ env->nb_pids = 3; env->nb_ways = 2; - env->id_tlbs = 0; switch (version) { case fsl_e500v1: tlbncfg[0] = register_tlbncfg(2, 1, 1, 0, 256); @@ -3029,6 +3042,7 @@ POWERPC_FAMILY(e500v1)(ObjectClass *oc, void *data) dc->desc = "e500v1 core"; pcc->init_proc = init_proc_e500v1; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_SPE | PPC_SPE_SINGLE | PPC_WRTEE | PPC_RFDI | @@ -3072,6 +3086,7 @@ POWERPC_FAMILY(e500v2)(ObjectClass *oc, void *data) dc->desc = "e500v2 core"; pcc->init_proc = init_proc_e500v2; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_SPE | PPC_SPE_SINGLE | PPC_SPE_DOUBLE | PPC_WRTEE | PPC_RFDI | @@ -3115,6 +3130,7 @@ POWERPC_FAMILY(e500mc)(ObjectClass *oc, void *data) dc->desc = "e500mc core"; pcc->init_proc = init_proc_e500mc; pcc->check_pow = check_pow_none; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE | PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | @@ -3161,6 +3177,7 @@ POWERPC_FAMILY(e5500)(ObjectClass *oc, void *data) dc->desc = "e5500 core"; pcc->init_proc = init_proc_e5500; pcc->check_pow = check_pow_none; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE | PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | @@ -3209,6 +3226,7 @@ POWERPC_FAMILY(e6500)(ObjectClass *oc, void *data) dc->desc = "e6500 core"; pcc->init_proc = init_proc_e6500; pcc->check_pow = check_pow_none; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_MFTB | PPC_WRTEE | PPC_RFDI | PPC_RFMCI | PPC_CACHE | PPC_CACHE_LOCK | PPC_CACHE_ICBI | @@ -3271,6 +3289,7 @@ POWERPC_FAMILY(603)(ObjectClass *oc, void *data) dc->desc = "PowerPC 603"; pcc->init_proc = init_proc_603; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3310,6 +3329,7 @@ POWERPC_FAMILY(603E)(ObjectClass *oc, void *data) dc->desc = "PowerPC 603e"; pcc->init_proc = init_proc_603; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3355,6 +3375,7 @@ POWERPC_FAMILY(e300)(ObjectClass *oc, void *data) dc->desc = "e300 core"; pcc->init_proc = init_proc_e300; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_STFIWX | @@ -3410,6 
+3431,7 @@ POWERPC_FAMILY(604)(ObjectClass *oc, void *data) dc->desc = "PowerPC 604"; pcc->init_proc = init_proc_604; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3455,6 +3477,7 @@ POWERPC_FAMILY(604E)(ObjectClass *oc, void *data) dc->desc = "PowerPC 604E"; pcc->init_proc = init_proc_604E; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3511,6 +3534,7 @@ POWERPC_FAMILY(740)(ObjectClass *oc, void *data) dc->desc = "PowerPC 740"; pcc->init_proc = init_proc_740; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3576,6 +3600,7 @@ POWERPC_FAMILY(750)(ObjectClass *oc, void *data) dc->desc = "PowerPC 750"; pcc->init_proc = init_proc_750; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3722,6 +3747,7 @@ POWERPC_FAMILY(750cl)(ObjectClass *oc, void *data) dc->desc = "PowerPC 750 CL"; pcc->init_proc = init_proc_750cl; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; /* * XXX: not implemented: * cache lock instructions: @@ -3829,6 +3855,7 @@ POWERPC_FAMILY(750cx)(ObjectClass *oc, void *data) dc->desc = "PowerPC 750CX"; pcc->init_proc = init_proc_750cx; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3901,6 +3928,7 @@ POWERPC_FAMILY(750fx)(ObjectClass *oc, void *data) dc->desc = "PowerPC 750FX"; pcc->init_proc = init_proc_750fx; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -3973,6 +4001,7 @@ POWERPC_FAMILY(750gx)(ObjectClass *oc, void *data) dc->desc = "PowerPC 750GX"; pcc->init_proc = init_proc_750gx; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -4032,6 +4061,7 @@ POWERPC_FAMILY(745)(ObjectClass *oc, void *data) dc->desc = "PowerPC 745"; pcc->init_proc = init_proc_745; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -4077,6 +4107,7 @@ POWERPC_FAMILY(755)(ObjectClass *oc, void *data) dc->desc = "PowerPC 755"; pcc->init_proc = init_proc_755; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FRSQRTE | PPC_FLOAT_STFIWX | @@ -4143,6 +4174,7 @@ POWERPC_FAMILY(7400)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7400 (aka G4)"; pcc->init_proc = init_proc_7400; pcc->check_pow = check_pow_hid0; + 
pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4222,6 +4254,7 @@ POWERPC_FAMILY(7410)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7410 (aka G4)"; pcc->init_proc = init_proc_7410; pcc->check_pow = check_pow_hid0; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4322,6 +4355,7 @@ POWERPC_FAMILY(7440)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7440 (aka G4)"; pcc->init_proc = init_proc_7440; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4444,6 +4478,7 @@ POWERPC_FAMILY(7450)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7450 (aka G4)"; pcc->init_proc = init_proc_7450; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4573,6 +4608,7 @@ POWERPC_FAMILY(7445)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7445 (aka G4)"; pcc->init_proc = init_proc_7445; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4704,6 +4740,7 @@ POWERPC_FAMILY(7455)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7455 (aka G4)"; pcc->init_proc = init_proc_7455; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4855,6 +4892,7 @@ POWERPC_FAMILY(7457)(ObjectClass *oc, void *data) dc->desc = "PowerPC 7457 (aka G4)"; pcc->init_proc = init_proc_7457; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -4989,6 +5027,7 @@ POWERPC_FAMILY(e600)(ObjectClass *oc, void *data) dc->desc = "PowerPC e600"; pcc->init_proc = init_proc_e600; pcc->check_pow = check_pow_hid0_74xx; + pcc->check_attn = check_attn_none; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -5152,7 +5191,7 @@ static void register_book3s_pmu_sup_sprs(CPUPPCState *env) KVM_REG_PPC_MMCR1, 0x00000000); spr_register_kvm(env, SPR_POWER_MMCRA, "MMCRA", SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_write_MMCRA, KVM_REG_PPC_MMCRA, 0x00000000); spr_register_kvm(env, SPR_POWER_PMC1, "PMC1", SPR_NOACCESS, SPR_NOACCESS, @@ -5415,7 +5454,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env) spr_register_hv(env, SPR_MMCRC, "MMCRC", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic32, + &spr_read_generic, &spr_core_write_generic32, 0x00000000); spr_register_hv(env, SPR_MMCRH, "MMCRH", SPR_NOACCESS, SPR_NOACCESS, @@ -5455,7 +5494,7 @@ static void register_book3s_ids_sprs(CPUPPCState *env) spr_register_hv(env, SPR_HRMOR, "HRMOR", SPR_NOACCESS, SPR_NOACCESS, 
SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_core_write_generic, 0x00000000); } @@ -5549,6 +5588,14 @@ static void register_HEIR64_spr(CPUPPCState *env) 0x00000000); } +static void register_power7_common_sprs(CPUPPCState *env) +{ + spr_register(env, SPR_PPR32, "PPR32", + &spr_read_ppr32, &spr_write_ppr32, + &spr_read_ppr32, &spr_write_ppr32, + 0x00000000); +} + static void register_power8_tce_address_control_sprs(CPUPPCState *env) { spr_register_kvm(env, SPR_TAR, "TAR", @@ -5675,7 +5722,7 @@ static void register_power_common_book4_sprs(CPUPPCState *env) spr_register_hv(env, SPR_TSCR, "TSCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic32, + &spr_read_generic, &spr_core_write_generic32, 0x00000000); spr_register_hv(env, SPR_HMER, "HMER", SPR_NOACCESS, SPR_NOACCESS, @@ -5685,7 +5732,7 @@ static void register_power_common_book4_sprs(CPUPPCState *env) spr_register_hv(env, SPR_HMEER, "HMEER", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_core_write_generic, 0x00000000); spr_register_hv(env, SPR_TFMR, "TFMR", SPR_NOACCESS, SPR_NOACCESS, @@ -5702,6 +5749,26 @@ static void register_power_common_book4_sprs(CPUPPCState *env) &spr_access_nop, &spr_write_generic, &spr_access_nop, &spr_write_generic, 0x00000000); + spr_register_hv(env, SPR_LDBAR, "LDBAR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_core_lpar_write_generic, + 0x00000000); + spr_register_hv(env, SPR_POWER_TTR, "TTR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_core_write_generic, + 0x00000000); + spr_register_hv(env, SPR_POWER_SPRC, "SPRC", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_sprc, + 0x00000000); + spr_register_hv(env, SPR_POWER_SPRD, "SPRD", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_sprd, &spr_write_sprd, + 0x00000000); #endif } @@ -5761,7 +5828,7 @@ static void register_power8_rpr_sprs(CPUPPCState *env) spr_register_hv(env, SPR_RPR, "RPR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, - &spr_read_generic, &spr_write_generic, + &spr_read_generic, &spr_core_write_generic, 0x00000103070F1F3F); #endif } @@ -5904,6 +5971,7 @@ POWERPC_FAMILY(970)(ObjectClass *oc, void *data) dc->desc = "PowerPC 970"; pcc->init_proc = init_proc_970; pcc->check_pow = check_pow_970; + pcc->check_attn = check_attn_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -5979,6 +6047,7 @@ POWERPC_FAMILY(POWER5P)(ObjectClass *oc, void *data) dc->desc = "POWER5+"; pcc->init_proc = init_proc_power5plus; pcc->check_pow = check_pow_970; + pcc->check_attn = check_attn_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -6042,6 +6111,7 @@ static void init_proc_POWER7(CPUPPCState *env) register_power6_common_sprs(env); register_HEIR32_spr(env); register_power6_dbg_sprs(env); + register_power7_common_sprs(env); register_power7_book4_sprs(env); /* env variables */ @@ -6086,6 +6156,7 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) pcc->pcr_supported = PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER7; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0; pcc->insns_flags = 
PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -6142,6 +6213,28 @@ POWERPC_FAMILY(POWER7)(ObjectClass *oc, void *data) pcc->l1_icache_size = 0x8000; } +static void bhrb_init_state(CPUPPCState *env, target_long num_entries_log2) +{ + if (env->flags & POWERPC_FLAG_BHRB) { + if (num_entries_log2 > BHRB_MAX_NUM_ENTRIES_LOG2) { + num_entries_log2 = BHRB_MAX_NUM_ENTRIES_LOG2; + } + env->bhrb_num_entries = 1 << num_entries_log2; + env->bhrb_base = (intptr_t)&env->bhrb[0]; + env->bhrb_offset_mask = (env->bhrb_num_entries * sizeof(uint64_t)) - 1; + } +} + +static void bhrb_reset_state(CPUPPCState *env) +{ + if (env->flags & POWERPC_FLAG_BHRB) { + env->bhrb_offset = 0; + env->bhrb_filter = 0; + memset(env->bhrb, 0, sizeof(env->bhrb)); + } +} + +#define POWER8_BHRB_ENTRIES_LOG2 5 static void init_proc_POWER8(CPUPPCState *env) { /* Common Registers */ @@ -6165,6 +6258,7 @@ static void init_proc_POWER8(CPUPPCState *env) register_power6_common_sprs(env); register_HEIR32_spr(env); register_power6_dbg_sprs(env); + register_power7_common_sprs(env); register_power8_tce_address_control_sprs(env); register_power8_ids_sprs(env); register_power8_ebb_sprs(env); @@ -6183,6 +6277,8 @@ static void init_proc_POWER8(CPUPPCState *env) env->dcache_line_size = 128; env->icache_line_size = 128; + bhrb_init_state(env, POWER8_BHRB_ENTRIES_LOG2); + /* Allocate hardware IRQ controller */ init_excp_POWER8(env); ppcPOWER7_irq_init(env_archcpu(env)); @@ -6223,6 +6319,7 @@ POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data) pcc->pcr_supported = PCR_COMPAT_2_07 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER8; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -6307,6 +6404,7 @@ static struct ppc_radix_page_info POWER9_radix_page_info = { }; #endif /* CONFIG_USER_ONLY */ +#define POWER9_BHRB_ENTRIES_LOG2 5 static void init_proc_POWER9(CPUPPCState *env) { /* Common Registers */ @@ -6328,6 +6426,7 @@ static void init_proc_POWER9(CPUPPCState *env) register_power6_common_sprs(env); register_HEIR32_spr(env); register_power6_dbg_sprs(env); + register_power7_common_sprs(env); register_power8_tce_address_control_sprs(env); register_power8_ids_sprs(env); register_power8_ebb_sprs(env); @@ -6357,6 +6456,8 @@ static void init_proc_POWER9(CPUPPCState *env) env->dcache_line_size = 128; env->icache_line_size = 128; + bhrb_init_state(env, POWER9_BHRB_ENTRIES_LOG2); + /* Allocate hardware IRQ controller */ init_excp_POWER9(env); ppcPOWER9_irq_init(env_archcpu(env)); @@ -6412,6 +6513,7 @@ POWERPC_FAMILY(POWER9)(ObjectClass *oc, void *data) PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER9; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0_power9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -6497,6 +6599,7 @@ static struct ppc_radix_page_info POWER10_radix_page_info = { }; #endif /* !CONFIG_USER_ONLY */ +#define POWER10_BHRB_ENTRIES_LOG2 5 static void init_proc_POWER10(CPUPPCState *env) { /* Common Registers */ @@ -6518,6 +6621,7 @@ static void init_proc_POWER10(CPUPPCState *env) register_power6_common_sprs(env); register_HEIR64_spr(env); register_power6_dbg_sprs(env); + register_power7_common_sprs(env); 
register_power8_tce_address_control_sprs(env); register_power8_ids_sprs(env); register_power8_ebb_sprs(env); @@ -6546,6 +6650,8 @@ static void init_proc_POWER10(CPUPPCState *env) env->dcache_line_size = 128; env->icache_line_size = 128; + bhrb_init_state(env, POWER10_BHRB_ENTRIES_LOG2); + /* Allocate hardware IRQ controller */ init_excp_POWER10(env); ppcPOWER9_irq_init(env_archcpu(env)); @@ -6588,6 +6694,7 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) PCR_COMPAT_2_06 | PCR_COMPAT_2_05; pcc->init_proc = init_proc_POWER10; pcc->check_pow = check_pow_nocheck; + pcc->check_attn = check_attn_hid0_power9; pcc->insns_flags = PPC_INSNS_BASE | PPC_ISEL | PPC_STRING | PPC_MFTB | PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES | PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE | @@ -6650,7 +6757,8 @@ POWERPC_FAMILY(POWER10)(ObjectClass *oc, void *data) pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE | POWERPC_FLAG_BE | POWERPC_FLAG_PMM | POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR | - POWERPC_FLAG_VSX | POWERPC_FLAG_SCV; + POWERPC_FLAG_VSX | POWERPC_FLAG_SCV | + POWERPC_FLAG_BHRB; pcc->l1_dcache_size = 0x8000; pcc->l1_icache_size = 0x8000; } @@ -6661,6 +6769,7 @@ void cpu_ppc_set_vhyp(PowerPCCPU *cpu, PPCVirtualHypervisor *vhyp) CPUPPCState *env = &cpu->env; cpu->vhyp = vhyp; + cpu->vhyp_class = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(vhyp); /* * With a virtual hypervisor mode we never allow the CPU to go @@ -6800,20 +6909,17 @@ static void init_ppc_proc(PowerPCCPU *cpu) } /* Allocate TLBs buffer when needed */ #if !defined(CONFIG_USER_ONLY) - if (env->nb_tlb != 0) { - int nb_tlb = env->nb_tlb; - if (env->id_tlbs != 0) { - nb_tlb *= 2; - } + if (env->nb_tlb) { switch (env->tlb_type) { case TLB_6XX: - env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, nb_tlb); + /* 6xx has separate TLBs for instructions and data hence times 2 */ + env->tlb.tlb6 = g_new0(ppc6xx_tlb_t, 2 * env->nb_tlb); break; case TLB_EMB: - env->tlb.tlbe = g_new0(ppcemb_tlb_t, nb_tlb); + env->tlb.tlbe = g_new0(ppcemb_tlb_t, env->nb_tlb); break; case TLB_MAS: - env->tlb.tlbm = g_new0(ppcmas_tlb_t, nb_tlb); + env->tlb.tlbm = g_new0(ppcmas_tlb_t, env->nb_tlb); break; } /* Pre-compute some useful values */ @@ -6824,6 +6930,11 @@ static void init_ppc_proc(PowerPCCPU *cpu) warn_report("no power management check handler registered." " Attempt QEMU to crash very soon !"); } + + if (env->check_attn == NULL) { + warn_report("no attn check handler registered." 
+ " Attempt QEMU to crash very soon !"); + } } @@ -7195,7 +7306,7 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) if (env->mmu_model != POWERPC_MMU_REAL) { ppc_tlb_invalidate_all(env); } - pmu_mmcr01_updated(env); + pmu_mmcr01a_updated(env); } /* clean any pending stop state */ @@ -7221,6 +7332,10 @@ static void ppc_cpu_reset_hold(Object *obj, ResetType type) } env->spr[i] = spr->default_value; } + +#if defined(TARGET_PPC64) + bhrb_reset_state(env); +#endif } #ifndef CONFIG_USER_ONLY @@ -7248,9 +7363,7 @@ static void ppc_cpu_exec_enter(CPUState *cs) PowerPCCPU *cpu = POWERPC_CPU(cs); if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->cpu_exec_enter(cpu->vhyp, cpu); + cpu->vhyp_class->cpu_exec_enter(cpu->vhyp, cpu); } } @@ -7259,9 +7372,7 @@ static void ppc_cpu_exec_exit(CPUState *cs) PowerPCCPU *cpu = POWERPC_CPU(cs); if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->cpu_exec_exit(cpu->vhyp, cpu); + cpu->vhyp_class->cpu_exec_exit(cpu->vhyp, cpu); } } #endif /* CONFIG_TCG */ @@ -7285,6 +7396,7 @@ static void ppc_cpu_instance_init(Object *obj) env->flags = pcc->flags; env->bfd_mach = pcc->bfd_mach; env->check_pow = pcc->check_pow; + env->check_attn = pcc->check_attn; /* * Mark HV mode as supported if the CPU has an MSR_HV bit in the @@ -7409,6 +7521,11 @@ static void ppc_cpu_class_init(ObjectClass *oc, void *data) #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &ppc_sysemu_ops; INTERRUPT_STATS_PROVIDER_CLASS(oc)->get_statistics = ppc_get_irq_stats; + + /* check_prot_access_type relies on MMU access and PAGE bits relations */ + qemu_build_assert(MMU_DATA_LOAD == 0 && MMU_DATA_STORE == 1 && + MMU_INST_FETCH == 2 && PAGE_READ == 1 && + PAGE_WRITE == 2 && PAGE_EXEC == 4); #endif cc->gdb_num_core_regs = 71; diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 0712098cf7..0cd542675f 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -19,6 +19,8 @@ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "qemu/log.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" #include "cpu.h" #include "exec/exec-all.h" #include "internal.h" @@ -152,6 +154,7 @@ static uint32_t ppc_ldl_code(CPUArchState *env, target_ulong addr) return insn; } + #endif static void ppc_excp_debug_sw_tlb(CPUPPCState *env, int excp) @@ -423,23 +426,57 @@ static void powerpc_set_excp_state(PowerPCCPU *cpu, target_ulong vector, env->reserve_addr = -1; } -static void powerpc_mcheck_checkstop(CPUPPCState *env) +#ifdef CONFIG_TCG +/* + * This stops the machine and logs CPU state without killing QEMU (like + * cpu_abort()) because it is often a guest error as opposed to a QEMU error, + * so the machine can still be debugged. + */ +static G_NORETURN void powerpc_checkstop(CPUPPCState *env, const char *reason) { CPUState *cs = env_cpu(env); + FILE *f; + + f = qemu_log_trylock(); + if (f) { + fprintf(f, "Entering checkstop state: %s\n", reason); + cpu_dump_state(cs, f, CPU_DUMP_FPU | CPU_DUMP_CCOP); + qemu_log_unlock(f); + } + + /* + * This stops the machine and logs CPU state without killing QEMU + * (like cpu_abort()) so the machine can still be debugged (because + * it is often a guest error). 
+ */ + qemu_system_guest_panicked(NULL); + cpu_loop_exit_noexc(cs); +} + +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) +void helper_attn(CPUPPCState *env) +{ + /* POWER attn is unprivileged when enabled by HID, otherwise illegal */ + if ((*env->check_attn)(env)) { + powerpc_checkstop(env, "host executed attn"); + } else { + raise_exception_err(env, POWERPC_EXCP_HV_EMU, + POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL); + } +} +#endif +#endif /* CONFIG_TCG */ +static void powerpc_mcheck_checkstop(CPUPPCState *env) +{ + /* KVM guests always have MSR[ME] enabled */ +#ifdef CONFIG_TCG if (FIELD_EX64(env->msr, MSR, ME)) { return; } - /* Machine check exception is not enabled. Enter checkstop state. */ - fprintf(stderr, "Machine check while not allowed. " - "Entering checkstop state\n"); - if (qemu_log_separate()) { - qemu_log("Machine check while not allowed. " - "Entering checkstop state\n"); - } - cs->halted = 1; - cpu_interrupt_exittb(cs); + powerpc_checkstop(env, "machine check with MSR[ME]=0"); +#endif } static void powerpc_excp_40x(PowerPCCPU *cpu, int excp) @@ -794,9 +831,7 @@ static void powerpc_excp_7xx(PowerPCCPU *cpu, int excp) * HV mode, we need to keep hypercall support. */ if (lev == 1 && cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->hypercall(cpu->vhyp, cpu); + cpu->vhyp_class->hypercall(cpu->vhyp, cpu); powerpc_reset_excp_state(cpu); return; } @@ -946,9 +981,7 @@ static void powerpc_excp_74xx(PowerPCCPU *cpu, int excp) * HV mode, we need to keep hypercall support. */ if (lev == 1 && cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->hypercall(cpu->vhyp, cpu); + cpu->vhyp_class->hypercall(cpu->vhyp, cpu); powerpc_reset_excp_state(cpu); return; } @@ -1437,9 +1470,7 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) /* "PAPR mode" built-in hypercall emulation */ if (lev == 1 && books_vhyp_handles_hcall(cpu)) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->hypercall(cpu->vhyp, cpu); + cpu->vhyp_class->hypercall(cpu->vhyp, cpu); powerpc_reset_excp_state(cpu); return; } @@ -1574,10 +1605,8 @@ static void powerpc_excp_books(PowerPCCPU *cpu, int excp) } if ((new_msr & MSR_HVB) && books_vhyp_handles_hv_excp(cpu)) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); /* Deliver interrupt to L1 by returning from the H_ENTER_NESTED call */ - vhc->deliver_hv_excp(cpu, excp); + cpu->vhyp_class->deliver_hv_excp(cpu, excp); powerpc_reset_excp_state(cpu); } else { /* Sanity check */ @@ -2750,7 +2779,7 @@ void helper_rfmci(CPUPPCState *env) } #endif /* !CONFIG_USER_ONLY */ -void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, +void helper_TW(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { if (!likely(!(((int32_t)arg1 < (int32_t)arg2 && (flags & 0x10)) || @@ -2764,7 +2793,7 @@ void helper_tw(CPUPPCState *env, target_ulong arg1, target_ulong arg2, } #ifdef TARGET_PPC64 -void helper_td(CPUPPCState *env, target_ulong arg1, target_ulong arg2, +void helper_TD(CPUPPCState *env, target_ulong arg1, target_ulong arg2, uint32_t flags) { if (!likely(!(((int64_t)arg1 < (int64_t)arg2 && (flags & 0x10)) || @@ -2940,7 +2969,7 @@ void helper_msgsnd(target_ulong rb) PowerPCCPU *cpu = POWERPC_CPU(cs); CPUPPCState *cenv = &cpu->env; - if ((rb & DBELL_BRDCAST) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { + if ((rb & DBELL_BRDCAST_MASK) || (cenv->spr[SPR_BOOKE_PIR] == pir)) { 
ppc_set_irq(cpu, irq, 1); } } @@ -2959,6 +2988,16 @@ static bool dbell_type_server(target_ulong rb) return (rb & DBELL_TYPE_MASK) == DBELL_TYPE_DBELL_SERVER; } +static inline bool dbell_bcast_core(target_ulong rb) +{ + return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_CORE; +} + +static inline bool dbell_bcast_subproc(target_ulong rb) +{ + return (rb & DBELL_BRDCAST_MASK) == DBELL_BRDCAST_SUBPROC; +} + void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) { if (!dbell_type_server(rb)) { @@ -2968,32 +3007,43 @@ void helper_book3s_msgclr(CPUPPCState *env, target_ulong rb) ppc_set_irq(env_archcpu(env), PPC_INTERRUPT_HDOORBELL, 0); } -static void book3s_msgsnd_common(int pir, int irq) +void helper_book3s_msgsnd(CPUPPCState *env, target_ulong rb) { - CPUState *cs; + int pir = rb & DBELL_PROCIDTAG_MASK; + bool brdcast = false; + CPUState *cs, *ccs; + PowerPCCPU *cpu; - bql_lock(); - CPU_FOREACH(cs) { - PowerPCCPU *cpu = POWERPC_CPU(cs); - CPUPPCState *cenv = &cpu->env; + if (!dbell_type_server(rb)) { + return; + } - /* TODO: broadcast message to all threads of the same processor */ - if (cenv->spr_cb[SPR_PIR].default_value == pir) { - ppc_set_irq(cpu, irq, 1); - } + cpu = ppc_get_vcpu_by_pir(pir); + if (!cpu) { + return; } - bql_unlock(); -} + cs = CPU(cpu); -void helper_book3s_msgsnd(target_ulong rb) -{ - int pir = rb & DBELL_PROCIDTAG_MASK; + if (dbell_bcast_core(rb) || (dbell_bcast_subproc(rb) && + (env->flags & POWERPC_FLAG_SMT_1LPAR))) { + brdcast = true; + } - if (!dbell_type_server(rb)) { + if (cs->nr_threads == 1 || !brdcast) { + ppc_set_irq(cpu, PPC_INTERRUPT_HDOORBELL, 1); return; } - book3s_msgsnd_common(pir, PPC_INTERRUPT_HDOORBELL); + /* + * Why is bql needed for walking CPU list? Answer seems to be because ppc + * irq handling needs it, but ppc_set_irq takes the lock itself if needed, + * so could this be removed? + */ + bql_lock(); + THREAD_SIBLING_FOREACH(cs, ccs) { + ppc_set_irq(POWERPC_CPU(ccs), PPC_INTERRUPT_HDOORBELL, 1); + } + bql_unlock(); } #ifdef TARGET_PPC64 diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index 4b3dcad5d1..51bce99fd5 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -490,54 +490,12 @@ static void float_invalid_op_addsub(CPUPPCState *env, int flags, } } -/* fadd - fadd. */ -float64 helper_fadd(CPUPPCState *env, float64 arg1, float64 arg2) +static inline void addsub_flags_handler(CPUPPCState *env, int flags, + uintptr_t ra) { - float64 ret = float64_add(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_addsub(env, flags, 1, GETPC()); - } - - return ret; -} - -/* fadds - fadds. */ -float64 helper_fadds(CPUPPCState *env, float64 arg1, float64 arg2) -{ - float64 ret = float64r32_add(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_addsub(env, flags, 1, GETPC()); - } - return ret; -} - -/* fsub - fsub. */ -float64 helper_fsub(CPUPPCState *env, float64 arg1, float64 arg2) -{ - float64 ret = float64_sub(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_addsub(env, flags, 1, GETPC()); + float_invalid_op_addsub(env, flags, 1, ra); } - - return ret; -} - -/* fsubs - fsubs. 
*/ -float64 helper_fsubs(CPUPPCState *env, float64 arg1, float64 arg2) -{ - float64 ret = float64r32_sub(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_addsub(env, flags, 1, GETPC()); - } - return ret; } static void float_invalid_op_mul(CPUPPCState *env, int flags, @@ -550,29 +508,11 @@ static void float_invalid_op_mul(CPUPPCState *env, int flags, } } -/* fmul - fmul. */ -float64 helper_fmul(CPUPPCState *env, float64 arg1, float64 arg2) -{ - float64 ret = float64_mul(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_mul(env, flags, 1, GETPC()); - } - - return ret; -} - -/* fmuls - fmuls. */ -float64 helper_fmuls(CPUPPCState *env, float64 arg1, float64 arg2) +static inline void mul_flags_handler(CPUPPCState *env, int flags, uintptr_t ra) { - float64 ret = float64r32_mul(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_mul(env, flags, 1, GETPC()); + float_invalid_op_mul(env, flags, 1, ra); } - return ret; } static void float_invalid_op_div(CPUPPCState *env, int flags, @@ -587,36 +527,14 @@ static void float_invalid_op_div(CPUPPCState *env, int flags, } } -/* fdiv - fdiv. */ -float64 helper_fdiv(CPUPPCState *env, float64 arg1, float64 arg2) -{ - float64 ret = float64_div(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_div(env, flags, 1, GETPC()); - } - if (unlikely(flags & float_flag_divbyzero)) { - float_zero_divide_excp(env, GETPC()); - } - - return ret; -} - -/* fdivs - fdivs. */ -float64 helper_fdivs(CPUPPCState *env, float64 arg1, float64 arg2) +static inline void div_flags_handler(CPUPPCState *env, int flags, uintptr_t ra) { - float64 ret = float64r32_div(arg1, arg2, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_div(env, flags, 1, GETPC()); + float_invalid_op_div(env, flags, 1, ra); } if (unlikely(flags & float_flag_divbyzero)) { - float_zero_divide_excp(env, GETPC()); + float_zero_divide_excp(env, ra); } - - return ret; } static uint64_t float_invalid_cvt(CPUPPCState *env, int flags, @@ -755,7 +673,7 @@ static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b, uint64_t helper_##op(CPUPPCState *env, uint64_t arg1, \ uint64_t arg2, uint64_t arg3) \ { return do_fmadd(env, arg1, arg2, arg3, madd_flags, GETPC()); } \ - uint64_t helper_##op##s(CPUPPCState *env, uint64_t arg1, \ + uint64_t helper_##op##S(CPUPPCState *env, uint64_t arg1, \ uint64_t arg2, uint64_t arg3) \ { return do_fmadds(env, arg1, arg2, arg3, madd_flags, GETPC()); } @@ -764,10 +682,10 @@ static uint64_t do_fmadds(CPUPPCState *env, float64 a, float64 b, #define NMADD_FLGS float_muladd_negate_result #define NMSUB_FLGS (float_muladd_negate_c | float_muladd_negate_result) -FPU_FMADD(fmadd, MADD_FLGS) -FPU_FMADD(fnmadd, NMADD_FLGS) -FPU_FMADD(fmsub, MSUB_FLGS) -FPU_FMADD(fnmsub, NMSUB_FLGS) +FPU_FMADD(FMADD, MADD_FLGS) +FPU_FMADD(FNMADD, NMADD_FLGS) +FPU_FMADD(FMSUB, MSUB_FLGS) +FPU_FMADD(FNMSUB, NMSUB_FLGS) /* frsp - frsp. 
*/ static uint64_t do_frsp(CPUPPCState *env, uint64_t arg, uintptr_t retaddr) @@ -812,81 +730,66 @@ float64 helper_##name(CPUPPCState *env, float64 arg) \ FPU_FSQRT(FSQRT, float64_sqrt) FPU_FSQRT(FSQRTS, float64r32_sqrt) -/* fre - fre. */ -float64 helper_fre(CPUPPCState *env, float64 arg) -{ - /* "Estimate" the reciprocal with actual division. */ - float64 ret = float64_div(float64_one, arg, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid_snan)) { - float_invalid_op_vxsnan(env, GETPC()); - } - if (unlikely(flags & float_flag_divbyzero)) { - float_zero_divide_excp(env, GETPC()); - /* For FPSCR.ZE == 0, the result is 1/2. */ - ret = float64_set_sign(float64_half, float64_is_neg(arg)); - } - - return ret; +#define FPU_FRE(name, op) \ +float64 helper_##name(CPUPPCState *env, float64 arg) \ +{ \ + /* "Estimate" the reciprocal with actual division. */ \ + float64 ret = op(float64_one, arg, &env->fp_status); \ + int flags = get_float_exception_flags(&env->fp_status); \ + \ + if (unlikely(flags & float_flag_invalid_snan)) { \ + float_invalid_op_vxsnan(env, GETPC()); \ + } \ + if (unlikely(flags & float_flag_divbyzero)) { \ + float_zero_divide_excp(env, GETPC()); \ + /* For FPSCR.ZE == 0, the result is 1/2. */ \ + ret = float64_set_sign(float64_half, float64_is_neg(arg)); \ + } \ + \ + return ret; \ } -/* fres - fres. */ -uint64_t helper_fres(CPUPPCState *env, uint64_t arg) -{ - /* "Estimate" the reciprocal with actual division. */ - float64 ret = float64r32_div(float64_one, arg, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid_snan)) { - float_invalid_op_vxsnan(env, GETPC()); - } - if (unlikely(flags & float_flag_divbyzero)) { - float_zero_divide_excp(env, GETPC()); - /* For FPSCR.ZE == 0, the result is 1/2. */ - ret = float64_set_sign(float64_half, float64_is_neg(arg)); - } - - return ret; +#define FPU_FRSQRTE(name, op) \ +float64 helper_##name(CPUPPCState *env, float64 arg) \ +{ \ + /* "Estimate" the reciprocal with actual division. */ \ + float64 rets = float64_sqrt(arg, &env->fp_status); \ + float64 retd = op(float64_one, rets, &env->fp_status); \ + int flags = get_float_exception_flags(&env->fp_status); \ + \ + if (unlikely(flags & float_flag_invalid)) { \ + float_invalid_op_sqrt(env, flags, 1, GETPC()); \ + } \ + if (unlikely(flags & float_flag_divbyzero)) { \ + /* Reciprocal of (square root of) zero. */ \ + float_zero_divide_excp(env, GETPC()); \ + } \ + \ + return retd; \ } -/* frsqrte - frsqrte. */ -float64 helper_frsqrte(CPUPPCState *env, float64 arg) -{ - /* "Estimate" the reciprocal with actual division. */ - float64 rets = float64_sqrt(arg, &env->fp_status); - float64 retd = float64_div(float64_one, rets, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_sqrt(env, flags, 1, GETPC()); - } - if (unlikely(flags & float_flag_divbyzero)) { - /* Reciprocal of (square root of) zero. */ - float_zero_divide_excp(env, GETPC()); - } - - return retd; +#define FPU_HELPER(name, op, flags_handler) \ +float64 helper_##name(CPUPPCState *env, float64 arg1, float64 arg2) \ +{ \ + float64 ret = op(arg1, arg2, &env->fp_status); \ + int flags = get_float_exception_flags(&env->fp_status); \ + uintptr_t ra = GETPC(); \ + flags_handler(env, flags, ra); \ + return ret; \ } -/* frsqrtes - frsqrtes. 
*/ -float64 helper_frsqrtes(CPUPPCState *env, float64 arg) -{ - /* "Estimate" the reciprocal with actual division. */ - float64 rets = float64_sqrt(arg, &env->fp_status); - float64 retd = float64r32_div(float64_one, rets, &env->fp_status); - int flags = get_float_exception_flags(&env->fp_status); - - if (unlikely(flags & float_flag_invalid)) { - float_invalid_op_sqrt(env, flags, 1, GETPC()); - } - if (unlikely(flags & float_flag_divbyzero)) { - /* Reciprocal of (square root of) zero. */ - float_zero_divide_excp(env, GETPC()); - } - - return retd; -} +FPU_FRE(FRE, float64_div) +FPU_FRE(FRES, float64r32_div) +FPU_FRSQRTE(FRSQRTE, float64_div) +FPU_FRSQRTE(FRSQRTES, float64r32_div) +FPU_HELPER(FADD, float64_add, addsub_flags_handler) +FPU_HELPER(FADDS, float64r32_add, addsub_flags_handler) +FPU_HELPER(FSUB, float64_sub, addsub_flags_handler) +FPU_HELPER(FSUBS, float64r32_sub, addsub_flags_handler) +FPU_HELPER(FMUL, float64_mul, mul_flags_handler) +FPU_HELPER(FMULS, float64r32_mul, mul_flags_handler) +FPU_HELPER(FDIV, float64_div, div_flags_handler) +FPU_HELPER(FDIVS, float64r32_div, div_flags_handler) /* fsel - fsel. */ uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c) @@ -903,7 +806,7 @@ uint64_t helper_FSEL(uint64_t a, uint64_t b, uint64_t c) } } -uint32_t helper_ftdiv(uint64_t fra, uint64_t frb) +uint32_t helper_FTDIV(uint64_t fra, uint64_t frb) { int fe_flag = 0; int fg_flag = 0; @@ -939,7 +842,7 @@ uint32_t helper_ftdiv(uint64_t fra, uint64_t frb) return 0x8 | (fg_flag ? 4 : 0) | (fe_flag ? 2 : 0); } -uint32_t helper_ftsqrt(uint64_t frb) +uint32_t helper_FTSQRT(uint64_t frb) { int fe_flag = 0; int fg_flag = 0; diff --git a/target/ppc/helper.h b/target/ppc/helper.h index 86f97ee1e7..76b8f25c77 100644 --- a/target/ppc/helper.h +++ b/target/ppc/helper.h @@ -1,8 +1,8 @@ DEF_HELPER_FLAGS_3(raise_exception_err, TCG_CALL_NO_WG, noreturn, env, i32, i32) DEF_HELPER_FLAGS_2(raise_exception, TCG_CALL_NO_WG, noreturn, env, i32) -DEF_HELPER_FLAGS_4(tw, TCG_CALL_NO_WG, void, env, tl, tl, i32) +DEF_HELPER_FLAGS_4(TW, TCG_CALL_NO_WG, void, env, tl, tl, i32) #if defined(TARGET_PPC64) -DEF_HELPER_FLAGS_4(td, TCG_CALL_NO_WG, void, env, tl, tl, i32) +DEF_HELPER_FLAGS_4(TD, TCG_CALL_NO_WG, void, env, tl, tl, i32) #endif DEF_HELPER_4(HASHST, void, env, tl, tl, tl) DEF_HELPER_4(HASHCHK, void, env, tl, tl, tl) @@ -30,6 +30,7 @@ DEF_HELPER_2(store_dawr0, void, env, tl) DEF_HELPER_2(store_dawrx0, void, env, tl) DEF_HELPER_2(store_mmcr0, void, env, tl) DEF_HELPER_2(store_mmcr1, void, env, tl) +DEF_HELPER_2(store_mmcrA, void, env, tl) DEF_HELPER_3(store_pmc, void, env, i32, i64) DEF_HELPER_2(read_pmc, tl, env, i32) DEF_HELPER_2(insns_inc, void, env, i32) @@ -52,14 +53,14 @@ DEF_HELPER_FLAGS_2(icbiep, TCG_CALL_NO_WG, void, env, tl) DEF_HELPER_5(lscbx, tl, env, tl, i32, i32, i32) #if defined(TARGET_PPC64) -DEF_HELPER_4(divdeu, i64, env, i64, i64, i32) -DEF_HELPER_4(divde, i64, env, i64, i64, i32) +DEF_HELPER_4(DIVDEU, i64, env, i64, i64, i32) +DEF_HELPER_4(DIVDE, i64, env, i64, i64, i32) #endif -DEF_HELPER_4(divweu, tl, env, tl, tl, i32) -DEF_HELPER_4(divwe, tl, env, tl, tl, i32) +DEF_HELPER_4(DIVWEU, tl, env, tl, tl, i32) +DEF_HELPER_4(DIVWE, tl, env, tl, tl, i32) -DEF_HELPER_FLAGS_1(popcntb, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_2(cmpb, TCG_CALL_NO_RWG_SE, tl, tl, tl) +DEF_HELPER_FLAGS_1(POPCNTB, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(CMPB, TCG_CALL_NO_RWG_SE, tl, tl, tl) DEF_HELPER_3(sraw, tl, env, tl, tl) DEF_HELPER_FLAGS_2(CFUGED, TCG_CALL_NO_RWG_SE, i64, i64, i64) 
DEF_HELPER_FLAGS_2(PDEPD, TCG_CALL_NO_RWG_SE, i64, i64, i64) @@ -67,12 +68,12 @@ DEF_HELPER_FLAGS_2(PEXTD, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_FLAGS_1(CDTBCD, TCG_CALL_NO_RWG_SE, tl, tl) DEF_HELPER_FLAGS_1(CBCDTD, TCG_CALL_NO_RWG_SE, tl, tl) #if defined(TARGET_PPC64) -DEF_HELPER_FLAGS_2(cmpeqb, TCG_CALL_NO_RWG_SE, i32, tl, tl) -DEF_HELPER_FLAGS_1(popcntw, TCG_CALL_NO_RWG_SE, tl, tl) -DEF_HELPER_FLAGS_2(bpermd, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(CMPEQB, TCG_CALL_NO_RWG_SE, i32, tl, tl) +DEF_HELPER_FLAGS_1(POPCNTW, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_2(BPERMD, TCG_CALL_NO_RWG_SE, i64, i64, i64) DEF_HELPER_3(srad, tl, env, tl, tl) -DEF_HELPER_FLAGS_0(darn32, TCG_CALL_NO_RWG, tl) -DEF_HELPER_FLAGS_0(darn64, TCG_CALL_NO_RWG, tl) +DEF_HELPER_FLAGS_0(DARN32, TCG_CALL_NO_RWG, tl) +DEF_HELPER_FLAGS_0(DARN64, TCG_CALL_NO_RWG, tl) #endif DEF_HELPER_FLAGS_1(cntlsw32, TCG_CALL_NO_RWG_SE, i32, i32) @@ -110,32 +111,32 @@ DEF_HELPER_2(friz, i64, env, i64) DEF_HELPER_2(frip, i64, env, i64) DEF_HELPER_2(frim, i64, env, i64) -DEF_HELPER_3(fadd, f64, env, f64, f64) -DEF_HELPER_3(fadds, f64, env, f64, f64) -DEF_HELPER_3(fsub, f64, env, f64, f64) -DEF_HELPER_3(fsubs, f64, env, f64, f64) -DEF_HELPER_3(fmul, f64, env, f64, f64) -DEF_HELPER_3(fmuls, f64, env, f64, f64) -DEF_HELPER_3(fdiv, f64, env, f64, f64) -DEF_HELPER_3(fdivs, f64, env, f64, f64) -DEF_HELPER_4(fmadd, i64, env, i64, i64, i64) -DEF_HELPER_4(fmsub, i64, env, i64, i64, i64) -DEF_HELPER_4(fnmadd, i64, env, i64, i64, i64) -DEF_HELPER_4(fnmsub, i64, env, i64, i64, i64) -DEF_HELPER_4(fmadds, i64, env, i64, i64, i64) -DEF_HELPER_4(fmsubs, i64, env, i64, i64, i64) -DEF_HELPER_4(fnmadds, i64, env, i64, i64, i64) -DEF_HELPER_4(fnmsubs, i64, env, i64, i64, i64) +DEF_HELPER_3(FADD, f64, env, f64, f64) +DEF_HELPER_3(FADDS, f64, env, f64, f64) +DEF_HELPER_3(FSUB, f64, env, f64, f64) +DEF_HELPER_3(FSUBS, f64, env, f64, f64) +DEF_HELPER_3(FMUL, f64, env, f64, f64) +DEF_HELPER_3(FMULS, f64, env, f64, f64) +DEF_HELPER_3(FDIV, f64, env, f64, f64) +DEF_HELPER_3(FDIVS, f64, env, f64, f64) +DEF_HELPER_4(FMADD, i64, env, i64, i64, i64) +DEF_HELPER_4(FMSUB, i64, env, i64, i64, i64) +DEF_HELPER_4(FNMADD, i64, env, i64, i64, i64) +DEF_HELPER_4(FNMSUB, i64, env, i64, i64, i64) +DEF_HELPER_4(FMADDS, i64, env, i64, i64, i64) +DEF_HELPER_4(FMSUBS, i64, env, i64, i64, i64) +DEF_HELPER_4(FNMADDS, i64, env, i64, i64, i64) +DEF_HELPER_4(FNMSUBS, i64, env, i64, i64, i64) DEF_HELPER_2(FSQRT, f64, env, f64) DEF_HELPER_2(FSQRTS, f64, env, f64) -DEF_HELPER_2(fre, i64, env, i64) -DEF_HELPER_2(fres, i64, env, i64) -DEF_HELPER_2(frsqrte, i64, env, i64) -DEF_HELPER_2(frsqrtes, i64, env, i64) +DEF_HELPER_2(FRE, i64, env, i64) +DEF_HELPER_2(FRES, i64, env, i64) +DEF_HELPER_2(FRSQRTE, i64, env, i64) +DEF_HELPER_2(FRSQRTES, i64, env, i64) DEF_HELPER_FLAGS_3(FSEL, TCG_CALL_NO_RWG_SE, i64, i64, i64, i64) -DEF_HELPER_FLAGS_2(ftdiv, TCG_CALL_NO_RWG_SE, i32, i64, i64) -DEF_HELPER_FLAGS_1(ftsqrt, TCG_CALL_NO_RWG_SE, i32, i64) +DEF_HELPER_FLAGS_2(FTDIV, TCG_CALL_NO_RWG_SE, i32, i64, i64) +DEF_HELPER_FLAGS_1(FTSQRT, TCG_CALL_NO_RWG_SE, i32, i64) #define dh_alias_avr ptr #define dh_ctype_avr ppc_avr_t * @@ -267,12 +268,12 @@ DEF_HELPER_5(VMSUMSHS, void, env, avr, avr, avr, avr) DEF_HELPER_FLAGS_5(VMLADDUHM, TCG_CALL_NO_RWG, void, avr, avr, avr, avr, i32) DEF_HELPER_FLAGS_2(mtvscr, TCG_CALL_NO_RWG, void, env, i32) DEF_HELPER_FLAGS_1(mfvscr, TCG_CALL_NO_RWG, i32, env) -DEF_HELPER_3(lvebx, void, env, avr, tl) -DEF_HELPER_3(lvehx, void, env, avr, tl) 
-DEF_HELPER_3(lvewx, void, env, avr, tl) -DEF_HELPER_3(stvebx, void, env, avr, tl) -DEF_HELPER_3(stvehx, void, env, avr, tl) -DEF_HELPER_3(stvewx, void, env, avr, tl) +DEF_HELPER_3(LVEBX, void, env, avr, tl) +DEF_HELPER_3(LVEHX, void, env, avr, tl) +DEF_HELPER_3(LVEWX, void, env, avr, tl) +DEF_HELPER_3(STVEBX, void, env, avr, tl) +DEF_HELPER_3(STVEHX, void, env, avr, tl) +DEF_HELPER_3(STVEWX, void, env, avr, tl) #if defined(TARGET_PPC64) DEF_HELPER_4(lxvl, void, env, tl, vsr, tl) DEF_HELPER_4(lxvll, void, env, tl, vsr, tl) @@ -694,14 +695,12 @@ DEF_HELPER_FLAGS_3(store_sr, TCG_CALL_NO_RWG, void, env, tl, tl) DEF_HELPER_1(msgsnd, void, tl) DEF_HELPER_2(msgclr, void, env, tl) -DEF_HELPER_1(book3s_msgsnd, void, tl) +DEF_HELPER_2(book3s_msgsnd, void, env, tl) DEF_HELPER_2(book3s_msgclr, void, env, tl) #endif DEF_HELPER_4(dlmzb, tl, env, tl, tl, i32) #if !defined(CONFIG_USER_ONLY) -DEF_HELPER_2(rac, tl, env, tl) - DEF_HELPER_2(load_dcr, tl, env, tl) DEF_HELPER_3(store_dcr, void, env, tl, tl) #endif @@ -729,6 +728,9 @@ DEF_HELPER_2(book3s_msgsndp, void, env, tl) DEF_HELPER_2(book3s_msgclrp, void, env, tl) DEF_HELPER_1(load_tfmr, tl, env) DEF_HELPER_2(store_tfmr, void, env, tl) +DEF_HELPER_FLAGS_2(store_sprc, TCG_CALL_NO_RWG, void, env, tl) +DEF_HELPER_FLAGS_1(load_sprd, TCG_CALL_NO_RWG_SE, tl, env) +DEF_HELPER_FLAGS_2(store_sprd, TCG_CALL_NO_RWG, void, env, tl) #endif DEF_HELPER_2(store_sdr1, void, env, tl) DEF_HELPER_2(store_pidr, void, env, tl) @@ -819,3 +821,11 @@ DEF_HELPER_4(DSCLIQ, void, env, fprp, fprp, i32) DEF_HELPER_1(tbegin, void, env) DEF_HELPER_FLAGS_1(fixup_thrm, TCG_CALL_NO_RWG, void, env) + +#if !defined(CONFIG_USER_ONLY) +#if defined(TARGET_PPC64) +DEF_HELPER_1(clrbhrb, void, env) +DEF_HELPER_FLAGS_2(mfbhrbe, TCG_CALL_NO_WG, i64, env, i32) +DEF_HELPER_1(attn, noreturn, env) +#endif +#endif diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 25258986e3..02076e96fb 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -47,6 +47,39 @@ void hreg_swap_gpr_tgpr(CPUPPCState *env) env->tgpr[3] = tmp; } +#if defined(TARGET_PPC64) +static bool hreg_check_bhrb_enable(CPUPPCState *env) +{ + bool pr = !!(env->msr & (1 << MSR_PR)); + target_long mmcr0; + bool fcp; + bool hv; + + /* ISA 3.1 adds the PMCRA[BRHBRD] and problem state checks */ + if ((env->insns_flags2 & PPC2_ISA310) && + ((env->spr[SPR_POWER_MMCRA] & MMCRA_BHRBRD) || !pr)) { + return false; + } + + /* Check for BHRB "frozen" conditions */ + mmcr0 = env->spr[SPR_POWER_MMCR0]; + fcp = !!(mmcr0 & MMCR0_FCP); + if (mmcr0 & MMCR0_FCPC) { + hv = !!(env->msr & (1ull << MSR_HV)); + if (fcp) { + if (hv && pr) { + return false; + } + } else if (!hv && pr) { + return false; + } + } else if (fcp && pr) { + return false; + } + return true; +} +#endif + static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) { uint32_t hflags = 0; @@ -61,6 +94,9 @@ static uint32_t hreg_compute_pmu_hflags_value(CPUPPCState *env) if (env->spr[SPR_POWER_MMCR0] & MMCR0_PMCjCE) { hflags |= 1 << HFLAGS_PMCJCE; } + if (hreg_check_bhrb_enable(env)) { + hflags |= 1 << HFLAGS_BHRB_ENABLE; + } #ifndef CONFIG_USER_ONLY if (env->pmc_ins_cnt) { @@ -85,6 +121,7 @@ static uint32_t hreg_compute_pmu_hflags_mask(CPUPPCState *env) hflags_mask |= 1 << HFLAGS_PMCJCE; hflags_mask |= 1 << HFLAGS_INSN_CNT; hflags_mask |= 1 << HFLAGS_PMC_OTHER; + hflags_mask |= 1 << HFLAGS_BHRB_ENABLE; #endif return hflags_mask; } @@ -334,7 +371,7 @@ void check_tlb_flush(CPUPPCState *env, bool global) if (global && (env->tlb_need_flush & 
TLB_NEED_GLOBAL_FLUSH)) { env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; - tlb_flush_all_cpus(cs); + tlb_flush_all_cpus_synced(cs); return; } @@ -693,7 +730,6 @@ void register_6xx_7xx_soft_tlb(CPUPPCState *env, int nb_tlbs, int nb_ways) #if !defined(CONFIG_USER_ONLY) env->nb_tlb = nb_tlbs; env->nb_ways = nb_ways; - env->id_tlbs = 1; env->tlb_type = TLB_6XX; spr_register(env, SPR_DMISS, "DMISS", SPR_NOACCESS, SPR_NOACCESS, diff --git a/target/ppc/insn32.decode b/target/ppc/insn32.decode index eada59f59f..ee33141476 100644 --- a/target/ppc/insn32.decode +++ b/target/ppc/insn32.decode @@ -20,12 +20,24 @@ &A frt fra frb frc rc:bool @A ...... frt:5 fra:5 frb:5 frc:5 ..... rc:1 &A +&A_tab frt fra frb rc:bool +@A_tab ...... frt:5 fra:5 frb:5 ..... ..... rc:1 &A_tab + +&A_tac frt fra frc rc:bool +@A_tac ...... frt:5 fra:5 ..... frc:5 ..... rc:1 &A_tac + &A_tb frt frb rc:bool @A_tb ...... frt:5 ..... frb:5 ..... ..... rc:1 &A_tb +&A_tab_bc rt ra rb bc +@A_tab_bc ...... rt:5 ra:5 rb:5 bc:5 ..... . &A_tab_bc + &D rt ra si:int64_t @D ...... rt:5 ra:5 si:s16 &D +&D_ui rt ra ui:uint64_t +@D_ui ...... rt:5 ra:5 ui:16 &D_ui + &D_bf bf l:bool ra imm @D_bfs ...... bf:3 . l:1 ra:5 imm:s16 &D_bf @D_bfu ...... bf:3 . l:1 ra:5 imm:16 &D_bf @@ -93,6 +105,9 @@ &X_sa rs ra @X_sa ...... rs:5 ra:5 ..... .......... . &X_sa +&X_sa_rc rs ra rc +@X_sa_rc ...... rs:5 ra:5 ..... .......... rc:1 &X_sa_rc + %x_frtp 22:4 !function=times_2 %x_frap 17:4 !function=times_2 %x_frbp 12:4 !function=times_2 @@ -124,6 +139,9 @@ &X_bf bf ra rb @X_bf ...... bf:3 .. ra:5 rb:5 .......... . &X_bf +&X_bf_b bf rb +@X_bf_b ...... bf:3 .. ..... rb:5 .......... . &X_bf_b + @X_bf_ap_bp ...... bf:3 .. ....0 ....0 .......... . &X_bf ra=%x_frap rb=%x_frbp @X_bf_a_bp ...... bf:3 .. ra:5 ....0 .......... . &X_bf rb=%x_frbp @@ -187,12 +205,18 @@ &X_a ra @X_a ...... ra:3 .. ..... ..... .......... . &X_a +&X_tl rt l +@X_tl ...... rt:5 ... l:2 ..... .......... . &X_tl + &XO rt ra rb oe:bool rc:bool @XO ...... rt:5 ra:5 rb:5 oe:1 ......... rc:1 &XO &XO_ta rt ra oe:bool rc:bool @XO_ta ...... rt:5 ra:5 ..... oe:1 ......... rc:1 &XO_ta +&XO_tab_rc rt ra rb rc:bool +@XO_tab_rc ...... rt:5 ra:5 rb:5 . ......... rc:1 &XO_tab_rc + %xx_xt 0:1 21:5 %xx_xb 1:1 11:5 %xx_xa 2:1 16:5 @@ -325,6 +349,19 @@ CMP 011111 ... - . ..... ..... 0000000000 - @X_bfl CMPL 011111 ... - . ..... ..... 0000100000 - @X_bfl CMPI 001011 ... - . ..... ................ @D_bfs CMPLI 001010 ... - . ..... ................ @D_bfu +CMPRB 011111 ... - . ..... ..... 0011000000 - @X_bfl +CMPEQB 011111 ... -- ..... ..... 0011100000 - @X_bf + +### Fixed-Point Trap Instructions + +TW 011111 ..... ..... ..... 0000000100 - @X +TD 011111 ..... ..... ..... 0001000100 - @X +TWI 000011 ..... ..... ................ @D +TDI 000010 ..... ..... ................ @D + +### Fixed-Point Select Instruction + +ISEL 011111 ..... ..... ..... ..... 01111 - @A_tab_bc ### Fixed-Point Arithmetic Instructions @@ -353,8 +390,73 @@ SUBFE 011111 ..... ..... ..... . 010001000 . @XO SUBFME 011111 ..... ..... ----- . 011101000 . @XO_ta SUBFZE 011111 ..... ..... ----- . 011001000 . @XO_ta +MULLI 000111 ..... ..... ................ @D +MULLW 011111 ..... ..... ..... 0 011101011 . @XO_tab_rc +MULLWO 011111 ..... ..... ..... 1 011101011 . @XO_tab_rc +MULHW 011111 ..... ..... ..... - 001001011 . @XO_tab_rc +MULHWU 011111 ..... ..... ..... - 000001011 . @XO_tab_rc + +DIVW 011111 ..... ..... ..... . 111101011 . @XO +DIVWU 011111 ..... ..... ..... . 111001011 . @XO +DIVWE 011111 ..... ..... 
..... . 110101011 . @XO +DIVWEU 011111 ..... ..... ..... . 110001011 . @XO + +MODSW 011111 ..... ..... ..... 1100001011 - @X +MODUW 011111 ..... ..... ..... 0100001011 - @X +DARN 011111 ..... --- .. ----- 1011110011 - @X_tl +NEG 011111 ..... ..... ----- . 001101000 . @XO_ta + +MULLD 011111 ..... ..... ..... 0 011101001 . @XO_tab_rc +MULLDO 011111 ..... ..... ..... 1 011101001 . @XO_tab_rc +MULHD 011111 ..... ..... ..... - 001001001 . @XO_tab_rc +MULHDU 011111 ..... ..... ..... - 000001001 . @XO_tab_rc + +MADDLD 000100 ..... ..... ..... ..... 110011 @VA +MADDHD 000100 ..... ..... ..... ..... 110000 @VA +MADDHDU 000100 ..... ..... ..... ..... 110001 @VA + +DIVD 011111 ..... ..... ..... . 111101001 . @XO +DIVDU 011111 ..... ..... ..... . 111001001 . @XO +DIVDE 011111 ..... ..... ..... . 110101001 . @XO +DIVDEU 011111 ..... ..... ..... . 110001001 . @XO + +MODSD 011111 ..... ..... ..... 1100001001 - @X +MODUD 011111 ..... ..... ..... 0100001001 - @X + ## Fixed-Point Logical Instructions +ANDI_ 011100 ..... ..... ................ @D_ui +ANDIS_ 011101 ..... ..... ................ @D_ui +ORI 011000 ..... ..... ................ @D_ui +ORIS 011001 ..... ..... ................ @D_ui +XORI 011010 ..... ..... ................ @D_ui +XORIS 011011 ..... ..... ................ @D_ui + +AND 011111 ..... ..... ..... 0000011100 . @X_rc +ANDC 011111 ..... ..... ..... 0000111100 . @X_rc +NAND 011111 ..... ..... ..... 0111011100 . @X_rc +OR 011111 ..... ..... ..... 0110111100 . @X_rc +ORC 011111 ..... ..... ..... 0110011100 . @X_rc +NOR 011111 ..... ..... ..... 0001111100 . @X_rc +XOR 011111 ..... ..... ..... 0100111100 . @X_rc +EQV 011111 ..... ..... ..... 0100011100 . @X_rc +CMPB 011111 ..... ..... ..... 0111111100 . @X_rc + +EXTSB 011111 ..... ..... ----- 1110111010 . @X_sa_rc +EXTSH 011111 ..... ..... ----- 1110011010 . @X_sa_rc +EXTSW 011111 ..... ..... ----- 1111011010 . @X_sa_rc +CNTLZW 011111 ..... ..... ----- 0000011010 . @X_sa_rc +CNTTZW 011111 ..... ..... ----- 1000011010 . @X_sa_rc +CNTLZD 011111 ..... ..... ----- 0000111010 . @X_sa_rc +CNTTZD 011111 ..... ..... ----- 1000111010 . @X_sa_rc +POPCNTB 011111 ..... ..... ----- 0001111010 . @X_sa_rc + +POPCNTW 011111 ..... ..... ----- 0101111010 - @X_sa +POPCNTD 011111 ..... ..... ----- 0111111010 - @X_sa +PRTYW 011111 ..... ..... ----- 0010011010 - @X_sa +PRTYD 011111 ..... ..... ----- 0010111010 - @X_sa + +BPERMD 011111 ..... ..... ..... 0011111100 - @X CFUGED 011111 ..... ..... ..... 0011011100 - @X CNTLZDM 011111 ..... ..... ..... 0000111011 - @X CNTTZDM 011111 ..... ..... ..... 1000111011 - @X @@ -400,9 +502,42 @@ STFDUX 011111 ..... ...... .... 1011110111 - @X ### Floating-Point Arithmetic Instructions +FADD 111111 ..... ..... ..... ----- 10101 . @A_tab +FADDS 111011 ..... ..... ..... ----- 10101 . @A_tab + +FSUB 111111 ..... ..... ..... ----- 10100 . @A_tab +FSUBS 111011 ..... ..... ..... ----- 10100 . @A_tab + +FMUL 111111 ..... ..... ----- ..... 11001 . @A_tac +FMULS 111011 ..... ..... ----- ..... 11001 . @A_tac + +FDIV 111111 ..... ..... ..... ----- 10010 . @A_tab +FDIVS 111011 ..... ..... ..... ----- 10010 . @A_tab + FSQRT 111111 ..... ----- ..... ----- 10110 . @A_tb FSQRTS 111011 ..... ----- ..... ----- 10110 . @A_tb +FRE 111111 ..... ----- ..... ----- 11000 . @A_tb +FRES 111011 ..... ----- ..... ----- 11000 . @A_tb + +FRSQRTE 111111 ..... ----- ..... ----- 11010 . @A_tb +FRSQRTES 111011 ..... ----- ..... ----- 11010 . @A_tb + +FTDIV 111111 ... -- ..... ..... 0010000000 - @X_bf +FTSQRT 111111 ... -- ----- ..... 
0010100000 - @X_bf_b + +FMADD 111111 ..... ..... ..... ..... 11101 . @A +FMADDS 111011 ..... ..... ..... ..... 11101 . @A + +FMSUB 111111 ..... ..... ..... ..... 11100 . @A +FMSUBS 111011 ..... ..... ..... ..... 11100 . @A + +FNMADD 111111 ..... ..... ..... ..... 11111 . @A +FNMADDS 111011 ..... ..... ..... ..... 11111 . @A + +FNMSUB 111111 ..... ..... ..... ..... 11110 . @A +FNMSUBS 111011 ..... ..... ..... ..... 11110 . @A + ### Floating-Point Select Instruction FSEL 111111 ..... ..... ..... ..... 10111 . @A @@ -526,6 +661,23 @@ DSCRIQ 111111 ..... ..... ...... 001100010 . @Z22_tap_sh_rc VPMSUMD 000100 ..... ..... ..... 10011001000 @VX +## Vector Load/Store Instructions + +LVEBX 011111 ..... ..... ..... 0000000111 - @X +LVEHX 011111 ..... ..... ..... 0000100111 - @X +LVEWX 011111 ..... ..... ..... 0001000111 - @X +LVX 011111 ..... ..... ..... 0001100111 - @X +LVXL 011111 ..... ..... ..... 0101100111 - @X + +STVEBX 011111 ..... ..... ..... 0010000111 - @X +STVEHX 011111 ..... ..... ..... 0010100111 - @X +STVEWX 011111 ..... ..... ..... 0011000111 - @X +STVX 011111 ..... ..... ..... 0011100111 - @X +STVXL 011111 ..... ..... ..... 0111100111 - @X + +LVSL 011111 ..... ..... ..... 0000000110 - @X +LVSR 011111 ..... ..... ..... 0000100110 - @X + ## Vector Integer Instructions VCMPEQUB 000100 ..... ..... ..... . 0000000110 @VC @@ -557,6 +709,17 @@ VCMPNEZW 000100 ..... ..... ..... . 0110000111 @VC VCMPSQ 000100 ... -- ..... ..... 00101000001 @VX_bf VCMPUQ 000100 ... -- ..... ..... 00100000001 @VX_bf +## Vector Integer Logical Instructions + +VAND 000100 ..... ..... ..... 10000000100 @VX +VANDC 000100 ..... ..... ..... 10001000100 @VX +VNAND 000100 ..... ..... ..... 10110000100 @VX +VOR 000100 ..... ..... ..... 10010000100 @VX +VORC 000100 ..... ..... ..... 10101000100 @VX +VNOR 000100 ..... ..... ..... 10100000100 @VX +VXOR 000100 ..... ..... ..... 10011000100 @VX +VEQV 000100 ..... ..... ..... 11010000100 @VX + ## Vector Integer Average Instructions VAVGSB 000100 ..... ..... ..... 10100000010 @VX @@ -689,6 +852,28 @@ VEXTSD2Q 000100 ..... 11011 ..... 11000000010 @VX_tb VNEGD 000100 ..... 00111 ..... 11000000010 @VX_tb VNEGW 000100 ..... 00110 ..... 11000000010 @VX_tb +## Vector Integer Maximum/Minimum Instructions + +VMAXUB 000100 ..... ..... ..... 00000000010 @VX +VMAXUH 000100 ..... ..... ..... 00001000010 @VX +VMAXUW 000100 ..... ..... ..... 00010000010 @VX +VMAXUD 000100 ..... ..... ..... 00011000010 @VX + +VMAXSB 000100 ..... ..... ..... 00100000010 @VX +VMAXSH 000100 ..... ..... ..... 00101000010 @VX +VMAXSW 000100 ..... ..... ..... 00110000010 @VX +VMAXSD 000100 ..... ..... ..... 00111000010 @VX + +VMINUB 000100 ..... ..... ..... 01000000010 @VX +VMINUH 000100 ..... ..... ..... 01001000010 @VX +VMINUW 000100 ..... ..... ..... 01010000010 @VX +VMINUD 000100 ..... ..... ..... 01011000010 @VX + +VMINSB 000100 ..... ..... ..... 01100000010 @VX +VMINSH 000100 ..... ..... ..... 01101000010 @VX +VMINSW 000100 ..... ..... ..... 01110000010 @VX +VMINSD 000100 ..... ..... ..... 01111000010 @VX + ## Vector Mask Manipulation Instructions MTVSRBM 000100 ..... 10000 ..... 11001000010 @VX_tb @@ -998,3 +1183,22 @@ MSGSND 011111 ----- ----- ..... 0011001110 - @X_rb MSGCLRP 011111 ----- ----- ..... 0010101110 - @X_rb MSGSNDP 011111 ----- ----- ..... 0010001110 - @X_rb MSGSYNC 011111 ----- ----- ----- 1101110110 - + +# Memory Barrier Instructions + +&X_sync l sc +@X_sync ...... .. l:3 ... sc:2 ..... .......... . &X_sync +SYNC 011111 -- ... --- .. 
----- 1001010110 - @X_sync +EIEIO 011111 ----- ----- ----- 1101010110 - + +# Branch History Rolling Buffer (BHRB) Instructions + +&XFX_bhrbe rt bhrbe +@XFX_bhrbe ...... rt:5 bhrbe:10 .......... - &XFX_bhrbe + +MFBHRBE 011111 ..... ..... ..... 0100101110 - @XFX_bhrbe +CLRBHRB 011111 ----- ----- ----- 0110101110 - + +## Misc POWER instructions + +ATTN 000000 00000 00000 00000 0100000000 0 diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index 0a5c3e78a4..2c6b633d65 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -44,7 +44,7 @@ static inline void helper_update_ov_legacy(CPUPPCState *env, int ov) } } -target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb, +target_ulong helper_DIVWEU(CPUPPCState *env, target_ulong ra, target_ulong rb, uint32_t oe) { uint64_t rt = 0; @@ -71,7 +71,7 @@ target_ulong helper_divweu(CPUPPCState *env, target_ulong ra, target_ulong rb, return (target_ulong)rt; } -target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb, +target_ulong helper_DIVWE(CPUPPCState *env, target_ulong ra, target_ulong rb, uint32_t oe) { int64_t rt = 0; @@ -101,7 +101,7 @@ target_ulong helper_divwe(CPUPPCState *env, target_ulong ra, target_ulong rb, #if defined(TARGET_PPC64) -uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) +uint64_t helper_DIVDEU(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) { uint64_t rt = 0; int overflow = 0; @@ -120,7 +120,7 @@ uint64_t helper_divdeu(CPUPPCState *env, uint64_t ra, uint64_t rb, uint32_t oe) return rt; } -uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) +uint64_t helper_DIVDE(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) { uint64_t rt = 0; int64_t ra = (int64_t)rau; @@ -159,7 +159,7 @@ uint64_t helper_divde(CPUPPCState *env, uint64_t rau, uint64_t rbu, uint32_t oe) /* When you XOR the pattern and there is a match, that byte will be zero */ #define hasvalue(x, n) (haszero((x) ^ pattern(n))) -uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb) +uint32_t helper_CMPEQB(target_ulong ra, target_ulong rb) { return hasvalue(rb, ra) ? CRF_GT : 0; } @@ -171,7 +171,7 @@ uint32_t helper_cmpeqb(target_ulong ra, target_ulong rb) /* * Return a random number. */ -uint64_t helper_darn32(void) +uint64_t helper_DARN32(void) { Error *err = NULL; uint32_t ret; @@ -186,7 +186,7 @@ uint64_t helper_darn32(void) return ret; } -uint64_t helper_darn64(void) +uint64_t helper_DARN64(void) { Error *err = NULL; uint64_t ret; @@ -201,7 +201,7 @@ uint64_t helper_darn64(void) return ret; } -uint64_t helper_bpermd(uint64_t rs, uint64_t rb) +uint64_t helper_BPERMD(uint64_t rs, uint64_t rb) { int i; uint64_t ra = 0; @@ -219,7 +219,7 @@ uint64_t helper_bpermd(uint64_t rs, uint64_t rb) #endif -target_ulong helper_cmpb(target_ulong rs, target_ulong rb) +target_ulong helper_CMPB(target_ulong rs, target_ulong rb) { target_ulong mask = 0xff; target_ulong ra = 0; @@ -288,7 +288,7 @@ target_ulong helper_srad(CPUPPCState *env, target_ulong value, #endif #if defined(TARGET_PPC64) -target_ulong helper_popcntb(target_ulong val) +target_ulong helper_POPCNTB(target_ulong val) { /* Note that we don't fold past bytes */ val = (val & 0x5555555555555555ULL) + ((val >> 1) & @@ -300,7 +300,7 @@ target_ulong helper_popcntb(target_ulong val) return val; } -target_ulong helper_popcntw(target_ulong val) +target_ulong helper_POPCNTW(target_ulong val) { /* Note that we don't fold past words. 
*/ val = (val & 0x5555555555555555ULL) + ((val >> 1) & @@ -316,7 +316,7 @@ target_ulong helper_popcntw(target_ulong val) return val; } #else -target_ulong helper_popcntb(target_ulong val) +target_ulong helper_POPCNTB(target_ulong val) { /* Note that we don't fold past bytes */ val = (val & 0x55555555) + ((val >> 1) & 0x55555555); diff --git a/target/ppc/internal.h b/target/ppc/internal.h index 98b41a970c..20fb2ec593 100644 --- a/target/ppc/internal.h +++ b/target/ppc/internal.h @@ -234,51 +234,23 @@ void destroy_ppc_opcodes(PowerPCCPU *cpu); void ppc_gdb_init(CPUState *cs, PowerPCCPUClass *ppc); const gchar *ppc_gdb_arch_name(CPUState *cs); -/** - * prot_for_access_type: - * @access_type: Access type - * - * Return the protection bit required for the given access type. - */ -static inline int prot_for_access_type(MMUAccessType access_type) +#ifndef CONFIG_USER_ONLY + +/* Check if permission bit required for the access_type is set in prot */ +static inline int check_prot_access_type(int prot, MMUAccessType access_type) { - switch (access_type) { - case MMU_INST_FETCH: - return PAGE_EXEC; - case MMU_DATA_LOAD: - return PAGE_READ; - case MMU_DATA_STORE: - return PAGE_WRITE; - } - g_assert_not_reached(); + return prot & (1 << access_type); } -#ifndef CONFIG_USER_ONLY - /* PowerPC MMU emulation */ -typedef struct mmu_ctx_t mmu_ctx_t; - bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, bool guest_visible); -int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, - MMUAccessType access_type, int type, - int mmu_idx); + /* Software driven TLB helpers */ int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, int way, int is_code); -/* Context used internally during MMU translations */ -struct mmu_ctx_t { - hwaddr raddr; /* Real address */ - hwaddr eaddr; /* Effective address */ - int prot; /* Protection bits */ - hwaddr hash[2]; /* Pagetable hash values */ - target_ulong ptem; /* Virtual segment ID | API */ - int key; /* Access key */ - int nx; /* Non-execute area */ -}; #endif /* !CONFIG_USER_ONLY */ diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index 46fccff786..005f2239f3 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -865,9 +865,7 @@ int kvmppc_put_books_sregs(PowerPCCPU *cpu) sregs.pvr = env->spr[SPR_PVR]; if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp); + sregs.u.s.sdr1 = cpu->vhyp_class->encode_hpt_for_kvm_pr(cpu->vhyp); } else { sregs.u.s.sdr1 = env->spr[SPR_SDR1]; } diff --git a/target/ppc/machine.c b/target/ppc/machine.c index 203fe28e01..731dd8df35 100644 --- a/target/ppc/machine.c +++ b/target/ppc/machine.c @@ -333,7 +333,7 @@ static int cpu_post_load(void *opaque, int version_id) * triggered types (including HDEC) would need to carry more state. 
*/ cpu_ppc_store_decr(env, env->spr[SPR_DECR]); - pmu_mmcr01_updated(env); + pmu_mmcr01a_updated(env); } return 0; @@ -711,6 +711,26 @@ static const VMStateDescription vmstate_reservation = { } }; +#ifdef TARGET_PPC64 +static bool bhrb_needed(void *opaque) +{ + PowerPCCPU *cpu = opaque; + return (cpu->env.flags & POWERPC_FLAG_BHRB) != 0; +} + +static const VMStateDescription vmstate_bhrb = { + .name = "cpu/bhrb", + .version_id = 1, + .minimum_version_id = 1, + .needed = bhrb_needed, + .fields = (VMStateField[]) { + VMSTATE_UINTTL(env.bhrb_offset, PowerPCCPU), + VMSTATE_UINT64_ARRAY(env.bhrb, PowerPCCPU, BHRB_MAX_NUM_ENTRIES), + VMSTATE_END_OF_LIST() + } +}; +#endif + const VMStateDescription vmstate_ppc_cpu = { .name = "cpu", .version_id = 5, @@ -756,6 +776,7 @@ const VMStateDescription vmstate_ppc_cpu = { #ifdef TARGET_PPC64 &vmstate_tm, &vmstate_slb, + &vmstate_bhrb, #endif /* TARGET_PPC64 */ &vmstate_tlb6xx, &vmstate_tlbemb, diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index ea7e8443a8..f88155ad45 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -404,9 +404,9 @@ target_ulong helper_lscbx(CPUPPCState *env, target_ulong addr, uint32_t reg, } \ } #define I(x) (x) -LVE(lvebx, cpu_ldub_data_ra, I, u8) -LVE(lvehx, cpu_lduw_data_ra, bswap16, u16) -LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) +LVE(LVEBX, cpu_ldub_data_ra, I, u8) +LVE(LVEHX, cpu_lduw_data_ra, bswap16, u16) +LVE(LVEWX, cpu_ldl_data_ra, bswap32, u32) #undef I #undef LVE @@ -432,9 +432,9 @@ LVE(lvewx, cpu_ldl_data_ra, bswap32, u32) } \ } #define I(x) (x) -STVE(stvebx, cpu_stb_data_ra, I, u8) -STVE(stvehx, cpu_stw_data_ra, bswap16, u16) -STVE(stvewx, cpu_stl_data_ra, bswap32, u32) +STVE(STVEBX, cpu_stb_data_ra, I, u8) +STVE(STVEHX, cpu_stw_data_ra, bswap16, u16) +STVE(STVEWX, cpu_stl_data_ra, bswap32, u32) #undef I #undef LVE diff --git a/target/ppc/meson.build b/target/ppc/meson.build index 0b89f9b89f..db3b7a0c33 100644 --- a/target/ppc/meson.build +++ b/target/ppc/meson.build @@ -37,6 +37,7 @@ ppc_system_ss.add(files( 'arch_dump.c', 'machine.c', 'mmu-hash32.c', + 'mmu-booke.c', 'mmu_common.c', 'ppc-qmp-cmds.c', )) diff --git a/target/ppc/misc_helper.c b/target/ppc/misc_helper.c index 58e808dc96..fa47be2298 100644 --- a/target/ppc/misc_helper.c +++ b/target/ppc/misc_helper.c @@ -150,6 +150,17 @@ void helper_msr_facility_check(CPUPPCState *env, uint32_t bit, #if !defined(CONFIG_USER_ONLY) +#ifdef TARGET_PPC64 +static void helper_mmcr0_facility_check(CPUPPCState *env, uint32_t bit, + uint32_t sprn, uint32_t cause) +{ + if (FIELD_EX64(env->msr, MSR, PR) && + !(env->spr[SPR_POWER_MMCR0] & (1ULL << bit))) { + raise_fu_exception(env, bit, sprn, cause, GETPC()); + } +} +#endif + void helper_store_sdr1(CPUPPCState *env, target_ulong val) { if (env->spr[SPR_SDR1] != val) { @@ -162,6 +173,7 @@ void helper_store_sdr1(CPUPPCState *env, target_ulong val) void helper_store_ptcr(CPUPPCState *env, target_ulong val) { if (env->spr[SPR_PTCR] != val) { + CPUState *cs = env_cpu(env); PowerPCCPU *cpu = env_archcpu(env); target_ulong ptcr_mask = PTCR_PATB | PTCR_PATS; target_ulong patbsize = val & PTCR_PATS; @@ -183,8 +195,19 @@ void helper_store_ptcr(CPUPPCState *env, target_ulong val) return; } - env->spr[SPR_PTCR] = val; - tlb_flush(env_cpu(env)); + if (cs->nr_threads == 1 || !(env->flags & POWERPC_FLAG_SMT_1LPAR)) { + env->spr[SPR_PTCR] = val; + tlb_flush(cs); + } else { + CPUState *ccs; + + THREAD_SIBLING_FOREACH(cs, ccs) { + PowerPCCPU *ccpu = POWERPC_CPU(ccs); + CPUPPCState *cenv = &ccpu->env; + 
                cenv->spr[SPR_PTCR] = val;
+                tlb_flush(ccs);
+            }
+        }
     }
 }
 
@@ -284,6 +307,72 @@ void helper_store_dpdes(CPUPPCState *env, target_ulong val)
     }
     bql_unlock();
 }
+
+/* Indirect SCOM (SPRC/SPRD) access to SCRATCH0-7 are implemented. */
+void helper_store_sprc(CPUPPCState *env, target_ulong val)
+{
+    if (val & ~0x3f8ULL) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Invalid SPRC register value "
+                      TARGET_FMT_lx"\n", val);
+        return;
+    }
+    env->spr[SPR_POWER_SPRC] = val;
+}
+
+target_ulong helper_load_sprd(CPUPPCState *env)
+{
+    target_ulong sprc = env->spr[SPR_POWER_SPRC];
+
+    switch (sprc & 0x3c0) {
+    case 0: /* SCRATCH0-7 */
+        return env->scratch[(sprc >> 3) & 0x7];
+    default:
+        qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x"
+                      TARGET_FMT_lx"\n", sprc);
+        break;
+    }
+    return 0;
+}
+
+static void do_store_scratch(CPUPPCState *env, int nr, target_ulong val)
+{
+    CPUState *cs = env_cpu(env);
+    CPUState *ccs;
+    uint32_t nr_threads = cs->nr_threads;
+
+    /*
+     * Log stores to SCRATCH, because some firmware uses these for debugging
+     * and logging, but they would normally be read by the BMC, which is
+     * not implemented in QEMU yet. This gives a way to get at the information.
+     * Could also dump these upon checkstop.
+     */
+    qemu_log("SPRD write 0x" TARGET_FMT_lx " to SCRATCH%d\n", val, nr);
+
+    if (nr_threads == 1) {
+        env->scratch[nr] = val;
+        return;
+    }
+
+    THREAD_SIBLING_FOREACH(cs, ccs) {
+        CPUPPCState *cenv = &POWERPC_CPU(ccs)->env;
+        cenv->scratch[nr] = val;
+    }
+}
+
+void helper_store_sprd(CPUPPCState *env, target_ulong val)
+{
+    target_ulong sprc = env->spr[SPR_POWER_SPRC];
+
+    switch (sprc & 0x3c0) {
+    case 0: /* SCRATCH0-7 */
+        do_store_scratch(env, (sprc >> 3) & 0x7, val);
+        break;
+    default:
+        qemu_log_mask(LOG_UNIMP, "mfSPRD: Unimplemented SPRC:0x"
+                      TARGET_FMT_lx"\n", sprc);
+        break;
+    }
+}
 #endif /* defined(TARGET_PPC64) */
 
 void helper_store_pidr(CPUPPCState *env, target_ulong val)
@@ -363,3 +452,42 @@ void helper_fixup_thrm(CPUPPCState *env)
         env->spr[i] = v;
     }
 }
+
+#if !defined(CONFIG_USER_ONLY)
+#if defined(TARGET_PPC64)
+void helper_clrbhrb(CPUPPCState *env)
+{
+    helper_hfscr_facility_check(env, HFSCR_BHRB, "clrbhrb", FSCR_IC_BHRB);
+
+    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);
+
+    if (env->flags & POWERPC_FLAG_BHRB) {
+        memset(env->bhrb, 0, sizeof(env->bhrb));
+    }
+}
+
+uint64_t helper_mfbhrbe(CPUPPCState *env, uint32_t bhrbe)
+{
+    unsigned int index;
+
+    helper_hfscr_facility_check(env, HFSCR_BHRB, "mfbhrbe", FSCR_IC_BHRB);
+
+    helper_mmcr0_facility_check(env, MMCR0_BHRBA_NR, 0, FSCR_IC_BHRB);
+
+    if (!(env->flags & POWERPC_FLAG_BHRB) ||
+        (bhrbe >= env->bhrb_num_entries) ||
+        (env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) {
+        return 0;
+    }
+
+    /*
+     * Note: bhrb_offset is the byte offset for writing the
+     * next entry (over the oldest entry), which is why we
+     * must offset bhrbe by 1 to get to the 0th entry.
+ */ + index = ((env->bhrb_offset / sizeof(uint64_t)) - (bhrbe + 1)) % + env->bhrb_num_entries; + return env->bhrb[index]; +} +#endif +#endif diff --git a/target/ppc/mmu-book3s-v3.h b/target/ppc/mmu-book3s-v3.h index 674377a19e..f3f7993958 100644 --- a/target/ppc/mmu-book3s-v3.h +++ b/target/ppc/mmu-book3s-v3.h @@ -108,9 +108,7 @@ static inline hwaddr ppc_hash64_hpt_mask(PowerPCCPU *cpu) uint64_t base; if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - return vhc->hpt_mask(cpu->vhyp); + return cpu->vhyp_class->hpt_mask(cpu->vhyp); } if (cpu->env.mmu_model == POWERPC_MMU_3_00) { ppc_v3_pate_t pate; diff --git a/target/ppc/mmu-booke.c b/target/ppc/mmu-booke.c new file mode 100644 index 0000000000..55e5dd7c6b --- /dev/null +++ b/target/ppc/mmu-booke.c @@ -0,0 +1,531 @@ +/* + * PowerPC BookE MMU, TLB emulation helpers for QEMU. + * + * Copyright (c) 2003-2007 Jocelyn Mayer + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "exec/page-protection.h" +#include "exec/log.h" +#include "cpu.h" +#include "internal.h" +#include "mmu-booke.h" + +/* Generic TLB check function for embedded PowerPC implementations */ +static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddrp, + target_ulong address, uint32_t pid, int i) +{ + target_ulong mask; + + /* Check valid flag */ + if (!(tlb->prot & PAGE_VALID)) { + return false; + } + mask = ~(tlb->size - 1); + qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx + " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", + __func__, i, address, pid, tlb->EPN, + mask, (uint32_t)tlb->PID, tlb->prot); + /* Check PID */ + if (tlb->PID != 0 && tlb->PID != pid) { + return false; + } + /* Check effective address */ + if ((address & mask) != tlb->EPN) { + return false; + } + *raddrp = (tlb->RPN & mask) | (address & ~mask); + return true; +} + +/* Generic TLB search function for PowerPC embedded implementations */ +int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) +{ + ppcemb_tlb_t *tlb; + hwaddr raddr; + int i; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) { + return i; + } + } + return -1; +} + +int mmu40x_get_physical_address(CPUPPCState *env, hwaddr *raddr, int *prot, + target_ulong address, + MMUAccessType access_type) +{ + ppcemb_tlb_t *tlb; + int i, ret, zsel, zpr, pr; + + ret = -1; + pr = FIELD_EX64(env->msr, MSR, PR); + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + if (!ppcemb_tlb_check(env, tlb, raddr, address, + env->spr[SPR_40x_PID], i)) { + continue; + } + zsel = (tlb->attr >> 4) & 0xF; + zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; + qemu_log_mask(CPU_LOG_MMU, + "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", + __func__, i, zsel, zpr, access_type, tlb->attr); + /* Check execute 
enable bit */ + switch (zpr) { + case 0x2: + if (pr != 0) { + goto check_perms; + } + /* fall through */ + case 0x3: + /* All accesses granted */ + *prot = PAGE_RWX; + ret = 0; + break; + + case 0x0: + if (pr != 0) { + /* Raise Zone protection fault. */ + env->spr[SPR_40x_ESR] = 1 << 22; + *prot = 0; + ret = -2; + break; + } + /* fall through */ + case 0x1: +check_perms: + /* Check from TLB entry */ + *prot = tlb->prot; + if (check_prot_access_type(*prot, access_type)) { + ret = 0; + } else { + env->spr[SPR_40x_ESR] = 0; + ret = -2; + } + break; + } + } + qemu_log_mask(CPU_LOG_MMU, "%s: access %s " TARGET_FMT_lx " => " + HWADDR_FMT_plx " %d %d\n", __func__, + ret < 0 ? "refused" : "granted", address, + ret < 0 ? 0 : *raddr, *prot, ret); + + return ret; +} + +static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddr, target_ulong addr, int i) +{ + if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) { + if (!env->nb_pids) { + /* Extend the physical address to 36 bits */ + *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32; + } + return true; + } else if (!env->nb_pids) { + return false; + } + if (env->spr[SPR_BOOKE_PID1] && + ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) { + return true; + } + if (env->spr[SPR_BOOKE_PID2] && + ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) { + return true; + } + return false; +} + +static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, + hwaddr *raddr, int *prot, target_ulong address, + MMUAccessType access_type, int i) +{ + if (!mmubooke_check_pid(env, tlb, raddr, address, i)) { + qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); + return -1; + } + + /* Check the address space */ + if ((access_type == MMU_INST_FETCH ? + FIELD_EX64(env->msr, MSR, IR) : + FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) { + qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); + return -1; + } + + if (FIELD_EX64(env->msr, MSR, PR)) { + *prot = tlb->prot & 0xF; + } else { + *prot = (tlb->prot >> 4) & 0xF; + } + if (check_prot_access_type(*prot, access_type)) { + qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); + return 0; + } + + qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot); + return access_type == MMU_INST_FETCH ? -3 : -2; +} + +static int mmubooke_get_physical_address(CPUPPCState *env, hwaddr *raddr, + int *prot, target_ulong address, + MMUAccessType access_type) +{ + ppcemb_tlb_t *tlb; + int i, ret = -1; + + for (i = 0; i < env->nb_tlb; i++) { + tlb = &env->tlb.tlbe[i]; + ret = mmubooke_check_tlb(env, tlb, raddr, prot, address, + access_type, i); + if (ret != -1) { + break; + } + } + qemu_log_mask(CPU_LOG_MMU, + "%s: access %s " TARGET_FMT_lx " => " HWADDR_FMT_plx + " %d %d\n", __func__, ret < 0 ? "refused" : "granted", + address, ret < 0 ? -1 : *raddr, ret == -1 ? 
0 : *prot, ret); + return ret; +} + +hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) +{ + int tlbm_size; + + tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; + + return 1024ULL << tlbm_size; +} + +/* TLB check function for MAS based SoftTLBs */ +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, + target_ulong address, uint32_t pid) +{ + hwaddr mask; + uint32_t tlb_pid; + + if (!FIELD_EX64(env->msr, MSR, CM)) { + /* In 32bit mode we can only address 32bit EAs */ + address = (uint32_t)address; + } + + /* Check valid flag */ + if (!(tlb->mas1 & MAS1_VALID)) { + return -1; + } + + mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); + qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx + " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" + HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", + __func__, address, pid, tlb->mas1, tlb->mas2, mask, + tlb->mas7_3, tlb->mas8); + + /* Check PID */ + tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; + if (tlb_pid != 0 && tlb_pid != pid) { + return -1; + } + + /* Check effective address */ + if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { + return -1; + } + + if (raddrp) { + *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); + } + + return 0; +} + +static bool is_epid_mmu(int mmu_idx) +{ + return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; +} + +static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) +{ + uint32_t esr = 0; + if (access_type == MMU_DATA_STORE) { + esr |= ESR_ST; + } + if (is_epid_mmu(mmu_idx)) { + esr |= ESR_EPID; + } + return esr; +} + +/* + * Get EPID register given the mmu_idx. If this is regular load, + * construct the EPID access bits from current processor state + * + * Get the effective AS and PR bits and the PID. The PID is returned + * only if EPID load is requested, otherwise the caller must detect + * the correct EPID. Return true if valid EPID is returned. 
+ */
+static bool mmubooke206_get_as(CPUPPCState *env,
+                               int mmu_idx, uint32_t *epid_out,
+                               bool *as_out, bool *pr_out)
+{
+    if (is_epid_mmu(mmu_idx)) {
+        uint32_t epidr;
+        if (mmu_idx == PPC_TLB_EPID_STORE) {
+            epidr = env->spr[SPR_BOOKE_EPSC];
+        } else {
+            epidr = env->spr[SPR_BOOKE_EPLC];
+        }
+        *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT;
+        *as_out = !!(epidr & EPID_EAS);
+        *pr_out = !!(epidr & EPID_EPR);
+        return true;
+    } else {
+        *as_out = FIELD_EX64(env->msr, MSR, DS);
+        *pr_out = FIELD_EX64(env->msr, MSR, PR);
+        return false;
+    }
+}
+
+/* Check if the tlb found by hashing really matches */
+static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb,
+                                 hwaddr *raddr, int *prot,
+                                 target_ulong address,
+                                 MMUAccessType access_type, int mmu_idx)
+{
+    uint32_t epid;
+    bool as, pr;
+    bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr);
+
+    if (!use_epid) {
+        if (ppcmas_tlb_check(env, tlb, raddr, address,
+                             env->spr[SPR_BOOKE_PID]) >= 0) {
+            goto found_tlb;
+        }
+
+        if (env->spr[SPR_BOOKE_PID1] &&
+            ppcmas_tlb_check(env, tlb, raddr, address,
+                             env->spr[SPR_BOOKE_PID1]) >= 0) {
+            goto found_tlb;
+        }
+
+        if (env->spr[SPR_BOOKE_PID2] &&
+            ppcmas_tlb_check(env, tlb, raddr, address,
+                             env->spr[SPR_BOOKE_PID2]) >= 0) {
+            goto found_tlb;
+        }
+    } else {
+        if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) {
+            goto found_tlb;
+        }
+    }
+
+    qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address "
+                  "0x" TARGET_FMT_lx "\n", __func__, address);
+    return -1;
+
+found_tlb:
+
+    /* Check the address space and permissions */
+    if (access_type == MMU_INST_FETCH) {
+        /* There is no way to fetch code using epid load */
+        assert(!use_epid);
+        as = FIELD_EX64(env->msr, MSR, IR);
+    }
+
+    if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
+        qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__);
+        return -1;
+    }
+
+    *prot = 0;
+    if (pr) {
+        if (tlb->mas7_3 & MAS3_UR) {
+            *prot |= PAGE_READ;
+        }
+        if (tlb->mas7_3 & MAS3_UW) {
+            *prot |= PAGE_WRITE;
+        }
+        if (tlb->mas7_3 & MAS3_UX) {
+            *prot |= PAGE_EXEC;
+        }
+    } else {
+        if (tlb->mas7_3 & MAS3_SR) {
+            *prot |= PAGE_READ;
+        }
+        if (tlb->mas7_3 & MAS3_SW) {
+            *prot |= PAGE_WRITE;
+        }
+        if (tlb->mas7_3 & MAS3_SX) {
+            *prot |= PAGE_EXEC;
+        }
+    }
+    if (check_prot_access_type(*prot, access_type)) {
+        qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__);
+        return 0;
+    }
+
+    qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, *prot);
+    return access_type == MMU_INST_FETCH ? -3 : -2;
+}
+
+static int mmubooke206_get_physical_address(CPUPPCState *env, hwaddr *raddr,
+                                            int *prot, target_ulong address,
+                                            MMUAccessType access_type,
+                                            int mmu_idx)
+{
+    ppcmas_tlb_t *tlb;
+    int i, j, ret = -1;
+
+    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
+        int ways = booke206_tlb_ways(env, i);
+        for (j = 0; j < ways; j++) {
+            tlb = booke206_get_tlbm(env, i, address, j);
+            if (!tlb) {
+                continue;
+            }
+            ret = mmubooke206_check_tlb(env, tlb, raddr, prot, address,
+                                        access_type, mmu_idx);
+            if (ret != -1) {
+                goto found_tlb;
+            }
+        }
+    }
+
+found_tlb:
+
+    qemu_log_mask(CPU_LOG_MMU, "%s: access %s " TARGET_FMT_lx " => "
+                  HWADDR_FMT_plx " %d %d\n", __func__,
+                  ret < 0 ? "refused" : "granted", address,
+                  ret < 0 ? -1 : *raddr, ret == -1 ?
0 : *prot, ret); + return ret; +} + +static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, + MMUAccessType access_type, int mmu_idx) +{ + uint32_t epid; + bool as, pr; + uint32_t missed_tid = 0; + bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + + if (access_type == MMU_INST_FETCH) { + as = FIELD_EX64(env->msr, MSR, IR); + } + env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; + env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; + env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; + env->spr[SPR_BOOKE_MAS3] = 0; + env->spr[SPR_BOOKE_MAS6] = 0; + env->spr[SPR_BOOKE_MAS7] = 0; + + /* AS */ + if (as) { + env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; + env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + } + + env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; + env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + + if (!use_epid) { + switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { + case MAS4_TIDSELD_PID0: + missed_tid = env->spr[SPR_BOOKE_PID]; + break; + case MAS4_TIDSELD_PID1: + missed_tid = env->spr[SPR_BOOKE_PID1]; + break; + case MAS4_TIDSELD_PID2: + missed_tid = env->spr[SPR_BOOKE_PID2]; + break; + } + env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; + } else { + missed_tid = epid; + env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; + } + env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); + + + /* next victim logic */ + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; + env->last_way++; + env->last_way &= booke206_tlb_ways(env, 0) - 1; + env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; +} + +bool ppc_booke_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, + bool guest_visible) +{ + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + hwaddr raddr; + int prot, ret; + + if (env->mmu_model == POWERPC_MMU_BOOKE206) { + ret = mmubooke206_get_physical_address(env, &raddr, &prot, eaddr, + access_type, mmu_idx); + } else { + ret = mmubooke_get_physical_address(env, &raddr, &prot, eaddr, + access_type); + } + if (ret == 0) { + *raddrp = raddr; + *protp = prot; + *psizep = TARGET_PAGE_BITS; + return true; + } else if (!guest_visible) { + return false; + } + + log_cpu_state_mask(CPU_LOG_MMU, cs, 0); + env->error_code = 0; + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + if (env->mmu_model == POWERPC_MMU_BOOKE206) { + booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); + } + cs->exception_index = (access_type == MMU_INST_FETCH) ? + POWERPC_EXCP_ITLB : POWERPC_EXCP_DTLB; + env->spr[SPR_BOOKE_DEAR] = eaddr; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); + break; + case -2: + /* Access rights violation */ + cs->exception_index = (access_type == MMU_INST_FETCH) ? 
+ POWERPC_EXCP_ISI : POWERPC_EXCP_DSI; + if (access_type != MMU_INST_FETCH) { + env->spr[SPR_BOOKE_DEAR] = eaddr; + env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); + } + break; + case -3: + /* No execute protection violation */ + cs->exception_index = POWERPC_EXCP_ISI; + env->spr[SPR_BOOKE_ESR] = 0; + break; + } + + return false; +} diff --git a/target/ppc/mmu-booke.h b/target/ppc/mmu-booke.h new file mode 100644 index 0000000000..f972843bbb --- /dev/null +++ b/target/ppc/mmu-booke.h @@ -0,0 +1,17 @@ +#ifndef PPC_MMU_BOOKE_H +#define PPC_MMU_BOOKE_H + +#include "cpu.h" + +int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid); +int mmu40x_get_physical_address(CPUPPCState *env, hwaddr *raddr, int *prot, + target_ulong address, + MMUAccessType access_type); +hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb); +int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, + target_ulong address, uint32_t pid); +bool ppc_booke_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, + bool guest_visible); + +#endif diff --git a/target/ppc/mmu-hash32.c b/target/ppc/mmu-hash32.c index 6dfedab11d..d5f2057eb1 100644 --- a/target/ppc/mmu-hash32.c +++ b/target/ppc/mmu-hash32.c @@ -37,57 +37,6 @@ # define LOG_BATS(...) do { } while (0) #endif -struct mmu_ctx_hash32 { - hwaddr raddr; /* Real address */ - int prot; /* Protection bits */ - int key; /* Access key */ -}; - -static int ppc_hash32_pp_prot(int key, int pp, int nx) -{ - int prot; - - if (key == 0) { - switch (pp) { - case 0x0: - case 0x1: - case 0x2: - prot = PAGE_READ | PAGE_WRITE; - break; - - case 0x3: - prot = PAGE_READ; - break; - - default: - abort(); - } - } else { - switch (pp) { - case 0x0: - prot = 0; - break; - - case 0x1: - case 0x3: - prot = PAGE_READ; - break; - - case 0x2: - prot = PAGE_READ | PAGE_WRITE; - break; - - default: - abort(); - } - } - if (nx == 0) { - prot |= PAGE_EXEC; - } - - return prot; -} - static int ppc_hash32_pte_prot(int mmu_idx, target_ulong sr, ppc_hash_pte32_t pte) { @@ -258,7 +207,7 @@ static bool ppc_hash32_direct_store(PowerPCCPU *cpu, target_ulong sr, } *prot = key ? PAGE_READ | PAGE_WRITE : PAGE_READ; - if (*prot & prot_for_access_type(access_type)) { + if (check_prot_access_type(*prot, access_type)) { *raddr = eaddr; return true; } @@ -392,7 +341,6 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, hwaddr pte_offset; ppc_hash_pte32_t pte; int prot; - int need_prot; hwaddr raddr; /* There are no hash32 large pages. */ @@ -406,13 +354,11 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, return true; } - need_prot = prot_for_access_type(access_type); - /* 2. 
Check Block Address Translation entries (BATs) */ if (env->nb_BATs != 0) { raddr = ppc_hash32_bat_lookup(cpu, eaddr, access_type, protp, mmu_idx); if (raddr != -1) { - if (need_prot & ~*protp) { + if (!check_prot_access_type(*protp, access_type)) { if (guest_visible) { if (access_type == MMU_INST_FETCH) { cs->exception_index = POWERPC_EXCP_ISI; @@ -480,7 +426,7 @@ bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, prot = ppc_hash32_pte_prot(mmu_idx, sr, pte); - if (need_prot & ~prot) { + if (!check_prot_access_type(prot, access_type)) { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); if (guest_visible) { diff --git a/target/ppc/mmu-hash32.h b/target/ppc/mmu-hash32.h index 7119a63d97..f0ce6951b4 100644 --- a/target/ppc/mmu-hash32.h +++ b/target/ppc/mmu-hash32.h @@ -102,6 +102,51 @@ static inline void ppc_hash32_store_hpte1(PowerPCCPU *cpu, stl_phys(CPU(cpu)->as, base + pte_offset + HASH_PTE_SIZE_32 / 2, pte1); } +static inline int ppc_hash32_pp_prot(bool key, int pp, bool nx) +{ + int prot; + + if (key == 0) { + switch (pp) { + case 0x0: + case 0x1: + case 0x2: + prot = PAGE_READ | PAGE_WRITE; + break; + + case 0x3: + prot = PAGE_READ; + break; + + default: + abort(); + } + } else { + switch (pp) { + case 0x0: + prot = 0; + break; + + case 0x1: + case 0x3: + prot = PAGE_READ; + break; + + case 0x2: + prot = PAGE_READ | PAGE_WRITE; + break; + + default: + abort(); + } + } + if (nx == 0) { + prot |= PAGE_EXEC; + } + + return prot; +} + typedef struct { uint32_t pte0, pte1; } ppc_hash_pte32_t; diff --git a/target/ppc/mmu-hash64.c b/target/ppc/mmu-hash64.c index 0966422a55..cbc8efa0c3 100644 --- a/target/ppc/mmu-hash64.c +++ b/target/ppc/mmu-hash64.c @@ -517,9 +517,7 @@ const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes; if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - return vhc->map_hptes(cpu->vhyp, ptex, n); + return cpu->vhyp_class->map_hptes(cpu->vhyp, ptex, n); } base = ppc_hash64_hpt_base(cpu); @@ -539,9 +537,7 @@ void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes, hwaddr ptex, int n) { if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n); + cpu->vhyp_class->unmap_hptes(cpu->vhyp, hptes, ptex, n); return; } @@ -821,9 +817,7 @@ static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_R; if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->hpte_set_r(cpu->vhyp, ptex, pte1); + cpu->vhyp_class->hpte_set_r(cpu->vhyp, ptex, pte1); return; } base = ppc_hash64_hpt_base(cpu); @@ -838,9 +832,7 @@ static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1) hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + HPTE64_DW1_C; if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc = - PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - vhc->hpte_set_c(cpu->vhyp, ptex, pte1); + cpu->vhyp_class->hpte_set_c(cpu->vhyp, ptex, pte1); return; } base = ppc_hash64_hpt_base(cpu); @@ -1097,7 +1089,7 @@ bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, amr_prot = ppc_hash64_amr_prot(cpu, pte); prot = exec_prot & pp_prot & amr_prot; - need_prot = prot_for_access_type(access_type); + need_prot = check_prot_access_type(PAGE_RWX, access_type); if (need_prot & ~prot) { /* Access right violation */ 
qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 8daf71d2db..5a02e4963b 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -185,7 +185,6 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, int mmu_idx, bool partition_scoped) { CPUPPCState *env = &cpu->env; - int need_prot; /* Check Page Attributes (pte58:59) */ if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) { @@ -210,8 +209,8 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, } /* Check if requested access type is allowed */ - need_prot = prot_for_access_type(access_type); - if (need_prot & ~*prot) { /* Page Protected for that Access */ + if (!check_prot_access_type(*prot, access_type)) { + /* Page Protected for that Access */ *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD : DSISR_PROTFAULT; return true; @@ -678,9 +677,7 @@ static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr, /* Get Partition Table */ if (cpu->vhyp) { - PPCVirtualHypervisorClass *vhc; - vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp); - if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) { + if (!cpu->vhyp_class->get_pate(cpu->vhyp, cpu, lpid, &pate)) { if (guest_visible) { ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr, DSISR_R_BADCONFIG); diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c index 4fde7fd3bf..e2542694f0 100644 --- a/target/ppc/mmu_common.c +++ b/target/ppc/mmu_common.c @@ -33,9 +33,21 @@ #include "internal.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" +#include "mmu-booke.h" /* #define DUMP_PAGE_TABLES */ +/* Context used internally during MMU translations */ +typedef struct { + hwaddr raddr; /* Real address */ + hwaddr eaddr; /* Effective address */ + int prot; /* Protection bits */ + hwaddr hash[2]; /* Pagetable hash values */ + target_ulong ptem; /* Virtual segment ID | API */ + int key; /* Access key */ + int nx; /* Non-execute area */ +} mmu_ctx_t; + void ppc_store_sdr1(CPUPPCState *env, target_ulong value) { PowerPCCPU *cpu = env_archcpu(env); @@ -65,49 +77,6 @@ void ppc_store_sdr1(CPUPPCState *env, target_ulong value) /*****************************************************************************/ /* PowerPC MMU emulation */ -static int pp_check(int key, int pp, int nx) -{ - int access; - - /* Compute access rights */ - access = 0; - if (key == 0) { - switch (pp) { - case 0x0: - case 0x1: - case 0x2: - access |= PAGE_WRITE; - /* fall through */ - case 0x3: - access |= PAGE_READ; - break; - } - } else { - switch (pp) { - case 0x0: - access = 0; - break; - case 0x1: - case 0x3: - access = PAGE_READ; - break; - case 0x2: - access = PAGE_READ | PAGE_WRITE; - break; - } - } - if (nx == 0) { - access |= PAGE_EXEC; - } - - return access; -} - -static int check_prot(int prot, MMUAccessType access_type) -{ - return prot & prot_for_access_type(access_type) ? 
0 : -2; -} - int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, int way, int is_code) { @@ -117,8 +86,8 @@ int ppc6xx_tlb_getnum(CPUPPCState *env, target_ulong eaddr, nr = (eaddr >> TARGET_PAGE_BITS) & (env->tlb_per_way - 1); /* Select TLB way */ nr += env->tlb_per_way * way; - /* 6xx have separate TLBs for instructions and data */ - if (is_code && env->id_tlbs == 1) { + /* 6xx has separate TLBs for instructions and data */ + if (is_code) { nr += env->nb_tlb; } @@ -130,7 +99,7 @@ static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, MMUAccessType access_type) { target_ulong ptem, mmask; - int access, ret, pteh, ptev, pp; + int ret, pteh, ptev, pp; ret = -1; /* Check validity and table match */ @@ -149,18 +118,17 @@ static int ppc6xx_tlb_pte_check(mmu_ctx_t *ctx, target_ulong pte0, return -3; } } - /* Compute access rights */ - access = pp_check(ctx->key, pp, ctx->nx); /* Keep the matching PTE information */ ctx->raddr = pte1; - ctx->prot = access; - ret = check_prot(ctx->prot, access_type); - if (ret == 0) { + ctx->prot = ppc_hash32_pp_prot(ctx->key, pp, ctx->nx); + if (check_prot_access_type(ctx->prot, access_type)) { /* Access granted */ qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n"); + ret = 0; } else { /* Access right violation */ qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n"); + ret = -2; } } } @@ -225,17 +193,14 @@ static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, access_type == MMU_INST_FETCH ? 'I' : 'D'); switch (ppc6xx_tlb_pte_check(ctx, tlb->pte0, tlb->pte1, 0, access_type)) { - case -3: - /* TLB inconsistency */ - return -1; case -2: /* Access violation */ ret = -2; best = nr; break; - case -1: + case -1: /* No match */ + case -3: /* TLB inconsistency */ default: - /* No match */ break; case 0: /* access granted */ @@ -251,14 +216,34 @@ static int ppc6xx_tlb_check(CPUPPCState *env, mmu_ctx_t *ctx, } } if (best != -1) { - done: +done: qemu_log_mask(CPU_LOG_MMU, "found TLB at addr " HWADDR_FMT_plx " prot=%01x ret=%d\n", ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret); /* Update page flags */ pte_update_flags(ctx, &env->tlb.tlb6[best].pte1, ret, access_type); } - +#if defined(DUMP_PAGE_TABLES) + if (qemu_loglevel_mask(CPU_LOG_MMU)) { + CPUState *cs = env_cpu(env); + hwaddr base = ppc_hash32_hpt_base(env_archcpu(env)); + hwaddr len = ppc_hash32_hpt_mask(env_archcpu(env)) + 0x80; + uint32_t a0, a1, a2, a3; + + qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx "\n", + base, len); + for (hwaddr curaddr = base; curaddr < base + len; curaddr += 16) { + a0 = ldl_phys(cs->as, curaddr); + a1 = ldl_phys(cs->as, curaddr + 4); + a2 = ldl_phys(cs->as, curaddr + 8); + a3 = ldl_phys(cs->as, curaddr + 12); + if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { + qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n", + curaddr, a0, a1, a2, a3); + } + } + } +#endif return ret; } @@ -298,8 +283,8 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, int ret = -1; bool ifetch = access_type == MMU_INST_FETCH; - qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__, - ifetch ? 'I' : 'D', virtual); + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT v " TARGET_FMT_lx "\n", __func__, + ifetch ? 
'I' : 'D', virtual); if (ifetch) { BATlt = env->IBAT[1]; BATut = env->IBAT[0]; @@ -313,9 +298,9 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bat_size_prot(env, &bl, &valid, &prot, BATu, BATl); - qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu " - TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, - ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx " BATu " + TARGET_FMT_lx " BATl " TARGET_FMT_lx "\n", __func__, + ifetch ? 'I' : 'D', i, virtual, *BATu, *BATl); if ((virtual & 0xF0000000) == BEPIu && ((virtual & 0x0FFE0000) & ~bl) == BEPIl) { /* BAT matches */ @@ -326,12 +311,14 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, (virtual & 0x0001F000); /* Compute access rights */ ctx->prot = prot; - ret = check_prot(ctx->prot, access_type); - if (ret == 0) { + if (check_prot_access_type(ctx->prot, access_type)) { qemu_log_mask(CPU_LOG_MMU, "BAT %d match: r " HWADDR_FMT_plx " prot=%c%c\n", i, ctx->raddr, ctx->prot & PAGE_READ ? 'R' : '-', ctx->prot & PAGE_WRITE ? 'W' : '-'); + ret = 0; + } else { + ret = -2; } break; } @@ -347,12 +334,11 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, BEPIu = *BATu & 0xF0000000; BEPIl = *BATu & 0x0FFE0000; bl = (*BATu & 0x00001FFC) << 15; - qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " - TARGET_FMT_lx " BATu " TARGET_FMT_lx - " BATl " TARGET_FMT_lx "\n\t" TARGET_FMT_lx " " - TARGET_FMT_lx " " TARGET_FMT_lx "\n", - __func__, ifetch ? 'I' : 'D', i, virtual, - *BATu, *BATl, BEPIu, BEPIl, bl); + qemu_log_mask(CPU_LOG_MMU, "%s: %cBAT%d v " TARGET_FMT_lx + " BATu " TARGET_FMT_lx " BATl " TARGET_FMT_lx + "\n\t" TARGET_FMT_lx " " TARGET_FMT_lx " " + TARGET_FMT_lx "\n", __func__, ifetch ? 
'I' : 'D', + i, virtual, *BATu, *BATl, BEPIu, BEPIl, bl); } } } @@ -360,19 +346,22 @@ static int get_bat_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, return ret; } -/* Perform segment based translation */ -static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, MMUAccessType access_type, - int type) +static int mmu6xx_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, + target_ulong eaddr, + MMUAccessType access_type, int type) { PowerPCCPU *cpu = env_archcpu(env); hwaddr hash; - target_ulong vsid; + target_ulong vsid, sr, pgidx; int ds, target_page_bits; bool pr; - int ret; - target_ulong sr, pgidx; + /* First try to find a BAT entry if there are any */ + if (env->nb_BATs && get_bat_6xx_tlb(env, ctx, eaddr, access_type) == 0) { + return 0; + } + + /* Perform segment based translation when no BATs matched */ pr = FIELD_EX64(env->msr, MSR, PR); ctx->eaddr = eaddr; @@ -395,527 +384,65 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); - qemu_log_mask(CPU_LOG_MMU, - "pte segment: key=%d ds %d nx %d vsid " TARGET_FMT_lx "\n", - ctx->key, ds, ctx->nx, vsid); - ret = -1; + qemu_log_mask(CPU_LOG_MMU, "pte segment: key=%d ds %d nx %d vsid " + TARGET_FMT_lx "\n", ctx->key, ds, ctx->nx, vsid); if (!ds) { /* Check if instruction fetch is allowed, if needed */ - if (type != ACCESS_CODE || ctx->nx == 0) { - /* Page address translation */ - qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx - " htab_mask " HWADDR_FMT_plx - " hash " HWADDR_FMT_plx "\n", - ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); - ctx->hash[0] = hash; - ctx->hash[1] = ~hash; - - /* Initialize real address with an invalid value */ - ctx->raddr = (hwaddr)-1ULL; - /* Software TLB search */ - ret = ppc6xx_tlb_check(env, ctx, eaddr, access_type); -#if defined(DUMP_PAGE_TABLES) - if (qemu_loglevel_mask(CPU_LOG_MMU)) { - CPUState *cs = env_cpu(env); - hwaddr curaddr; - uint32_t a0, a1, a2, a3; - - qemu_log("Page table: " HWADDR_FMT_plx " len " HWADDR_FMT_plx - "\n", ppc_hash32_hpt_base(cpu), - ppc_hash32_hpt_mask(cpu) + 0x80); - for (curaddr = ppc_hash32_hpt_base(cpu); - curaddr < (ppc_hash32_hpt_base(cpu) - + ppc_hash32_hpt_mask(cpu) + 0x80); - curaddr += 16) { - a0 = ldl_phys(cs->as, curaddr); - a1 = ldl_phys(cs->as, curaddr + 4); - a2 = ldl_phys(cs->as, curaddr + 8); - a3 = ldl_phys(cs->as, curaddr + 12); - if (a0 != 0 || a1 != 0 || a2 != 0 || a3 != 0) { - qemu_log(HWADDR_FMT_plx ": %08x %08x %08x %08x\n", - curaddr, a0, a1, a2, a3); - } - } - } -#endif - } else { + if (type == ACCESS_CODE && ctx->nx) { qemu_log_mask(CPU_LOG_MMU, "No access allowed\n"); - ret = -3; - } - } else { - qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); - /* Direct-store segment : absolutely *BUGGY* for now */ - - switch (type) { - case ACCESS_INT: - /* Integer load/store : only access allowed */ - break; - case ACCESS_CODE: - /* No code fetch is allowed in direct-store areas */ - return -4; - case ACCESS_FLOAT: - /* Floating point load/store */ - return -4; - case ACCESS_RES: - /* lwarx, ldarx or srwcx. */ - return -4; - case ACCESS_CACHE: - /* - * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi - * - * Should make the instruction do no-op. 
As it already do - * no-op, it's quite easy :-) - */ - ctx->raddr = eaddr; - return 0; - case ACCESS_EXT: - /* eciwx or ecowx */ - return -4; - default: - qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need " - "address translation\n"); - return -4; - } - if ((access_type == MMU_DATA_STORE || ctx->key != 1) && - (access_type == MMU_DATA_LOAD || ctx->key != 0)) { - ctx->raddr = eaddr; - ret = 2; - } else { - ret = -2; - } - } - - return ret; -} - -/* Generic TLB check function for embedded PowerPC implementations */ -static bool ppcemb_tlb_check(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddrp, - target_ulong address, uint32_t pid, int i) -{ - target_ulong mask; - - /* Check valid flag */ - if (!(tlb->prot & PAGE_VALID)) { - return false; - } - mask = ~(tlb->size - 1); - qemu_log_mask(CPU_LOG_MMU, "%s: TLB %d address " TARGET_FMT_lx - " PID %u <=> " TARGET_FMT_lx " " TARGET_FMT_lx " %u %x\n", - __func__, i, address, pid, tlb->EPN, - mask, (uint32_t)tlb->PID, tlb->prot); - /* Check PID */ - if (tlb->PID != 0 && tlb->PID != pid) { - return false; - } - /* Check effective address */ - if ((address & mask) != tlb->EPN) { - return false; - } - *raddrp = (tlb->RPN & mask) | (address & ~mask); - return true; -} - -/* Generic TLB search function for PowerPC embedded implementations */ -int ppcemb_tlb_search(CPUPPCState *env, target_ulong address, uint32_t pid) -{ - ppcemb_tlb_t *tlb; - hwaddr raddr; - int i; - - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (ppcemb_tlb_check(env, tlb, &raddr, address, pid, i)) { - return i; - } - } - return -1; -} - -static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type) -{ - ppcemb_tlb_t *tlb; - hwaddr raddr; - int i, ret, zsel, zpr, pr; - - ret = -1; - raddr = (hwaddr)-1ULL; - pr = FIELD_EX64(env->msr, MSR, PR); - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - if (!ppcemb_tlb_check(env, tlb, &raddr, address, - env->spr[SPR_40x_PID], i)) { - continue; - } - zsel = (tlb->attr >> 4) & 0xF; - zpr = (env->spr[SPR_40x_ZPR] >> (30 - (2 * zsel))) & 0x3; - qemu_log_mask(CPU_LOG_MMU, - "%s: TLB %d zsel %d zpr %d ty %d attr %08x\n", - __func__, i, zsel, zpr, access_type, tlb->attr); - /* Check execute enable bit */ - switch (zpr) { - case 0x2: - if (pr != 0) { - goto check_perms; - } - /* fall through */ - case 0x3: - /* All accesses granted */ - ctx->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; - ret = 0; - break; - case 0x0: - if (pr != 0) { - /* Raise Zone protection fault. 
*/ - env->spr[SPR_40x_ESR] = 1 << 22; - ctx->prot = 0; - ret = -2; - break; - } - /* fall through */ - case 0x1: - check_perms: - /* Check from TLB entry */ - ctx->prot = tlb->prot; - ret = check_prot(ctx->prot, access_type); - if (ret == -2) { - env->spr[SPR_40x_ESR] = 0; - } - break; - } - if (ret >= 0) { - ctx->raddr = raddr; - qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx - " => " HWADDR_FMT_plx - " %d %d\n", __func__, address, ctx->raddr, ctx->prot, - ret); - return 0; - } - } - qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx - " => " HWADDR_FMT_plx - " %d %d\n", __func__, address, raddr, ctx->prot, ret); - - return ret; -} - -static bool mmubooke_check_pid(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddr, target_ulong addr, int i) -{ - if (ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID], i)) { - if (!env->nb_pids) { - /* Extend the physical address to 36 bits */ - *raddr |= (uint64_t)(tlb->RPN & 0xF) << 32; - } - return true; - } else if (!env->nb_pids) { - return false; - } - if (env->spr[SPR_BOOKE_PID1] && - ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID1], i)) { - return true; - } - if (env->spr[SPR_BOOKE_PID2] && - ppcemb_tlb_check(env, tlb, raddr, addr, env->spr[SPR_BOOKE_PID2], i)) { - return true; - } - return false; -} - -static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, - hwaddr *raddr, int *prot, target_ulong address, - MMUAccessType access_type, int i) -{ - int prot2; - - if (!mmubooke_check_pid(env, tlb, raddr, address, i)) { - qemu_log_mask(CPU_LOG_MMU, "%s: TLB entry not found\n", __func__); - return -1; - } - - if (FIELD_EX64(env->msr, MSR, PR)) { - prot2 = tlb->prot & 0xF; - } else { - prot2 = (tlb->prot >> 4) & 0xF; - } - - /* Check the address space */ - if ((access_type == MMU_INST_FETCH ? - FIELD_EX64(env->msr, MSR, IR) : - FIELD_EX64(env->msr, MSR, DR)) != (tlb->attr & 1)) { - qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & prot_for_access_type(access_type)) { - qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); - return 0; - } - - qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); - return access_type == MMU_INST_FETCH ? 
-3 : -2; -} - -static int mmubooke_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type) -{ - ppcemb_tlb_t *tlb; - hwaddr raddr; - int i, ret; - - ret = -1; - raddr = (hwaddr)-1ULL; - for (i = 0; i < env->nb_tlb; i++) { - tlb = &env->tlb.tlbe[i]; - ret = mmubooke_check_tlb(env, tlb, &raddr, &ctx->prot, address, - access_type, i); - if (ret != -1) { - break; - } - } - - if (ret >= 0) { - ctx->raddr = raddr; - qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx - " => " HWADDR_FMT_plx " %d %d\n", __func__, - address, ctx->raddr, ctx->prot, ret); - } else { - qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx - " => " HWADDR_FMT_plx " %d %d\n", __func__, - address, raddr, ctx->prot, ret); - } - - return ret; -} - -hwaddr booke206_tlb_to_page_size(CPUPPCState *env, ppcmas_tlb_t *tlb) -{ - int tlbm_size; - - tlbm_size = (tlb->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT; - - return 1024ULL << tlbm_size; -} - -/* TLB check function for MAS based SoftTLBs */ -int ppcmas_tlb_check(CPUPPCState *env, ppcmas_tlb_t *tlb, hwaddr *raddrp, - target_ulong address, uint32_t pid) -{ - hwaddr mask; - uint32_t tlb_pid; - - if (!FIELD_EX64(env->msr, MSR, CM)) { - /* In 32bit mode we can only address 32bit EAs */ - address = (uint32_t)address; - } - - /* Check valid flag */ - if (!(tlb->mas1 & MAS1_VALID)) { - return -1; - } - - mask = ~(booke206_tlb_to_page_size(env, tlb) - 1); - qemu_log_mask(CPU_LOG_MMU, "%s: TLB ADDR=0x" TARGET_FMT_lx - " PID=0x%x MAS1=0x%x MAS2=0x%" PRIx64 " mask=0x%" - HWADDR_PRIx " MAS7_3=0x%" PRIx64 " MAS8=0x%" PRIx32 "\n", - __func__, address, pid, tlb->mas1, tlb->mas2, mask, - tlb->mas7_3, tlb->mas8); - - /* Check PID */ - tlb_pid = (tlb->mas1 & MAS1_TID_MASK) >> MAS1_TID_SHIFT; - if (tlb_pid != 0 && tlb_pid != pid) { - return -1; - } - - /* Check effective address */ - if ((address & mask) != (tlb->mas2 & MAS2_EPN_MASK)) { - return -1; - } - - if (raddrp) { - *raddrp = (tlb->mas7_3 & mask) | (address & ~mask); - } - - return 0; -} - -static bool is_epid_mmu(int mmu_idx) -{ - return mmu_idx == PPC_TLB_EPID_STORE || mmu_idx == PPC_TLB_EPID_LOAD; -} - -static uint32_t mmubooke206_esr(int mmu_idx, MMUAccessType access_type) -{ - uint32_t esr = 0; - if (access_type == MMU_DATA_STORE) { - esr |= ESR_ST; - } - if (is_epid_mmu(mmu_idx)) { - esr |= ESR_EPID; - } - return esr; -} - -/* - * Get EPID register given the mmu_idx. If this is regular load, - * construct the EPID access bits from current processor state - * - * Get the effective AS and PR bits and the PID. The PID is returned - * only if EPID load is requested, otherwise the caller must detect - * the correct EPID. Return true if valid EPID is returned. 
- */ -static bool mmubooke206_get_as(CPUPPCState *env, - int mmu_idx, uint32_t *epid_out, - bool *as_out, bool *pr_out) -{ - if (is_epid_mmu(mmu_idx)) { - uint32_t epidr; - if (mmu_idx == PPC_TLB_EPID_STORE) { - epidr = env->spr[SPR_BOOKE_EPSC]; - } else { - epidr = env->spr[SPR_BOOKE_EPLC]; - } - *epid_out = (epidr & EPID_EPID) >> EPID_EPID_SHIFT; - *as_out = !!(epidr & EPID_EAS); - *pr_out = !!(epidr & EPID_EPR); - return true; - } else { - *as_out = FIELD_EX64(env->msr, MSR, DS); - *pr_out = FIELD_EX64(env->msr, MSR, PR); - return false; - } -} - -/* Check if the tlb found by hashing really matches */ -static int mmubooke206_check_tlb(CPUPPCState *env, ppcmas_tlb_t *tlb, - hwaddr *raddr, int *prot, - target_ulong address, - MMUAccessType access_type, int mmu_idx) -{ - int prot2 = 0; - uint32_t epid; - bool as, pr; - bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); - - if (!use_epid) { - if (ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID]) >= 0) { - goto found_tlb; + return -3; } + /* Page address translation */ + qemu_log_mask(CPU_LOG_MMU, "htab_base " HWADDR_FMT_plx " htab_mask " + HWADDR_FMT_plx " hash " HWADDR_FMT_plx "\n", + ppc_hash32_hpt_base(cpu), ppc_hash32_hpt_mask(cpu), hash); + ctx->hash[0] = hash; + ctx->hash[1] = ~hash; - if (env->spr[SPR_BOOKE_PID1] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID1]) >= 0) { - goto found_tlb; - } - - if (env->spr[SPR_BOOKE_PID2] && - ppcmas_tlb_check(env, tlb, raddr, address, - env->spr[SPR_BOOKE_PID2]) >= 0) { - goto found_tlb; - } - } else { - if (ppcmas_tlb_check(env, tlb, raddr, address, epid) >= 0) { - goto found_tlb; - } + /* Initialize real address with an invalid value */ + ctx->raddr = (hwaddr)-1ULL; + /* Software TLB search */ + return ppc6xx_tlb_check(env, ctx, eaddr, access_type); } - qemu_log_mask(CPU_LOG_MMU, "%s: No TLB entry found for effective address " - "0x" TARGET_FMT_lx "\n", __func__, address); - return -1; - -found_tlb: - - if (pr) { - if (tlb->mas7_3 & MAS3_UR) { - prot2 |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_UW) { - prot2 |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_UX) { - prot2 |= PAGE_EXEC; - } - } else { - if (tlb->mas7_3 & MAS3_SR) { - prot2 |= PAGE_READ; - } - if (tlb->mas7_3 & MAS3_SW) { - prot2 |= PAGE_WRITE; - } - if (tlb->mas7_3 & MAS3_SX) { - prot2 |= PAGE_EXEC; - } - } - - /* Check the address space and permissions */ - if (access_type == MMU_INST_FETCH) { - /* There is no way to fetch code using epid load */ - assert(!use_epid); - as = FIELD_EX64(env->msr, MSR, IR); - } - - if (as != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) { - qemu_log_mask(CPU_LOG_MMU, "%s: AS doesn't match\n", __func__); - return -1; - } - - *prot = prot2; - if (prot2 & prot_for_access_type(access_type)) { - qemu_log_mask(CPU_LOG_MMU, "%s: good TLB!\n", __func__); + /* Direct-store segment : absolutely *BUGGY* for now */ + qemu_log_mask(CPU_LOG_MMU, "direct store...\n"); + switch (type) { + case ACCESS_INT: + /* Integer load/store : only access allowed */ + break; + case ACCESS_CODE: + /* No code fetch is allowed in direct-store areas */ + return -4; + case ACCESS_FLOAT: + /* Floating point load/store */ + return -4; + case ACCESS_RES: + /* lwarx, ldarx or srwcx. */ + return -4; + case ACCESS_CACHE: + /* + * dcba, dcbt, dcbtst, dcbf, dcbi, dcbst, dcbz, or icbi + * + * Should make the instruction do no-op. 
As it already do + * no-op, it's quite easy :-) + */ + ctx->raddr = eaddr; return 0; + case ACCESS_EXT: + /* eciwx or ecowx */ + return -4; + default: + qemu_log_mask(CPU_LOG_MMU, "ERROR: instruction should not need address" + " translation\n"); + return -4; } - - qemu_log_mask(CPU_LOG_MMU, "%s: no prot match: %x\n", __func__, prot2); - return access_type == MMU_INST_FETCH ? -3 : -2; -} - -static int mmubooke206_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong address, - MMUAccessType access_type, - int mmu_idx) -{ - ppcmas_tlb_t *tlb; - hwaddr raddr; - int i, j, ret; - - ret = -1; - raddr = (hwaddr)-1ULL; - - for (i = 0; i < BOOKE206_MAX_TLBN; i++) { - int ways = booke206_tlb_ways(env, i); - - for (j = 0; j < ways; j++) { - tlb = booke206_get_tlbm(env, i, address, j); - if (!tlb) { - continue; - } - ret = mmubooke206_check_tlb(env, tlb, &raddr, &ctx->prot, address, - access_type, mmu_idx); - if (ret != -1) { - goto found_tlb; - } - } - } - -found_tlb: - - if (ret >= 0) { - ctx->raddr = raddr; - qemu_log_mask(CPU_LOG_MMU, "%s: access granted " TARGET_FMT_lx - " => " HWADDR_FMT_plx " %d %d\n", __func__, address, - ctx->raddr, ctx->prot, ret); - } else { - qemu_log_mask(CPU_LOG_MMU, "%s: access refused " TARGET_FMT_lx - " => " HWADDR_FMT_plx " %d %d\n", __func__, address, - raddr, ctx->prot, ret); + if ((access_type == MMU_DATA_STORE || ctx->key != 1) && + (access_type == MMU_DATA_LOAD || ctx->key != 0)) { + ctx->raddr = eaddr; + return 2; } - - return ret; + return -2; } static const char *book3e_tsize_to_str[32] = { @@ -1104,13 +631,7 @@ static void mmu6xx_dump_mmu(CPUPPCState *env) mmu6xx_dump_BATs(env, ACCESS_INT); mmu6xx_dump_BATs(env, ACCESS_CODE); - if (env->id_tlbs != 1) { - qemu_printf("ERROR: 6xx MMU should have separated TLB" - " for code and data\n"); - } - qemu_printf("\nTLBs [EPN EPN + SIZE]\n"); - for (type = 0; type < 2; type++) { for (way = 0; way < env->nb_ways; way++) { for (entry = env->nb_tlb * type + env->tlb_per_way * way; @@ -1162,147 +683,97 @@ void dump_mmu(CPUPPCState *env) } } -static int check_physical(CPUPPCState *env, mmu_ctx_t *ctx, target_ulong eaddr, - MMUAccessType access_type) -{ - ctx->raddr = eaddr; - ctx->prot = PAGE_READ | PAGE_EXEC; - - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - case POWERPC_MMU_SOFT_4xx: - case POWERPC_MMU_REAL: - case POWERPC_MMU_BOOKE: - ctx->prot |= PAGE_WRITE; - break; - - default: - /* Caller's checks mean we should never get here for other models */ - g_assert_not_reached(); - } - - return 0; -} -int get_physical_address_wtlb(CPUPPCState *env, mmu_ctx_t *ctx, - target_ulong eaddr, - MMUAccessType access_type, int type, - int mmu_idx) +static bool ppc_real_mode_xlate(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp) { - int ret = -1; - bool real_mode = (type == ACCESS_CODE && !FIELD_EX64(env->msr, MSR, IR)) || - (type != ACCESS_CODE && !FIELD_EX64(env->msr, MSR, DR)); - - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - /* Try to find a BAT */ - if (env->nb_BATs != 0) { - ret = get_bat_6xx_tlb(env, ctx, eaddr, access_type); - } - if (ret < 0) { - /* We didn't match any BAT entry or don't have BATs */ - ret = get_segment_6xx_tlb(env, ctx, eaddr, access_type, type); - } - } - break; + CPUPPCState *env = &cpu->env; - case POWERPC_MMU_SOFT_4xx: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - ret = 
mmu40x_get_physical_address(env, ctx, eaddr, access_type); - } - break; - case POWERPC_MMU_BOOKE: - ret = mmubooke_get_physical_address(env, ctx, eaddr, access_type); - break; - case POWERPC_MMU_BOOKE206: - ret = mmubooke206_get_physical_address(env, ctx, eaddr, access_type, - mmu_idx); - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n"); - break; - case POWERPC_MMU_REAL: - if (real_mode) { - ret = check_physical(env, ctx, eaddr, access_type); - } else { - cpu_abort(env_cpu(env), - "PowerPC in real mode do not do any translation\n"); - } - return -1; - default: - cpu_abort(env_cpu(env), "Unknown or invalid MMU model\n"); - return -1; + if (access_type == MMU_INST_FETCH ? !FIELD_EX64(env->msr, MSR, IR) + : !FIELD_EX64(env->msr, MSR, DR)) { + *raddrp = eaddr; + *protp = PAGE_RWX; + *psizep = TARGET_PAGE_BITS; + return true; + } else if (env->mmu_model == POWERPC_MMU_REAL) { + cpu_abort(CPU(cpu), "PowerPC in real mode shold not do translation\n"); } - - return ret; + return false; } -static void booke206_update_mas_tlb_miss(CPUPPCState *env, target_ulong address, - MMUAccessType access_type, int mmu_idx) +static bool ppc_40x_xlate(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, + int mmu_idx, bool guest_visible) { - uint32_t epid; - bool as, pr; - uint32_t missed_tid = 0; - bool use_epid = mmubooke206_get_as(env, mmu_idx, &epid, &as, &pr); + CPUState *cs = CPU(cpu); + CPUPPCState *env = &cpu->env; + int ret; - if (access_type == MMU_INST_FETCH) { - as = FIELD_EX64(env->msr, MSR, IR); - } - env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK; - env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK; - env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK; - env->spr[SPR_BOOKE_MAS3] = 0; - env->spr[SPR_BOOKE_MAS6] = 0; - env->spr[SPR_BOOKE_MAS7] = 0; - - /* AS */ - if (as) { - env->spr[SPR_BOOKE_MAS1] |= MAS1_TS; - env->spr[SPR_BOOKE_MAS6] |= MAS6_SAS; + if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) { + return true; } - env->spr[SPR_BOOKE_MAS1] |= MAS1_VALID; - env->spr[SPR_BOOKE_MAS2] |= address & MAS2_EPN_MASK; + ret = mmu40x_get_physical_address(env, raddrp, protp, eaddr, access_type); + if (ret == 0) { + *psizep = TARGET_PAGE_BITS; + return true; + } else if (!guest_visible) { + return false; + } - if (!use_epid) { - switch (env->spr[SPR_BOOKE_MAS4] & MAS4_TIDSELD_PIDZ) { - case MAS4_TIDSELD_PID0: - missed_tid = env->spr[SPR_BOOKE_PID]; - break; - case MAS4_TIDSELD_PID1: - missed_tid = env->spr[SPR_BOOKE_PID1]; + log_cpu_state_mask(CPU_LOG_MMU, cs, 0); + if (access_type == MMU_INST_FETCH) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + cs->exception_index = POWERPC_EXCP_ITLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = eaddr; + env->spr[SPR_40x_ESR] = 0x00000000; break; - case MAS4_TIDSELD_PID2: - missed_tid = env->spr[SPR_BOOKE_PID2]; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; break; + default: + g_assert_not_reached(); } - env->spr[SPR_BOOKE_MAS6] |= env->spr[SPR_BOOKE_PID] << 16; } else { - missed_tid = epid; - env->spr[SPR_BOOKE_MAS6] |= missed_tid << 16; + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + cs->exception_index = POWERPC_EXCP_DTLB; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_40x_ESR] = 
0x00800000; + } else { + env->spr[SPR_40x_ESR] = 0x00000000; + } + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_40x_DEAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_40x_ESR] |= 0x00800000; + } + break; + default: + g_assert_not_reached(); + } } - env->spr[SPR_BOOKE_MAS1] |= (missed_tid << MAS1_TID_SHIFT); - - - /* next victim logic */ - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT; - env->last_way++; - env->last_way &= booke206_tlb_ways(env, 0) - 1; - env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT; + return false; } -/* Perform address translation */ -/* TODO: Split this by mmu_model. */ -static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, - MMUAccessType access_type, - hwaddr *raddrp, int *psizep, int *protp, - int mmu_idx, bool guest_visible) +static bool ppc_6xx_xlate(PowerPCCPU *cpu, vaddr eaddr, + MMUAccessType access_type, + hwaddr *raddrp, int *psizep, int *protp, + int mmu_idx, bool guest_visible) { CPUState *cs = CPU(cpu); CPUPPCState *env = &cpu->env; @@ -1310,6 +781,10 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, int type; int ret; + if (ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, protp)) { + return true; + } + if (access_type == MMU_INST_FETCH) { /* code access */ type = ACCESS_CODE; @@ -1320,199 +795,116 @@ static bool ppc_jumbo_xlate(PowerPCCPU *cpu, vaddr eaddr, type = ACCESS_INT; } - ret = get_physical_address_wtlb(env, &ctx, eaddr, access_type, - type, mmu_idx); + ctx.prot = 0; + ctx.hash[0] = 0; + ctx.hash[1] = 0; + ret = mmu6xx_get_physical_address(env, &ctx, eaddr, access_type, type); if (ret == 0) { *raddrp = ctx.raddr; *protp = ctx.prot; *psizep = TARGET_PAGE_BITS; return true; + } else if (!guest_visible) { + return false; } - if (guest_visible) { - log_cpu_state_mask(CPU_LOG_MMU, cs, 0); - if (type == ACCESS_CODE) { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - cs->exception_index = POWERPC_EXCP_IFTLB; - env->error_code = 1 << 18; - env->spr[SPR_IMISS] = eaddr; - env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; - goto tlb_miss; - case POWERPC_MMU_SOFT_4xx: - cs->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = eaddr; - env->spr[SPR_40x_ESR] = 0x00000000; - break; - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, eaddr, 2, mmu_idx); - /* fall through */ - case POWERPC_MMU_BOOKE: - cs->exception_index = POWERPC_EXCP_ITLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, MMU_DATA_LOAD); - break; - case POWERPC_MMU_MPC8xx: - cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); - case POWERPC_MMU_REAL: - cpu_abort(cs, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - default: - cpu_abort(cs, "Unknown or invalid MMU model\n"); - } - break; - case -2: - /* Access rights violation */ - cs->exception_index = POWERPC_EXCP_ISI; - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->error_code = 0; - } else { - env->error_code = 0x08000000; - } - break; - case -3: - /* No execute protection violation */ - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_ESR] = 0x00000000; - env->error_code = 0; - } else { - env->error_code = 0x10000000; - } - cs->exception_index = POWERPC_EXCP_ISI; + 
log_cpu_state_mask(CPU_LOG_MMU, cs, 0); + if (type == ACCESS_CODE) { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + cs->exception_index = POWERPC_EXCP_IFTLB; + env->error_code = 1 << 18; + env->spr[SPR_IMISS] = eaddr; + env->spr[SPR_ICMP] = 0x80000000 | ctx.ptem; + goto tlb_miss; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x08000000; + break; + case -3: + /* No execute protection violation */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + case -4: + /* Direct store exception */ + /* No code fetch is allowed in direct-store areas */ + cs->exception_index = POWERPC_EXCP_ISI; + env->error_code = 0x10000000; + break; + } + } else { + switch (ret) { + case -1: + /* No matches in page tables or TLB */ + if (access_type == MMU_DATA_STORE) { + cs->exception_index = POWERPC_EXCP_DSTLB; + env->error_code = 1 << 16; + } else { + cs->exception_index = POWERPC_EXCP_DLTLB; + env->error_code = 0; + } + env->spr[SPR_DMISS] = eaddr; + env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; +tlb_miss: + env->error_code |= ctx.key << 19; + env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[0]); + env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + + get_pteg_offset32(cpu, ctx.hash[1]); + break; + case -2: + /* Access rights violation */ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x0A000000; + } else { + env->spr[SPR_DSISR] = 0x08000000; + } + break; + case -4: + /* Direct store exception */ + switch (type) { + case ACCESS_FLOAT: + /* Floating point load/store */ + cs->exception_index = POWERPC_EXCP_ALIGN; + env->error_code = POWERPC_EXCP_ALIGN_FP; + env->spr[SPR_DAR] = eaddr; break; - case -4: - /* Direct store exception */ - /* No code fetch is allowed in direct-store areas */ - cs->exception_index = POWERPC_EXCP_ISI; - if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->error_code = 0; + case ACCESS_RES: + /* lwarx, ldarx or stwcx. 
*/ + cs->exception_index = POWERPC_EXCP_DSI; + env->error_code = 0; + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x06000000; } else { - env->error_code = 0x10000000; + env->spr[SPR_DSISR] = 0x04000000; } break; - } - } else { - switch (ret) { - case -1: - /* No matches in page tables or TLB */ - switch (env->mmu_model) { - case POWERPC_MMU_SOFT_6xx: - if (access_type == MMU_DATA_STORE) { - cs->exception_index = POWERPC_EXCP_DSTLB; - env->error_code = 1 << 16; - } else { - cs->exception_index = POWERPC_EXCP_DLTLB; - env->error_code = 0; - } - env->spr[SPR_DMISS] = eaddr; - env->spr[SPR_DCMP] = 0x80000000 | ctx.ptem; - tlb_miss: - env->error_code |= ctx.key << 19; - env->spr[SPR_HASH1] = ppc_hash32_hpt_base(cpu) + - get_pteg_offset32(cpu, ctx.hash[0]); - env->spr[SPR_HASH2] = ppc_hash32_hpt_base(cpu) + - get_pteg_offset32(cpu, ctx.hash[1]); - break; - case POWERPC_MMU_SOFT_4xx: - cs->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_40x_DEAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_40x_ESR] = 0x00800000; - } else { - env->spr[SPR_40x_ESR] = 0x00000000; - } - break; - case POWERPC_MMU_MPC8xx: - /* XXX: TODO */ - cpu_abort(cs, "MPC8xx MMU model is not implemented\n"); - case POWERPC_MMU_BOOKE206: - booke206_update_mas_tlb_miss(env, eaddr, access_type, mmu_idx); - /* fall through */ - case POWERPC_MMU_BOOKE: - cs->exception_index = POWERPC_EXCP_DTLB; - env->error_code = 0; - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); - break; - case POWERPC_MMU_REAL: - cpu_abort(cs, "PowerPC in real mode should never raise " - "any MMU exceptions\n"); - default: - cpu_abort(cs, "Unknown or invalid MMU model\n"); - } - break; - case -2: - /* Access rights violation */ + case ACCESS_EXT: + /* eciwx or ecowx */ cs->exception_index = POWERPC_EXCP_DSI; env->error_code = 0; - if (env->mmu_model == POWERPC_MMU_SOFT_4xx) { - env->spr[SPR_40x_DEAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_40x_ESR] |= 0x00800000; - } - } else if ((env->mmu_model == POWERPC_MMU_BOOKE) || - (env->mmu_model == POWERPC_MMU_BOOKE206)) { - env->spr[SPR_BOOKE_DEAR] = eaddr; - env->spr[SPR_BOOKE_ESR] = mmubooke206_esr(mmu_idx, access_type); + env->spr[SPR_DAR] = eaddr; + if (access_type == MMU_DATA_STORE) { + env->spr[SPR_DSISR] = 0x06100000; } else { - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x0A000000; - } else { - env->spr[SPR_DSISR] = 0x08000000; - } + env->spr[SPR_DSISR] = 0x04100000; } break; - case -4: - /* Direct store exception */ - switch (type) { - case ACCESS_FLOAT: - /* Floating point load/store */ - cs->exception_index = POWERPC_EXCP_ALIGN; - env->error_code = POWERPC_EXCP_ALIGN_FP; - env->spr[SPR_DAR] = eaddr; - break; - case ACCESS_RES: - /* lwarx, ldarx or stwcx. 
*/ - cs->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x06000000; - } else { - env->spr[SPR_DSISR] = 0x04000000; - } - break; - case ACCESS_EXT: - /* eciwx or ecowx */ - cs->exception_index = POWERPC_EXCP_DSI; - env->error_code = 0; - env->spr[SPR_DAR] = eaddr; - if (access_type == MMU_DATA_STORE) { - env->spr[SPR_DSISR] = 0x06100000; - } else { - env->spr[SPR_DSISR] = 0x04100000; - } - break; - default: - printf("DSI: invalid exception (%d)\n", ret); - cs->exception_index = POWERPC_EXCP_PROGRAM; - env->error_code = - POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; - env->spr[SPR_DAR] = eaddr; - break; - } + default: + printf("DSI: invalid exception (%d)\n", ret); + cs->exception_index = POWERPC_EXCP_PROGRAM; + env->error_code = POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL; + env->spr[SPR_DAR] = eaddr; break; } + break; } } return false; @@ -1543,10 +935,23 @@ bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, case POWERPC_MMU_32B: return ppc_hash32_xlate(cpu, eaddr, access_type, raddrp, psizep, protp, mmu_idx, guest_visible); - - default: - return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp, + case POWERPC_MMU_BOOKE: + case POWERPC_MMU_BOOKE206: + return ppc_booke_xlate(cpu, eaddr, access_type, raddrp, psizep, protp, mmu_idx, guest_visible); + case POWERPC_MMU_SOFT_4xx: + return ppc_40x_xlate(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + case POWERPC_MMU_SOFT_6xx: + return ppc_6xx_xlate(cpu, eaddr, access_type, raddrp, + psizep, protp, mmu_idx, guest_visible); + case POWERPC_MMU_REAL: + return ppc_real_mode_xlate(cpu, eaddr, access_type, raddrp, psizep, + protp); + case POWERPC_MMU_MPC8xx: + cpu_abort(env_cpu(&cpu->env), "MPC8xx MMU model is not implemented\n"); + default: + cpu_abort(CPU(cpu), "Unknown or invalid MMU model\n"); } } diff --git a/target/ppc/mmu_helper.c b/target/ppc/mmu_helper.c index b35a93c198..b0a0676beb 100644 --- a/target/ppc/mmu_helper.c +++ b/target/ppc/mmu_helper.c @@ -33,6 +33,7 @@ #include "internal.h" #include "mmu-book3s-v3.h" #include "mmu-radix64.h" +#include "mmu-booke.h" #include "exec/helper-proto.h" #include "exec/cpu_ldst.h" @@ -45,14 +46,8 @@ static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env) { ppc6xx_tlb_t *tlb; - int nr, max; + int nr, max = 2 * env->nb_tlb; - /* LOG_SWTLB("Invalidate all TLBs\n"); */ - /* Invalidate all defined software TLB */ - max = env->nb_tlb; - if (env->id_tlbs == 1) { - max *= 2; - } for (nr = 0; nr < max; nr++) { tlb = &env->tlb.tlb6[nr]; pte_invalidate(&tlb->pte0); @@ -308,9 +303,7 @@ void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr) switch (env->mmu_model) { case POWERPC_MMU_SOFT_6xx: ppc6xx_tlb_invalidate_virt(env, addr, 0); - if (env->id_tlbs == 1) { - ppc6xx_tlb_invalidate_virt(env, addr, 1); - } + ppc6xx_tlb_invalidate_virt(env, addr, 1); break; case POWERPC_MMU_32B: /* @@ -534,7 +527,7 @@ void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs, if (local) { tlb_flush_page(env_cpu(env), addr); } else { - tlb_flush_page_all_cpus(env_cpu(env), addr); + tlb_flush_page_all_cpus_synced(env_cpu(env), addr); } return; @@ -596,30 +589,6 @@ void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN) do_6xx_tlb(env, EPN, 1); } -/*****************************************************************************/ -/* PowerPC 601 specific instructions (POWER bridge) */ - -target_ulong helper_rac(CPUPPCState *env, target_ulong addr) -{ - 
mmu_ctx_t ctx; - int nb_BATs; - target_ulong ret = 0; - - /* - * We don't have to generate many instances of this instruction, - * as rac is supervisor only. - * - * XXX: FIX THIS: Pretend we have no BAT - */ - nb_BATs = env->nb_BATs; - env->nb_BATs = 0; - if (get_physical_address_wtlb(env, &ctx, addr, 0, ACCESS_INT, 0) == 0) { - ret = ctx.raddr; - } - env->nb_BATs = nb_BATs; - return ret; -} - static inline target_ulong booke_tlb_to_page_size(int size) { return 1024 << (2 * size); diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc index 4956a8b350..652cf20704 100644 --- a/target/ppc/power8-pmu-regs.c.inc +++ b/target/ppc/power8-pmu-regs.c.inc @@ -175,6 +175,11 @@ void spr_write_MMCR2_ureg(DisasContext *ctx, int sprn, int gprn) gen_store_spr(SPR_POWER_MMCR2, masked_gprn); } +void spr_write_MMCRA(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_mmcrA(tcg_env, cpu_gpr[gprn]); +} + void spr_read_PMC(DisasContext *ctx, int gprn, int sprn) { TCGv_i32 t_sprn = tcg_constant_i32(sprn); diff --git a/target/ppc/power8-pmu.c b/target/ppc/power8-pmu.c index cbc5889d91..db9ee8e96b 100644 --- a/target/ppc/power8-pmu.c +++ b/target/ppc/power8-pmu.c @@ -82,7 +82,38 @@ static void pmu_update_summaries(CPUPPCState *env) env->pmc_cyc_cnt = cyc_cnt; } -void pmu_mmcr01_updated(CPUPPCState *env) +static void hreg_bhrb_filter_update(CPUPPCState *env) +{ + target_long ifm; + + if (!(env->spr[SPR_POWER_MMCR0] & MMCR0_PMAE)) { + /* disable recording to BHRB */ + env->bhrb_filter = BHRB_TYPE_NORECORD; + return; + } + + ifm = (env->spr[SPR_POWER_MMCRA] & MMCRA_IFM_MASK) >> MMCRA_IFM_SHIFT; + switch (ifm) { + case 0: + /* record all branches */ + env->bhrb_filter = -1; + break; + case 1: + /* only record calls (LK = 1) */ + env->bhrb_filter = BHRB_TYPE_CALL; + break; + case 2: + /* only record indirect branches */ + env->bhrb_filter = BHRB_TYPE_INDIRECT; + break; + case 3: + /* only record conditional branches */ + env->bhrb_filter = BHRB_TYPE_COND; + break; + } +} + +void pmu_mmcr01a_updated(CPUPPCState *env) { PowerPCCPU *cpu = env_archcpu(env); @@ -95,6 +126,8 @@ void pmu_mmcr01_updated(CPUPPCState *env) ppc_set_irq(cpu, PPC_INTERRUPT_PERFM, 0); } + hreg_bhrb_filter_update(env); + /* * Should this update overflow timers (if mmcr0 is updated) so they * get set in cpu_post_load? 
@@ -260,7 +293,7 @@ void helper_store_mmcr0(CPUPPCState *env, target_ulong value) env->spr[SPR_POWER_MMCR0] = value; - pmu_mmcr01_updated(env); + pmu_mmcr01a_updated(env); /* Update cycle overflow timers with the current MMCR0 state */ pmu_update_overflow_timers(env); @@ -272,7 +305,14 @@ void helper_store_mmcr1(CPUPPCState *env, uint64_t value) env->spr[SPR_POWER_MMCR1] = value; - pmu_mmcr01_updated(env); + pmu_mmcr01a_updated(env); +} + +void helper_store_mmcrA(CPUPPCState *env, uint64_t value) +{ + env->spr[SPR_POWER_MMCRA] = value; + + pmu_mmcr01a_updated(env); } target_ulong helper_read_pmc(CPUPPCState *env, uint32_t sprn) @@ -301,7 +341,7 @@ static void perfm_alert(PowerPCCPU *cpu) env->spr[SPR_POWER_MMCR0] |= MMCR0_FC; /* Changing MMCR0_FC requires summaries and hflags update */ - pmu_mmcr01_updated(env); + pmu_mmcr01a_updated(env); /* * Delete all pending timers if we need to freeze diff --git a/target/ppc/power8-pmu.h b/target/ppc/power8-pmu.h index 775e640053..3f79cfc45b 100644 --- a/target/ppc/power8-pmu.h +++ b/target/ppc/power8-pmu.h @@ -13,15 +13,22 @@ #ifndef POWER8_PMU_H #define POWER8_PMU_H +#define BHRB_TYPE_NORECORD 0x00 +#define BHRB_TYPE_CALL 0x01 +#define BHRB_TYPE_INDIRECT 0x02 +#define BHRB_TYPE_COND 0x04 +#define BHRB_TYPE_OTHER 0x08 +#define BHRB_TYPE_XL_FORM 0x10 + #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) #define PMC_COUNTER_NEGATIVE_VAL 0x80000000UL void cpu_ppc_pmu_init(CPUPPCState *env); -void pmu_mmcr01_updated(CPUPPCState *env); +void pmu_mmcr01a_updated(CPUPPCState *env); #else static inline void cpu_ppc_pmu_init(CPUPPCState *env) { } -static inline void pmu_mmcr01_updated(CPUPPCState *env) { } +static inline void pmu_mmcr01a_updated(CPUPPCState *env) { } #endif #endif diff --git a/target/ppc/spr_common.h b/target/ppc/spr_common.h index 8a9d6cd994..01aff449bc 100644 --- a/target/ppc/spr_common.h +++ b/target/ppc/spr_common.h @@ -83,8 +83,11 @@ void spr_read_generic(DisasContext *ctx, int gprn, int sprn); void spr_write_generic(DisasContext *ctx, int sprn, int gprn); void spr_write_generic32(DisasContext *ctx, int sprn, int gprn); void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn); +void spr_core_write_generic32(DisasContext *ctx, int sprn, int gprn); +void spr_core_lpar_write_generic(DisasContext *ctx, int sprn, int gprn); void spr_write_MMCR0(DisasContext *ctx, int sprn, int gprn); void spr_write_MMCR1(DisasContext *ctx, int sprn, int gprn); +void spr_write_MMCRA(DisasContext *ctx, int sprn, int gprn); void spr_write_PMC(DisasContext *ctx, int sprn, int gprn); void spr_write_CTRL(DisasContext *ctx, int sprn, int gprn); void spr_read_xer(DisasContext *ctx, int gprn, int sprn); @@ -202,6 +205,11 @@ void spr_read_tfmr(DisasContext *ctx, int gprn, int sprn); void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn); void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn); void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn); +void spr_read_ppr32(DisasContext *ctx, int sprn, int gprn); +void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn); +void spr_write_sprc(DisasContext *ctx, int sprn, int gprn); +void spr_read_sprd(DisasContext *ctx, int sprn, int gprn); +void spr_write_sprd(DisasContext *ctx, int sprn, int gprn); #endif void register_low_BATs(CPUPPCState *env); diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 49dee6cab0..0bc16d7251 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -180,6 +180,7 @@ struct DisasContext { #if defined(TARGET_PPC64) bool sf_mode; bool 
has_cfar; + bool has_bhrb; #endif bool fpu_enabled; bool altivec_enabled; @@ -193,6 +194,7 @@ struct DisasContext { bool mmcr0_pmcjce; bool pmc_other; bool pmu_insn_cnt; + bool bhrb_enable; ppc_spr_t *spr_cb; /* Needed to check rights for mfspr/mtspr */ int singlestep_enabled; uint32_t flags; @@ -471,6 +473,34 @@ void spr_core_write_generic(DisasContext *ctx, int sprn, int gprn) spr_store_dump_spr(sprn); } +void spr_core_write_generic32(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0; + + if (!(ctx->flags & POWERPC_FLAG_SMT)) { + spr_write_generic32(ctx, sprn, gprn); + return; + } + + if (!gen_serialize(ctx)) { + return; + } + + t0 = tcg_temp_new(); + tcg_gen_ext32u_tl(t0, cpu_gpr[gprn]); + gen_helper_spr_core_write_generic(tcg_env, tcg_constant_i32(sprn), t0); + spr_store_dump_spr(sprn); +} + +void spr_core_lpar_write_generic(DisasContext *ctx, int sprn, int gprn) +{ + if (ctx->flags & POWERPC_FLAG_SMT_1LPAR) { + spr_core_write_generic(ctx, sprn, gprn); + } else { + spr_write_generic(ctx, sprn, gprn); + } +} + static void spr_write_CTRL_ST(DisasContext *ctx, int sprn, int gprn) { /* This does not implement >1 thread */ @@ -879,6 +909,10 @@ void spr_write_hior(DisasContext *ctx, int sprn, int gprn) } void spr_write_ptcr(DisasContext *ctx, int sprn, int gprn) { + if (!gen_serialize_core(ctx)) { + return; + } + gen_helper_store_ptcr(tcg_env, cpu_gpr[gprn]); } @@ -1267,6 +1301,24 @@ void spr_write_tfmr(DisasContext *ctx, int sprn, int gprn) gen_helper_store_tfmr(tcg_env, cpu_gpr[gprn]); } +void spr_write_sprc(DisasContext *ctx, int sprn, int gprn) +{ + gen_helper_store_sprc(tcg_env, cpu_gpr[gprn]); +} + +void spr_read_sprd(DisasContext *ctx, int gprn, int sprn) +{ + gen_helper_load_sprd(cpu_gpr[gprn], tcg_env); +} + +void spr_write_sprd(DisasContext *ctx, int sprn, int gprn) +{ + if (!gen_serialize_core(ctx)) { + return; + } + gen_helper_store_sprd(tcg_env, cpu_gpr[gprn]); +} + void spr_write_lpcr(DisasContext *ctx, int sprn, int gprn) { translator_io_start(&ctx->base); @@ -1350,6 +1402,30 @@ void spr_read_dexcr_ureg(DisasContext *ctx, int gprn, int sprn) gen_load_spr(t0, sprn + 16); tcg_gen_ext32u_tl(cpu_gpr[gprn], t0); } + +/* The PPR32 SPR accesses the upper 32-bits of PPR */ +void spr_read_ppr32(DisasContext *ctx, int gprn, int sprn) +{ + gen_load_spr(cpu_gpr[gprn], SPR_PPR); + tcg_gen_shri_tl(cpu_gpr[gprn], cpu_gpr[gprn], 32); + spr_load_dump_spr(SPR_PPR); +} + +void spr_write_ppr32(DisasContext *ctx, int sprn, int gprn) +{ + TCGv t0 = tcg_temp_new(); + + /* + * Don't clobber the low 32-bits of the PPR. These are all reserved bits + * but TCG does implement them, so it would be surprising to zero them + * here. "Priority nops" are similarly careful not to clobber reserved + * bits. 
+ */ + gen_load_spr(t0, SPR_PPR); + tcg_gen_deposit_tl(t0, t0, cpu_gpr[gprn], 32, 32); + gen_store_spr(SPR_PPR, t0); + spr_store_dump_spr(SPR_PPR); +} #endif #define GEN_HANDLER(name, opc1, opc2, opc3, inval, type) \ @@ -1563,73 +1639,6 @@ static inline void gen_set_Rc0(DisasContext *ctx, TCGv reg) } } -/* cmprb - range comparison: isupper, isaplha, islower*/ -static void gen_cmprb(DisasContext *ctx) -{ - TCGv_i32 src1 = tcg_temp_new_i32(); - TCGv_i32 src2 = tcg_temp_new_i32(); - TCGv_i32 src2lo = tcg_temp_new_i32(); - TCGv_i32 src2hi = tcg_temp_new_i32(); - TCGv_i32 crf = cpu_crf[crfD(ctx->opcode)]; - - tcg_gen_trunc_tl_i32(src1, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_trunc_tl_i32(src2, cpu_gpr[rB(ctx->opcode)]); - - tcg_gen_andi_i32(src1, src1, 0xFF); - tcg_gen_ext8u_i32(src2lo, src2); - tcg_gen_shri_i32(src2, src2, 8); - tcg_gen_ext8u_i32(src2hi, src2); - - tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); - tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); - tcg_gen_and_i32(crf, src2lo, src2hi); - - if (ctx->opcode & 0x00200000) { - tcg_gen_shri_i32(src2, src2, 8); - tcg_gen_ext8u_i32(src2lo, src2); - tcg_gen_shri_i32(src2, src2, 8); - tcg_gen_ext8u_i32(src2hi, src2); - tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1); - tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi); - tcg_gen_and_i32(src2lo, src2lo, src2hi); - tcg_gen_or_i32(crf, crf, src2lo); - } - tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); -} - -#if defined(TARGET_PPC64) -/* cmpeqb */ -static void gen_cmpeqb(DisasContext *ctx) -{ - gen_helper_cmpeqb(cpu_crf[crfD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); -} -#endif - -/* isel (PowerPC 2.03 specification) */ -static void gen_isel(DisasContext *ctx) -{ - uint32_t bi = rC(ctx->opcode); - uint32_t mask = 0x08 >> (bi & 0x03); - TCGv t0 = tcg_temp_new(); - TCGv zr; - - tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); - tcg_gen_andi_tl(t0, t0, mask); - - zr = tcg_constant_tl(0); - tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rD(ctx->opcode)], t0, zr, - rA(ctx->opcode) ? cpu_gpr[rA(ctx->opcode)] : zr, - cpu_gpr[rB(ctx->opcode)]); -} - -/* cmpb: PowerPC 2.05 specification */ -static void gen_cmpb(DisasContext *ctx) -{ - gen_helper_cmpb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); -} - /*** Integer arithmetic ***/ static inline void gen_op_arith_compute_ov(DisasContext *ctx, TCGv arg0, @@ -1737,8 +1746,9 @@ static inline void gen_op_arith_add(DisasContext *ctx, TCGv ret, TCGv arg1, } } -static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, - TCGv arg2, int sign, int compute_ov) +static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, + TCGv arg1, TCGv arg2, bool sign, + bool compute_ov, bool compute_rc0) { TCGv_i32 t0 = tcg_temp_new_i32(); TCGv_i32 t1 = tcg_temp_new_i32(); @@ -1772,45 +1782,15 @@ static inline void gen_op_arith_divw(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); } - if (unlikely(Rc(ctx->opcode) != 0)) { + if (unlikely(compute_rc0)) { gen_set_Rc0(ctx, ret); } } -/* Div functions */ -#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_divw(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - sign, compute_ov); \ -} -/* divwu divwu. divwuo divwuo. */ -GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0); -GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1); -/* divw divw. divwo divwo. 
*/ -GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0); -GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1); - -/* div[wd]eu[o][.] */ -#define GEN_DIVE(name, hlpr, compute_ov) \ -static void gen_##name(DisasContext *ctx) \ -{ \ - TCGv_i32 t0 = tcg_constant_i32(compute_ov); \ - gen_helper_##hlpr(cpu_gpr[rD(ctx->opcode)], tcg_env, \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); \ - } \ -} - -GEN_DIVE(divweu, divweu, 0); -GEN_DIVE(divweuo, divweu, 1); -GEN_DIVE(divwe, divwe, 0); -GEN_DIVE(divweo, divwe, 1); #if defined(TARGET_PPC64) -static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, - TCGv arg2, int sign, int compute_ov) +static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, + TCGv arg1, TCGv arg2, bool sign, + bool compute_ov, bool compute_rc0) { TCGv_i64 t0 = tcg_temp_new_i64(); TCGv_i64 t1 = tcg_temp_new_i64(); @@ -1846,25 +1826,6 @@ static inline void gen_op_arith_divd(DisasContext *ctx, TCGv ret, TCGv arg1, gen_set_Rc0(ctx, ret); } } - -#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_divd(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - sign, compute_ov); \ -} -/* divdu divdu. divduo divduo. */ -GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0); -GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1); -/* divd divd. divdo divdo. */ -GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0); -GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1); - -GEN_DIVE(divdeu, divdeu, 0); -GEN_DIVE(divdeuo, divdeu, 1); -GEN_DIVE(divde, divde, 0); -GEN_DIVE(divdeo, divde, 1); #endif static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, @@ -1896,17 +1857,6 @@ static inline void gen_op_arith_modw(DisasContext *ctx, TCGv ret, TCGv arg1, } } -#define GEN_INT_ARITH_MODW(name, opc3, sign) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_modw(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - sign); \ -} - -GEN_INT_ARITH_MODW(moduw, 0x08, 0); -GEN_INT_ARITH_MODW(modsw, 0x18, 1); - #if defined(TARGET_PPC64) static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, TCGv arg2, int sign) @@ -1934,157 +1884,6 @@ static inline void gen_op_arith_modd(DisasContext *ctx, TCGv ret, TCGv arg1, tcg_gen_remu_i64(ret, t0, t1); } } - -#define GEN_INT_ARITH_MODD(name, opc3, sign) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - gen_op_arith_modd(ctx, cpu_gpr[rD(ctx->opcode)], \ - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], \ - sign); \ -} - -GEN_INT_ARITH_MODD(modud, 0x08, 0); -GEN_INT_ARITH_MODD(modsd, 0x18, 1); -#endif - -/* mulhw mulhw. */ -static void gen_mulhw(DisasContext *ctx) -{ - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); - - tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_muls2_i32(t0, t1, t0, t1); - tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulhwu mulhwu. 
*/ -static void gen_mulhwu(DisasContext *ctx) -{ - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); - - tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_mulu2_i32(t0, t1, t0, t1); - tcg_gen_extu_i32_tl(cpu_gpr[rD(ctx->opcode)], t1); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mullw mullw. */ -static void gen_mullw(DisasContext *ctx) -{ -#if defined(TARGET_PPC64) - TCGv_i64 t0, t1; - t0 = tcg_temp_new_i64(); - t1 = tcg_temp_new_i64(); - tcg_gen_ext32s_tl(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_ext32s_tl(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_mul_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); -#else - tcg_gen_mul_i32(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); -#endif - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mullwo mullwo. */ -static void gen_mullwo(DisasContext *ctx) -{ - TCGv_i32 t0 = tcg_temp_new_i32(); - TCGv_i32 t1 = tcg_temp_new_i32(); - - tcg_gen_trunc_tl_i32(t0, cpu_gpr[rA(ctx->opcode)]); - tcg_gen_trunc_tl_i32(t1, cpu_gpr[rB(ctx->opcode)]); - tcg_gen_muls2_i32(t0, t1, t0, t1); -#if defined(TARGET_PPC64) - tcg_gen_concat_i32_i64(cpu_gpr[rD(ctx->opcode)], t0, t1); -#else - tcg_gen_mov_i32(cpu_gpr[rD(ctx->opcode)], t0); -#endif - - tcg_gen_sari_i32(t0, t0, 31); - tcg_gen_setcond_i32(TCG_COND_NE, t0, t0, t1); - tcg_gen_extu_i32_tl(cpu_ov, t0); - if (is_isa300(ctx)) { - tcg_gen_mov_tl(cpu_ov32, cpu_ov); - } - tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulli */ -static void gen_mulli(DisasContext *ctx) -{ - tcg_gen_muli_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - SIMM(ctx->opcode)); -} - -#if defined(TARGET_PPC64) -/* mulhd mulhd. */ -static void gen_mulhd(DisasContext *ctx) -{ - TCGv lo = tcg_temp_new(); - tcg_gen_muls2_tl(lo, cpu_gpr[rD(ctx->opcode)], - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulhdu mulhdu. */ -static void gen_mulhdu(DisasContext *ctx) -{ - TCGv lo = tcg_temp_new(); - tcg_gen_mulu2_tl(lo, cpu_gpr[rD(ctx->opcode)], - cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulld mulld. */ -static void gen_mulld(DisasContext *ctx) -{ - tcg_gen_mul_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -/* mulldo mulldo. */ -static void gen_mulldo(DisasContext *ctx) -{ - TCGv_i64 t0 = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - - tcg_gen_muls2_i64(t0, t1, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - tcg_gen_mov_i64(cpu_gpr[rD(ctx->opcode)], t0); - - tcg_gen_sari_i64(t0, t0, 63); - tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1); - if (is_isa300(ctx)) { - tcg_gen_mov_tl(cpu_ov32, cpu_ov); - } - tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); - - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} #endif /* Common subf function */ @@ -2157,104 +1956,7 @@ static inline void gen_op_arith_subf(DisasContext *ctx, TCGv ret, TCGv arg1, } } -/* neg neg. nego nego. 
*/ -static inline void gen_op_arith_neg(DisasContext *ctx, bool compute_ov) -{ - TCGv zero = tcg_constant_tl(0); - gen_op_arith_subf(ctx, cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)], - zero, 0, 0, compute_ov, Rc(ctx->opcode)); -} - -static void gen_neg(DisasContext *ctx) -{ - tcg_gen_neg_tl(cpu_gpr[rD(ctx->opcode)], cpu_gpr[rA(ctx->opcode)]); - if (unlikely(Rc(ctx->opcode))) { - gen_set_Rc0(ctx, cpu_gpr[rD(ctx->opcode)]); - } -} - -static void gen_nego(DisasContext *ctx) -{ - gen_op_arith_neg(ctx, 1); -} - /*** Integer logical ***/ -#define GEN_LOGICAL2(name, tcg_op, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], \ - cpu_gpr[rB(ctx->opcode)]); \ - if (unlikely(Rc(ctx->opcode) != 0)) \ - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ -} - -#define GEN_LOGICAL1(name, tcg_op, opc, type) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - tcg_op(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); \ - if (unlikely(Rc(ctx->opcode) != 0)) \ - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); \ -} - -/* and & and. */ -GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER); -/* andc & andc. */ -GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER); - -/* andi. */ -static void gen_andi_(DisasContext *ctx) -{ - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - UIMM(ctx->opcode)); - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); -} - -/* andis. */ -static void gen_andis_(DisasContext *ctx) -{ - tcg_gen_andi_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - UIMM(ctx->opcode) << 16); - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); -} - -/* cntlzw */ -static void gen_cntlzw(DisasContext *ctx) -{ - TCGv_i32 t = tcg_temp_new_i32(); - - tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); - tcg_gen_clzi_i32(t, t, 32); - tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* cnttzw */ -static void gen_cnttzw(DisasContext *ctx) -{ - TCGv_i32 t = tcg_temp_new_i32(); - - tcg_gen_trunc_tl_i32(t, cpu_gpr[rS(ctx->opcode)]); - tcg_gen_ctzi_i32(t, t, 32); - tcg_gen_extu_i32_tl(cpu_gpr[rA(ctx->opcode)], t); - - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* eqv & eqv. */ -GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER); -/* extsb & extsb. */ -GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER); -/* extsh & extsh. */ -GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER); -/* nand & nand. */ -GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER); -/* nor & nor. */ -GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER); #if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) static void gen_pause(DisasContext *ctx) @@ -2268,261 +1970,6 @@ static void gen_pause(DisasContext *ctx) } #endif /* defined(TARGET_PPC64) */ -/* or & or. */ -static void gen_or(DisasContext *ctx) -{ - int rs, ra, rb; - - rs = rS(ctx->opcode); - ra = rA(ctx->opcode); - rb = rB(ctx->opcode); - /* Optimisation for mr. 
ri case */ - if (rs != ra || rs != rb) { - if (rs != rb) { - tcg_gen_or_tl(cpu_gpr[ra], cpu_gpr[rs], cpu_gpr[rb]); - } else { - tcg_gen_mov_tl(cpu_gpr[ra], cpu_gpr[rs]); - } - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[ra]); - } - } else if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rs]); -#if defined(TARGET_PPC64) - } else if (rs != 0) { /* 0 is nop */ - int prio = 0; - - switch (rs) { - case 1: - /* Set process priority to low */ - prio = 2; - break; - case 6: - /* Set process priority to medium-low */ - prio = 3; - break; - case 2: - /* Set process priority to normal */ - prio = 4; - break; -#if !defined(CONFIG_USER_ONLY) - case 31: - if (!ctx->pr) { - /* Set process priority to very low */ - prio = 1; - } - break; - case 5: - if (!ctx->pr) { - /* Set process priority to medium-hight */ - prio = 5; - } - break; - case 3: - if (!ctx->pr) { - /* Set process priority to high */ - prio = 6; - } - break; - case 7: - if (ctx->hv && !ctx->pr) { - /* Set process priority to very high */ - prio = 7; - } - break; -#endif - default: - break; - } - if (prio) { - TCGv t0 = tcg_temp_new(); - gen_load_spr(t0, SPR_PPR); - tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); - tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); - gen_store_spr(SPR_PPR, t0); - } -#if !defined(CONFIG_USER_ONLY) - /* - * Pause out of TCG otherwise spin loops with smt_low eat too - * much CPU and the kernel hangs. This applies to all - * encodings other than no-op, e.g., miso(rs=26), yield(27), - * mdoio(29), mdoom(30), and all currently undefined. - */ - gen_pause(ctx); -#endif -#endif - } -} -/* orc & orc. */ -GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER); - -/* xor & xor. */ -static void gen_xor(DisasContext *ctx) -{ - /* Optimisation for "set to zero" case */ - if (rS(ctx->opcode) != rB(ctx->opcode)) { - tcg_gen_xor_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - } else { - tcg_gen_movi_tl(cpu_gpr[rA(ctx->opcode)], 0); - } - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* ori */ -static void gen_ori(DisasContext *ctx) -{ - target_ulong uimm = UIMM(ctx->opcode); - - if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { - return; - } - tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); -} - -/* oris */ -static void gen_oris(DisasContext *ctx) -{ - target_ulong uimm = UIMM(ctx->opcode); - - if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { - /* NOP */ - return; - } - tcg_gen_ori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - uimm << 16); -} - -/* xori */ -static void gen_xori(DisasContext *ctx) -{ - target_ulong uimm = UIMM(ctx->opcode); - - if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { - /* NOP */ - return; - } - tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], uimm); -} - -/* xoris */ -static void gen_xoris(DisasContext *ctx) -{ - target_ulong uimm = UIMM(ctx->opcode); - - if (rS(ctx->opcode) == rA(ctx->opcode) && uimm == 0) { - /* NOP */ - return; - } - tcg_gen_xori_tl(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], - uimm << 16); -} - -/* popcntb : PowerPC 2.03 specification */ -static void gen_popcntb(DisasContext *ctx) -{ - gen_helper_popcntb(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); -} - -static void gen_popcntw(DisasContext *ctx) -{ -#if defined(TARGET_PPC64) - gen_helper_popcntw(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); -#else - tcg_gen_ctpop_i32(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); 
-#endif -} - -#if defined(TARGET_PPC64) -/* popcntd: PowerPC 2.06 specification */ -static void gen_popcntd(DisasContext *ctx) -{ - tcg_gen_ctpop_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)]); -} -#endif - -/* prtyw: PowerPC 2.05 specification */ -static void gen_prtyw(DisasContext *ctx) -{ - TCGv ra = cpu_gpr[rA(ctx->opcode)]; - TCGv rs = cpu_gpr[rS(ctx->opcode)]; - TCGv t0 = tcg_temp_new(); - tcg_gen_shri_tl(t0, rs, 16); - tcg_gen_xor_tl(ra, rs, t0); - tcg_gen_shri_tl(t0, ra, 8); - tcg_gen_xor_tl(ra, ra, t0); - tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); -} - -#if defined(TARGET_PPC64) -/* prtyd: PowerPC 2.05 specification */ -static void gen_prtyd(DisasContext *ctx) -{ - TCGv ra = cpu_gpr[rA(ctx->opcode)]; - TCGv rs = cpu_gpr[rS(ctx->opcode)]; - TCGv t0 = tcg_temp_new(); - tcg_gen_shri_tl(t0, rs, 32); - tcg_gen_xor_tl(ra, rs, t0); - tcg_gen_shri_tl(t0, ra, 16); - tcg_gen_xor_tl(ra, ra, t0); - tcg_gen_shri_tl(t0, ra, 8); - tcg_gen_xor_tl(ra, ra, t0); - tcg_gen_andi_tl(ra, ra, 1); -} -#endif - -#if defined(TARGET_PPC64) -/* bpermd */ -static void gen_bpermd(DisasContext *ctx) -{ - gen_helper_bpermd(cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); -} -#endif - -#if defined(TARGET_PPC64) -/* extsw & extsw. */ -GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B); - -/* cntlzd */ -static void gen_cntlzd(DisasContext *ctx) -{ - tcg_gen_clzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* cnttzd */ -static void gen_cnttzd(DisasContext *ctx) -{ - tcg_gen_ctzi_i64(cpu_gpr[rA(ctx->opcode)], cpu_gpr[rS(ctx->opcode)], 64); - if (unlikely(Rc(ctx->opcode) != 0)) { - gen_set_Rc0(ctx, cpu_gpr[rA(ctx->opcode)]); - } -} - -/* darn */ -static void gen_darn(DisasContext *ctx) -{ - int l = L(ctx->opcode); - - if (l > 2) { - tcg_gen_movi_i64(cpu_gpr[rD(ctx->opcode)], -1); - } else { - translator_io_start(&ctx->base); - if (l == 0) { - gen_helper_darn32(cpu_gpr[rD(ctx->opcode)]); - } else { - /* Return 64-bit random for both CRN and RRN */ - gen_helper_darn64(cpu_gpr[rD(ctx->opcode)]); - } - } -} -#endif - /*** Integer rotate ***/ /* rlwimi & rlwimi. */ @@ -3422,59 +2869,6 @@ static void gen_stswx(DisasContext *ctx) gen_helper_stsw(tcg_env, t0, t1, t2); } -/*** Memory synchronisation ***/ -/* eieio */ -static void gen_eieio(DisasContext *ctx) -{ - TCGBar bar = TCG_MO_ALL; - - /* - * eieio has complex semanitcs. It provides memory ordering between - * operations in the set: - * - loads from CI memory. - * - stores to CI memory. - * - stores to WT memory. - * - * It separately also orders memory for operations in the set: - * - stores to cacheble memory. - * - * It also serializes instructions: - * - dcbt and dcbst. - * - * It separately serializes: - * - tlbie and tlbsync. - * - * And separately serializes: - * - slbieg, slbiag, and slbsync. - * - * The end result is that CI memory ordering requires TCG_MO_ALL - * and it is not possible to special-case more relaxed ordering for - * cacheable accesses. TCG_BAR_SC is required to provide this - * serialization. - */ - - /* - * POWER9 has a eieio instruction variant using bit 6 as a hint to - * tell the CPU it is a store-forwarding barrier. - */ - if (ctx->opcode & 0x2000000) { - /* - * ISA says that "Reserved fields in instructions are ignored - * by the processor". So ignore the bit 6 on non-POWER9 CPU but - * as this is not an instruction software should be using, - * complain to the user. 
- */ - if (!(ctx->insns_flags2 & PPC2_ISA300)) { - qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" - TARGET_FMT_lx "\n", ctx->cia); - } else { - bar = TCG_MO_ST_LD; - } - } - - tcg_gen_mb(bar | TCG_BAR_SC); -} - #if !defined(CONFIG_USER_ONLY) static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { @@ -3494,6 +2888,13 @@ static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) gen_helper_check_tlb_flush_local(tcg_env); } gen_set_label(l); + if (global) { + /* + * Global TLB flush uses async-work which must run before the + * next instruction, so this must be the last in the TB. + */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; + } } #else static inline void gen_check_tlb_flush(DisasContext *ctx, bool global) { } @@ -3513,8 +2914,6 @@ static void gen_isync(DisasContext *ctx) ctx->base.is_jmp = DISAS_EXIT_UPDATE; } -#define MEMOP_GET_SIZE(x) (1 << ((x) & MO_SIZE)) - static void gen_load_locked(DisasContext *ctx, MemOp memop) { TCGv gpr = cpu_gpr[rD(ctx->opcode)]; @@ -3522,7 +2921,7 @@ static void gen_load_locked(DisasContext *ctx, MemOp memop) gen_set_access_type(ctx, ACCESS_RES); gen_addr_reg_index(ctx, t0); - tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, memop | MO_ALIGN); + tcg_gen_qemu_ld_tl(gpr, t0, ctx->mem_idx, DEF_MEMOP(memop) | MO_ALIGN); tcg_gen_mov_tl(cpu_reserve, t0); tcg_gen_movi_tl(cpu_reserve_length, memop_size(memop)); tcg_gen_mov_tl(cpu_reserve_val, gpr); @@ -3535,9 +2934,9 @@ static void gen_##name(DisasContext *ctx) \ } /* lwarx */ -LARX(lbarx, DEF_MEMOP(MO_UB)) -LARX(lharx, DEF_MEMOP(MO_UW)) -LARX(lwarx, DEF_MEMOP(MO_UL)) +LARX(lbarx, MO_UB) +LARX(lharx, MO_UW) +LARX(lwarx, MO_UL) static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, TCGv EA, TCGCond cond, int addend) @@ -3547,7 +2946,7 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, TCGv u = tcg_temp_new(); tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop); - tcg_gen_addi_tl(t2, EA, MEMOP_GET_SIZE(memop)); + tcg_gen_addi_tl(t2, EA, memop_size(memop)); tcg_gen_qemu_ld_tl(t2, t2, ctx->mem_idx, memop); tcg_gen_addi_tl(u, t, addend); @@ -3557,7 +2956,7 @@ static void gen_fetch_inc_conditional(DisasContext *ctx, MemOp memop, tcg_gen_qemu_st_tl(u, EA, ctx->mem_idx, memop); /* RT = (t != t2 ? t : u = 1<<(s*8-1)) */ - tcg_gen_movi_tl(u, 1 << (MEMOP_GET_SIZE(memop) * 8 - 1)); + tcg_gen_movi_tl(u, 1 << (memop_size(memop) * 8 - 1)); tcg_gen_movcond_tl(cond, cpu_gpr[rD(ctx->opcode)], t, t2, t, u); } @@ -3719,7 +3118,7 @@ static void gen_st_atomic(DisasContext *ctx, MemOp memop) TCGv ea_plus_s = tcg_temp_new(); tcg_gen_qemu_ld_tl(t, EA, ctx->mem_idx, memop); - tcg_gen_addi_tl(ea_plus_s, EA, MEMOP_GET_SIZE(memop)); + tcg_gen_addi_tl(ea_plus_s, EA, memop_size(memop)); tcg_gen_qemu_ld_tl(t2, ea_plus_s, ctx->mem_idx, memop); tcg_gen_movcond_tl(TCG_COND_EQ, s, t, t2, src, t); tcg_gen_movcond_tl(TCG_COND_EQ, s2, t, t2, src, t2); @@ -3782,15 +3181,15 @@ static void gen_##name(DisasContext *ctx) \ gen_conditional_store(ctx, memop); \ } -STCX(stbcx_, DEF_MEMOP(MO_UB)) -STCX(sthcx_, DEF_MEMOP(MO_UW)) -STCX(stwcx_, DEF_MEMOP(MO_UL)) +STCX(stbcx_, MO_UB) +STCX(sthcx_, MO_UW) +STCX(stwcx_, MO_UL) #if defined(TARGET_PPC64) /* ldarx */ -LARX(ldarx, DEF_MEMOP(MO_UQ)) +LARX(ldarx, MO_UQ) /* stdcx. 
*/ -STCX(stdcx_, DEF_MEMOP(MO_UQ)) +STCX(stdcx_, MO_UQ) /* lqarx */ static void gen_lqarx(DisasContext *ctx) @@ -3876,31 +3275,6 @@ static void gen_stqcx_(DisasContext *ctx) } #endif /* defined(TARGET_PPC64) */ -/* sync */ -static void gen_sync(DisasContext *ctx) -{ - TCGBar bar = TCG_MO_ALL; - uint32_t l = (ctx->opcode >> 21) & 3; - - if ((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) { - bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST; - } - - /* - * We may need to check for a pending TLB flush. - * - * We do this on ptesync (l == 2) on ppc64 and any sync pn ppc32. - * - * Additionally, this can only happen in kernel mode however so - * check MSR_PR as well. - */ - if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { - gen_check_tlb_flush(ctx, true); - } - - tcg_gen_mb(bar | TCG_BAR_SC); -} - /* wait */ static void gen_wait(DisasContext *ctx) { @@ -4070,14 +3444,85 @@ static void gen_rvwinkle(DisasContext *ctx) gen_exception_nip(ctx, EXCP_HLT, ctx->base.pc_next); #endif /* defined(CONFIG_USER_ONLY) */ } + +static inline TCGv gen_write_bhrb(TCGv_ptr base, TCGv offset, TCGv mask, TCGv value) +{ + TCGv_ptr tmp = tcg_temp_new_ptr(); + + /* add base and offset to get address of bhrb entry */ + tcg_gen_add_ptr(tmp, base, (TCGv_ptr)offset); + + /* store value into bhrb at bhrb_offset */ + tcg_gen_st_i64(value, tmp, 0); + + /* add 8 to current bhrb_offset */ + tcg_gen_addi_tl(offset, offset, 8); + + /* apply offset mask */ + tcg_gen_and_tl(offset, offset, mask); + + return offset; +} #endif /* #if defined(TARGET_PPC64) */ -static inline void gen_update_cfar(DisasContext *ctx, target_ulong nip) +static inline void gen_update_branch_history(DisasContext *ctx, + target_ulong nip, + TCGv target, + target_long inst_type) { #if defined(TARGET_PPC64) + TCGv_ptr base; + TCGv tmp; + TCGv offset; + TCGv mask; + TCGLabel *no_update; + if (ctx->has_cfar) { tcg_gen_movi_tl(cpu_cfar, nip); } + + if (!ctx->has_bhrb || + !ctx->bhrb_enable || + inst_type == BHRB_TYPE_NORECORD) { + return; + } + + tmp = tcg_temp_new(); + no_update = gen_new_label(); + + /* check for bhrb filtering */ + tcg_gen_ld_tl(tmp, tcg_env, offsetof(CPUPPCState, bhrb_filter)); + tcg_gen_andi_tl(tmp, tmp, inst_type); + tcg_gen_brcondi_tl(TCG_COND_EQ, tmp, 0, no_update); + + base = tcg_temp_new_ptr(); + offset = tcg_temp_new(); + mask = tcg_temp_new(); + + /* load bhrb base address */ + tcg_gen_ld_ptr(base, tcg_env, offsetof(CPUPPCState, bhrb_base)); + + /* load current bhrb_offset */ + tcg_gen_ld_tl(offset, tcg_env, offsetof(CPUPPCState, bhrb_offset)); + + /* load a BHRB offset mask */ + tcg_gen_ld_tl(mask, tcg_env, offsetof(CPUPPCState, bhrb_offset_mask)); + + offset = gen_write_bhrb(base, offset, mask, tcg_constant_i64(nip)); + + /* Also record the target address for XL-Form branches */ + if (inst_type & BHRB_TYPE_XL_FORM) { + + /* Set the 'T' bit for target entries */ + tcg_gen_ori_tl(tmp, target, 0x2); + + offset = gen_write_bhrb(base, offset, mask, tmp); + } + + /* save updated bhrb_offset for next time */ + tcg_gen_st_tl(offset, tcg_env, offsetof(CPUPPCState, bhrb_offset)); + + gen_set_label(no_update); #endif } @@ -4207,8 +3652,10 @@ static void gen_b(DisasContext *ctx) } if (LK(ctx->opcode)) { gen_setlr(ctx, ctx->base.pc_next); + gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_CALL); + } else { + gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_OTHER); } - gen_update_cfar(ctx, ctx->cia); gen_goto_tb(ctx, 0, target); ctx->base.is_jmp = DISAS_NORETURN; } @@ -4223,6 +3670,7 @@ static void 
gen_bcond(DisasContext *ctx, int type) uint32_t bo = BO(ctx->opcode); TCGLabel *l1; TCGv target; + target_long bhrb_type = BHRB_TYPE_OTHER; if (type == BCOND_LR || type == BCOND_CTR || type == BCOND_TAR) { target = tcg_temp_new(); @@ -4233,11 +3681,16 @@ static void gen_bcond(DisasContext *ctx, int type) } else { tcg_gen_mov_tl(target, cpu_lr); } + if (!LK(ctx->opcode)) { + bhrb_type |= BHRB_TYPE_INDIRECT; + } + bhrb_type |= BHRB_TYPE_XL_FORM; } else { target = NULL; } if (LK(ctx->opcode)) { gen_setlr(ctx, ctx->base.pc_next); + bhrb_type |= BHRB_TYPE_CALL; } l1 = gen_new_label(); if ((bo & 0x4) == 0) { @@ -4288,6 +3741,7 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_brcondi_tl(TCG_COND_EQ, temp, 0, l1); } } + bhrb_type |= BHRB_TYPE_COND; } if ((bo & 0x10) == 0) { /* Test CR */ @@ -4302,8 +3756,11 @@ static void gen_bcond(DisasContext *ctx, int type) tcg_gen_andi_i32(temp, cpu_crf[bi >> 2], mask); tcg_gen_brcondi_i32(TCG_COND_NE, temp, 0, l1); } + bhrb_type |= BHRB_TYPE_COND; } - gen_update_cfar(ctx, ctx->cia); + + gen_update_branch_history(ctx, ctx->cia, target, bhrb_type); + if (type == BCOND_IM) { target_ulong li = (target_long)((int16_t)(BD(ctx->opcode))); if (likely(AA(ctx->opcode) == 0)) { @@ -4419,7 +3876,7 @@ static void gen_rfi(DisasContext *ctx) /* Restore CPU state */ CHK_SV(ctx); translator_io_start(&ctx->base); - gen_update_cfar(ctx, ctx->cia); + gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD); gen_helper_rfi(tcg_env); ctx->base.is_jmp = DISAS_EXIT; #endif @@ -4434,7 +3891,7 @@ static void gen_rfid(DisasContext *ctx) /* Restore CPU state */ CHK_SV(ctx); translator_io_start(&ctx->base); - gen_update_cfar(ctx, ctx->cia); + gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD); gen_helper_rfid(tcg_env); ctx->base.is_jmp = DISAS_EXIT; #endif @@ -4449,7 +3906,7 @@ static void gen_rfscv(DisasContext *ctx) /* Restore CPU state */ CHK_SV(ctx); translator_io_start(&ctx->base); - gen_update_cfar(ctx, ctx->cia); + gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD); gen_helper_rfscv(tcg_env); ctx->base.is_jmp = DISAS_EXIT; #endif @@ -4507,76 +3964,20 @@ static void gen_scv(DisasContext *ctx) /*** Trap ***/ /* Check for unconditional traps (always or never) */ -static bool check_unconditional_trap(DisasContext *ctx) +static bool check_unconditional_trap(DisasContext *ctx, int to) { /* Trap never */ - if (TO(ctx->opcode) == 0) { + if (to == 0) { return true; } /* Trap always */ - if (TO(ctx->opcode) == 31) { + if (to == 31) { gen_exception_err(ctx, POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP); return true; } return false; } -/* tw */ -static void gen_tw(DisasContext *ctx) -{ - TCGv_i32 t0; - - if (check_unconditional_trap(ctx)) { - return; - } - t0 = tcg_constant_i32(TO(ctx->opcode)); - gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], - t0); -} - -/* twi */ -static void gen_twi(DisasContext *ctx) -{ - TCGv t0; - TCGv_i32 t1; - - if (check_unconditional_trap(ctx)) { - return; - } - t0 = tcg_constant_tl(SIMM(ctx->opcode)); - t1 = tcg_constant_i32(TO(ctx->opcode)); - gen_helper_tw(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1); -} - -#if defined(TARGET_PPC64) -/* td */ -static void gen_td(DisasContext *ctx) -{ - TCGv_i32 t0; - - if (check_unconditional_trap(ctx)) { - return; - } - t0 = tcg_constant_i32(TO(ctx->opcode)); - gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], - t0); -} - -/* tdi */ -static void gen_tdi(DisasContext *ctx) -{ - TCGv t0; - TCGv_i32 t1; - - if 
(check_unconditional_trap(ctx)) { - return; - } - t0 = tcg_constant_tl(SIMM(ctx->opcode)); - t1 = tcg_constant_i32(TO(ctx->opcode)); - gen_helper_td(tcg_env, cpu_gpr[rA(ctx->opcode)], t0, t1); -} -#endif - /*** Processor control ***/ /* mcrxr */ @@ -6009,23 +5410,6 @@ static void gen_dlmzb(DisasContext *ctx) cpu_gpr[rS(ctx->opcode)], cpu_gpr[rB(ctx->opcode)], t0); } -/* mbar replaces eieio on 440 */ -static void gen_mbar(DisasContext *ctx) -{ - /* interpreted as no-op */ -} - -/* msync replaces sync on 440 */ -static void gen_msync_4xx(DisasContext *ctx) -{ - /* Only e500 seems to treat reserved bits as invalid */ - if ((ctx->insns_flags2 & PPC2_BOOKE206) && - (ctx->opcode & 0x03FFF801)) { - gen_inval_exception(ctx, POWERPC_EXCP_INVAL_INVAL); - } - /* otherwise interpreted as no-op */ -} - /* icbt */ static void gen_icbt_440(DisasContext *ctx) { @@ -6036,36 +5420,6 @@ static void gen_icbt_440(DisasContext *ctx) */ } -#if defined(TARGET_PPC64) -static void gen_maddld(DisasContext *ctx) -{ - TCGv_i64 t1 = tcg_temp_new_i64(); - - tcg_gen_mul_i64(t1, cpu_gpr[rA(ctx->opcode)], cpu_gpr[rB(ctx->opcode)]); - tcg_gen_add_i64(cpu_gpr[rD(ctx->opcode)], t1, cpu_gpr[rC(ctx->opcode)]); -} - -/* maddhd maddhdu */ -static void gen_maddhd_maddhdu(DisasContext *ctx) -{ - TCGv_i64 lo = tcg_temp_new_i64(); - TCGv_i64 hi = tcg_temp_new_i64(); - TCGv_i64 t1 = tcg_temp_new_i64(); - - if (Rc(ctx->opcode)) { - tcg_gen_mulu2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - tcg_gen_movi_i64(t1, 0); - } else { - tcg_gen_muls2_i64(lo, hi, cpu_gpr[rA(ctx->opcode)], - cpu_gpr[rB(ctx->opcode)]); - tcg_gen_sari_i64(t1, cpu_gpr[rC(ctx->opcode)], 63); - } - tcg_gen_add2_i64(t1, cpu_gpr[rD(ctx->opcode)], lo, hi, - cpu_gpr[rC(ctx->opcode)], t1); -} -#endif /* defined(TARGET_PPC64) */ - static void gen_tbegin(DisasContext *ctx) { if (unlikely(!ctx->tm_enabled)) { @@ -6363,6 +5717,10 @@ static bool resolve_PLS_D(DisasContext *ctx, arg_D *d, arg_PLS_D *a) #include "translate/storage-ctrl-impl.c.inc" +#include "translate/misc-impl.c.inc" + +#include "translate/bhrb-impl.c.inc" + /* Handles lfdp */ static void gen_dform39(DisasContext *ctx) { @@ -6423,46 +5781,9 @@ GEN_HANDLER_E(brw, 0x1F, 0x1B, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA310), GEN_HANDLER_E(brh, 0x1F, 0x1B, 0x06, 0x0000F801, PPC_NONE, PPC2_ISA310), #endif GEN_HANDLER(invalid, 0x00, 0x00, 0x00, 0xFFFFFFFF, PPC_NONE), -#if defined(TARGET_PPC64) -GEN_HANDLER_E(cmpeqb, 0x1F, 0x00, 0x07, 0x00600000, PPC_NONE, PPC2_ISA300), -#endif -GEN_HANDLER_E(cmpb, 0x1F, 0x1C, 0x0F, 0x00000001, PPC_NONE, PPC2_ISA205), -GEN_HANDLER_E(cmprb, 0x1F, 0x00, 0x06, 0x00400001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER(isel, 0x1F, 0x0F, 0xFF, 0x00000001, PPC_ISEL), -GEN_HANDLER(mulhw, 0x1F, 0x0B, 0x02, 0x00000400, PPC_INTEGER), -GEN_HANDLER(mulhwu, 0x1F, 0x0B, 0x00, 0x00000400, PPC_INTEGER), -GEN_HANDLER(mullw, 0x1F, 0x0B, 0x07, 0x00000000, PPC_INTEGER), -GEN_HANDLER(mullwo, 0x1F, 0x0B, 0x17, 0x00000000, PPC_INTEGER), -GEN_HANDLER(mulli, 0x07, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -#if defined(TARGET_PPC64) -GEN_HANDLER(mulld, 0x1F, 0x09, 0x07, 0x00000000, PPC_64B), -#endif -GEN_HANDLER(neg, 0x1F, 0x08, 0x03, 0x0000F800, PPC_INTEGER), -GEN_HANDLER(nego, 0x1F, 0x08, 0x13, 0x0000F800, PPC_INTEGER), -GEN_HANDLER2(andi_, "andi.", 0x1C, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER2(andis_, "andis.", 0x1D, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER(cntlzw, 0x1F, 0x1A, 0x00, 0x00000000, PPC_INTEGER), -GEN_HANDLER_E(cnttzw, 0x1F, 0x1A, 0x10, 0x00000000, PPC_NONE, PPC2_ISA300), 
GEN_HANDLER_E(copy, 0x1F, 0x06, 0x18, 0x03C00001, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(cp_abort, 0x1F, 0x06, 0x1A, 0x03FFF801, PPC_NONE, PPC2_ISA300), GEN_HANDLER_E(paste, 0x1F, 0x06, 0x1C, 0x03C00000, PPC_NONE, PPC2_ISA300), -GEN_HANDLER(or, 0x1F, 0x1C, 0x0D, 0x00000000, PPC_INTEGER), -GEN_HANDLER(xor, 0x1F, 0x1C, 0x09, 0x00000000, PPC_INTEGER), -GEN_HANDLER(ori, 0x18, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER(oris, 0x19, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER(xori, 0x1A, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER(xoris, 0x1B, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), -GEN_HANDLER(popcntb, 0x1F, 0x1A, 0x03, 0x0000F801, PPC_POPCNTB), -GEN_HANDLER(popcntw, 0x1F, 0x1A, 0x0b, 0x0000F801, PPC_POPCNTWD), -GEN_HANDLER_E(prtyw, 0x1F, 0x1A, 0x04, 0x0000F801, PPC_NONE, PPC2_ISA205), -#if defined(TARGET_PPC64) -GEN_HANDLER(popcntd, 0x1F, 0x1A, 0x0F, 0x0000F801, PPC_POPCNTWD), -GEN_HANDLER(cntlzd, 0x1F, 0x1A, 0x01, 0x00000000, PPC_64B), -GEN_HANDLER_E(cnttzd, 0x1F, 0x1A, 0x11, 0x00000000, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(darn, 0x1F, 0x13, 0x17, 0x001CF801, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(prtyd, 0x1F, 0x1A, 0x05, 0x0000F801, PPC_NONE, PPC2_ISA205), -GEN_HANDLER_E(bpermd, 0x1F, 0x1C, 0x07, 0x00000001, PPC_NONE, PPC2_PERM_ISA206), -#endif GEN_HANDLER(rlwimi, 0x14, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(rlwinm, 0x15, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), GEN_HANDLER(rlwnm, 0x17, 0xFF, 0xFF, 0x00000000, PPC_INTEGER), @@ -6491,7 +5812,6 @@ GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING), GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING), GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING), GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING), -GEN_HANDLER(eieio, 0x1F, 0x16, 0x1A, 0x01FFF801, PPC_MEM_EIEIO), GEN_HANDLER(isync, 0x13, 0x16, 0x04, 0x03FFF801, PPC_MEM), GEN_HANDLER_E(lbarx, 0x1F, 0x14, 0x01, 0, PPC_NONE, PPC2_ATOMIC_ISA206), GEN_HANDLER_E(lharx, 0x1F, 0x14, 0x03, 0, PPC_NONE, PPC2_ATOMIC_ISA206), @@ -6509,7 +5829,6 @@ GEN_HANDLER_E(lqarx, 0x1F, 0x14, 0x08, 0, PPC_NONE, PPC2_LSQ_ISA207), GEN_HANDLER2(stdcx_, "stdcx.", 0x1F, 0x16, 0x06, 0x00000000, PPC_64B), GEN_HANDLER_E(stqcx_, 0x1F, 0x16, 0x05, 0, PPC_NONE, PPC2_LSQ_ISA207), #endif -GEN_HANDLER(sync, 0x1F, 0x16, 0x12, 0x039FF801, PPC_MEM_SYNC), /* ISA v3.0 changed the extended opcode from 62 to 30 */ GEN_HANDLER(wait, 0x1F, 0x1E, 0x01, 0x039FF801, PPC_WAIT), GEN_HANDLER_E(wait, 0x1F, 0x1E, 0x00, 0x039CF801, PPC_NONE, PPC2_ISA300), @@ -6538,12 +5857,6 @@ GEN_HANDLER(hrfid, 0x13, 0x12, 0x08, 0x03FF8001, PPC_64H), /* Top bit of opc2 corresponds with low bit of LEV, so use two handlers */ GEN_HANDLER(sc, 0x11, 0x11, 0xFF, 0x03FFF01D, PPC_FLOW), GEN_HANDLER(sc, 0x11, 0x01, 0xFF, 0x03FFF01D, PPC_FLOW), -GEN_HANDLER(tw, 0x1F, 0x04, 0x00, 0x00000001, PPC_FLOW), -GEN_HANDLER(twi, 0x03, 0xFF, 0xFF, 0x00000000, PPC_FLOW), -#if defined(TARGET_PPC64) -GEN_HANDLER(td, 0x1F, 0x04, 0x02, 0x00000001, PPC_64B), -GEN_HANDLER(tdi, 0x02, 0xFF, 0xFF, 0x00000000, PPC_64B), -#endif GEN_HANDLER(mcrxr, 0x1F, 0x00, 0x10, 0x007FF801, PPC_MISC), GEN_HANDLER(mfcr, 0x1F, 0x13, 0x00, 0x00000801, PPC_MISC), GEN_HANDLER(mfmsr, 0x1F, 0x13, 0x02, 0x001FF801, PPC_MISC), @@ -6632,78 +5945,12 @@ GEN_HANDLER2_E(tlbilx_booke206, "tlbilx", 0x1F, 0x12, 0x00, 0x03800001, GEN_HANDLER(wrtee, 0x1F, 0x03, 0x04, 0x000FFC01, PPC_WRTEE), GEN_HANDLER(wrteei, 0x1F, 0x03, 0x05, 0x000E7C01, PPC_WRTEE), GEN_HANDLER(dlmzb, 0x1F, 0x0E, 0x02, 0x00000000, PPC_440_SPEC), -GEN_HANDLER_E(mbar, 0x1F, 0x16, 0x1a, 
0x001FF801, - PPC_BOOKE, PPC2_BOOKE206), -GEN_HANDLER(msync_4xx, 0x1F, 0x16, 0x12, 0x039FF801, PPC_BOOKE), GEN_HANDLER2_E(icbt_440, "icbt", 0x1F, 0x16, 0x00, 0x03E00001, PPC_BOOKE, PPC2_BOOKE206), GEN_HANDLER2(icbt_440, "icbt", 0x1F, 0x06, 0x08, 0x03E00001, PPC_440_SPEC), -GEN_HANDLER(lvsl, 0x1f, 0x06, 0x00, 0x00000001, PPC_ALTIVEC), -GEN_HANDLER(lvsr, 0x1f, 0x06, 0x01, 0x00000001, PPC_ALTIVEC), GEN_HANDLER(mfvscr, 0x04, 0x2, 0x18, 0x001ff800, PPC_ALTIVEC), GEN_HANDLER(mtvscr, 0x04, 0x2, 0x19, 0x03ff0000, PPC_ALTIVEC), -#if defined(TARGET_PPC64) -GEN_HANDLER_E(maddhd_maddhdu, 0x04, 0x18, 0xFF, 0x00000000, PPC_NONE, - PPC2_ISA300), -GEN_HANDLER_E(maddld, 0x04, 0x19, 0xFF, 0x00000000, PPC_NONE, PPC2_ISA300), -#endif - -#undef GEN_INT_ARITH_DIVW -#define GEN_INT_ARITH_DIVW(name, opc3, sign, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x0B, opc3, 0x00000000, PPC_INTEGER) -GEN_INT_ARITH_DIVW(divwu, 0x0E, 0, 0), -GEN_INT_ARITH_DIVW(divwuo, 0x1E, 0, 1), -GEN_INT_ARITH_DIVW(divw, 0x0F, 1, 0), -GEN_INT_ARITH_DIVW(divwo, 0x1F, 1, 1), -GEN_HANDLER_E(divwe, 0x1F, 0x0B, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divweo, 0x1F, 0x0B, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divweu, 0x1F, 0x0B, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divweuo, 0x1F, 0x0B, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(modsw, 0x1F, 0x0B, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(moduw, 0x1F, 0x0B, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), - -#if defined(TARGET_PPC64) -#undef GEN_INT_ARITH_DIVD -#define GEN_INT_ARITH_DIVD(name, opc3, sign, compute_ov) \ -GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) -GEN_INT_ARITH_DIVD(divdu, 0x0E, 0, 0), -GEN_INT_ARITH_DIVD(divduo, 0x1E, 0, 1), -GEN_INT_ARITH_DIVD(divd, 0x0F, 1, 0), -GEN_INT_ARITH_DIVD(divdo, 0x1F, 1, 1), - -GEN_HANDLER_E(divdeu, 0x1F, 0x09, 0x0C, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divdeuo, 0x1F, 0x09, 0x1C, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divde, 0x1F, 0x09, 0x0D, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(divdeo, 0x1F, 0x09, 0x1D, 0, PPC_NONE, PPC2_DIVE_ISA206), -GEN_HANDLER_E(modsd, 0x1F, 0x09, 0x18, 0x00000001, PPC_NONE, PPC2_ISA300), -GEN_HANDLER_E(modud, 0x1F, 0x09, 0x08, 0x00000001, PPC_NONE, PPC2_ISA300), - -#undef GEN_INT_ARITH_MUL_HELPER -#define GEN_INT_ARITH_MUL_HELPER(name, opc3) \ -GEN_HANDLER(name, 0x1F, 0x09, opc3, 0x00000000, PPC_64B) -GEN_INT_ARITH_MUL_HELPER(mulhdu, 0x00), -GEN_INT_ARITH_MUL_HELPER(mulhd, 0x02), -GEN_INT_ARITH_MUL_HELPER(mulldo, 0x17), -#endif - -#undef GEN_LOGICAL1 -#undef GEN_LOGICAL2 -#define GEN_LOGICAL2(name, tcg_op, opc, type) \ -GEN_HANDLER(name, 0x1F, 0x1C, opc, 0x00000000, type) -#define GEN_LOGICAL1(name, tcg_op, opc, type) \ -GEN_HANDLER(name, 0x1F, 0x1A, opc, 0x00000000, type) -GEN_LOGICAL2(and, tcg_gen_and_tl, 0x00, PPC_INTEGER), -GEN_LOGICAL2(andc, tcg_gen_andc_tl, 0x01, PPC_INTEGER), -GEN_LOGICAL2(eqv, tcg_gen_eqv_tl, 0x08, PPC_INTEGER), -GEN_LOGICAL1(extsb, tcg_gen_ext8s_tl, 0x1D, PPC_INTEGER), -GEN_LOGICAL1(extsh, tcg_gen_ext16s_tl, 0x1C, PPC_INTEGER), -GEN_LOGICAL2(nand, tcg_gen_nand_tl, 0x0E, PPC_INTEGER), -GEN_LOGICAL2(nor, tcg_gen_nor_tl, 0x03, PPC_INTEGER), -GEN_LOGICAL2(orc, tcg_gen_orc_tl, 0x0C, PPC_INTEGER), -#if defined(TARGET_PPC64) -GEN_LOGICAL1(extsw, tcg_gen_ext32s_tl, 0x1E, PPC_64B), -#endif #if defined(TARGET_PPC64) #undef GEN_PPC64_R2 @@ -7241,6 +6488,7 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs) #if defined(TARGET_PPC64) ctx->sf_mode = (hflags >> HFLAGS_64) & 1; ctx->has_cfar = 
!!(env->flags & POWERPC_FLAG_CFAR);
+    ctx->has_bhrb = !!(env->flags & POWERPC_FLAG_BHRB);
 #endif
     ctx->lazy_tlb_flush = env->mmu_model == POWERPC_MMU_32B ||
         env->mmu_model & POWERPC_MMU_64;
@@ -7257,6 +6505,7 @@ static void ppc_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
     ctx->mmcr0_pmcjce = (hflags >> HFLAGS_PMCJCE) & 1;
     ctx->pmc_other = (hflags >> HFLAGS_PMC_OTHER) & 1;
     ctx->pmu_insn_cnt = (hflags >> HFLAGS_INSN_CNT) & 1;
+    ctx->bhrb_enable = (hflags >> HFLAGS_BHRB_ENABLE) & 1;
 
     ctx->singlestep_enabled = 0;
     if ((hflags >> HFLAGS_SE) & 1) {
diff --git a/target/ppc/translate/bhrb-impl.c.inc b/target/ppc/translate/bhrb-impl.c.inc
new file mode 100644
index 0000000000..3a19bc4555
--- /dev/null
+++ b/target/ppc/translate/bhrb-impl.c.inc
@@ -0,0 +1,43 @@
+/*
+ * Power ISA Decode For BHRB Instructions
+ *
+ * Copyright IBM Corp. 2023
+ *
+ * Authors:
+ *  Glenn Miles <milesg@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY)
+
+static bool trans_MFBHRBE(DisasContext *ctx, arg_XFX_bhrbe *arg)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
+    TCGv_i32 bhrbe = tcg_constant_i32(arg->bhrbe);
+    gen_helper_mfbhrbe(cpu_gpr[arg->rt], tcg_env, bhrbe);
+    return true;
+}
+
+static bool trans_CLRBHRB(DisasContext *ctx, arg_CLRBHRB *arg)
+{
+    REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
+    gen_helper_clrbhrb(tcg_env);
+    return true;
+}
+
+#else
+
+static bool trans_MFBHRBE(DisasContext *ctx, arg_XFX_bhrbe *arg)
+{
+    gen_invalid(ctx);
+    return true;
+}
+
+static bool trans_CLRBHRB(DisasContext *ctx, arg_CLRBHRB *arg)
+{
+    gen_invalid(ctx);
+    return true;
+}
+#endif
diff --git a/target/ppc/translate/branch-impl.c.inc b/target/ppc/translate/branch-impl.c.inc
index fb0fcf30cc..9ade0c659a 100644
--- a/target/ppc/translate/branch-impl.c.inc
+++ b/target/ppc/translate/branch-impl.c.inc
@@ -17,7 +17,7 @@ static bool trans_RFEBB(DisasContext *ctx, arg_XL_s *arg)
     REQUIRE_INSNS_FLAGS2(ctx, ISA207S);
 
     translator_io_start(&ctx->base);
-    gen_update_cfar(ctx, ctx->cia);
+    gen_update_branch_history(ctx, ctx->cia, NULL, BHRB_TYPE_NORECORD);
     gen_helper_rfebb(tcg_env, cpu_gpr[arg->s]);
 
     ctx->base.is_jmp = DISAS_CHAIN;
diff --git a/target/ppc/translate/fixedpoint-impl.c.inc b/target/ppc/translate/fixedpoint-impl.c.inc
index 0c66465d96..fa0191e866 100644
--- a/target/ppc/translate/fixedpoint-impl.c.inc
+++ b/target/ppc/translate/fixedpoint-impl.c.inc
@@ -289,6 +289,50 @@ TRANS(CMPL, do_cmp_X, false);
 TRANS(CMPI, do_cmp_D, true);
 TRANS(CMPLI, do_cmp_D, false);
 
+static bool trans_CMPRB(DisasContext *ctx, arg_CMPRB *a)
+{
+    TCGv_i32 src1 = tcg_temp_new_i32();
+    TCGv_i32 src2 = tcg_temp_new_i32();
+    TCGv_i32 src2lo = tcg_temp_new_i32();
+    TCGv_i32 src2hi = tcg_temp_new_i32();
+    TCGv_i32 crf = cpu_crf[a->bf];
+
+    REQUIRE_INSNS_FLAGS2(ctx, ISA300);
+    tcg_gen_trunc_tl_i32(src1, cpu_gpr[a->ra]);
+    tcg_gen_trunc_tl_i32(src2, cpu_gpr[a->rb]);
+
+    tcg_gen_andi_i32(src1, src1, 0xFF);
+    tcg_gen_ext8u_i32(src2lo, src2);
+    tcg_gen_extract_i32(src2hi, src2, 8, 8);
+
+    tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+    tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+    tcg_gen_and_i32(crf, src2lo, src2hi);
+
+    if (a->l) {
+        tcg_gen_extract_i32(src2lo, src2, 16, 8);
+        tcg_gen_extract_i32(src2hi, src2, 24, 8);
+        tcg_gen_setcond_i32(TCG_COND_LEU, src2lo, src2lo, src1);
+        tcg_gen_setcond_i32(TCG_COND_LEU, src2hi, src1, src2hi);
+        tcg_gen_and_i32(src2lo, src2lo, src2hi);
+ 
tcg_gen_or_i32(crf, crf, src2lo); + } + tcg_gen_shli_i32(crf, crf, CRF_GT_BIT); + return true; +} + +static bool trans_CMPEQB(DisasContext *ctx, arg_CMPEQB *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + gen_helper_CMPEQB(cpu_crf[a->bf], cpu_gpr[a->ra], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + /* * Fixed-Point Arithmetic Instructions */ @@ -395,6 +439,389 @@ TRANS(SUBFE, do_subf_XO, true, true) TRANS(SUBFME, do_subf_const_XO, tcg_constant_tl(-1LL), true, true) TRANS(SUBFZE, do_subf_const_XO, tcg_constant_tl(0), true, true) +static bool trans_MULLI(DisasContext *ctx, arg_MULLI *a) +{ + tcg_gen_muli_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], a->si); + return true; +} + +static bool trans_MULLW(DisasContext *ctx, arg_MULLW *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + tcg_gen_ext32s_tl(t0, cpu_gpr[a->ra]); + tcg_gen_ext32s_tl(t1, cpu_gpr[a->rb]); + tcg_gen_mul_tl(cpu_gpr[a->rt], t0, t1); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + return true; +} + +static bool trans_MULLWO(DisasContext *ctx, arg_MULLWO *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + +#if defined(TARGET_PPC64) + tcg_gen_ext32s_i64(t0, cpu_gpr[a->ra]); + tcg_gen_ext32s_i64(t1, cpu_gpr[a->rb]); + tcg_gen_mul_i64(cpu_gpr[a->rt], t0, t1); + tcg_gen_sextract_i64(t0, cpu_gpr[a->rt], 31, 1); + tcg_gen_sari_i64(t1, cpu_gpr[a->rt], 32); +#else + tcg_gen_muls2_i32(cpu_gpr[a->rt], t1, cpu_gpr[a->ra], cpu_gpr[a->rb]); + tcg_gen_sari_i32(t0, cpu_gpr[a->rt], 31); +#endif + tcg_gen_setcond_tl(TCG_COND_NE, cpu_ov, t0, t1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(cpu_ov32, cpu_ov); + } + tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); + + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + return true; +} + +static bool do_mulhw(DisasContext *ctx, arg_XO_tab_rc *a, + void (*helper)(TCGv_i32 rl, TCGv_i32 rh, TCGv_i32 arg1, + TCGv_i32 arg2)) +{ + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGv_i32 t1 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t0, cpu_gpr[a->ra]); + tcg_gen_trunc_tl_i32(t1, cpu_gpr[a->rb]); + helper(t0, t1, t0, t1); + tcg_gen_extu_i32_tl(cpu_gpr[a->rt], t1); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + return true; +} + +TRANS(MULHW, do_mulhw, tcg_gen_muls2_i32) +TRANS(MULHWU, do_mulhw, tcg_gen_mulu2_i32) + +static bool do_divw(DisasContext *ctx, arg_XO *a, int sign) +{ + gen_op_arith_divw(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + sign, a->oe, a->rc); + return true; +} + +static bool do_dive(DisasContext *ctx, arg_XO *a, + void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv, TCGv_i32)) +{ + REQUIRE_INSNS_FLAGS2(ctx, DIVE_ISA206); + helper(cpu_gpr[a->rt], tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb], + tcg_constant_i32(a->oe)); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + return true; +} + +TRANS(DIVW, do_divw, 1); +TRANS(DIVWU, do_divw, 0); +TRANS(DIVWE, do_dive, gen_helper_DIVWE); +TRANS(DIVWEU, do_dive, gen_helper_DIVWEU); + +static bool do_modw(DisasContext *ctx, arg_X *a, bool sign) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA300); + gen_op_arith_modw(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + sign); + return true; +} + +TRANS(MODUW, do_modw, false); +TRANS(MODSW, do_modw, true); + +static bool trans_NEG(DisasContext *ctx, arg_NEG *a) +{ + if (a->oe) { + TCGv zero = tcg_constant_tl(0); + gen_op_arith_subf(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], zero, + false, false, true, a->rc); + } else { + tcg_gen_neg_tl(cpu_gpr[a->rt], cpu_gpr[a->ra]); + if 
(unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + } + return true; +} + +static bool trans_DARN(DisasContext *ctx, arg_DARN *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + if (a->l > 2) { + tcg_gen_movi_i64(cpu_gpr[a->rt], -1); + } else { + translator_io_start(&ctx->base); + if (a->l == 0) { + gen_helper_DARN32(cpu_gpr[a->rt]); + } else { + /* Return 64-bit random for both CRN and RRN */ + gen_helper_DARN64(cpu_gpr[a->rt]); + } + } +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MULLD(DisasContext *ctx, arg_MULLD *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + tcg_gen_mul_tl(cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb]); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MULLDO(DisasContext *ctx, arg_MULLD *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + + tcg_gen_muls2_i64(t0, t1, cpu_gpr[a->ra], cpu_gpr[a->rb]); + tcg_gen_mov_i64(cpu_gpr[a->rt], t0); + + tcg_gen_sari_i64(t0, t0, 63); + tcg_gen_setcond_i64(TCG_COND_NE, cpu_ov, t0, t1); + if (is_isa300(ctx)) { + tcg_gen_mov_tl(cpu_ov32, cpu_ov); + } + tcg_gen_or_tl(cpu_so, cpu_so, cpu_ov); + + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool do_mulhd(DisasContext *ctx, arg_XO_tab_rc *a, + void (*helper)(TCGv, TCGv, TCGv, TCGv)) +{ + TCGv lo = tcg_temp_new(); + helper(lo, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb]); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); + } + return true; +} + +TRANS64(MULHD, do_mulhd, tcg_gen_muls2_tl); +TRANS64(MULHDU, do_mulhd, tcg_gen_mulu2_tl); + +static bool trans_MADDLD(DisasContext *ctx, arg_MADDLD *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + TCGv_i64 t1 = tcg_temp_new_i64(); + + tcg_gen_mul_i64(t1, cpu_gpr[a->vra], cpu_gpr[a->vrb]); + tcg_gen_add_i64(cpu_gpr[a->vrt], t1, cpu_gpr[a->rc]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MADDHD(DisasContext *ctx, arg_MADDHD *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + TCGv_i64 lo = tcg_temp_new_i64(); + TCGv_i64 hi = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + + tcg_gen_muls2_i64(lo, hi, cpu_gpr[a->vra], cpu_gpr[a->vrb]); + tcg_gen_sari_i64(t1, cpu_gpr[a->rc], 63); + tcg_gen_add2_i64(t1, cpu_gpr[a->vrt], lo, hi, cpu_gpr[a->rc], t1); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_MADDHDU(DisasContext *ctx, arg_MADDHDU *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + TCGv_i64 lo = tcg_temp_new_i64(); + TCGv_i64 hi = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + + tcg_gen_mulu2_i64(lo, hi, cpu_gpr[a->vra], cpu_gpr[a->vrb]); + tcg_gen_add2_i64(t1, cpu_gpr[a->vrt], lo, hi, cpu_gpr[a->rc], + tcg_constant_i64(0)); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool do_divd(DisasContext *ctx, arg_XO *a, bool sign) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + gen_op_arith_divd(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + sign, a->oe, a->rc); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool do_modd(DisasContext *ctx, arg_X *a, bool sign) +{ + REQUIRE_64BIT(ctx); + 
REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + gen_op_arith_modd(ctx, cpu_gpr[a->rt], cpu_gpr[a->ra], cpu_gpr[a->rb], + sign); +#else + qemu_build_not_reached(); +#endif + return true; +} + +TRANS64(DIVD, do_divd, true); +TRANS64(DIVDU, do_divd, false); + +static bool trans_DIVDE(DisasContext *ctx, arg_DIVDE *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + return do_dive(ctx, a, gen_helper_DIVDE); +#else + qemu_build_not_reached(); +#endif +} + +static bool trans_DIVDEU(DisasContext *ctx, arg_DIVDEU *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + return do_dive(ctx, a, gen_helper_DIVDEU); +#else + qemu_build_not_reached(); +#endif + return true; +} + +TRANS64(MODSD, do_modd, true); +TRANS64(MODUD, do_modd, false); + +/* + * Fixed-Point Select Instructions + */ + +static bool trans_ISEL(DisasContext *ctx, arg_ISEL *a) +{ + REQUIRE_INSNS_FLAGS(ctx, ISEL); + uint32_t bi = a->bc; + uint32_t mask = 0x08 >> (bi & 0x03); + TCGv t0 = tcg_temp_new(); + TCGv zr; + + tcg_gen_extu_i32_tl(t0, cpu_crf[bi >> 2]); + tcg_gen_andi_tl(t0, t0, mask); + + zr = tcg_constant_tl(0); + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[a->rt], t0, zr, + a->ra ? cpu_gpr[a->ra] : zr, + cpu_gpr[a->rb]); + return true; +} + +/* + * Fixed-Point Trap Instructions + */ + +static bool trans_TW(DisasContext *ctx, arg_TW *a) +{ + TCGv_i32 t0; + + if (check_unconditional_trap(ctx, a->rt)) { + return true; + } + t0 = tcg_constant_i32(a->rt); + gen_helper_TW(tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb], t0); + return true; +} + +static bool trans_TWI(DisasContext *ctx, arg_TWI *a) +{ + TCGv t0; + TCGv_i32 t1; + + if (check_unconditional_trap(ctx, a->rt)) { + return true; + } + t0 = tcg_constant_tl(a->si); + t1 = tcg_constant_i32(a->rt); + gen_helper_TW(tcg_env, cpu_gpr[a->ra], t0, t1); + return true; +} + +static bool trans_TD(DisasContext *ctx, arg_TD *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + TCGv_i32 t0; + + if (check_unconditional_trap(ctx, a->rt)) { + return true; + } + t0 = tcg_constant_i32(a->rt); + gen_helper_TD(tcg_env, cpu_gpr[a->ra], cpu_gpr[a->rb], t0); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_TDI(DisasContext *ctx, arg_TDI *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + TCGv t0; + TCGv_i32 t1; + + if (check_unconditional_trap(ctx, a->rt)) { + return true; + } + t0 = tcg_constant_tl(a->si); + t1 = tcg_constant_i32(a->rt); + gen_helper_TD(tcg_env, cpu_gpr[a->ra], t0, t1); +#else + qemu_build_not_reached(); +#endif + return true; +} + static bool trans_INVALID(DisasContext *ctx, arg_INVALID *a) { gen_invalid(ctx); @@ -429,6 +856,285 @@ TRANS(SETBCR, do_set_bool_cond, false, true) TRANS(SETNBC, do_set_bool_cond, true, false) TRANS(SETNBCR, do_set_bool_cond, true, true) +/* + * Fixed-Point Logical Instructions + */ + +static bool do_addi_(DisasContext *ctx, arg_D_ui *a, bool shift) +{ + tcg_gen_andi_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? a->ui << 16 : a->ui); + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + return true; +} + +static bool do_ori(DisasContext *ctx, arg_D_ui *a, bool shift) +{ + if (a->rt == a->ra && a->ui == 0) { + /* NOP */ + return true; + } + tcg_gen_ori_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? a->ui << 16 : a->ui); + return true; +} + +static bool do_xori(DisasContext *ctx, arg_D_ui *a, bool shift) +{ + if (a->rt == a->ra && a->ui == 0) { + /* NOP */ + return true; + } + tcg_gen_xori_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], shift ? 
a->ui << 16 : a->ui); + return true; +} + +static bool do_logical1(DisasContext *ctx, arg_X_sa_rc *a, + void (*helper)(TCGv, TCGv)) +{ + helper(cpu_gpr[a->ra], cpu_gpr[a->rs]); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + return true; +} + +static bool do_logical2(DisasContext *ctx, arg_X_rc *a, + void (*helper)(TCGv, TCGv, TCGv)) +{ + helper(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + return true; +} + +static bool trans_OR(DisasContext *ctx, arg_OR *a) +{ + /* Optimisation for mr. ri case */ + if (a->rt != a->ra || a->rt != a->rb) { + if (a->rt != a->rb) { + tcg_gen_or_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + } else { + tcg_gen_mov_tl(cpu_gpr[a->ra], cpu_gpr[a->rt]); + } + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + } else if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->rt]); +#if defined(TARGET_PPC64) + } else if (a->rt != 0) { /* 0 is nop */ + int prio = 0; + + switch (a->rt) { + case 1: + /* Set process priority to low */ + prio = 2; + break; + case 6: + /* Set process priority to medium-low */ + prio = 3; + break; + case 2: + /* Set process priority to normal */ + prio = 4; + break; +#if !defined(CONFIG_USER_ONLY) + case 31: + if (!ctx->pr) { + /* Set process priority to very low */ + prio = 1; + } + break; + case 5: + if (!ctx->pr) { + /* Set process priority to medium-hight */ + prio = 5; + } + break; + case 3: + if (!ctx->pr) { + /* Set process priority to high */ + prio = 6; + } + break; + case 7: + if (ctx->hv && !ctx->pr) { + /* Set process priority to very high */ + prio = 7; + } + break; +#endif + default: + break; + } + if (prio) { + TCGv t0 = tcg_temp_new(); + gen_load_spr(t0, SPR_PPR); + tcg_gen_andi_tl(t0, t0, ~0x001C000000000000ULL); + tcg_gen_ori_tl(t0, t0, ((uint64_t)prio) << 50); + gen_store_spr(SPR_PPR, t0); + } +#if !defined(CONFIG_USER_ONLY) + /* + * Pause out of TCG otherwise spin loops with smt_low eat too + * much CPU and the kernel hangs. This applies to all + * encodings other than no-op, e.g., miso(rs=26), yield(27), + * mdoio(29), mdoom(30), and all currently undefined. 
+ */ + gen_pause(ctx); +#endif +#endif + } + + return true; +} + +static bool trans_XOR(DisasContext *ctx, arg_XOR *a) +{ + /* Optimisation for "set to zero" case */ + if (a->rt != a->rb) { + tcg_gen_xor_tl(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + } else { + tcg_gen_movi_tl(cpu_gpr[a->ra], 0); + } + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + return true; +} + +static bool trans_CMPB(DisasContext *ctx, arg_CMPB *a) +{ + REQUIRE_INSNS_FLAGS2(ctx, ISA205); + gen_helper_CMPB(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); + return true; +} + +static bool do_cntzw(DisasContext *ctx, arg_X_sa_rc *a, + void (*helper)(TCGv_i32, TCGv_i32, uint32_t)) +{ + TCGv_i32 t = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t, cpu_gpr[a->rs]); + helper(t, t, 32); + tcg_gen_extu_i32_tl(cpu_gpr[a->ra], t); + + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + return true; +} + +#if defined(TARGET_PPC64) +static bool do_cntzd(DisasContext *ctx, arg_X_sa_rc *a, + void (*helper)(TCGv_i64, TCGv_i64, uint64_t)) +{ + helper(cpu_gpr[a->ra], cpu_gpr[a->rs], 64); + if (unlikely(a->rc)) { + gen_set_Rc0(ctx, cpu_gpr[a->ra]); + } + return true; +} +#endif + +static bool trans_CNTLZD(DisasContext *ctx, arg_CNTLZD *a) +{ + REQUIRE_64BIT(ctx); +#if defined(TARGET_PPC64) + do_cntzd(ctx, a, tcg_gen_clzi_i64); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_CNTTZD(DisasContext *ctx, arg_CNTTZD *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA300); +#if defined(TARGET_PPC64) + do_cntzd(ctx, a, tcg_gen_ctzi_i64); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_POPCNTB(DisasContext *ctx, arg_POPCNTB *a) +{ + REQUIRE_INSNS_FLAGS(ctx, POPCNTB); + gen_helper_POPCNTB(cpu_gpr[a->ra], cpu_gpr[a->rs]); + return true; +} + +static bool trans_POPCNTW(DisasContext *ctx, arg_POPCNTW *a) +{ + REQUIRE_INSNS_FLAGS(ctx, POPCNTWD); +#if defined(TARGET_PPC64) + gen_helper_POPCNTW(cpu_gpr[a->ra], cpu_gpr[a->rs]); +#else + tcg_gen_ctpop_i32(cpu_gpr[a->ra], cpu_gpr[a->rs]); +#endif + return true; +} + +static bool trans_POPCNTD(DisasContext *ctx, arg_POPCNTD *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS(ctx, POPCNTWD); +#if defined(TARGET_PPC64) + tcg_gen_ctpop_i64(cpu_gpr[a->ra], cpu_gpr[a->rs]); +#else + qemu_build_not_reached(); +#endif + return true; +} + +static bool trans_PRTYW(DisasContext *ctx, arg_PRTYW *a) +{ + TCGv ra = cpu_gpr[a->ra]; + TCGv rs = cpu_gpr[a->rs]; + TCGv t0 = tcg_temp_new(); + + REQUIRE_INSNS_FLAGS2(ctx, ISA205); + tcg_gen_shri_tl(t0, rs, 16); + tcg_gen_xor_tl(ra, rs, t0); + tcg_gen_shri_tl(t0, ra, 8); + tcg_gen_xor_tl(ra, ra, t0); + tcg_gen_andi_tl(ra, ra, (target_ulong)0x100000001ULL); + return true; +} + +static bool trans_PRTYD(DisasContext *ctx, arg_PRTYD *a) +{ + TCGv ra = cpu_gpr[a->ra]; + TCGv rs = cpu_gpr[a->rs]; + TCGv t0 = tcg_temp_new(); + + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, ISA205); + tcg_gen_shri_tl(t0, rs, 32); + tcg_gen_xor_tl(ra, rs, t0); + tcg_gen_shri_tl(t0, ra, 16); + tcg_gen_xor_tl(ra, ra, t0); + tcg_gen_shri_tl(t0, ra, 8); + tcg_gen_xor_tl(ra, ra, t0); + tcg_gen_andi_tl(ra, ra, 1); + return true; +} + +static bool trans_BPERMD(DisasContext *ctx, arg_BPERMD *a) +{ + REQUIRE_64BIT(ctx); + REQUIRE_INSNS_FLAGS2(ctx, PERM_ISA206); +#if defined(TARGET_PPC64) + gen_helper_BPERMD(cpu_gpr[a->ra], cpu_gpr[a->rt], cpu_gpr[a->rb]); +#else + qemu_build_not_reached(); +#endif + return true; +} + static bool trans_CFUGED(DisasContext *ctx, arg_X *a) { REQUIRE_64BIT(ctx); @@ 
-517,6 +1223,27 @@ static bool trans_PEXTD(DisasContext *ctx, arg_X *a) return true; } +TRANS(ANDI_, do_addi_, false); +TRANS(ANDIS_, do_addi_, true); +TRANS(ORI, do_ori, false); +TRANS(ORIS, do_ori, true); +TRANS(XORI, do_xori, false); +TRANS(XORIS, do_xori, true); + +TRANS(AND, do_logical2, tcg_gen_and_tl); +TRANS(ANDC, do_logical2, tcg_gen_andc_tl); +TRANS(NAND, do_logical2, tcg_gen_nand_tl); +TRANS(ORC, do_logical2, tcg_gen_orc_tl); +TRANS(NOR, do_logical2, tcg_gen_nor_tl); +TRANS(EQV, do_logical2, tcg_gen_eqv_tl); +TRANS(EXTSB, do_logical1, tcg_gen_ext8s_tl); +TRANS(EXTSH, do_logical1, tcg_gen_ext16s_tl); + +TRANS(CNTLZW, do_cntzw, tcg_gen_clzi_i32); +TRANS_FLAGS2(ISA300, CNTTZW, do_cntzw, tcg_gen_ctzi_i32); + +TRANS64(EXTSW, do_logical1, tcg_gen_ext32s_tl); + static bool trans_ADDG6S(DisasContext *ctx, arg_X *a) { const target_ulong carry_bits = (target_ulong)-1 / 0xf; diff --git a/target/ppc/translate/fp-impl.c.inc b/target/ppc/translate/fp-impl.c.inc index 189cd8c979..a66b83398b 100644 --- a/target/ppc/translate/fp-impl.c.inc +++ b/target/ppc/translate/fp-impl.c.inc @@ -30,96 +30,73 @@ static void gen_set_cr1_from_fpscr(DisasContext *ctx) #endif /*** Floating-Point arithmetic ***/ -#define _GEN_FLOAT_ACB(name, op1, op2, set_fprf, type) \ -static void gen_f##name(DisasContext *ctx) \ -{ \ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - TCGv_i64 t2; \ - TCGv_i64 t3; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - t2 = tcg_temp_new_i64(); \ - t3 = tcg_temp_new_i64(); \ - gen_reset_fpstatus(); \ - get_fpr(t0, rA(ctx->opcode)); \ - get_fpr(t1, rC(ctx->opcode)); \ - get_fpr(t2, rB(ctx->opcode)); \ - gen_helper_f##name(t3, tcg_env, t0, t1, t2); \ - set_fpr(rD(ctx->opcode), t3); \ - if (set_fprf) { \ - gen_compute_fprf_float64(t3); \ - } \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ +static bool do_helper_acb(DisasContext *ctx, arg_A *a, + void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64, + TCGv_i64, TCGv_i64)) +{ + TCGv_i64 t0, t1, t2, t3; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + t3 = tcg_temp_new_i64(); + gen_reset_fpstatus(); + get_fpr(t0, a->fra); + get_fpr(t1, a->frc); + get_fpr(t2, a->frb); + helper(t3, tcg_env, t0, t1, t2); + set_fpr(a->frt, t3); + gen_compute_fprf_float64(t3); + if (unlikely(a->rc)) { + gen_set_cr1_from_fpscr(ctx); + } + return true; } -#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ -_GEN_FLOAT_ACB(name, 0x3F, op2, set_fprf, type); \ -_GEN_FLOAT_ACB(name##s, 0x3B, op2, set_fprf, type); - -#define _GEN_FLOAT_AB(name, op1, op2, inval, set_fprf, type) \ -static void gen_f##name(DisasContext *ctx) \ -{ \ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - TCGv_i64 t2; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - t2 = tcg_temp_new_i64(); \ - gen_reset_fpstatus(); \ - get_fpr(t0, rA(ctx->opcode)); \ - get_fpr(t1, rB(ctx->opcode)); \ - gen_helper_f##name(t2, tcg_env, t0, t1); \ - set_fpr(rD(ctx->opcode), t2); \ - if (set_fprf) { \ - gen_compute_fprf_float64(t2); \ - } \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ +static bool do_helper_ab(DisasContext *ctx, arg_A_tab *a, + void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64, + TCGv_i64)) +{ + TCGv_i64 t0, t1, t2; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + 
REQUIRE_FPU(ctx); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + gen_reset_fpstatus(); + get_fpr(t0, a->fra); + get_fpr(t1, a->frb); + helper(t2, tcg_env, t0, t1); + set_fpr(a->frt, t2); + gen_compute_fprf_float64(t2); + if (unlikely(a->rc)) { + gen_set_cr1_from_fpscr(ctx); + } + return true; } -#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AB(name, 0x3F, op2, inval, set_fprf, type); \ -_GEN_FLOAT_AB(name##s, 0x3B, op2, inval, set_fprf, type); -#define _GEN_FLOAT_AC(name, op1, op2, inval, set_fprf, type) \ -static void gen_f##name(DisasContext *ctx) \ -{ \ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - TCGv_i64 t2; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - t2 = tcg_temp_new_i64(); \ - gen_reset_fpstatus(); \ - get_fpr(t0, rA(ctx->opcode)); \ - get_fpr(t1, rC(ctx->opcode)); \ - gen_helper_f##name(t2, tcg_env, t0, t1); \ - set_fpr(rD(ctx->opcode), t2); \ - if (set_fprf) { \ - gen_compute_fprf_float64(t2); \ - } \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ +static bool do_helper_ac(DisasContext *ctx, arg_A_tac *a, + void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64, + TCGv_i64)) +{ + TCGv_i64 t0, t1, t2; + REQUIRE_INSNS_FLAGS(ctx, FLOAT); + REQUIRE_FPU(ctx); + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + t2 = tcg_temp_new_i64(); + gen_reset_fpstatus(); + get_fpr(t0, a->fra); + get_fpr(t1, a->frc); + helper(t2, tcg_env, t0, t1); + set_fpr(a->frt, t2); + gen_compute_fprf_float64(t2); + if (unlikely(a->rc)) { + gen_set_cr1_from_fpscr(ctx); + } + return true; } -#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AC(name, 0x3F, op2, inval, set_fprf, type); \ -_GEN_FLOAT_AC(name##s, 0x3B, op2, inval, set_fprf, type); #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ static void gen_f##name(DisasContext *ctx) \ @@ -145,64 +122,22 @@ static void gen_f##name(DisasContext *ctx) \ } \ } -#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ -static void gen_f##name(DisasContext *ctx) \ -{ \ - TCGv_i64 t0; \ - TCGv_i64 t1; \ - if (unlikely(!ctx->fpu_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_FPU); \ - return; \ - } \ - t0 = tcg_temp_new_i64(); \ - t1 = tcg_temp_new_i64(); \ - gen_reset_fpstatus(); \ - get_fpr(t0, rB(ctx->opcode)); \ - gen_helper_f##name(t1, tcg_env, t0); \ - set_fpr(rD(ctx->opcode), t1); \ - if (set_fprf) { \ - gen_compute_fprf_float64(t1); \ - } \ - if (unlikely(Rc(ctx->opcode) != 0)) { \ - gen_set_cr1_from_fpscr(ctx); \ - } \ -} - -/* fadd - fadds */ -GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT); -/* fdiv - fdivs */ -GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT); -/* fmul - fmuls */ -GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT); - -/* fre */ -GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT); - -/* fres */ -GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES); - -/* frsqrte */ -GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE); - -/* frsqrtes */ -static void gen_frsqrtes(DisasContext *ctx) +static bool do_helper_bs(DisasContext *ctx, arg_A_tb *a, + void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64)) { - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1; + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); gen_reset_fpstatus(); - get_fpr(t0, rB(ctx->opcode)); - gen_helper_frsqrtes(t1, tcg_env, t0); - set_fpr(rD(ctx->opcode), t1); + get_fpr(t0, 
a->frb); + helper(t1, tcg_env, t0); + set_fpr(a->frt, t1); gen_compute_fprf_float64(t1); - if (unlikely(Rc(ctx->opcode) != 0)) { + if (unlikely(a->rc)) { gen_set_cr1_from_fpscr(ctx); } + return true; } static bool trans_FSEL(DisasContext *ctx, arg_A *a) @@ -228,10 +163,6 @@ static bool trans_FSEL(DisasContext *ctx, arg_A *a) return true; } -/* fsub - fsubs */ -GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT); -/* Optional: */ - static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a, void (*helper)(TCGv_i64, TCGv_ptr, TCGv_i64)) { @@ -254,19 +185,33 @@ static bool do_helper_fsqrt(DisasContext *ctx, arg_A_tb *a, return true; } +TRANS(FADD, do_helper_ab, gen_helper_FADD); +TRANS(FADDS, do_helper_ab, gen_helper_FADDS); +TRANS(FSUB, do_helper_ab, gen_helper_FSUB); +TRANS(FSUBS, do_helper_ab, gen_helper_FSUBS); +TRANS(FDIV, do_helper_ab, gen_helper_FDIV); +TRANS(FDIVS, do_helper_ab, gen_helper_FDIVS); +TRANS(FMUL, do_helper_ac, gen_helper_FMUL); +TRANS(FMULS, do_helper_ac, gen_helper_FMULS); + +TRANS(FMADD, do_helper_acb, gen_helper_FMADD); +TRANS(FMADDS, do_helper_acb, gen_helper_FMADDS); +TRANS(FMSUB, do_helper_acb, gen_helper_FMSUB); +TRANS(FMSUBS, do_helper_acb, gen_helper_FMSUBS); + +TRANS(FNMADD, do_helper_acb, gen_helper_FNMADD); +TRANS(FNMADDS, do_helper_acb, gen_helper_FNMADDS); +TRANS(FNMSUB, do_helper_acb, gen_helper_FNMSUB); +TRANS(FNMSUBS, do_helper_acb, gen_helper_FNMSUBS); + +TRANS_FLAGS(FLOAT_EXT, FRE, do_helper_bs, gen_helper_FRE); +TRANS_FLAGS(FLOAT_FRES, FRES, do_helper_bs, gen_helper_FRES); +TRANS_FLAGS(FLOAT_FRSQRTE, FRSQRTE, do_helper_bs, gen_helper_FRSQRTE); +TRANS_FLAGS(FLOAT_FRSQRTES, FRSQRTES, do_helper_bs, gen_helper_FRSQRTES); + TRANS(FSQRT, do_helper_fsqrt, gen_helper_FSQRT); TRANS(FSQRTS, do_helper_fsqrt, gen_helper_FSQRTS); -/*** Floating-Point multiply-and-add ***/ -/* fmadd - fmadds */ -GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT); -/* fmsub - fmsubs */ -GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT); -/* fnmadd - fnmadds */ -GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT); -/* fnmsub - fnmsubs */ -GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT); - /*** Floating-Point round & convert ***/ /* fctiw */ GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT); @@ -304,35 +249,30 @@ GEN_FLOAT_B(rip, 0x08, 0x0E, 1, PPC_FLOAT_EXT); /* frim */ GEN_FLOAT_B(rim, 0x08, 0x0F, 1, PPC_FLOAT_EXT); -static void gen_ftdiv(DisasContext *ctx) +static bool trans_FTDIV(DisasContext *ctx, arg_X_bf *a) { - TCGv_i64 t0; - TCGv_i64 t1; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + TCGv_i64 t0, t1; + REQUIRE_INSNS_FLAGS2(ctx, FP_TST_ISA206); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); t1 = tcg_temp_new_i64(); - get_fpr(t0, rA(ctx->opcode)); - get_fpr(t1, rB(ctx->opcode)); - gen_helper_ftdiv(cpu_crf[crfD(ctx->opcode)], t0, t1); + get_fpr(t0, a->ra); + get_fpr(t1, a->rb); + gen_helper_FTDIV(cpu_crf[a->bf], t0, t1); + return true; } -static void gen_ftsqrt(DisasContext *ctx) +static bool trans_FTSQRT(DisasContext *ctx, arg_X_bf_b *a) { TCGv_i64 t0; - if (unlikely(!ctx->fpu_enabled)) { - gen_exception(ctx, POWERPC_EXCP_FPU); - return; - } + REQUIRE_INSNS_FLAGS2(ctx, FP_TST_ISA206); + REQUIRE_FPU(ctx); t0 = tcg_temp_new_i64(); - get_fpr(t0, rB(ctx->opcode)); - gen_helper_ftsqrt(cpu_crf[crfD(ctx->opcode)], t0); + get_fpr(t0, a->rb); + gen_helper_FTSQRT(cpu_crf[a->bf], t0); + return true; } - - /*** Floating-Point compare ***/ /* fcmpo */ @@ -1111,14 +1051,7 @@ TRANS(STFDX, do_lsfp_X, false, true, false) TRANS(STFDUX, do_lsfp_X, true, true, false) TRANS(PSTFD, do_lsfp_PLS_D, 
false, true, false) -#undef _GEN_FLOAT_ACB -#undef GEN_FLOAT_ACB -#undef _GEN_FLOAT_AB -#undef GEN_FLOAT_AB -#undef _GEN_FLOAT_AC -#undef GEN_FLOAT_AC #undef GEN_FLOAT_B -#undef GEN_FLOAT_BS #undef GEN_LDF #undef GEN_LDUF diff --git a/target/ppc/translate/fp-ops.c.inc b/target/ppc/translate/fp-ops.c.inc index d4c6c4bed1..cef4b5dfcb 100644 --- a/target/ppc/translate/fp-ops.c.inc +++ b/target/ppc/translate/fp-ops.c.inc @@ -1,36 +1,6 @@ -#define _GEN_FLOAT_ACB(name, op, op1, op2, isfloat, set_fprf, type) \ -GEN_HANDLER(f##name, op1, op2, 0xFF, 0x00000000, type) -#define GEN_FLOAT_ACB(name, op2, set_fprf, type) \ -_GEN_FLOAT_ACB(name, name, 0x3F, op2, 0, set_fprf, type), \ -_GEN_FLOAT_ACB(name##s, name, 0x3B, op2, 1, set_fprf, type) -#define _GEN_FLOAT_AB(name, op, op1, op2, inval, isfloat, set_fprf, type) \ -GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) -#define GEN_FLOAT_AB(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AB(name, name, 0x3F, op2, inval, 0, set_fprf, type), \ -_GEN_FLOAT_AB(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) -#define _GEN_FLOAT_AC(name, op, op1, op2, inval, isfloat, set_fprf, type) \ -GEN_HANDLER(f##name, op1, op2, 0xFF, inval, type) -#define GEN_FLOAT_AC(name, op2, inval, set_fprf, type) \ -_GEN_FLOAT_AC(name, name, 0x3F, op2, inval, 0, set_fprf, type), \ -_GEN_FLOAT_AC(name##s, name, 0x3B, op2, inval, 1, set_fprf, type) #define GEN_FLOAT_B(name, op2, op3, set_fprf, type) \ GEN_HANDLER(f##name, 0x3F, op2, op3, 0x001F0000, type) -#define GEN_FLOAT_BS(name, op1, op2, set_fprf, type) \ -GEN_HANDLER(f##name, op1, op2, 0xFF, 0x001F07C0, type) -GEN_FLOAT_AB(add, 0x15, 0x000007C0, 1, PPC_FLOAT), -GEN_FLOAT_AB(div, 0x12, 0x000007C0, 1, PPC_FLOAT), -GEN_FLOAT_AC(mul, 0x19, 0x0000F800, 1, PPC_FLOAT), -GEN_FLOAT_BS(re, 0x3F, 0x18, 1, PPC_FLOAT_EXT), -GEN_FLOAT_BS(res, 0x3B, 0x18, 1, PPC_FLOAT_FRES), -GEN_FLOAT_BS(rsqrte, 0x3F, 0x1A, 1, PPC_FLOAT_FRSQRTE), -GEN_FLOAT_AB(sub, 0x14, 0x000007C0, 1, PPC_FLOAT), -GEN_FLOAT_ACB(madd, 0x1D, 1, PPC_FLOAT), -GEN_FLOAT_ACB(msub, 0x1C, 1, PPC_FLOAT), -GEN_FLOAT_ACB(nmadd, 0x1F, 1, PPC_FLOAT), -GEN_FLOAT_ACB(nmsub, 0x1E, 1, PPC_FLOAT), -GEN_HANDLER_E(ftdiv, 0x3F, 0x00, 0x04, 1, PPC_NONE, PPC2_FP_TST_ISA206), -GEN_HANDLER_E(ftsqrt, 0x3F, 0x00, 0x05, 1, PPC_NONE, PPC2_FP_TST_ISA206), GEN_FLOAT_B(ctiw, 0x0E, 0x00, 0, PPC_FLOAT), GEN_HANDLER_E(fctiwu, 0x3F, 0x0E, 0x04, 0, PPC_NONE, PPC2_FP_CVT_ISA206), GEN_FLOAT_B(ctiwz, 0x0F, 0x00, 0, PPC_FLOAT), @@ -61,7 +31,6 @@ GEN_STXF(stfiw, st32fiw, 0x17, 0x1E, PPC_FLOAT_STFIWX) GEN_HANDLER_E(stfdepx, 0x1F, 0x1F, 0x16, 0x00000001, PPC_NONE, PPC2_BOOKE206), GEN_HANDLER_E(stfdpx, 0x1F, 0x17, 0x1C, 0x00200001, PPC_NONE, PPC2_ISA205), -GEN_HANDLER(frsqrtes, 0x3B, 0x1A, 0xFF, 0x001F07C0, PPC_FLOAT_FRSQRTES), GEN_HANDLER(fcmpo, 0x3F, 0x00, 0x01, 0x00600001, PPC_FLOAT), GEN_HANDLER(fcmpu, 0x3F, 0x00, 0x00, 0x00600001, PPC_FLOAT), GEN_HANDLER(fabs, 0x3F, 0x08, 0x08, 0x001F0000, PPC_FLOAT), diff --git a/target/ppc/translate/misc-impl.c.inc b/target/ppc/translate/misc-impl.c.inc new file mode 100644 index 0000000000..cbf82b1ea0 --- /dev/null +++ b/target/ppc/translate/misc-impl.c.inc @@ -0,0 +1,157 @@ +/* + * Power ISA decode for misc instructions + * + * Copyright (c) 2024, IBM Corporation. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +/* + * Memory Barrier Instructions + */ + +static bool trans_SYNC(DisasContext *ctx, arg_X_sync *a) +{ + TCGBar bar = TCG_MO_ALL; + uint32_t l = a->l; + uint32_t sc = a->sc; + + /* + * BookE uses the msync mnemonic. This means hwsync, except in the + * 440, where it is an execution serialisation point that requires all + * previous storage accesses to have been performed to memory (which + * doesn't matter for TCG). + */ + if (!(ctx->insns_flags & PPC_MEM_SYNC)) { + if (ctx->insns_flags & PPC_BOOKE) { + tcg_gen_mb(bar | TCG_BAR_SC); + return true; + } + + return false; + } + + /* + * In ISA v3.1, the L field grew one bit. Mask that out to ignore it in + * older processors. It also added the SC field; zero this to ignore + * it too. + */ + if (!(ctx->insns_flags2 & PPC2_ISA310)) { + l &= 0x3; + sc = 0; + } + + if (sc) { + /* Store syncs [stsync, stcisync, stncisync]. These ignore L. */ + bar = TCG_MO_ST_ST; + } else { + if (((l == 1) && (ctx->insns_flags2 & PPC2_MEM_LWSYNC)) || (l == 5)) { + /* lwsync, or plwsync on POWER10 and later */ + bar = TCG_MO_LD_LD | TCG_MO_LD_ST | TCG_MO_ST_ST; + } + + /* + * We may need to check for a pending TLB flush. + * + * We do this on ptesync (l == 2) on ppc64 and any sync on ppc32. + * + * Additionally, this can only happen in kernel mode, so + * check MSR_PR as well. + */ + if (((l == 2) || !(ctx->insns_flags & PPC_64B)) && !ctx->pr) { + gen_check_tlb_flush(ctx, true); + } + } + + tcg_gen_mb(bar | TCG_BAR_SC); + + return true; +} + +static bool trans_EIEIO(DisasContext *ctx, arg_EIEIO *a) +{ + TCGBar bar = TCG_MO_ALL; + + /* + * BookE uses the mbar instruction instead of eieio, which is basically + * a full hwsync memory barrier, but is not execution synchronising. For + * the purpose of TCG the distinction is not relevant. + */ + if (!(ctx->insns_flags & PPC_MEM_EIEIO)) { + if ((ctx->insns_flags & PPC_BOOKE) || + (ctx->insns_flags2 & PPC2_BOOKE206)) { + tcg_gen_mb(bar | TCG_BAR_SC); + return true; + } + return false; + } + + /* + * eieio has complex semantics. It provides memory ordering between + * operations in the set: + * - loads from CI memory. + * - stores to CI memory. + * - stores to WT memory. + * + * It separately also orders memory for operations in the set: + * - stores to cacheable memory. + * + * It also serializes instructions: + * - dcbt and dcbst. + * + * It separately serializes: + * - tlbie and tlbsync. + * + * And separately serializes: + * - slbieg, slbiag, and slbsync. + * + * The end result is that CI memory ordering requires TCG_MO_ALL + * and it is not possible to special-case more relaxed ordering for + * cacheable accesses. TCG_BAR_SC is required to provide this + * serialization. + */ + + /* + * POWER9 has an eieio instruction variant using bit 6 as a hint to + * tell the CPU it is a store-forwarding barrier. + */ + if (ctx->opcode & 0x2000000) { + /* + * ISA says that "Reserved fields in instructions are ignored + * by the processor". So ignore bit 6 on non-POWER9 CPUs, but + * as this is not an instruction software should be using, + * complain to the user.
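+ * Note that LOG_GUEST_ERROR messages are only visible when QEMU is + * run with -d guest_errors.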
+ */ + if (!(ctx->insns_flags2 & PPC2_ISA300)) { + qemu_log_mask(LOG_GUEST_ERROR, "invalid eieio using bit 6 at @" + TARGET_FMT_lx "\n", ctx->cia); + } else { + bar = TCG_MO_ST_LD; + } + } + + tcg_gen_mb(bar | TCG_BAR_SC); + + return true; +} + +static bool trans_ATTN(DisasContext *ctx, arg_ATTN *a) +{ +#if defined(TARGET_PPC64) && !defined(CONFIG_USER_ONLY) + gen_helper_attn(tcg_env); + return true; +#else + return false; +#endif +} diff --git a/target/ppc/translate/processor-ctrl-impl.c.inc b/target/ppc/translate/processor-ctrl-impl.c.inc index 0142801985..8abbb89630 100644 --- a/target/ppc/translate/processor-ctrl-impl.c.inc +++ b/target/ppc/translate/processor-ctrl-impl.c.inc @@ -59,7 +59,7 @@ static bool trans_MSGSND(DisasContext *ctx, arg_X_rb *a) #if !defined(CONFIG_USER_ONLY) if (is_book3s_arch2x(ctx)) { - gen_helper_book3s_msgsnd(cpu_gpr[a->rb]); + gen_helper_book3s_msgsnd(tcg_env, cpu_gpr[a->rb]); } else { gen_helper_msgsnd(cpu_gpr[a->rb]); } diff --git a/target/ppc/translate/storage-ctrl-impl.c.inc b/target/ppc/translate/storage-ctrl-impl.c.inc index 74c23a4191..b8b4454663 100644 --- a/target/ppc/translate/storage-ctrl-impl.c.inc +++ b/target/ppc/translate/storage-ctrl-impl.c.inc @@ -224,6 +224,13 @@ static bool do_tlbie(DisasContext *ctx, arg_X_tlbie *a, bool local) a->prs << TLBIE_F_PRS_SHIFT | a->r << TLBIE_F_R_SHIFT | local << TLBIE_F_LOCAL_SHIFT)); + if (!local) { + /* + * Global TLB flush uses async-work which must run before the + * next instruction, so this must be the last in the TB. + */ + ctx->base.is_jmp = DISAS_EXIT_UPDATE; + } return true; #endif diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index b56e615c24..8084af75cc 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -14,125 +14,88 @@ static inline TCGv_ptr gen_avr_ptr(int reg) return r; } -#define GEN_VR_LDX(name, opc2, opc3) \ -static void glue(gen_, name)(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 avr; \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_INT); \ - avr = tcg_temp_new_i64(); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - tcg_gen_andi_tl(EA, EA, ~0xf); \ - /* \ - * We only need to swap high and low halves. gen_qemu_ld64_i64 \ - * does necessary 64-bit byteswap already. \ - */ \ - if (ctx->le_mode) { \ - gen_qemu_ld64_i64(ctx, avr, EA); \ - set_avr64(rD(ctx->opcode), avr, false); \ - tcg_gen_addi_tl(EA, EA, 8); \ - gen_qemu_ld64_i64(ctx, avr, EA); \ - set_avr64(rD(ctx->opcode), avr, true); \ - } else { \ - gen_qemu_ld64_i64(ctx, avr, EA); \ - set_avr64(rD(ctx->opcode), avr, true); \ - tcg_gen_addi_tl(EA, EA, 8); \ - gen_qemu_ld64_i64(ctx, avr, EA); \ - set_avr64(rD(ctx->opcode), avr, false); \ - } \ -} - -#define GEN_VR_STX(name, opc2, opc3) \ -static void gen_st##name(DisasContext *ctx) \ -{ \ - TCGv EA; \ - TCGv_i64 avr; \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_INT); \ - avr = tcg_temp_new_i64(); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - tcg_gen_andi_tl(EA, EA, ~0xf); \ - /* \ - * We only need to swap high and low halves. gen_qemu_st64_i64 \ - * does necessary 64-bit byteswap already. 
\ - */ \ - if (ctx->le_mode) { \ - get_avr64(avr, rD(ctx->opcode), false); \ - gen_qemu_st64_i64(ctx, avr, EA); \ - tcg_gen_addi_tl(EA, EA, 8); \ - get_avr64(avr, rD(ctx->opcode), true); \ - gen_qemu_st64_i64(ctx, avr, EA); \ - } else { \ - get_avr64(avr, rD(ctx->opcode), true); \ - gen_qemu_st64_i64(ctx, avr, EA); \ - tcg_gen_addi_tl(EA, EA, 8); \ - get_avr64(avr, rD(ctx->opcode), false); \ - gen_qemu_st64_i64(ctx, avr, EA); \ - } \ -} - -#define GEN_VR_LVE(name, opc2, opc3, size) \ -static void gen_lve##name(DisasContext *ctx) \ - { \ - TCGv EA; \ - TCGv_ptr rs; \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - if (size > 1) { \ - tcg_gen_andi_tl(EA, EA, ~(size - 1)); \ - } \ - rs = gen_avr_ptr(rS(ctx->opcode)); \ - gen_helper_lve##name(tcg_env, rs, EA); \ - } - -#define GEN_VR_STVE(name, opc2, opc3, size) \ -static void gen_stve##name(DisasContext *ctx) \ - { \ - TCGv EA; \ - TCGv_ptr rs; \ - if (unlikely(!ctx->altivec_enabled)) { \ - gen_exception(ctx, POWERPC_EXCP_VPU); \ - return; \ - } \ - gen_set_access_type(ctx, ACCESS_INT); \ - EA = tcg_temp_new(); \ - gen_addr_reg_index(ctx, EA); \ - if (size > 1) { \ - tcg_gen_andi_tl(EA, EA, ~(size - 1)); \ - } \ - rs = gen_avr_ptr(rS(ctx->opcode)); \ - gen_helper_stve##name(tcg_env, rs, EA); \ - } +static bool trans_LVX(DisasContext *ctx, arg_X *a) +{ + TCGv EA; + TCGv_i64 avr; + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + gen_set_access_type(ctx, ACCESS_INT); + avr = tcg_temp_new_i64(); + EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); + tcg_gen_andi_tl(EA, EA, ~0xf); + /* + * We only need to swap high and low halves. gen_qemu_ld64_i64 + * does necessary 64-bit byteswap already. + */ + gen_qemu_ld64_i64(ctx, avr, EA); + set_avr64(a->rt, avr, !ctx->le_mode); + tcg_gen_addi_tl(EA, EA, 8); + gen_qemu_ld64_i64(ctx, avr, EA); + set_avr64(a->rt, avr, ctx->le_mode); + return true; +} -GEN_VR_LDX(lvx, 0x07, 0x03); /* As we don't emulate the cache, lvxl is strictly equivalent to lvx */ -GEN_VR_LDX(lvxl, 0x07, 0x0B); +QEMU_FLATTEN +static bool trans_LVXL(DisasContext *ctx, arg_LVXL *a) +{ + return trans_LVX(ctx, a); +} -GEN_VR_LVE(bx, 0x07, 0x00, 1); -GEN_VR_LVE(hx, 0x07, 0x01, 2); -GEN_VR_LVE(wx, 0x07, 0x02, 4); +static bool trans_STVX(DisasContext *ctx, arg_STVX *a) +{ + TCGv EA; + TCGv_i64 avr; + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + gen_set_access_type(ctx, ACCESS_INT); + avr = tcg_temp_new_i64(); + EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); + tcg_gen_andi_tl(EA, EA, ~0xf); + /* + * We only need to swap high and low halves. gen_qemu_st64_i64 + * does necessary 64-bit byteswap already. 
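+ * In big-endian mode the high doubleword is stored at EA and the low + * doubleword at EA + 8; little-endian mode reverses this, hence the + * !ctx->le_mode / ctx->le_mode pair below.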
+ */ + get_avr64(avr, a->rt, !ctx->le_mode); + gen_qemu_st64_i64(ctx, avr, EA); + tcg_gen_addi_tl(EA, EA, 8); + get_avr64(avr, a->rt, ctx->le_mode); + gen_qemu_st64_i64(ctx, avr, EA); + return true; +} -GEN_VR_STX(svx, 0x07, 0x07); /* As we don't emulate the cache, stvxl is strictly equivalent to stvx */ -GEN_VR_STX(svxl, 0x07, 0x0F); +QEMU_FLATTEN +static bool trans_STVXL(DisasContext *ctx, arg_STVXL *a) +{ + return trans_STVX(ctx, a); +} + +static bool do_ldst_ve_X(DisasContext *ctx, arg_X *a, int size, + void (*helper)(TCGv_env, TCGv_ptr, TCGv)) +{ + TCGv EA; + TCGv_ptr vrt; + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + gen_set_access_type(ctx, ACCESS_INT); + EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); + if (size > 1) { + tcg_gen_andi_tl(EA, EA, ~(size - 1)); + } + vrt = gen_avr_ptr(a->rt); + helper(tcg_env, vrt, EA); + return true; +} -GEN_VR_STVE(bx, 0x07, 0x04, 1); -GEN_VR_STVE(hx, 0x07, 0x05, 2); -GEN_VR_STVE(wx, 0x07, 0x06, 4); +TRANS(LVEBX, do_ldst_ve_X, 1, gen_helper_LVEBX); +TRANS(LVEHX, do_ldst_ve_X, 2, gen_helper_LVEHX); +TRANS(LVEWX, do_ldst_ve_X, 4, gen_helper_LVEWX); + +TRANS(STVEBX, do_ldst_ve_X, 1, gen_helper_STVEBX); +TRANS(STVEHX, do_ldst_ve_X, 2, gen_helper_STVEHX); +TRANS(STVEWX, do_ldst_ve_X, 4, gen_helper_STVEWX); static void gen_mfvscr(DisasContext *ctx) { @@ -242,16 +205,6 @@ static void glue(gen_, name)(DisasContext *ctx) \ 16, 16); \ } -/* Logical operations */ -GEN_VXFORM_V(vand, MO_64, tcg_gen_gvec_and, 2, 16); -GEN_VXFORM_V(vandc, MO_64, tcg_gen_gvec_andc, 2, 17); -GEN_VXFORM_V(vor, MO_64, tcg_gen_gvec_or, 2, 18); -GEN_VXFORM_V(vxor, MO_64, tcg_gen_gvec_xor, 2, 19); -GEN_VXFORM_V(vnor, MO_64, tcg_gen_gvec_nor, 2, 20); -GEN_VXFORM_V(veqv, MO_64, tcg_gen_gvec_eqv, 2, 26); -GEN_VXFORM_V(vnand, MO_64, tcg_gen_gvec_nand, 2, 22); -GEN_VXFORM_V(vorc, MO_64, tcg_gen_gvec_orc, 2, 21); - #define GEN_VXFORM(name, opc2, opc3) \ static void glue(gen_, name)(DisasContext *ctx) \ { \ @@ -389,22 +342,6 @@ GEN_VXFORM_V(vsububm, MO_8, tcg_gen_gvec_sub, 0, 16); GEN_VXFORM_V(vsubuhm, MO_16, tcg_gen_gvec_sub, 0, 17); GEN_VXFORM_V(vsubuwm, MO_32, tcg_gen_gvec_sub, 0, 18); GEN_VXFORM_V(vsubudm, MO_64, tcg_gen_gvec_sub, 0, 19); -GEN_VXFORM_V(vmaxub, MO_8, tcg_gen_gvec_umax, 1, 0); -GEN_VXFORM_V(vmaxuh, MO_16, tcg_gen_gvec_umax, 1, 1); -GEN_VXFORM_V(vmaxuw, MO_32, tcg_gen_gvec_umax, 1, 2); -GEN_VXFORM_V(vmaxud, MO_64, tcg_gen_gvec_umax, 1, 3); -GEN_VXFORM_V(vmaxsb, MO_8, tcg_gen_gvec_smax, 1, 4); -GEN_VXFORM_V(vmaxsh, MO_16, tcg_gen_gvec_smax, 1, 5); -GEN_VXFORM_V(vmaxsw, MO_32, tcg_gen_gvec_smax, 1, 6); -GEN_VXFORM_V(vmaxsd, MO_64, tcg_gen_gvec_smax, 1, 7); -GEN_VXFORM_V(vminub, MO_8, tcg_gen_gvec_umin, 1, 8); -GEN_VXFORM_V(vminuh, MO_16, tcg_gen_gvec_umin, 1, 9); -GEN_VXFORM_V(vminuw, MO_32, tcg_gen_gvec_umin, 1, 10); -GEN_VXFORM_V(vminud, MO_64, tcg_gen_gvec_umin, 1, 11); -GEN_VXFORM_V(vminsb, MO_8, tcg_gen_gvec_smin, 1, 12); -GEN_VXFORM_V(vminsh, MO_16, tcg_gen_gvec_smin, 1, 13); -GEN_VXFORM_V(vminsw, MO_32, tcg_gen_gvec_smin, 1, 14); -GEN_VXFORM_V(vminsd, MO_64, tcg_gen_gvec_smin, 1, 15); GEN_VXFORM(vmrghb, 6, 0); GEN_VXFORM(vmrghh, 6, 1); GEN_VXFORM(vmrghw, 6, 2); @@ -460,15 +397,17 @@ static void trans_vmrgow(DisasContext *ctx) * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. * Bytes sh:sh+15 of X are placed into vD. 
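* For example, with sh = 3 the two doublewords computed below are * 0x030405060708090A and 0x0B0C0D0E0F101112, i.e. bytes 0x03..0x12 of X.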
*/ -static void trans_lvsl(DisasContext *ctx) +static bool trans_LVSL(DisasContext *ctx, arg_LVSL *a) { - int VT = rD(ctx->opcode); TCGv_i64 result = tcg_temp_new_i64(); TCGv_i64 sh = tcg_temp_new_i64(); TCGv EA = tcg_temp_new(); + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); + /* Get sh(from description) by anding EA with 0xf. */ - gen_addr_reg_index(ctx, EA); + EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); tcg_gen_extu_tl_i64(sh, EA); tcg_gen_andi_i64(sh, sh, 0xfULL); @@ -478,13 +417,14 @@ static void trans_lvsl(DisasContext *ctx) */ tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL); tcg_gen_addi_i64(result, sh, 0x0001020304050607ull); - set_avr64(VT, result, true); + set_avr64(a->rt, result, true); /* * Create bytes sh+8:sh+15 of X(from description) and place them in * lower doubleword of vD. */ tcg_gen_addi_i64(result, sh, 0x08090a0b0c0d0e0fULL); - set_avr64(VT, result, false); + set_avr64(a->rt, result, false); + return true; } /* @@ -494,16 +434,17 @@ static void trans_lvsl(DisasContext *ctx) * Let X be the 32-byte value 0x00 || 0x01 || 0x02 || ... || 0x1E || 0x1F. * Bytes (16-sh):(31-sh) of X are placed into vD. */ -static void trans_lvsr(DisasContext *ctx) +static bool trans_LVSR(DisasContext *ctx, arg_LVSR *a) { - int VT = rD(ctx->opcode); TCGv_i64 result = tcg_temp_new_i64(); TCGv_i64 sh = tcg_temp_new_i64(); TCGv EA = tcg_temp_new(); + REQUIRE_INSNS_FLAGS(ctx, ALTIVEC); + REQUIRE_VECTOR(ctx); /* Get sh(from description) by anding EA with 0xf. */ - gen_addr_reg_index(ctx, EA); + EA = do_ea_calc(ctx, a->ra, cpu_gpr[a->rb]); tcg_gen_extu_tl_i64(sh, EA); tcg_gen_andi_i64(sh, sh, 0xfULL); @@ -513,13 +454,14 @@ static void trans_lvsr(DisasContext *ctx) */ tcg_gen_muli_i64(sh, sh, 0x0101010101010101ULL); tcg_gen_subfi_i64(result, 0x1011121314151617ULL, sh); - set_avr64(VT, result, true); + set_avr64(a->rt, result, true); /* * Create bytes (24-sh):(32-sh) of X(from description) and place them in * lower doubleword of vD. 
*/ tcg_gen_subfi_i64(result, 0x18191a1b1c1d1e1fULL, sh); - set_avr64(VT, result, false); + set_avr64(a->rt, result, false); + return true; } /* @@ -759,6 +701,37 @@ TRANS_FLAGS(ALTIVEC, VRLH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_rotlv) TRANS_FLAGS(ALTIVEC, VRLW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_rotlv) TRANS_FLAGS2(ALTIVEC_207, VRLD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_rotlv) +/* Logical operations */ +TRANS_FLAGS(ALTIVEC, VAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_and); +TRANS_FLAGS(ALTIVEC, VANDC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_andc); +TRANS_FLAGS(ALTIVEC, VOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_or); +TRANS_FLAGS(ALTIVEC, VXOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_xor); +TRANS_FLAGS(ALTIVEC, VNOR, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nor); +TRANS_FLAGS2(ALTIVEC_207, VEQV, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_eqv); +TRANS_FLAGS2(ALTIVEC_207, VNAND, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_nand); +TRANS_FLAGS2(ALTIVEC_207, VORC, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_orc); + +/* Integer Max/Min operations */ +TRANS_FLAGS(ALTIVEC, VMAXUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umax); +TRANS_FLAGS(ALTIVEC, VMAXUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umax); +TRANS_FLAGS(ALTIVEC, VMAXUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umax); +TRANS_FLAGS2(ALTIVEC_207, VMAXUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umax); + +TRANS_FLAGS(ALTIVEC, VMAXSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smax); +TRANS_FLAGS(ALTIVEC, VMAXSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smax); +TRANS_FLAGS(ALTIVEC, VMAXSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smax); +TRANS_FLAGS2(ALTIVEC_207, VMAXSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smax); + +TRANS_FLAGS(ALTIVEC, VMINUB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_umin); +TRANS_FLAGS(ALTIVEC, VMINUH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_umin); +TRANS_FLAGS(ALTIVEC, VMINUW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_umin); +TRANS_FLAGS2(ALTIVEC_207, VMINUD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_umin); + +TRANS_FLAGS(ALTIVEC, VMINSB, do_vector_gvec3_VX, MO_8, tcg_gen_gvec_smin); +TRANS_FLAGS(ALTIVEC, VMINSH, do_vector_gvec3_VX, MO_16, tcg_gen_gvec_smin); +TRANS_FLAGS(ALTIVEC, VMINSW, do_vector_gvec3_VX, MO_32, tcg_gen_gvec_smin); +TRANS_FLAGS2(ALTIVEC_207, VMINSD, do_vector_gvec3_VX, MO_64, tcg_gen_gvec_smin); + static TCGv_vec do_vrl_mask_vec(unsigned vece, TCGv_vec vrb) { TCGv_vec t0 = tcg_temp_new_vec_matching(vrb), @@ -1158,8 +1131,6 @@ GEN_VXFORM_TRANS_DUAL(vmrgow, PPC_NONE, PPC2_ALTIVEC_207, GEN_VXFORM_HETRO(vextubrx, 6, 28) GEN_VXFORM_HETRO(vextuhrx, 6, 29) GEN_VXFORM_HETRO(vextuwrx, 6, 30) -GEN_VXFORM_TRANS(lvsl, 6, 31) -GEN_VXFORM_TRANS(lvsr, 6, 32) GEN_VXFORM_TRANS_DUAL(vmrgew, PPC_NONE, PPC2_ALTIVEC_207, vextuwrx, PPC_NONE, PPC2_ISA300) @@ -3365,13 +3336,6 @@ TRANS_FLAGS2(ISA310, VMODUQ, do_vx_helper, gen_helper_VMODUQ) #undef DIVS64 #undef DIVU64 -#undef GEN_VR_LDX -#undef GEN_VR_STX -#undef GEN_VR_LVE -#undef GEN_VR_STVE - -#undef GEN_VX_LOGICAL -#undef GEN_VX_LOGICAL_207 #undef GEN_VXFORM #undef GEN_VXFORM_207 #undef GEN_VXFORM_DUAL diff --git a/target/ppc/translate/vmx-ops.c.inc b/target/ppc/translate/vmx-ops.c.inc index 33fec8aca4..7bb11b0549 100644 --- a/target/ppc/translate/vmx-ops.c.inc +++ b/target/ppc/translate/vmx-ops.c.inc @@ -1,37 +1,3 @@ -#define GEN_VR_LDX(name, opc2, opc3) \ -GEN_HANDLER(name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) -#define GEN_VR_STX(name, opc2, opc3) \ -GEN_HANDLER(st##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) -#define GEN_VR_LVE(name, opc2, opc3) \ - 
GEN_HANDLER(lve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) -#define GEN_VR_STVE(name, opc2, opc3) \ - GEN_HANDLER(stve##name, 0x1F, opc2, opc3, 0x00000001, PPC_ALTIVEC) -GEN_VR_LDX(lvx, 0x07, 0x03), -GEN_VR_LDX(lvxl, 0x07, 0x0B), -GEN_VR_LVE(bx, 0x07, 0x00), -GEN_VR_LVE(hx, 0x07, 0x01), -GEN_VR_LVE(wx, 0x07, 0x02), -GEN_VR_STX(svx, 0x07, 0x07), -GEN_VR_STX(svxl, 0x07, 0x0F), -GEN_VR_STVE(bx, 0x07, 0x04), -GEN_VR_STVE(hx, 0x07, 0x05), -GEN_VR_STVE(wx, 0x07, 0x06), - -#define GEN_VX_LOGICAL(name, tcg_op, opc2, opc3) \ -GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) - -#define GEN_VX_LOGICAL_207(name, tcg_op, opc2, opc3) \ -GEN_HANDLER_E(name, 0x04, opc2, opc3, 0x00000000, PPC_NONE, PPC2_ALTIVEC_207) - -GEN_VX_LOGICAL(vand, tcg_gen_and_i64, 2, 16), -GEN_VX_LOGICAL(vandc, tcg_gen_andc_i64, 2, 17), -GEN_VX_LOGICAL(vor, tcg_gen_or_i64, 2, 18), -GEN_VX_LOGICAL(vxor, tcg_gen_xor_i64, 2, 19), -GEN_VX_LOGICAL(vnor, tcg_gen_nor_i64, 2, 20), -GEN_VX_LOGICAL_207(veqv, tcg_gen_eqv_i64, 2, 26), -GEN_VX_LOGICAL_207(vnand, tcg_gen_nand_i64, 2, 22), -GEN_VX_LOGICAL_207(vorc, tcg_gen_orc_i64, 2, 21), - #define GEN_VXFORM(name, opc2, opc3) \ GEN_HANDLER(name, 0x04, opc2, opc3, 0x00000000, PPC_ALTIVEC) @@ -67,22 +33,6 @@ GEN_VXFORM_DUAL(vsubuhm, bcdsub, 0, 17, PPC_ALTIVEC, PPC_NONE), GEN_VXFORM_DUAL(vsubuwm, bcdus, 0, 18, PPC_ALTIVEC, PPC2_ISA300), GEN_VXFORM_DUAL(vsubudm, bcds, 0, 19, PPC2_ALTIVEC_207, PPC2_ISA300), GEN_VXFORM_300(bcds, 0, 27), -GEN_VXFORM(vmaxub, 1, 0), -GEN_VXFORM(vmaxuh, 1, 1), -GEN_VXFORM(vmaxuw, 1, 2), -GEN_VXFORM_207(vmaxud, 1, 3), -GEN_VXFORM(vmaxsb, 1, 4), -GEN_VXFORM(vmaxsh, 1, 5), -GEN_VXFORM(vmaxsw, 1, 6), -GEN_VXFORM_207(vmaxsd, 1, 7), -GEN_VXFORM(vminub, 1, 8), -GEN_VXFORM(vminuh, 1, 9), -GEN_VXFORM(vminuw, 1, 10), -GEN_VXFORM_207(vminud, 1, 11), -GEN_VXFORM(vminsb, 1, 12), -GEN_VXFORM(vminsh, 1, 13), -GEN_VXFORM(vminsw, 1, 14), -GEN_VXFORM_207(vminsd, 1, 15), GEN_VXFORM(vmrghb, 6, 0), GEN_VXFORM(vmrghh, 6, 1), GEN_VXFORM(vmrghw, 6, 2), |