author    | Cédric Le Goater <clg@kaod.org>            | 2020-01-27 15:45:06 +0100
committer | David Gibson <david@gibson.dropbear.id.au> | 2020-02-02 14:07:57 +1100
commit    | 9ae1329ee2fee95f201ca219090d7c742eaf6a90 (patch)
tree      | fea34e2d2ac683817d1affb4c46537051b96431f /hw
parent    | 4f9924c4d4cf9c039e247c5cdbbf71bce4e573c3 (diff)
ppc/pnv: Add models for POWER8 PHB3 PCIe Host bridge
This is a model of the PCIe Host Bridge (PHB3) found on a POWER8
processor. It includes the PowerBus logic interface (PBCQ), IOMMU
support, a single PCIe Gen.3 Root Complex, and support for MSI and LSI
interrupt sources as found on a POWER8 system using the XICS interrupt
controller.
The POWER8 processor comes in different flavors: Venice, Murano and Naples,
each with a different number of PHBs. To keep things simple, the model
provides 3 PHB3s per chip. Some platforms, like the Firestone, can also
couple PHBs on the first chip to provide more bandwidth, but this is too
specific to model in QEMU.
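In the patch this simplification shows up as a class-level default: every POWER8 chip flavour sets the same PHB3 count, and the chip instance creates that many PnvPHB3 children. The snippet below is condensed from the hw/ppc/pnv.c hunks of this patch (unrelated fields and error handling elided), not a separate implementation:

```c
/* hw/ppc/pnv.c (condensed): Murano, Venice and Naples all get 3 PHB3s */
static void pnv_chip_power8_class_init(ObjectClass *klass, void *data)
{
    PnvChipClass *k = PNV_CHIP_CLASS(klass);

    k->num_phbs = 3;
    /* ... other class defaults unchanged ... */
}

static void pnv_chip_power8_instance_init(Object *obj)
{
    PnvChip *chip = PNV_CHIP(obj);
    Pnv8Chip *chip8 = PNV8_CHIP(obj);
    PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
    int i;

    /* ... */
    for (i = 0; i < pcc->num_phbs; i++) {
        object_initialize_child(obj, "phb[*]", &chip8->phbs[i],
                                sizeof(chip8->phbs[i]), TYPE_PNV_PHB3,
                                &error_abort, NULL);
    }

    /* Number of PHBs is the chip default */
    chip->num_phbs = pcc->num_phbs;
}
```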
XICS requires some adjustments to support the PHB3 MSIs. The changes are
included here, but they could be split out into prerequisite patches.
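Concretely, the XICS adjustment adds optional reject/resend hooks to ICSStateClass, so that an interrupt source backed by in-memory state (the PHB3 IVE table and Reject Bit Array) can override the generic behaviour. The snippet below is condensed from the hw/intc/xics.c and hw/pci-host/pnv_phb3_msi.c hunks of this patch:

```c
/* hw/intc/xics.c (condensed): defer to the source class when it has a hook */
static void ics_reject(ICSState *ics, uint32_t nr)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);

    if (isc->reject) {
        isc->reject(ics, nr);
        return;
    }
    /* ... default LSI/MSI reject handling unchanged ... */
}

void ics_resend(ICSState *ics)
{
    ICSStateClass *isc = ICS_GET_CLASS(ics);

    if (isc->resend) {
        isc->resend(ics);
        return;
    }
    /* ... default LSI resend unchanged ... */
}

/* hw/pci-host/pnv_phb3_msi.c (condensed): the PHB3 MSI source installs hooks */
static void phb3_msi_class_init(ObjectClass *klass, void *data)
{
    ICSStateClass *isc = ICS_CLASS(klass);

    isc->reject = phb3_msi_reject;  /* records the reject in the RBA bitmap */
    isc->resend = phb3_msi_resend;  /* walks the RBA and re-delivers MSIs */
    /* ... parent realize/reset wiring unchanged ... */
}
```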
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Cédric Le Goater <clg@kaod.org>
Message-Id: <20200127144506.11132-3-clg@kaod.org>
[dwg: Use device_class_set_props()]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Diffstat (limited to 'hw')
-rw-r--r-- | hw/intc/xics.c              |   14
-rw-r--r-- | hw/pci-host/Makefile.objs   |    1
-rw-r--r-- | hw/pci-host/pnv_phb3.c      | 1195
-rw-r--r-- | hw/pci-host/pnv_phb3_msi.c  |  349
-rw-r--r-- | hw/pci-host/pnv_phb3_pbcq.c |  357
-rw-r--r-- | hw/ppc/pnv.c                |   69
6 files changed, 1982 insertions, 3 deletions
diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 785b607528..c5d507e707 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -217,7 +217,7 @@ void icp_eoi(ICPState *icp, uint32_t xirr) } } -static void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) +void icp_irq(ICSState *ics, int server, int nr, uint8_t priority) { ICPState *icp = xics_icp_get(ics->xics, server); @@ -512,8 +512,14 @@ void ics_write_xive(ICSState *ics, int srcno, int server, static void ics_reject(ICSState *ics, uint32_t nr) { + ICSStateClass *isc = ICS_GET_CLASS(ics); ICSIRQState *irq = ics->irqs + nr - ics->offset; + if (isc->reject) { + isc->reject(ics, nr); + return; + } + trace_xics_ics_reject(nr, nr - ics->offset); if (irq->flags & XICS_FLAGS_IRQ_MSI) { irq->status |= XICS_STATUS_REJECTED; @@ -524,8 +530,14 @@ static void ics_reject(ICSState *ics, uint32_t nr) void ics_resend(ICSState *ics) { + ICSStateClass *isc = ICS_GET_CLASS(ics); int i; + if (isc->resend) { + isc->resend(ics); + return; + } + for (i = 0; i < ics->nr_irqs; i++) { /* FIXME: filter by server#? */ if (ics->irqs[i].flags & XICS_FLAGS_IRQ_LSI) { diff --git a/hw/pci-host/Makefile.objs b/hw/pci-host/Makefile.objs index 8a296e2f93..8c87e8494d 100644 --- a/hw/pci-host/Makefile.objs +++ b/hw/pci-host/Makefile.objs @@ -21,3 +21,4 @@ common-obj-$(CONFIG_PCI_EXPRESS_XILINX) += xilinx-pcie.o common-obj-$(CONFIG_PCI_EXPRESS_DESIGNWARE) += designware.o obj-$(CONFIG_POWERNV) += pnv_phb4.o pnv_phb4_pec.o +obj-$(CONFIG_POWERNV) += pnv_phb3.o pnv_phb3_msi.o pnv_phb3_pbcq.o diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c new file mode 100644 index 0000000000..f03399c406 --- /dev/null +++ b/hw/pci-host/pnv_phb3.c @@ -0,0 +1,1195 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/visitor.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pcie_port.h" +#include "hw/ppc/pnv.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" + +#define phb3_error(phb, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb3[%d:%d]: " fmt "\n", \ + (phb)->chip_id, (phb)->phb_id, ## __VA_ARGS__) + +static PCIDevice *pnv_phb3_find_cfg_dev(PnvPHB3 *phb) +{ + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + uint64_t addr = phb->regs[PHB_CONFIG_ADDRESS >> 3]; + uint8_t bus, devfn; + + if (!(addr >> 63)) { + return NULL; + } + bus = (addr >> 52) & 0xff; + devfn = (addr >> 44) & 0xff; + + return pci_find_device(pci->bus, bus, devfn); +} + +/* + * The CONFIG_DATA register expects little endian accesses, but as the + * region is big endian, we have to swap the value. + */ +static void pnv_phb3_config_write(PnvPHB3 *phb, unsigned off, + unsigned size, uint64_t val) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + + pdev = pnv_phb3_find_cfg_dev(phb); + if (!pdev) { + return; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. 
+ */ + return; + } + switch (size) { + case 1: + break; + case 2: + val = bswap16(val); + break; + case 4: + val = bswap32(val); + break; + default: + g_assert_not_reached(); + } + pci_host_config_write_common(pdev, cfg_addr, limit, val, size); +} + +static uint64_t pnv_phb3_config_read(PnvPHB3 *phb, unsigned off, + unsigned size) +{ + uint32_t cfg_addr, limit; + PCIDevice *pdev; + uint64_t val; + + pdev = pnv_phb3_find_cfg_dev(phb); + if (!pdev) { + return ~0ull; + } + cfg_addr = (phb->regs[PHB_CONFIG_ADDRESS >> 3] >> 32) & 0xffc; + cfg_addr |= off; + limit = pci_config_size(pdev); + if (limit <= cfg_addr) { + /* + * conventional pci device can be behind pcie-to-pci bridge. + * 256 <= addr < 4K has no effects. + */ + return ~0ull; + } + val = pci_host_config_read_common(pdev, cfg_addr, limit, size); + switch (size) { + case 1: + return val; + case 2: + return bswap16(val); + case 4: + return bswap32(val); + default: + g_assert_not_reached(); + } +} + +static void pnv_phb3_check_m32(PnvPHB3 *phb) +{ + uint64_t base, start, size; + MemoryRegion *parent; + PnvPBCQState *pbcq = &phb->pbcq; + + if (memory_region_is_mapped(&phb->mr_m32)) { + memory_region_del_subregion(phb->mr_m32.container, &phb->mr_m32); + } + + if (!(phb->regs[PHB_PHB3_CONFIG >> 3] & PHB_PHB3C_M32_EN)) { + return; + } + + /* Grab geometry from registers */ + base = phb->regs[PHB_M32_BASE_ADDR >> 3]; + start = phb->regs[PHB_M32_START_ADDR >> 3]; + size = ~(phb->regs[PHB_M32_BASE_MASK >> 3] | 0xfffc000000000000ull) + 1; + + /* Check if it matches an enabled MMIO region in the PBCQ */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + base >= pbcq->mmio0_base && + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { + parent = &pbcq->mmbar0; + base -= pbcq->mmio0_base; + } else if (memory_region_is_mapped(&pbcq->mmbar1) && + base >= pbcq->mmio1_base && + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { + parent = &pbcq->mmbar1; + base -= pbcq->mmio1_base; + } else { + return; + } + + /* Create alias */ + memory_region_init_alias(&phb->mr_m32, OBJECT(phb), "phb3-m32", + &phb->pci_mmio, start, size); + memory_region_add_subregion(parent, base, &phb->mr_m32); +} + +static void pnv_phb3_check_m64(PnvPHB3 *phb, uint32_t index) +{ + uint64_t base, start, size, m64; + MemoryRegion *parent; + PnvPBCQState *pbcq = &phb->pbcq; + + if (memory_region_is_mapped(&phb->mr_m64[index])) { + /* Should we destroy it in RCU friendly way... ? 
*/ + memory_region_del_subregion(phb->mr_m64[index].container, + &phb->mr_m64[index]); + } + + /* Get table entry */ + m64 = phb->ioda_M64BT[index]; + + if (!(m64 & IODA2_M64BT_ENABLE)) { + return; + } + + /* Grab geometry from registers */ + base = GETFIELD(IODA2_M64BT_BASE, m64) << 20; + if (m64 & IODA2_M64BT_SINGLE_PE) { + base &= ~0x1ffffffull; + } + size = GETFIELD(IODA2_M64BT_MASK, m64) << 20; + size |= 0xfffc000000000000ull; + size = ~size + 1; + start = base | (phb->regs[PHB_M64_UPPER_BITS >> 3]); + + /* Check if it matches an enabled MMIO region in the PBCQ */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + base >= pbcq->mmio0_base && + (base + size) <= (pbcq->mmio0_base + pbcq->mmio0_size)) { + parent = &pbcq->mmbar0; + base -= pbcq->mmio0_base; + } else if (memory_region_is_mapped(&pbcq->mmbar1) && + base >= pbcq->mmio1_base && + (base + size) <= (pbcq->mmio1_base + pbcq->mmio1_size)) { + parent = &pbcq->mmbar1; + base -= pbcq->mmio1_base; + } else { + return; + } + + /* Create alias */ + memory_region_init_alias(&phb->mr_m64[index], OBJECT(phb), "phb3-m64", + &phb->pci_mmio, start, size); + memory_region_add_subregion(parent, base, &phb->mr_m64[index]); +} + +static void pnv_phb3_check_all_m64s(PnvPHB3 *phb) +{ + uint64_t i; + + for (i = 0; i < PNV_PHB3_NUM_M64; i++) { + pnv_phb3_check_m64(phb, i); + } +} + +static void pnv_phb3_lxivt_write(PnvPHB3 *phb, unsigned idx, uint64_t val) +{ + uint8_t server, prio; + + phb->ioda_LXIVT[idx] = val & (IODA2_LXIVT_SERVER | + IODA2_LXIVT_PRIORITY | + IODA2_LXIVT_NODE_ID); + server = GETFIELD(IODA2_LXIVT_SERVER, val); + prio = GETFIELD(IODA2_LXIVT_PRIORITY, val); + + /* + * The low order 2 bits are the link pointer (Type II interrupts). + * Shift back to get a valid IRQ server. + */ + server >>= 2; + + ics_write_xive(&phb->lsis, idx, server, prio, prio); +} + +static uint64_t *pnv_phb3_ioda_access(PnvPHB3 *phb, + unsigned *out_table, unsigned *out_idx) +{ + uint64_t adreg = phb->regs[PHB_IODA_ADDR >> 3]; + unsigned int index = GETFIELD(PHB_IODA_AD_TADR, adreg); + unsigned int table = GETFIELD(PHB_IODA_AD_TSEL, adreg); + unsigned int mask; + uint64_t *tptr = NULL; + + switch (table) { + case IODA2_TBL_LIST: + tptr = phb->ioda_LIST; + mask = 7; + break; + case IODA2_TBL_LXIVT: + tptr = phb->ioda_LXIVT; + mask = 7; + break; + case IODA2_TBL_IVC_CAM: + case IODA2_TBL_RBA: + mask = 31; + break; + case IODA2_TBL_RCAM: + mask = 63; + break; + case IODA2_TBL_MRT: + mask = 7; + break; + case IODA2_TBL_PESTA: + case IODA2_TBL_PESTB: + mask = 255; + break; + case IODA2_TBL_TVT: + tptr = phb->ioda_TVT; + mask = 511; + break; + case IODA2_TBL_TCAM: + case IODA2_TBL_TDR: + mask = 63; + break; + case IODA2_TBL_M64BT: + tptr = phb->ioda_M64BT; + mask = 15; + break; + case IODA2_TBL_M32DT: + tptr = phb->ioda_MDT; + mask = 255; + break; + case IODA2_TBL_PEEV: + tptr = phb->ioda_PEEV; + mask = 3; + break; + default: + phb3_error(phb, "invalid IODA table %d", table); + return NULL; + } + index &= mask; + if (out_idx) { + *out_idx = index; + } + if (out_table) { + *out_table = table; + } + if (tptr) { + tptr += index; + } + if (adreg & PHB_IODA_AD_AUTOINC) { + index = (index + 1) & mask; + adreg = SETFIELD(PHB_IODA_AD_TADR, adreg, index); + } + phb->regs[PHB_IODA_ADDR >> 3] = adreg; + return tptr; +} + +static uint64_t pnv_phb3_ioda_read(PnvPHB3 *phb) +{ + unsigned table; + uint64_t *tptr; + + tptr = pnv_phb3_ioda_access(phb, &table, NULL); + if (!tptr) { + /* Return 0 on unsupported tables, not ff's */ + return 0; + } + return *tptr; +} + +static void 
pnv_phb3_ioda_write(PnvPHB3 *phb, uint64_t val) +{ + unsigned table, idx; + uint64_t *tptr; + + tptr = pnv_phb3_ioda_access(phb, &table, &idx); + if (!tptr) { + return; + } + + /* Handle side effects */ + switch (table) { + case IODA2_TBL_LXIVT: + pnv_phb3_lxivt_write(phb, idx, val); + break; + case IODA2_TBL_M64BT: + *tptr = val; + pnv_phb3_check_m64(phb, idx); + break; + default: + *tptr = val; + } +} + +/* + * This is called whenever the PHB LSI, MSI source ID register or + * the PBCQ irq filters are written. + */ +void pnv_phb3_remap_irqs(PnvPHB3 *phb) +{ + ICSState *ics = &phb->lsis; + uint32_t local, global, count, mask, comp; + uint64_t baren; + PnvPBCQState *pbcq = &phb->pbcq; + + /* + * First check if we are enabled. Unlike real HW we don't separate + * TX and RX so we enable if both are set + */ + baren = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; + if (!(baren & PBCQ_NEST_BAR_EN_IRSN_RX) || + !(baren & PBCQ_NEST_BAR_EN_IRSN_TX)) { + ics->offset = 0; + return; + } + + /* Grab local LSI source ID */ + local = GETFIELD(PHB_LSI_SRC_ID, phb->regs[PHB_LSI_SOURCE_ID >> 3]) << 3; + + /* Grab global one and compare */ + global = GETFIELD(PBCQ_NEST_LSI_SRC, + pbcq->nest_regs[PBCQ_NEST_LSI_SRC_ID]) << 3; + if (global != local) { + /* + * This happens during initialization, let's come back when we + * are properly configured + */ + ics->offset = 0; + return; + } + + /* Get the base on the powerbus */ + comp = GETFIELD(PBCQ_NEST_IRSN_COMP, + pbcq->nest_regs[PBCQ_NEST_IRSN_COMPARE]); + mask = GETFIELD(PBCQ_NEST_IRSN_COMP, + pbcq->nest_regs[PBCQ_NEST_IRSN_MASK]); + count = ((~mask) + 1) & 0x7ffff; + phb->total_irq = count; + + /* Sanity checks */ + if ((global + PNV_PHB3_NUM_LSI) > count) { + phb3_error(phb, "LSIs out of reach: LSI base=%d total irq=%d", global, + count); + } + + if (count > 2048) { + phb3_error(phb, "More interrupts than supported: %d", count); + } + + if ((comp & mask) != comp) { + phb3_error(phb, "IRQ compare bits not in mask: comp=0x%x mask=0x%x", + comp, mask); + comp &= mask; + } + /* Setup LSI offset */ + ics->offset = comp + global; + + /* Setup MSI offset */ + pnv_phb3_msi_update_config(&phb->msis, comp, count - PNV_PHB3_NUM_LSI); +} + +static void pnv_phb3_lsi_src_id_write(PnvPHB3 *phb, uint64_t val) +{ + /* Sanitize content */ + val &= PHB_LSI_SRC_ID; + phb->regs[PHB_LSI_SOURCE_ID >> 3] = val; + pnv_phb3_remap_irqs(phb); +} + +static void pnv_phb3_rtc_invalidate(PnvPHB3 *phb, uint64_t val) +{ + PnvPhb3DMASpace *ds; + + /* Always invalidate all for now ... 
*/ + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + ds->pe_num = PHB_INVALID_PE; + } +} + + +static void pnv_phb3_update_msi_regions(PnvPhb3DMASpace *ds) +{ + uint64_t cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; + + if (cfg & PHB_PHB3C_32BIT_MSI_EN) { + if (!memory_region_is_mapped(&ds->msi32_mr)) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + 0xffff0000, &ds->msi32_mr); + } + } else { + if (memory_region_is_mapped(&ds->msi32_mr)) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi32_mr); + } + } + + if (cfg & PHB_PHB3C_64BIT_MSI_EN) { + if (!memory_region_is_mapped(&ds->msi64_mr)) { + memory_region_add_subregion(MEMORY_REGION(&ds->dma_mr), + (1ull << 60), &ds->msi64_mr); + } + } else { + if (memory_region_is_mapped(&ds->msi64_mr)) { + memory_region_del_subregion(MEMORY_REGION(&ds->dma_mr), + &ds->msi64_mr); + } + } +} + +static void pnv_phb3_update_all_msi_regions(PnvPHB3 *phb) +{ + PnvPhb3DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + pnv_phb3_update_msi_regions(ds); + } +} + +void pnv_phb3_reg_write(void *opaque, hwaddr off, uint64_t val, unsigned size) +{ + PnvPHB3 *phb = opaque; + bool changed; + + /* Special case configuration data */ + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + pnv_phb3_config_write(phb, off & 0x3, size, val); + return; + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return; + } + + /* Handle masking & filtering */ + switch (off) { + case PHB_M64_UPPER_BITS: + val &= 0xfffc000000000000ull; + break; + case PHB_Q_DMA_R: + /* + * This is enough logic to make SW happy but we aren't actually + * quiescing the DMAs + */ + if (val & PHB_Q_DMA_R_AUTORESET) { + val = 0; + } else { + val &= PHB_Q_DMA_R_QUIESCE_DMA; + } + break; + /* LEM stuff */ + case PHB_LEM_FIR_AND_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] &= val; + return; + case PHB_LEM_FIR_OR_MASK: + phb->regs[PHB_LEM_FIR_ACCUM >> 3] |= val; + return; + case PHB_LEM_ERROR_AND_MASK: + phb->regs[PHB_LEM_ERROR_MASK >> 3] &= val; + return; + case PHB_LEM_ERROR_OR_MASK: + phb->regs[PHB_LEM_ERROR_MASK >> 3] |= val; + return; + case PHB_LEM_WOF: + val = 0; + break; + } + + /* Record whether it changed */ + changed = phb->regs[off >> 3] != val; + + /* Store in register cache first */ + phb->regs[off >> 3] = val; + + /* Handle side effects */ + switch (off) { + case PHB_PHB3_CONFIG: + if (changed) { + pnv_phb3_update_all_msi_regions(phb); + } + /* fall through */ + case PHB_M32_BASE_ADDR: + case PHB_M32_BASE_MASK: + case PHB_M32_START_ADDR: + if (changed) { + pnv_phb3_check_m32(phb); + } + break; + case PHB_M64_UPPER_BITS: + if (changed) { + pnv_phb3_check_all_m64s(phb); + } + break; + case PHB_LSI_SOURCE_ID: + if (changed) { + pnv_phb3_lsi_src_id_write(phb, val); + } + break; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + pnv_phb3_ioda_write(phb, val); + break; + + /* RTC invalidation */ + case PHB_RTC_INVALIDATE: + pnv_phb3_rtc_invalidate(phb, val); + break; + + /* FFI request */ + case PHB_FFI_REQUEST: + pnv_phb3_msi_ffi(&phb->msis, val); + break; + + /* Silent simple writes */ + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_RBA_BAR: + case PHB_IVT_BAR: + case PHB_FFI_LOCK: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + break; + + /* Noise on anything else */ + default: + 
qemu_log_mask(LOG_UNIMP, "phb3: reg_write 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } +} + +uint64_t pnv_phb3_reg_read(void *opaque, hwaddr off, unsigned size) +{ + PnvPHB3 *phb = opaque; + PCIHostState *pci = PCI_HOST_BRIDGE(phb); + uint64_t val; + + if ((off & 0xfffc) == PHB_CONFIG_DATA) { + return pnv_phb3_config_read(phb, off & 0x3, size); + } + + /* Other registers are 64-bit only */ + if (size != 8 || off & 0x7) { + phb3_error(phb, "Invalid register access, offset: 0x%"PRIx64" size: %d", + off, size); + return ~0ull; + } + + /* Default read from cache */ + val = phb->regs[off >> 3]; + + switch (off) { + /* Simulate venice DD2.0 */ + case PHB_VERSION: + return 0x000000a300000005ull; + case PHB_PCIE_SYSTEM_CONFIG: + return 0x441100fc30000000; + + /* IODA table accesses */ + case PHB_IODA_DATA0: + return pnv_phb3_ioda_read(phb); + + /* Link training always appears trained */ + case PHB_PCIE_DLP_TRAIN_CTL: + if (!pci_find_device(pci->bus, 1, 0)) { + return 0; + } + return PHB_PCIE_DLP_INBAND_PRESENCE | PHB_PCIE_DLP_TC_DL_LINKACT; + + /* FFI Lock */ + case PHB_FFI_LOCK: + /* Set lock and return previous value */ + phb->regs[off >> 3] |= PHB_FFI_LOCK_STATE; + return val; + + /* DMA read sync: make it look like it's complete */ + case PHB_DMARD_SYNC: + return PHB_DMARD_SYNC_COMPLETE; + + /* Silent simple reads */ + case PHB_PHB3_CONFIG: + case PHB_M32_BASE_ADDR: + case PHB_M32_BASE_MASK: + case PHB_M32_START_ADDR: + case PHB_CONFIG_ADDRESS: + case PHB_IODA_ADDR: + case PHB_RTC_INVALIDATE: + case PHB_TCE_KILL: + case PHB_TCE_SPEC_CTL: + case PHB_PEST_BAR: + case PHB_PELTV_BAR: + case PHB_RTT_BAR: + case PHB_RBA_BAR: + case PHB_IVT_BAR: + case PHB_M64_UPPER_BITS: + case PHB_LEM_FIR_ACCUM: + case PHB_LEM_ERROR_MASK: + case PHB_LEM_ACTION0: + case PHB_LEM_ACTION1: + break; + + /* Noise on anything else */ + default: + qemu_log_mask(LOG_UNIMP, "phb3: reg_read 0x%"PRIx64"=%"PRIx64"\n", + off, val); + } + return val; +} + +static const MemoryRegionOps pnv_phb3_reg_ops = { + .read = pnv_phb3_reg_read, + .write = pnv_phb3_reg_write, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static int pnv_phb3_map_irq(PCIDevice *pci_dev, int irq_num) +{ + /* Check that out properly ... */ + return irq_num & 3; +} + +static void pnv_phb3_set_irq(void *opaque, int irq_num, int level) +{ + PnvPHB3 *phb = opaque; + + /* LSI only ... */ + if (irq_num > 3) { + phb3_error(phb, "Unknown IRQ to set %d", irq_num); + } + qemu_set_irq(phb->qirqs[irq_num], level); +} + +static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds) +{ + uint64_t rtt, addr; + uint16_t rte; + int bus_num; + + /* Already resolved ? */ + if (ds->pe_num != PHB_INVALID_PE) { + return true; + } + + /* We need to lookup the RTT */ + rtt = ds->phb->regs[PHB_RTT_BAR >> 3]; + if (!(rtt & PHB_RTT_BAR_ENABLE)) { + phb3_error(ds->phb, "DMA with RTT BAR disabled !"); + /* Set error bits ? fence ? ... */ + return false; + } + + /* Read RTE */ + bus_num = pci_bus_num(ds->bus); + addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; + addr += 2 * ((bus_num << 8) | ds->devfn); + if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); + /* Set error bits ? fence ? ... */ + return false; + } + rte = be16_to_cpu(rte); + + /* Fail upon reading of invalid PE# */ + if (rte >= PNV_PHB3_NUM_PE) { + phb3_error(ds->phb, "RTE for RID 0x%x invalid (%04x", ds->devfn, rte); + /* Set error bits ? fence ? 
... */ + return false; + } + ds->pe_num = rte; + return true; +} + +static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, + bool is_write, uint64_t tve, + IOMMUTLBEntry *tlb) +{ + uint64_t tta = GETFIELD(IODA2_TVT_TABLE_ADDR, tve); + int32_t lev = GETFIELD(IODA2_TVT_NUM_LEVELS, tve); + uint32_t tts = GETFIELD(IODA2_TVT_TCE_TABLE_SIZE, tve); + uint32_t tps = GETFIELD(IODA2_TVT_IO_PSIZE, tve); + PnvPHB3 *phb = ds->phb; + + /* Invalid levels */ + if (lev > 4) { + phb3_error(phb, "Invalid #levels in TVE %d", lev); + return; + } + + /* IO Page Size of 0 means untranslated, else use TCEs */ + if (tps == 0) { + /* + * We only support non-translate in top window. + * + * TODO: Venice/Murano support it on bottom window above 4G and + * Naples suports it on everything + */ + if (!(tve & PPC_BIT(51))) { + phb3_error(phb, "xlate for invalid non-translate TVE"); + return; + } + /* TODO: Handle boundaries */ + + /* Use 4k pages like q35 ... for now */ + tlb->iova = addr & 0xfffffffffffff000ull; + tlb->translated_addr = addr & 0x0003fffffffff000ull; + tlb->addr_mask = 0xfffull; + tlb->perm = IOMMU_RW; + } else { + uint32_t tce_shift, tbl_shift, sh; + uint64_t base, taddr, tce, tce_mask; + + /* TVE disabled ? */ + if (tts == 0) { + phb3_error(phb, "xlate for invalid translated TVE"); + return; + } + + /* Address bits per bottom level TCE entry */ + tce_shift = tps + 11; + + /* Address bits per table level */ + tbl_shift = tts + 8; + + /* Top level table base address */ + base = tta << 12; + + /* Total shift to first level */ + sh = tbl_shift * lev + tce_shift; + + /* TODO: Multi-level untested */ + while ((lev--) >= 0) { + /* Grab the TCE address */ + taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); + if (dma_memory_read(&address_space_memory, taddr, &tce, + sizeof(tce))) { + phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr); + return; + } + tce = be64_to_cpu(tce); + + /* Check permission for indirect TCE */ + if ((lev >= 0) && !(tce & 3)) { + phb3_error(phb, "Invalid indirect TCE at 0x%"PRIx64, taddr); + phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, + is_write ? 'W' : 'R', tve); + phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", + tta, lev, tts, tps); + return; + } + sh -= tbl_shift; + base = tce & ~0xfffull; + } + + /* We exit the loop with TCE being the final TCE */ + tce_mask = ~((1ull << tce_shift) - 1); + tlb->iova = addr & tce_mask; + tlb->translated_addr = tce & tce_mask; + tlb->addr_mask = ~tce_mask; + tlb->perm = tce & 3; + if ((is_write & !(tce & 2)) || ((!is_write) && !(tce & 1))) { + phb3_error(phb, "TCE access fault at 0x%"PRIx64, taddr); + phb3_error(phb, " xlate %"PRIx64":%c TVE=%"PRIx64, addr, + is_write ? 'W' : 'R', tve); + phb3_error(phb, " tta=%"PRIx64" lev=%d tts=%d tps=%d", + tta, lev, tts, tps); + } + } +} + +static IOMMUTLBEntry pnv_phb3_translate_iommu(IOMMUMemoryRegion *iommu, + hwaddr addr, + IOMMUAccessFlags flag, + int iommu_idx) +{ + PnvPhb3DMASpace *ds = container_of(iommu, PnvPhb3DMASpace, dma_mr); + int tve_sel; + uint64_t tve, cfg; + IOMMUTLBEntry ret = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = 0, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + PnvPHB3 *phb = ds->phb; + + /* Resolve PE# */ + if (!pnv_phb3_resolve_pe(ds)) { + phb3_error(phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", + ds->bus, pci_bus_num(ds->bus), ds->devfn); + return ret; + } + + /* Check top bits */ + switch (addr >> 60) { + case 00: + /* DMA or 32-bit MSI ? 
*/ + cfg = ds->phb->regs[PHB_PHB3_CONFIG >> 3]; + if ((cfg & PHB_PHB3C_32BIT_MSI_EN) && + ((addr & 0xffffffffffff0000ull) == 0xffff0000ull)) { + phb3_error(phb, "xlate on 32-bit MSI region"); + return ret; + } + /* Choose TVE XXX Use PHB3 Control Register */ + tve_sel = (addr >> 59) & 1; + tve = ds->phb->ioda_TVT[ds->pe_num * 2 + tve_sel]; + pnv_phb3_translate_tve(ds, addr, flag & IOMMU_WO, tve, &ret); + break; + case 01: + phb3_error(phb, "xlate on 64-bit MSI region"); + break; + default: + phb3_error(phb, "xlate on unsupported address 0x%"PRIx64, addr); + } + return ret; +} + +#define TYPE_PNV_PHB3_IOMMU_MEMORY_REGION "pnv-phb3-iommu-memory-region" +#define PNV_PHB3_IOMMU_MEMORY_REGION(obj) \ + OBJECT_CHECK(IOMMUMemoryRegion, (obj), TYPE_PNV_PHB3_IOMMU_MEMORY_REGION) + +static void pnv_phb3_iommu_memory_region_class_init(ObjectClass *klass, + void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = pnv_phb3_translate_iommu; +} + +static const TypeInfo pnv_phb3_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, + .class_init = pnv_phb3_iommu_memory_region_class_init, +}; + +/* + * MSI/MSIX memory region implementation. + * The handler handles both MSI and MSIX. + */ +static void pnv_phb3_msi_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + PnvPhb3DMASpace *ds = opaque; + + /* Resolve PE# */ + if (!pnv_phb3_resolve_pe(ds)) { + phb3_error(ds->phb, "Failed to resolve PE# for bus @%p (%d) devfn 0x%x", + ds->bus, pci_bus_num(ds->bus), ds->devfn); + return; + } + + pnv_phb3_msi_send(&ds->phb->msis, addr, data, ds->pe_num); +} + +/* There is no .read as the read result is undefined by PCI spec */ +static uint64_t pnv_phb3_msi_read(void *opaque, hwaddr addr, unsigned size) +{ + PnvPhb3DMASpace *ds = opaque; + + phb3_error(ds->phb, "invalid read @ 0x%" HWADDR_PRIx, addr); + return -1; +} + +static const MemoryRegionOps pnv_phb3_msi_ops = { + .read = pnv_phb3_msi_read, + .write = pnv_phb3_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN +}; + +static AddressSpace *pnv_phb3_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + PnvPHB3 *phb = opaque; + PnvPhb3DMASpace *ds; + + QLIST_FOREACH(ds, &phb->dma_spaces, list) { + if (ds->bus == bus && ds->devfn == devfn) { + break; + } + } + + if (ds == NULL) { + ds = g_malloc0(sizeof(PnvPhb3DMASpace)); + ds->bus = bus; + ds->devfn = devfn; + ds->pe_num = PHB_INVALID_PE; + ds->phb = phb; + memory_region_init_iommu(&ds->dma_mr, sizeof(ds->dma_mr), + TYPE_PNV_PHB3_IOMMU_MEMORY_REGION, + OBJECT(phb), "phb3_iommu", UINT64_MAX); + address_space_init(&ds->dma_as, MEMORY_REGION(&ds->dma_mr), + "phb3_iommu"); + memory_region_init_io(&ds->msi32_mr, OBJECT(phb), &pnv_phb3_msi_ops, + ds, "msi32", 0x10000); + memory_region_init_io(&ds->msi64_mr, OBJECT(phb), &pnv_phb3_msi_ops, + ds, "msi64", 0x100000); + pnv_phb3_update_msi_regions(ds); + + QLIST_INSERT_HEAD(&phb->dma_spaces, ds, list); + } + return &ds->dma_as; +} + +static void pnv_phb3_instance_init(Object *obj) +{ + PnvPHB3 *phb = PNV_PHB3(obj); + + QLIST_INIT(&phb->dma_spaces); + + /* LSI sources */ + object_initialize_child(obj, "lsi", &phb->lsis, sizeof(phb->lsis), + TYPE_ICS, &error_abort, NULL); + + /* Default init ... 
will be fixed by HW inits */ + phb->lsis.offset = 0; + + /* MSI sources */ + object_initialize_child(obj, "msi", &phb->msis, sizeof(phb->msis), + TYPE_PHB3_MSI, &error_abort, NULL); + + /* Power Bus Common Queue */ + object_initialize_child(obj, "pbcq", &phb->pbcq, sizeof(phb->pbcq), + TYPE_PNV_PBCQ, &error_abort, NULL); + + /* Root Port */ + object_initialize_child(obj, "root", &phb->root, sizeof(phb->root), + TYPE_PNV_PHB3_ROOT_PORT, &error_abort, NULL); + qdev_prop_set_int32(DEVICE(&phb->root), "addr", PCI_DEVFN(0, 0)); + qdev_prop_set_bit(DEVICE(&phb->root), "multifunction", false); +} + +static void pnv_phb3_realize(DeviceState *dev, Error **errp) +{ + PnvPHB3 *phb = PNV_PHB3(dev); + PCIHostState *pci = PCI_HOST_BRIDGE(dev); + PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); + Error *local_err = NULL; + int i; + + if (phb->phb_id >= PNV8_CHIP_PHB3_MAX) { + error_setg(errp, "invalid PHB index: %d", phb->phb_id); + return; + } + + /* LSI sources */ + object_property_set_link(OBJECT(&phb->lsis), OBJECT(pnv), "xics", + &error_abort); + object_property_set_int(OBJECT(&phb->lsis), PNV_PHB3_NUM_LSI, "nr-irqs", + &error_abort); + object_property_set_bool(OBJECT(&phb->lsis), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + for (i = 0; i < phb->lsis.nr_irqs; i++) { + ics_set_irq_type(&phb->lsis, i, true); + } + + phb->qirqs = qemu_allocate_irqs(ics_set_irq, &phb->lsis, phb->lsis.nr_irqs); + + /* MSI sources */ + object_property_set_link(OBJECT(&phb->msis), OBJECT(phb), "phb", + &error_abort); + object_property_set_link(OBJECT(&phb->msis), OBJECT(pnv), "xics", + &error_abort); + object_property_set_int(OBJECT(&phb->msis), PHB3_MAX_MSI, "nr-irqs", + &error_abort); + object_property_set_bool(OBJECT(&phb->msis), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Power Bus Common Queue */ + object_property_set_link(OBJECT(&phb->pbcq), OBJECT(phb), "phb", + &error_abort); + object_property_set_bool(OBJECT(&phb->pbcq), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* Controller Registers */ + memory_region_init_io(&phb->mr_regs, OBJECT(phb), &pnv_phb3_reg_ops, phb, + "phb3-regs", 0x1000); + + /* + * PHB3 doesn't support IO space. However, qemu gets very upset if + * we don't have an IO region to anchor IO BARs onto so we just + * initialize one which we never hook up to anything + */ + memory_region_init(&phb->pci_io, OBJECT(phb), "pci-io", 0x10000); + memory_region_init(&phb->pci_mmio, OBJECT(phb), "pci-mmio", + PCI_MMIO_TOTAL_SIZE); + + pci->bus = pci_register_root_bus(dev, "root-bus", + pnv_phb3_set_irq, pnv_phb3_map_irq, phb, + &phb->pci_mmio, &phb->pci_io, + 0, 4, TYPE_PNV_PHB3_ROOT_BUS); + + pci_setup_iommu(pci->bus, pnv_phb3_dma_iommu, phb); + + /* Add a single Root port */ + qdev_prop_set_uint8(DEVICE(&phb->root), "chassis", phb->chip_id); + qdev_prop_set_uint16(DEVICE(&phb->root), "slot", phb->phb_id); + qdev_set_parent_bus(DEVICE(&phb->root), BUS(pci->bus)); + qdev_init_nofail(DEVICE(&phb->root)); +} + +void pnv_phb3_update_regions(PnvPHB3 *phb) +{ + PnvPBCQState *pbcq = &phb->pbcq; + + /* Unmap first always */ + if (memory_region_is_mapped(&phb->mr_regs)) { + memory_region_del_subregion(&pbcq->phbbar, &phb->mr_regs); + } + + /* Map registers if enabled */ + if (memory_region_is_mapped(&pbcq->phbbar)) { + /* TODO: We should use the PHB BAR 2 register but we don't ... 
*/ + memory_region_add_subregion(&pbcq->phbbar, 0, &phb->mr_regs); + } + + /* Check/update m32 */ + if (memory_region_is_mapped(&phb->mr_m32)) { + pnv_phb3_check_m32(phb); + } + pnv_phb3_check_all_m64s(phb); +} + +static const char *pnv_phb3_root_bus_path(PCIHostState *host_bridge, + PCIBus *rootbus) +{ + PnvPHB3 *phb = PNV_PHB3(host_bridge); + + snprintf(phb->bus_path, sizeof(phb->bus_path), "00%02x:%02x", + phb->chip_id, phb->phb_id); + return phb->bus_path; +} + +static Property pnv_phb3_properties[] = { + DEFINE_PROP_UINT32("index", PnvPHB3, phb_id, 0), + DEFINE_PROP_UINT32("chip-id", PnvPHB3, chip_id, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static void pnv_phb3_class_init(ObjectClass *klass, void *data) +{ + PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass); + DeviceClass *dc = DEVICE_CLASS(klass); + + hc->root_bus_path = pnv_phb3_root_bus_path; + dc->realize = pnv_phb3_realize; + device_class_set_props(dc, pnv_phb3_properties); + set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); +} + +static const TypeInfo pnv_phb3_type_info = { + .name = TYPE_PNV_PHB3, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(PnvPHB3), + .class_init = pnv_phb3_class_init, + .instance_init = pnv_phb3_instance_init, +}; + +static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) +{ + BusClass *k = BUS_CLASS(klass); + + /* + * PHB3 has only a single root complex. Enforce the limit on the + * parent bus + */ + k->max_dev = 1; +} + +static const TypeInfo pnv_phb3_root_bus_info = { + .name = TYPE_PNV_PHB3_ROOT_BUS, + .parent = TYPE_PCIE_BUS, + .class_init = pnv_phb3_root_bus_class_init, + .interfaces = (InterfaceInfo[]) { + { INTERFACE_PCIE_DEVICE }, + { } + }, +}; + +static void pnv_phb3_root_port_realize(DeviceState *dev, Error **errp) +{ + PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(dev); + Error *local_err = NULL; + + rpc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +static void pnv_phb3_root_port_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + PCIERootPortClass *rpc = PCIE_ROOT_PORT_CLASS(klass); + + dc->desc = "IBM PHB3 PCIE Root Port"; + + device_class_set_parent_realize(dc, pnv_phb3_root_port_realize, + &rpc->parent_realize); + + k->vendor_id = PCI_VENDOR_ID_IBM; + k->device_id = 0x03dc; + k->revision = 0; + + rpc->exp_offset = 0x48; + rpc->aer_offset = 0x100; +} + +static const TypeInfo pnv_phb3_root_port_info = { + .name = TYPE_PNV_PHB3_ROOT_PORT, + .parent = TYPE_PCIE_ROOT_PORT, + .instance_size = sizeof(PnvPHB3RootPort), + .class_init = pnv_phb3_root_port_class_init, +}; + +static void pnv_phb3_register_types(void) +{ + type_register_static(&pnv_phb3_root_bus_info); + type_register_static(&pnv_phb3_root_port_info); + type_register_static(&pnv_phb3_type_info); + type_register_static(&pnv_phb3_iommu_memory_region_info); +} + +type_init(pnv_phb3_register_types) diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c new file mode 100644 index 0000000000..ecfc1b2c4e --- /dev/null +++ b/hw/pci-host/pnv_phb3_msi.c @@ -0,0 +1,349 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/ppc/pnv.h" +#include "hw/pci/msi.h" +#include "monitor/monitor.h" +#include "hw/irq.h" +#include "hw/qdev-properties.h" +#include "sysemu/reset.h" + +static uint64_t phb3_msi_ive_addr(PnvPHB3 *phb, int srcno) +{ + uint64_t ivtbar = phb->regs[PHB_IVT_BAR >> 3]; + uint64_t phbctl = phb->regs[PHB_CONTROL >> 3]; + + if (!(ivtbar & PHB_IVT_BAR_ENABLE)) { + qemu_log_mask(LOG_GUEST_ERROR, "Failed access to disable IVT BAR !"); + return 0; + } + + if (srcno >= (ivtbar & PHB_IVT_LENGTH_MASK)) { + qemu_log_mask(LOG_GUEST_ERROR, "MSI out of bounds (%d vs 0x%"PRIx64")", + srcno, (uint64_t) (ivtbar & PHB_IVT_LENGTH_MASK)); + return 0; + } + + ivtbar &= PHB_IVT_BASE_ADDRESS_MASK; + + if (phbctl & PHB_CTRL_IVE_128_BYTES) { + return ivtbar + 128 * srcno; + } else { + return ivtbar + 16 * srcno; + } +} + +static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive) +{ + uint64_t ive_addr, ive; + + ive_addr = phb3_msi_ive_addr(phb, srcno); + if (!ive_addr) { + return false; + } + + if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) { + qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64, + ive_addr); + return false; + } + *out_ive = be64_to_cpu(ive); + + return true; +} + +static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen) +{ + uint64_t ive_addr; + uint8_t p = 0x01 | (gen << 1); + + ive_addr = phb3_msi_ive_addr(msi->phb, srcno); + if (!ive_addr) { + return; + } + + if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr); + } +} + +static void phb3_msi_set_q(Phb3MsiState *msi, int srcno) +{ + uint64_t ive_addr; + uint8_t q = 0x01; + + ive_addr = phb3_msi_ive_addr(msi->phb, srcno); + if (!ive_addr) { + return; + } + + if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr); + } +} + +static void phb3_msi_try_send(Phb3MsiState *msi, int srcno, bool force) +{ + ICSState *ics = ICS(msi); + uint64_t ive; + uint64_t server, prio, pq, gen; + + if (!phb3_msi_read_ive(msi->phb, srcno, &ive)) { + return; + } + + server = GETFIELD(IODA2_IVT_SERVER, ive); + prio = GETFIELD(IODA2_IVT_PRIORITY, ive); + if (!force) { + pq = GETFIELD(IODA2_IVT_Q, ive) | (GETFIELD(IODA2_IVT_P, ive) << 1); + } else { + pq = 0; + } + gen = GETFIELD(IODA2_IVT_GEN, ive); + + /* + * The low order 2 bits are the link pointer (Type II interrupts). + * Shift back to get a valid IRQ server. 
+ */ + server >>= 2; + + switch (pq) { + case 0: /* 00 */ + if (prio == 0xff) { + /* Masked, set Q */ + phb3_msi_set_q(msi, srcno); + } else { + /* Enabled, set P and send */ + phb3_msi_set_p(msi, srcno, gen); + icp_irq(ics, server, srcno + ics->offset, prio); + } + break; + case 2: /* 10 */ + /* Already pending, set Q */ + phb3_msi_set_q(msi, srcno); + break; + case 1: /* 01 */ + case 3: /* 11 */ + default: + /* Just drop stuff if Q already set */ + break; + } +} + +static void phb3_msi_set_irq(void *opaque, int srcno, int val) +{ + Phb3MsiState *msi = PHB3_MSI(opaque); + + if (val) { + phb3_msi_try_send(msi, srcno, false); + } +} + + +void pnv_phb3_msi_send(Phb3MsiState *msi, uint64_t addr, uint16_t data, + int32_t dev_pe) +{ + ICSState *ics = ICS(msi); + uint64_t ive; + uint16_t pe; + uint32_t src = ((addr >> 4) & 0xffff) | (data & 0x1f); + + if (src >= ics->nr_irqs) { + qemu_log_mask(LOG_GUEST_ERROR, "MSI %d out of bounds", src); + return; + } + if (dev_pe >= 0) { + if (!phb3_msi_read_ive(msi->phb, src, &ive)) { + return; + } + pe = GETFIELD(IODA2_IVT_PE, ive); + if (pe != dev_pe) { + qemu_log_mask(LOG_GUEST_ERROR, + "MSI %d send by PE#%d but assigned to PE#%d", + src, dev_pe, pe); + return; + } + } + qemu_irq_pulse(msi->qirqs[src]); +} + +void pnv_phb3_msi_ffi(Phb3MsiState *msi, uint64_t val) +{ + /* Emit interrupt */ + pnv_phb3_msi_send(msi, val, 0, -1); + + /* Clear FFI lock */ + msi->phb->regs[PHB_FFI_LOCK >> 3] = 0; +} + +static void phb3_msi_reject(ICSState *ics, uint32_t nr) +{ + Phb3MsiState *msi = PHB3_MSI(ics); + unsigned int srcno = nr - ics->offset; + unsigned int idx = srcno >> 6; + unsigned int bit = 1ull << (srcno & 0x3f); + + assert(srcno < PHB3_MAX_MSI); + + msi->rba[idx] |= bit; + msi->rba_sum |= (1u << idx); +} + +static void phb3_msi_resend(ICSState *ics) +{ + Phb3MsiState *msi = PHB3_MSI(ics); + unsigned int i, j; + + if (msi->rba_sum == 0) { + return; + } + + for (i = 0; i < 32; i++) { + if ((msi->rba_sum & (1u << i)) == 0) { + continue; + } + msi->rba_sum &= ~(1u << i); + for (j = 0; j < 64; j++) { + if ((msi->rba[i] & (1ull << j)) == 0) { + continue; + } + msi->rba[i] &= ~(1u << j); + phb3_msi_try_send(msi, i * 64 + j, true); + } + } +} + +static void phb3_msi_reset(DeviceState *dev) +{ + Phb3MsiState *msi = PHB3_MSI(dev); + ICSStateClass *icsc = ICS_GET_CLASS(dev); + + icsc->parent_reset(dev); + + memset(msi->rba, 0, sizeof(msi->rba)); + msi->rba_sum = 0; +} + +static void phb3_msi_reset_handler(void *dev) +{ + phb3_msi_reset(dev); +} + +void pnv_phb3_msi_update_config(Phb3MsiState *msi, uint32_t base, + uint32_t count) +{ + ICSState *ics = ICS(msi); + + if (count > PHB3_MAX_MSI) { + count = PHB3_MAX_MSI; + } + ics->nr_irqs = count; + ics->offset = base; +} + +static void phb3_msi_realize(DeviceState *dev, Error **errp) +{ + Phb3MsiState *msi = PHB3_MSI(dev); + ICSState *ics = ICS(msi); + ICSStateClass *icsc = ICS_GET_CLASS(ics); + Error *local_err = NULL; + + assert(msi->phb); + + icsc->parent_realize(dev, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + msi->qirqs = qemu_allocate_irqs(phb3_msi_set_irq, msi, ics->nr_irqs); + + qemu_register_reset(phb3_msi_reset_handler, dev); +} + +static void phb3_msi_instance_init(Object *obj) +{ + Phb3MsiState *msi = PHB3_MSI(obj); + ICSState *ics = ICS(obj); + + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, + (Object **)&msi->phb, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG, + &error_abort); + + /* Will be overriden later */ + ics->offset = 0; +} + +static void 
phb3_msi_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + ICSStateClass *isc = ICS_CLASS(klass); + + device_class_set_parent_realize(dc, phb3_msi_realize, + &isc->parent_realize); + device_class_set_parent_reset(dc, phb3_msi_reset, + &isc->parent_reset); + + isc->reject = phb3_msi_reject; + isc->resend = phb3_msi_resend; +} + +static const TypeInfo phb3_msi_info = { + .name = TYPE_PHB3_MSI, + .parent = TYPE_ICS, + .instance_size = sizeof(Phb3MsiState), + .class_init = phb3_msi_class_init, + .class_size = sizeof(ICSStateClass), + .instance_init = phb3_msi_instance_init, +}; + +static void pnv_phb3_msi_register_types(void) +{ + type_register_static(&phb3_msi_info); +} + +type_init(pnv_phb3_msi_register_types); + +void pnv_phb3_msi_pic_print_info(Phb3MsiState *msi, Monitor *mon) +{ + ICSState *ics = ICS(msi); + int i; + + monitor_printf(mon, "ICS %4x..%4x %p\n", + ics->offset, ics->offset + ics->nr_irqs - 1, ics); + + for (i = 0; i < ics->nr_irqs; i++) { + uint64_t ive; + + if (!phb3_msi_read_ive(msi->phb, i, &ive)) { + return; + } + + if (GETFIELD(IODA2_IVT_PRIORITY, ive) == 0xff) { + continue; + } + + monitor_printf(mon, " %4x %c%c server=%04x prio=%02x gen=%d\n", + ics->offset + i, + GETFIELD(IODA2_IVT_P, ive) ? 'P' : '-', + GETFIELD(IODA2_IVT_Q, ive) ? 'Q' : '-', + (uint32_t) GETFIELD(IODA2_IVT_SERVER, ive) >> 2, + (uint32_t) GETFIELD(IODA2_IVT_PRIORITY, ive), + (uint32_t) GETFIELD(IODA2_IVT_GEN, ive)); + } +} diff --git a/hw/pci-host/pnv_phb3_pbcq.c b/hw/pci-host/pnv_phb3_pbcq.c new file mode 100644 index 0000000000..6f0c05be68 --- /dev/null +++ b/hw/pci-host/pnv_phb3_pbcq.c @@ -0,0 +1,357 @@ +/* + * QEMU PowerPC PowerNV (POWER8) PHB3 model + * + * Copyright (c) 2014-2020, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. + */ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/log.h" +#include "target/ppc/cpu.h" +#include "hw/ppc/fdt.h" +#include "hw/pci-host/pnv_phb3_regs.h" +#include "hw/pci-host/pnv_phb3.h" +#include "hw/ppc/pnv.h" +#include "hw/ppc/pnv_xscom.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" + +#include <libfdt.h> + +#define phb3_pbcq_error(pbcq, fmt, ...) \ + qemu_log_mask(LOG_GUEST_ERROR, "phb3_pbcq[%d:%d]: " fmt "\n", \ + (pbcq)->phb->chip_id, (pbcq)->phb->phb_id, ## __VA_ARGS__) + +static uint64_t pnv_pbcq_nest_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + return pbcq->nest_regs[offset]; +} + +static uint64_t pnv_pbcq_pci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + return pbcq->pci_regs[offset]; +} + +static uint64_t pnv_pbcq_spci_xscom_read(void *opaque, hwaddr addr, + unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t offset = addr >> 3; + + if (offset == PBCQ_SPCI_ASB_DATA) { + return pnv_phb3_reg_read(pbcq->phb, + pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], 8); + } + return pbcq->spci_regs[offset]; +} + +static void pnv_pbcq_update_map(PnvPBCQState *pbcq) +{ + uint64_t bar_en = pbcq->nest_regs[PBCQ_NEST_BAR_EN]; + uint64_t bar, mask, size; + + /* + * NOTE: This will really not work well if those are remapped + * after the PHB has created its sub regions. 
We could do better + * if we had a way to resize regions but we don't really care + * that much in practice as the stuff below really only happens + * once early during boot + */ + + /* Handle unmaps */ + if (memory_region_is_mapped(&pbcq->mmbar0) && + !(bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar0); + } + if (memory_region_is_mapped(&pbcq->mmbar1) && + !(bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { + memory_region_del_subregion(get_system_memory(), &pbcq->mmbar1); + } + if (memory_region_is_mapped(&pbcq->phbbar) && + !(bar_en & PBCQ_NEST_BAR_EN_PHB)) { + memory_region_del_subregion(get_system_memory(), &pbcq->phbbar); + } + + /* Update PHB */ + pnv_phb3_update_regions(pbcq->phb); + + /* Handle maps */ + if (!memory_region_is_mapped(&pbcq->mmbar0) && + (bar_en & PBCQ_NEST_BAR_EN_MMIO0)) { + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] >> 14; + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0]; + size = ((~mask) >> 14) + 1; + memory_region_init(&pbcq->mmbar0, OBJECT(pbcq), "pbcq-mmio0", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar0); + pbcq->mmio0_base = bar; + pbcq->mmio0_size = size; + } + if (!memory_region_is_mapped(&pbcq->mmbar1) && + (bar_en & PBCQ_NEST_BAR_EN_MMIO1)) { + bar = pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] >> 14; + mask = pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1]; + size = ((~mask) >> 14) + 1; + memory_region_init(&pbcq->mmbar1, OBJECT(pbcq), "pbcq-mmio1", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->mmbar1); + pbcq->mmio1_base = bar; + pbcq->mmio1_size = size; + } + if (!memory_region_is_mapped(&pbcq->phbbar) + && (bar_en & PBCQ_NEST_BAR_EN_PHB)) { + bar = pbcq->nest_regs[PBCQ_NEST_PHB_BAR] >> 14; + size = 0x1000; + memory_region_init(&pbcq->phbbar, OBJECT(pbcq), "pbcq-phb", size); + memory_region_add_subregion(get_system_memory(), bar, &pbcq->phbbar); + } + + /* Update PHB */ + pnv_phb3_update_regions(pbcq->phb); +} + +static void pnv_pbcq_nest_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_NEST_MMIO_BAR0: + case PBCQ_NEST_MMIO_BAR1: + case PBCQ_NEST_MMIO_MASK0: + case PBCQ_NEST_MMIO_MASK1: + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & + (PBCQ_NEST_BAR_EN_MMIO0 | + PBCQ_NEST_BAR_EN_MMIO1)) { + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); + } + pbcq->nest_regs[reg] = val & 0xffffffffc0000000ull; + break; + case PBCQ_NEST_PHB_BAR: + if (pbcq->nest_regs[PBCQ_NEST_BAR_EN] & PBCQ_NEST_BAR_EN_PHB) { + phb3_pbcq_error(pbcq, "Changing enabled BAR unsupported"); + } + pbcq->nest_regs[reg] = val & 0xfffffffffc000000ull; + break; + case PBCQ_NEST_BAR_EN: + pbcq->nest_regs[reg] = val & 0xf800000000000000ull; + pnv_pbcq_update_map(pbcq); + pnv_phb3_remap_irqs(pbcq->phb); + break; + case PBCQ_NEST_IRSN_COMPARE: + case PBCQ_NEST_IRSN_MASK: + pbcq->nest_regs[reg] = val & PBCQ_NEST_IRSN_COMP; + pnv_phb3_remap_irqs(pbcq->phb); + break; + case PBCQ_NEST_LSI_SRC_ID: + pbcq->nest_regs[reg] = val & PBCQ_NEST_LSI_SRC; + pnv_phb3_remap_irqs(pbcq->phb); + break; + default: + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static void pnv_pbcq_pci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_PCI_BAR2: + pbcq->pci_regs[reg] = val & 0xfffffffffc000000ull; + pnv_pbcq_update_map(pbcq); + default: + 
phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static void pnv_pbcq_spci_xscom_write(void *opaque, hwaddr addr, + uint64_t val, unsigned size) +{ + PnvPBCQState *pbcq = PNV_PBCQ(opaque); + uint32_t reg = addr >> 3; + + switch (reg) { + case PBCQ_SPCI_ASB_ADDR: + pbcq->spci_regs[reg] = val & 0xfff; + break; + case PBCQ_SPCI_ASB_STATUS: + pbcq->spci_regs[reg] &= ~val; + break; + case PBCQ_SPCI_ASB_DATA: + pnv_phb3_reg_write(pbcq->phb, pbcq->spci_regs[PBCQ_SPCI_ASB_ADDR], + val, 8); + break; + case PBCQ_SPCI_AIB_CAPP_EN: + case PBCQ_SPCI_CAPP_SEC_TMR: + break; + default: + phb3_pbcq_error(pbcq, "%s @0x%"HWADDR_PRIx"=%"PRIx64, __func__, + addr, val); + } +} + +static const MemoryRegionOps pnv_pbcq_nest_xscom_ops = { + .read = pnv_pbcq_nest_xscom_read, + .write = pnv_pbcq_nest_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static const MemoryRegionOps pnv_pbcq_pci_xscom_ops = { + .read = pnv_pbcq_pci_xscom_read, + .write = pnv_pbcq_pci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static const MemoryRegionOps pnv_pbcq_spci_xscom_ops = { + .read = pnv_pbcq_spci_xscom_read, + .write = pnv_pbcq_spci_xscom_write, + .valid.min_access_size = 8, + .valid.max_access_size = 8, + .impl.min_access_size = 8, + .impl.max_access_size = 8, + .endianness = DEVICE_BIG_ENDIAN, +}; + +static void pnv_pbcq_default_bars(PnvPBCQState *pbcq) +{ + uint64_t mm0, mm1, reg; + PnvPHB3 *phb = pbcq->phb; + + mm0 = 0x3d00000000000ull + 0x4000000000ull * phb->chip_id + + 0x1000000000ull * phb->phb_id; + mm1 = 0x3ff8000000000ull + 0x0200000000ull * phb->chip_id + + 0x0080000000ull * phb->phb_id; + reg = 0x3fffe40000000ull + 0x0000400000ull * phb->chip_id + + 0x0000100000ull * phb->phb_id; + + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR0] = mm0 << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_BAR1] = mm1 << 14; + pbcq->nest_regs[PBCQ_NEST_PHB_BAR] = reg << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK0] = 0x3fff000000000ull << 14; + pbcq->nest_regs[PBCQ_NEST_MMIO_MASK1] = 0x3ffff80000000ull << 14; + pbcq->pci_regs[PBCQ_PCI_BAR2] = reg << 14; +} + +static void pnv_pbcq_realize(DeviceState *dev, Error **errp) +{ + PnvPBCQState *pbcq = PNV_PBCQ(dev); + PnvPHB3 *phb; + char name[32]; + + assert(pbcq->phb); + phb = pbcq->phb; + + /* TODO: Fix OPAL to do that: establish default BAR values */ + pnv_pbcq_default_bars(pbcq); + + /* Initialize the XSCOM region for the PBCQ registers */ + snprintf(name, sizeof(name), "xscom-pbcq-nest-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_nest_regs, OBJECT(dev), + &pnv_pbcq_nest_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_NEST_SIZE); + snprintf(name, sizeof(name), "xscom-pbcq-pci-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_pci_regs, OBJECT(dev), + &pnv_pbcq_pci_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_PCI_SIZE); + snprintf(name, sizeof(name), "xscom-pbcq-spci-%d.%d", + phb->chip_id, phb->phb_id); + pnv_xscom_region_init(&pbcq->xscom_spci_regs, OBJECT(dev), + &pnv_pbcq_spci_xscom_ops, pbcq, name, + PNV_XSCOM_PBCQ_SPCI_SIZE); +} + +static int pnv_pbcq_dt_xscom(PnvXScomInterface *dev, void *fdt, + int xscom_offset) +{ + const char compat[] = "ibm,power8-pbcq"; + PnvPHB3 *phb = PNV_PBCQ(dev)->phb; + char *name; + int offset; + uint32_t lpc_pcba = PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * 
phb->phb_id; + uint32_t reg[] = { + cpu_to_be32(lpc_pcba), + cpu_to_be32(PNV_XSCOM_PBCQ_NEST_SIZE), + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id), + cpu_to_be32(PNV_XSCOM_PBCQ_PCI_SIZE), + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id), + cpu_to_be32(PNV_XSCOM_PBCQ_SPCI_SIZE) + }; + + name = g_strdup_printf("pbcq@%x", lpc_pcba); + offset = fdt_add_subnode(fdt, xscom_offset, name); + _FDT(offset); + g_free(name); + + _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg)))); + + _FDT((fdt_setprop_cell(fdt, offset, "ibm,phb-index", phb->phb_id))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,chip-id", phb->chip_id))); + _FDT((fdt_setprop(fdt, offset, "compatible", compat, + sizeof(compat)))); + return 0; +} + +static void phb3_pbcq_instance_init(Object *obj) +{ + PnvPBCQState *pbcq = PNV_PBCQ(obj); + + object_property_add_link(obj, "phb", TYPE_PNV_PHB3, + (Object **)&pbcq->phb, + object_property_allow_set_link, + OBJ_PROP_LINK_STRONG, + &error_abort); +} + +static void pnv_pbcq_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass); + + xdc->dt_xscom = pnv_pbcq_dt_xscom; + + dc->realize = pnv_pbcq_realize; +} + +static const TypeInfo pnv_pbcq_type_info = { + .name = TYPE_PNV_PBCQ, + .parent = TYPE_DEVICE, + .instance_size = sizeof(PnvPBCQState), + .instance_init = phb3_pbcq_instance_init, + .class_init = pnv_pbcq_class_init, + .interfaces = (InterfaceInfo[]) { + { TYPE_PNV_XSCOM_INTERFACE }, + { } + } +}; + +static void pnv_pbcq_register_types(void) +{ + type_register_static(&pnv_pbcq_type_info); +} + +type_init(pnv_pbcq_register_types) diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index d5ecec6321..139c857b1e 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -616,8 +616,13 @@ static ISABus *pnv_isa_create(PnvChip *chip, Error **errp) static void pnv_chip_power8_pic_print_info(PnvChip *chip, Monitor *mon) { Pnv8Chip *chip8 = PNV8_CHIP(chip); + int i; ics_pic_print_info(&chip8->psi.ics, mon); + for (i = 0; i < chip->num_phbs; i++) { + pnv_phb3_msi_pic_print_info(&chip8->phbs[i].msis, mon); + ics_pic_print_info(&chip8->phbs[i].lsis, mon); + } } static void pnv_chip_power9_pic_print_info(PnvChip *chip, Monitor *mon) @@ -1026,7 +1031,10 @@ static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu, static void pnv_chip_power8_instance_init(Object *obj) { + PnvChip *chip = PNV_CHIP(obj); Pnv8Chip *chip8 = PNV8_CHIP(obj); + PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj); + int i; object_property_add_link(obj, "xics", TYPE_XICS_FABRIC, (Object **)&chip8->xics, @@ -1045,6 +1053,17 @@ static void pnv_chip_power8_instance_init(Object *obj) object_initialize_child(obj, "homer", &chip8->homer, sizeof(chip8->homer), TYPE_PNV8_HOMER, &error_abort, NULL); + + for (i = 0; i < pcc->num_phbs; i++) { + object_initialize_child(obj, "phb[*]", &chip8->phbs[i], + sizeof(chip8->phbs[i]), TYPE_PNV_PHB3, + &error_abort, NULL); + } + + /* + * Number of PHBs is the chip default + */ + chip->num_phbs = pcc->num_phbs; } static void pnv_chip_icp_realize(Pnv8Chip *chip8, Error **errp) @@ -1083,6 +1102,7 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) Pnv8Chip *chip8 = PNV8_CHIP(dev); Pnv8Psi *psi8 = &chip8->psi; Error *local_err = NULL; + int i; assert(chip8->xics); @@ -1163,6 +1183,33 @@ static void pnv_chip_power8_realize(DeviceState *dev, Error **errp) /* Homer mmio region */ memory_region_add_subregion(get_system_memory(), PNV_HOMER_BASE(chip), &chip8->homer.regs); + + 
/* PHB3 controllers */ + for (i = 0; i < chip->num_phbs; i++) { + PnvPHB3 *phb = &chip8->phbs[i]; + PnvPBCQState *pbcq = &phb->pbcq; + + object_property_set_int(OBJECT(phb), i, "index", &error_fatal); + object_property_set_int(OBJECT(phb), chip->chip_id, "chip-id", + &error_fatal); + object_property_set_bool(OBJECT(phb), true, "realized", &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + qdev_set_parent_bus(DEVICE(phb), sysbus_get_default()); + + /* Populate the XSCOM address space. */ + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_NEST_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_nest_regs); + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_PCI_BASE + 0x400 * phb->phb_id, + &pbcq->xscom_pci_regs); + pnv_xscom_add_subregion(chip, + PNV_XSCOM_PBCQ_SPCI_BASE + 0x040 * phb->phb_id, + &pbcq->xscom_spci_regs); + } } static uint32_t pnv_chip_power8_xscom_pcba(PnvChip *chip, uint64_t addr) @@ -1178,6 +1225,7 @@ static void pnv_chip_power8e_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x221ef04980000000ull; /* P8 Murano DD2.1 */ k->cores_mask = POWER8E_CORE_MASK; + k->num_phbs = 3; k->core_pir = pnv_chip_core_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; @@ -1201,6 +1249,7 @@ static void pnv_chip_power8_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x220ea04980000000ull; /* P8 Venice DD2.0 */ k->cores_mask = POWER8_CORE_MASK; + k->num_phbs = 3; k->core_pir = pnv_chip_core_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; @@ -1224,6 +1273,7 @@ static void pnv_chip_power8nvl_class_init(ObjectClass *klass, void *data) k->chip_cfam_id = 0x120d304980000000ull; /* P8 Naples DD1.0 */ k->cores_mask = POWER8_CORE_MASK; + k->num_phbs = 3; k->core_pir = pnv_chip_core_pir_p8; k->intc_create = pnv_chip_power8_intc_create; k->intc_reset = pnv_chip_power8_intc_reset; @@ -1748,14 +1798,23 @@ PowerPCCPU *pnv_chip_find_cpu(PnvChip *chip, uint32_t pir) static ICSState *pnv_ics_get(XICSFabric *xi, int irq) { PnvMachineState *pnv = PNV_MACHINE(xi); - int i; + int i, j; for (i = 0; i < pnv->num_chips; i++) { + PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); if (ics_valid_irq(&chip8->psi.ics, irq)) { return &chip8->psi.ics; } + for (j = 0; j < chip->num_phbs; j++) { + if (ics_valid_irq(&chip8->phbs[j].lsis, irq)) { + return &chip8->phbs[j].lsis; + } + if (ics_valid_irq(ICS(&chip8->phbs[j].msis), irq)) { + return ICS(&chip8->phbs[j].msis); + } + } } return NULL; } @@ -1763,11 +1822,17 @@ static ICSState *pnv_ics_get(XICSFabric *xi, int irq) static void pnv_ics_resend(XICSFabric *xi) { PnvMachineState *pnv = PNV_MACHINE(xi); - int i; + int i, j; for (i = 0; i < pnv->num_chips; i++) { + PnvChip *chip = pnv->chips[i]; Pnv8Chip *chip8 = PNV8_CHIP(pnv->chips[i]); + ics_resend(&chip8->psi.ics); + for (j = 0; j < chip->num_phbs; j++) { + ics_resend(&chip8->phbs[j].lsis); + ics_resend(ICS(&chip8->phbs[j].msis)); + } } } |
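As a quick way to exercise the new model, one can boot the PowerNV machine and dump the interrupt controller state from the monitor; `info pic` now also walks the per-PHB3 LSI and MSI sources through pnv_chip_power8_pic_print_info(). The machine alias and invocation below are illustrative and may differ depending on the QEMU build:

```
$ qemu-system-ppc64 -M powernv8 -nographic
  (switch to the monitor with Ctrl-a c)
(qemu) info pic
```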