aboutsummaryrefslogtreecommitdiff
path: root/hw
diff options
context:
space:
mode:
authorPeter Maydell <peter.maydell@linaro.org>2022-03-02 12:38:46 +0000
committerPeter Maydell <peter.maydell@linaro.org>2022-03-02 12:38:46 +0000
commit64ada298b98a51eb2512607f6e6180cb330c47b1 (patch)
tree18bd53f57fa2bf127485a0c15c33021ab024cdf1 /hw
parent44efeb90b2d06635fd4052fa080b2a2ea480501f (diff)
parent169518430562b454a1531610d2711c6b920929f6 (diff)
Merge remote-tracking branch 'remotes/legoater/tags/pull-ppc-20220302' into staging
ppc-7.0 queue * ppc/pnv fixes * PMU EBB support * target/ppc: PowerISA Vector/VSX instruction batch * ppc/pnv: Extension of the powernv10 machine with XIVE2 ans PHB5 models * spapr allocation cleanups # gpg: Signature made Wed 02 Mar 2022 11:00:42 GMT # gpg: using RSA key A0F66548F04895EBFE6B0B6051A343C7CFFBECA1 # gpg: Good signature from "Cédric Le Goater <clg@kaod.org>" [undefined] # gpg: WARNING: This key is not certified with a trusted signature! # gpg: There is no indication that the signature belongs to the owner. # Primary key fingerprint: A0F6 6548 F048 95EB FE6B 0B60 51A3 43C7 CFFB ECA1 * remotes/legoater/tags/pull-ppc-20220302: (87 commits) hw/ppc/spapr_vio.c: use g_autofree in spapr_dt_vdevice() hw/ppc/spapr_rtas.c: use g_autofree in rtas_ibm_get_system_parameter() spapr_pci_nvlink2.c: use g_autofree in spapr_phb_nvgpu_ram_populate_dt() hw/ppc/spapr_numa.c: simplify spapr_numa_write_assoc_lookup_arrays() hw/ppc/spapr_drc.c: use g_autofree in spapr_drc_by_index() hw/ppc/spapr_drc.c: use g_autofree in spapr_dr_connector_new() hw/ppc/spapr_drc.c: use g_autofree in drc_unrealize() hw/ppc/spapr_drc.c: use g_autofree in drc_realize() hw/ppc/spapr_drc.c: use g_auto in spapr_dt_drc() hw/ppc/spapr_caps.c: use g_autofree in spapr_caps_add_properties() hw/ppc/spapr_caps.c: use g_autofree in spapr_cap_get_string() hw/ppc/spapr_caps.c: use g_autofree in spapr_cap_set_string() hw/ppc/spapr.c: fail early if no firmware found in machine_init() hw/ppc/spapr.c: use g_autofree in spapr_dt_chosen() pnv/xive2: Add support for 8bits thread id pnv/xive2: Add support for automatic save&restore xive2: Add a get_config() handler for the router configuration pnv/xive2: Add support XIVE2 P9-compat mode (or Gen1) ppc/pnv: add XIVE Gen2 TIMA support pnv/xive2: Introduce new capability bits ... Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'hw')
-rw-r--r--hw/intc/meson.build4
-rw-r--r--hw/intc/pnv_xive.c37
-rw-r--r--hw/intc/pnv_xive2.c2128
-rw-r--r--hw/intc/pnv_xive2_regs.h442
-rw-r--r--hw/intc/spapr_xive.c25
-rw-r--r--hw/intc/xive.c77
-rw-r--r--hw/intc/xive2.c1018
-rw-r--r--hw/pci-host/pnv_phb4.c143
-rw-r--r--hw/pci-host/pnv_phb4_pec.c53
-rw-r--r--hw/pci-host/trace-events2
-rw-r--r--hw/ppc/pnv.c227
-rw-r--r--hw/ppc/pnv_homer.c64
-rw-r--r--hw/ppc/pnv_occ.c16
-rw-r--r--hw/ppc/pnv_psi.c38
-rw-r--r--hw/ppc/spapr.c31
-rw-r--r--hw/ppc/spapr_caps.c22
-rw-r--r--hw/ppc/spapr_drc.c47
-rw-r--r--hw/ppc/spapr_numa.c16
-rw-r--r--hw/ppc/spapr_pci_nvlink2.c10
-rw-r--r--hw/ppc/spapr_rtas.c25
-rw-r--r--hw/ppc/spapr_vio.c6
21 files changed, 4278 insertions, 153 deletions
diff --git a/hw/intc/meson.build b/hw/intc/meson.build
index 7466024402..d953197413 100644
--- a/hw/intc/meson.build
+++ b/hw/intc/meson.build
@@ -42,7 +42,7 @@ specific_ss.add(when: 'CONFIG_OMAP', if_true: files('omap_intc.c'))
specific_ss.add(when: 'CONFIG_OMPIC', if_true: files('ompic.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_OPENPIC'],
if_true: files('openpic_kvm.c'))
-specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c'))
+specific_ss.add(when: 'CONFIG_POWERNV', if_true: files('xics_pnv.c', 'pnv_xive.c', 'pnv_xive2.c'))
specific_ss.add(when: 'CONFIG_PPC_UIC', if_true: files('ppc-uic.c'))
specific_ss.add(when: 'CONFIG_RASPI', if_true: files('bcm2835_ic.c', 'bcm2836_control.c'))
specific_ss.add(when: 'CONFIG_RX_ICU', if_true: files('rx_icu.c'))
@@ -52,7 +52,7 @@ specific_ss.add(when: 'CONFIG_SH_INTC', if_true: files('sh_intc.c'))
specific_ss.add(when: 'CONFIG_RISCV_ACLINT', if_true: files('riscv_aclint.c'))
specific_ss.add(when: 'CONFIG_RISCV_APLIC', if_true: files('riscv_aplic.c'))
specific_ss.add(when: 'CONFIG_SIFIVE_PLIC', if_true: files('sifive_plic.c'))
-specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c'))
+specific_ss.add(when: 'CONFIG_XICS', if_true: files('xics.c', 'xive2.c'))
specific_ss.add(when: ['CONFIG_KVM', 'CONFIG_XICS'],
if_true: files('xics_kvm.c'))
specific_ss.add(when: 'CONFIG_PSERIES', if_true: files('xics_spapr.c', 'spapr_xive.c'))
diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c
index 621b20a03f..1ce1d7b07d 100644
--- a/hw/intc/pnv_xive.c
+++ b/hw/intc/pnv_xive.c
@@ -403,6 +403,34 @@ static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas);
}
+static int pnv_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ PnvXive *xive = PNV_XIVE(xrtr);
+
+ if (pnv_xive_block_id(xive) != blk) {
+ xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ *pq = xive_source_esb_get(&xive->ipi_source, idx);
+ return 0;
+}
+
+static int pnv_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ PnvXive *xive = PNV_XIVE(xrtr);
+
+ if (pnv_xive_block_id(xive) != blk) {
+ xive_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
+ return 0;
+}
+
/*
* One bit per thread id. The first register PC_THREAD_EN_REG0 covers
* the first cores 0-15 (normal) of the chip or 0-7 (fused). The
@@ -499,12 +527,12 @@ static PnvXive *pnv_xive_tm_get_xive(PowerPCCPU *cpu)
* event notification to the Router. This is required on a multichip
* system.
*/
-static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno)
+static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
{
PnvXive *xive = PNV_XIVE(xn);
uint8_t blk = pnv_xive_block_id(xive);
- xive_router_notify(xn, XIVE_EAS(blk, srcno));
+ xive_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
}
/*
@@ -1351,7 +1379,8 @@ static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
blk = XIVE_EAS_BLOCK(val);
idx = XIVE_EAS_INDEX(val);
- xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx));
+ xive_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
+ !!(val & XIVE_TRIGGER_PQ));
}
static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
@@ -1971,6 +2000,8 @@ static void pnv_xive_class_init(ObjectClass *klass, void *data)
device_class_set_props(dc, pnv_xive_properties);
xrc->get_eas = pnv_xive_get_eas;
+ xrc->get_pq = pnv_xive_get_pq;
+ xrc->set_pq = pnv_xive_set_pq;
xrc->get_end = pnv_xive_get_end;
xrc->write_end = pnv_xive_write_end;
xrc->get_nvt = pnv_xive_get_nvt;
diff --git a/hw/intc/pnv_xive2.c b/hw/intc/pnv_xive2.c
new file mode 100644
index 0000000000..87303b4064
--- /dev/null
+++ b/hw/intc/pnv_xive2.c
@@ -0,0 +1,2128 @@
+/*
+ * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/dma.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/fdt.h"
+#include "hw/ppc/pnv.h"
+#include "hw/ppc/pnv_core.h"
+#include "hw/ppc/pnv_xscom.h"
+#include "hw/ppc/xive2.h"
+#include "hw/ppc/pnv_xive.h"
+#include "hw/ppc/xive_regs.h"
+#include "hw/ppc/xive2_regs.h"
+#include "hw/ppc/ppc.h"
+#include "hw/qdev-properties.h"
+#include "sysemu/reset.h"
+
+#include <libfdt.h>
+
+#include "pnv_xive2_regs.h"
+
+#undef XIVE2_DEBUG
+
+/*
+ * Virtual structures table (VST)
+ */
+#define SBE_PER_BYTE 4
+
+typedef struct XiveVstInfo {
+ const char *name;
+ uint32_t size;
+ uint32_t max_blocks;
+} XiveVstInfo;
+
+static const XiveVstInfo vst_infos[] = {
+
+ [VST_EAS] = { "EAT", sizeof(Xive2Eas), 16 },
+ [VST_ESB] = { "ESB", 1, 16 },
+ [VST_END] = { "ENDT", sizeof(Xive2End), 16 },
+
+ [VST_NVP] = { "NVPT", sizeof(Xive2Nvp), 16 },
+ [VST_NVG] = { "NVGT", sizeof(Xive2Nvgc), 16 },
+ [VST_NVC] = { "NVCT", sizeof(Xive2Nvgc), 16 },
+
+ [VST_IC] = { "IC", 1 /* ? */ , 16 }, /* Topology # */
+ [VST_SYNC] = { "SYNC", 1 /* ? */ , 16 }, /* Topology # */
+
+ /*
+ * This table contains the backing store pages for the interrupt
+ * fifos of the VC sub-engine in case of overflow.
+ *
+ * 0 - IPI,
+ * 1 - HWD,
+ * 2 - NxC,
+ * 3 - INT,
+ * 4 - OS-Queue,
+ * 5 - Pool-Queue,
+ * 6 - Hard-Queue
+ */
+ [VST_ERQ] = { "ERQ", 1, VC_QUEUE_COUNT },
+};
+
+#define xive2_error(xive, fmt, ...) \
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \
+ (xive)->chip->chip_id, ## __VA_ARGS__);
+
+/*
+ * QEMU version of the GETFIELD/SETFIELD macros
+ *
+ * TODO: It might be better to use the existing extract64() and
+ * deposit64() but this means that all the register definitions will
+ * change and become incompatible with the ones found in skiboot.
+ *
+ * Keep it as it is for now until we find a common ground.
+ */
+static inline uint64_t GETFIELD(uint64_t mask, uint64_t word)
+{
+ return (word & mask) >> ctz64(mask);
+}
+
+static inline uint64_t SETFIELD(uint64_t mask, uint64_t word,
+ uint64_t value)
+{
+ return (word & ~mask) | ((value << ctz64(mask)) & mask);
+}
+
+/*
+ * TODO: Document block id override
+ */
+static uint32_t pnv_xive2_block_id(PnvXive2 *xive)
+{
+ uint8_t blk = xive->chip->chip_id;
+ uint64_t cfg_val = xive->cq_regs[CQ_XIVE_CFG >> 3];
+
+ if (cfg_val & CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE) {
+ blk = GETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, cfg_val);
+ }
+
+ return blk;
+}
+
+/*
+ * Remote access to controllers. HW uses MMIOs. For now, a simple scan
+ * of the chips is good enough.
+ *
+ * TODO: Block scope support
+ */
+static PnvXive2 *pnv_xive2_get_remote(uint8_t blk)
+{
+ PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
+ int i;
+
+ for (i = 0; i < pnv->num_chips; i++) {
+ Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
+ PnvXive2 *xive = &chip10->xive;
+
+ if (pnv_xive2_block_id(xive) == blk) {
+ return xive;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * VST accessors for ESB, EAT, ENDT, NVP
+ *
+ * Indirect VST tables are arrays of VSDs pointing to a page (of same
+ * size). Each page is a direct VST table.
+ */
+
+#define XIVE_VSD_SIZE 8
+
+/* Indirect page size can be 4K, 64K, 2M, 16M. */
+static uint64_t pnv_xive2_vst_page_size_allowed(uint32_t page_shift)
+{
+ return page_shift == 12 || page_shift == 16 ||
+ page_shift == 21 || page_shift == 24;
+}
+
+static uint64_t pnv_xive2_vst_addr_direct(PnvXive2 *xive, uint32_t type,
+ uint64_t vsd, uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+ uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
+ uint32_t idx_max;
+
+ idx_max = vst_tsize / info->size - 1;
+ if (idx > idx_max) {
+#ifdef XIVE2_DEBUG
+ xive2_error(xive, "VST: %s entry %x out of range [ 0 .. %x ] !?",
+ info->name, idx, idx_max);
+#endif
+ return 0;
+ }
+
+ return vst_addr + idx * info->size;
+}
+
+static uint64_t pnv_xive2_vst_addr_indirect(PnvXive2 *xive, uint32_t type,
+ uint64_t vsd, uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vsd_addr;
+ uint32_t vsd_idx;
+ uint32_t page_shift;
+ uint32_t vst_per_page;
+
+ /* Get the page size of the indirect table. */
+ vsd_addr = vsd & VSD_ADDRESS_MASK;
+ ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+ return 0;
+ }
+
+ page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
+
+ if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
+ xive2_error(xive, "VST: invalid %s page shift %d", info->name,
+ page_shift);
+ return 0;
+ }
+
+ vst_per_page = (1ull << page_shift) / info->size;
+ vsd_idx = idx / vst_per_page;
+
+ /* Load the VSD we are looking for, if not already done */
+ if (vsd_idx) {
+ vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE;
+ ldq_be_dma(&address_space_memory, vsd_addr, &vsd,
+ MEMTXATTRS_UNSPECIFIED);
+
+ if (!(vsd & VSD_ADDRESS_MASK)) {
+ xive2_error(xive, "VST: invalid %s entry %x !?", info->name, idx);
+ return 0;
+ }
+
+ /*
+ * Check that the pages have a consistent size across the
+ * indirect table
+ */
+ if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) {
+ xive2_error(xive, "VST: %s entry %x indirect page size differ !?",
+ info->name, idx);
+ return 0;
+ }
+ }
+
+ return pnv_xive2_vst_addr_direct(xive, type, vsd, (idx % vst_per_page));
+}
+
+static uint64_t pnv_xive2_vst_addr(PnvXive2 *xive, uint32_t type, uint8_t blk,
+ uint32_t idx)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t vsd;
+
+ if (blk >= info->max_blocks) {
+ xive2_error(xive, "VST: invalid block id %d for VST %s %d !?",
+ blk, info->name, idx);
+ return 0;
+ }
+
+ vsd = xive->vsds[type][blk];
+
+ /* Remote VST access */
+ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) {
+ xive = pnv_xive2_get_remote(blk);
+
+ return xive ? pnv_xive2_vst_addr(xive, type, blk, idx) : 0;
+ }
+
+ if (VSD_INDIRECT & vsd) {
+ return pnv_xive2_vst_addr_indirect(xive, type, vsd, idx);
+ }
+
+ return pnv_xive2_vst_addr_direct(xive, type, vsd, idx);
+}
+
+static int pnv_xive2_vst_read(PnvXive2 *xive, uint32_t type, uint8_t blk,
+ uint32_t idx, void *data)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
+
+ if (!addr) {
+ return -1;
+ }
+
+ cpu_physical_memory_read(addr, data, info->size);
+ return 0;
+}
+
+#define XIVE_VST_WORD_ALL -1
+
+static int pnv_xive2_vst_write(PnvXive2 *xive, uint32_t type, uint8_t blk,
+ uint32_t idx, void *data, uint32_t word_number)
+{
+ const XiveVstInfo *info = &vst_infos[type];
+ uint64_t addr = pnv_xive2_vst_addr(xive, type, blk, idx);
+
+ if (!addr) {
+ return -1;
+ }
+
+ if (word_number == XIVE_VST_WORD_ALL) {
+ cpu_physical_memory_write(addr, data, info->size);
+ } else {
+ cpu_physical_memory_write(addr + word_number * 4,
+ data + word_number * 4, 4);
+ }
+ return 0;
+}
+
+static int pnv_xive2_get_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ PnvXive2 *xive = PNV_XIVE2(xrtr);
+
+ if (pnv_xive2_block_id(xive) != blk) {
+ xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ *pq = xive_source_esb_get(&xive->ipi_source, idx);
+ return 0;
+}
+
+static int pnv_xive2_set_pq(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ PnvXive2 *xive = PNV_XIVE2(xrtr);
+
+ if (pnv_xive2_block_id(xive) != blk) {
+ xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ *pq = xive_source_esb_set(&xive->ipi_source, idx, *pq);
+ return 0;
+}
+
+static int pnv_xive2_get_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ Xive2End *end)
+{
+ return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_END, blk, idx, end);
+}
+
+static int pnv_xive2_write_end(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ Xive2End *end, uint8_t word_number)
+{
+ return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_END, blk, idx, end,
+ word_number);
+}
+
+static int pnv_xive2_end_update(PnvXive2 *xive)
+{
+ uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
+ xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
+ xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ int i;
+ uint64_t endc_watch[4];
+
+ for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
+ endc_watch[i] =
+ cpu_to_be64(xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i]);
+ }
+
+ return pnv_xive2_vst_write(xive, VST_END, blk, idx, endc_watch,
+ XIVE_VST_WORD_ALL);
+}
+
+static void pnv_xive2_end_cache_load(PnvXive2 *xive)
+{
+ uint8_t blk = GETFIELD(VC_ENDC_WATCH_BLOCK_ID,
+ xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(VC_ENDC_WATCH_INDEX,
+ xive->vc_regs[(VC_ENDC_WATCH0_SPEC >> 3)]);
+ uint64_t endc_watch[4] = { 0 };
+ int i;
+
+ if (pnv_xive2_vst_read(xive, VST_END, blk, idx, endc_watch)) {
+ xive2_error(xive, "VST: no END entry %x/%x !?", blk, idx);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(endc_watch); i++) {
+ xive->vc_regs[(VC_ENDC_WATCH0_DATA0 >> 3) + i] =
+ be64_to_cpu(endc_watch[i]);
+ }
+}
+
+static int pnv_xive2_get_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ Xive2Nvp *nvp)
+{
+ return pnv_xive2_vst_read(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp);
+}
+
+static int pnv_xive2_write_nvp(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ Xive2Nvp *nvp, uint8_t word_number)
+{
+ return pnv_xive2_vst_write(PNV_XIVE2(xrtr), VST_NVP, blk, idx, nvp,
+ word_number);
+}
+
+static int pnv_xive2_nvp_update(PnvXive2 *xive)
+{
+ uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
+ xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
+ xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ int i;
+ uint64_t nxc_watch[4];
+
+ for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
+ nxc_watch[i] =
+ cpu_to_be64(xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i]);
+ }
+
+ return pnv_xive2_vst_write(xive, VST_NVP, blk, idx, nxc_watch,
+ XIVE_VST_WORD_ALL);
+}
+
+static void pnv_xive2_nvp_cache_load(PnvXive2 *xive)
+{
+ uint8_t blk = GETFIELD(PC_NXC_WATCH_BLOCK_ID,
+ xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ uint32_t idx = GETFIELD(PC_NXC_WATCH_INDEX,
+ xive->pc_regs[(PC_NXC_WATCH0_SPEC >> 3)]);
+ uint64_t nxc_watch[4] = { 0 };
+ int i;
+
+ if (pnv_xive2_vst_read(xive, VST_NVP, blk, idx, nxc_watch)) {
+ xive2_error(xive, "VST: no NVP entry %x/%x !?", blk, idx);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(nxc_watch); i++) {
+ xive->pc_regs[(PC_NXC_WATCH0_DATA0 >> 3) + i] =
+ be64_to_cpu(nxc_watch[i]);
+ }
+}
+
+static int pnv_xive2_get_eas(Xive2Router *xrtr, uint8_t blk, uint32_t idx,
+ Xive2Eas *eas)
+{
+ PnvXive2 *xive = PNV_XIVE2(xrtr);
+
+ if (pnv_xive2_block_id(xive) != blk) {
+ xive2_error(xive, "VST: EAS %x is remote !?", XIVE_EAS(blk, idx));
+ return -1;
+ }
+
+ return pnv_xive2_vst_read(xive, VST_EAS, blk, idx, eas);
+}
+
+static uint32_t pnv_xive2_get_config(Xive2Router *xrtr)
+{
+ PnvXive2 *xive = PNV_XIVE2(xrtr);
+ uint32_t cfg = 0;
+
+ if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS) {
+ cfg |= XIVE2_GEN1_TIMA_OS;
+ }
+
+ if (xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_EN_VP_SAVE_RESTORE) {
+ cfg |= XIVE2_VP_SAVE_RESTORE;
+ }
+
+ if (GETFIELD(CQ_XIVE_CFG_HYP_HARD_RANGE,
+ xive->cq_regs[CQ_XIVE_CFG >> 3]) == CQ_XIVE_CFG_THREADID_8BITS) {
+ cfg |= XIVE2_THREADID_8BITS;
+ }
+
+ return cfg;
+}
+
+static bool pnv_xive2_is_cpu_enabled(PnvXive2 *xive, PowerPCCPU *cpu)
+{
+ int pir = ppc_cpu_pir(cpu);
+ uint32_t fc = PNV10_PIR2FUSEDCORE(pir);
+ uint64_t reg = fc < 8 ? TCTXT_EN0 : TCTXT_EN1;
+ uint32_t bit = pir & 0x3f;
+
+ return xive->tctxt_regs[reg >> 3] & PPC_BIT(bit);
+}
+
+static int pnv_xive2_match_nvt(XivePresenter *xptr, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv, XiveTCTXMatch *match)
+{
+ PnvXive2 *xive = PNV_XIVE2(xptr);
+ PnvChip *chip = xive->chip;
+ int count = 0;
+ int i, j;
+ bool gen1_tima_os =
+ xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;
+
+ for (i = 0; i < chip->nr_cores; i++) {
+ PnvCore *pc = chip->cores[i];
+ CPUCore *cc = CPU_CORE(pc);
+
+ for (j = 0; j < cc->nr_threads; j++) {
+ PowerPCCPU *cpu = pc->threads[j];
+ XiveTCTX *tctx;
+ int ring;
+
+ if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
+ continue;
+ }
+
+ tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
+
+ if (gen1_tima_os) {
+ ring = xive_presenter_tctx_match(xptr, tctx, format, nvt_blk,
+ nvt_idx, cam_ignore,
+ logic_serv);
+ } else {
+ ring = xive2_presenter_tctx_match(xptr, tctx, format, nvt_blk,
+ nvt_idx, cam_ignore,
+ logic_serv);
+ }
+
+ /*
+ * Save the context and follow on to catch duplicates,
+ * that we don't support yet.
+ */
+ if (ring != -1) {
+ if (match->tctx) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: already found a "
+ "thread context NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return false;
+ }
+
+ match->ring = ring;
+ match->tctx = tctx;
+ count++;
+ }
+ }
+ }
+
+ return count;
+}
+
+static uint8_t pnv_xive2_get_block_id(Xive2Router *xrtr)
+{
+ return pnv_xive2_block_id(PNV_XIVE2(xrtr));
+}
+
+/*
+ * The TIMA MMIO space is shared among the chips and to identify the
+ * chip from which the access is being done, we extract the chip id
+ * from the PIR.
+ */
+static PnvXive2 *pnv_xive2_tm_get_xive(PowerPCCPU *cpu)
+{
+ int pir = ppc_cpu_pir(cpu);
+ XivePresenter *xptr = XIVE_TCTX(pnv_cpu_state(cpu)->intc)->xptr;
+ PnvXive2 *xive = PNV_XIVE2(xptr);
+
+ if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
+ xive2_error(xive, "IC: CPU %x is not enabled", pir);
+ }
+ return xive;
+}
+
+/*
+ * The internal sources of the interrupt controller have no knowledge
+ * of the XIVE2 chip on which they reside. Encode the block id in the
+ * source interrupt number before forwarding the source event
+ * notification to the Router. This is required on a multichip system.
+ */
+static void pnv_xive2_notify(XiveNotifier *xn, uint32_t srcno, bool pq_checked)
+{
+ PnvXive2 *xive = PNV_XIVE2(xn);
+ uint8_t blk = pnv_xive2_block_id(xive);
+
+ xive2_router_notify(xn, XIVE_EAS(blk, srcno), pq_checked);
+}
+
+/*
+ * Set Translation Tables
+ *
+ * TODO add support for multiple sets
+ */
+static int pnv_xive2_stt_set_data(PnvXive2 *xive, uint64_t val)
+{
+ uint8_t tsel = GETFIELD(CQ_TAR_SELECT, xive->cq_regs[CQ_TAR >> 3]);
+ uint8_t entry = GETFIELD(CQ_TAR_ENTRY_SELECT,
+ xive->cq_regs[CQ_TAR >> 3]);
+
+ switch (tsel) {
+ case CQ_TAR_NVPG:
+ case CQ_TAR_ESB:
+ case CQ_TAR_END:
+ xive->tables[tsel][entry] = val;
+ break;
+ default:
+ xive2_error(xive, "IC: unsupported table %d", tsel);
+ return -1;
+ }
+
+ if (xive->cq_regs[CQ_TAR >> 3] & CQ_TAR_AUTOINC) {
+ xive->cq_regs[CQ_TAR >> 3] = SETFIELD(CQ_TAR_ENTRY_SELECT,
+ xive->cq_regs[CQ_TAR >> 3], ++entry);
+ }
+
+ return 0;
+}
+/*
+ * Virtual Structure Tables (VST) configuration
+ */
+static void pnv_xive2_vst_set_exclusive(PnvXive2 *xive, uint8_t type,
+ uint8_t blk, uint64_t vsd)
+{
+ Xive2EndSource *end_xsrc = &xive->end_source;
+ XiveSource *xsrc = &xive->ipi_source;
+ const XiveVstInfo *info = &vst_infos[type];
+ uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
+ uint64_t vst_tsize = 1ull << page_shift;
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+
+ /* Basic checks */
+
+ if (VSD_INDIRECT & vsd) {
+ if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
+ xive2_error(xive, "VST: invalid %s page shift %d", info->name,
+ page_shift);
+ return;
+ }
+ }
+
+ if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
+ xive2_error(xive, "VST: %s table address 0x%"PRIx64
+ " is not aligned with page shift %d",
+ info->name, vst_addr, page_shift);
+ return;
+ }
+
+ /* Record the table configuration (in SRAM on HW) */
+ xive->vsds[type][blk] = vsd;
+
+ /* Now tune the models with the configuration provided by the FW */
+
+ switch (type) {
+ case VST_ESB:
+ /*
+ * Backing store pages for the source PQ bits. The model does
+ * not use these PQ bits backed in RAM because the XiveSource
+ * model has its own.
+ *
+ * If the table is direct, we can compute the number of PQ
+ * entries provisioned by FW (such as skiboot) and resize the
+ * ESB window accordingly.
+ */
+ if (!(VSD_INDIRECT & vsd)) {
+ memory_region_set_size(&xsrc->esb_mmio, vst_tsize * SBE_PER_BYTE
+ * (1ull << xsrc->esb_shift));
+ }
+
+ memory_region_add_subregion(&xive->esb_mmio, 0, &xsrc->esb_mmio);
+ break;
+
+ case VST_EAS: /* Nothing to be done */
+ break;
+
+ case VST_END:
+ /*
+ * Backing store pages for the END.
+ */
+ if (!(VSD_INDIRECT & vsd)) {
+ memory_region_set_size(&end_xsrc->esb_mmio, (vst_tsize / info->size)
+ * (1ull << end_xsrc->esb_shift));
+ }
+ memory_region_add_subregion(&xive->end_mmio, 0, &end_xsrc->esb_mmio);
+ break;
+
+ case VST_NVP: /* Not modeled */
+ case VST_NVG: /* Not modeled */
+ case VST_NVC: /* Not modeled */
+ case VST_IC: /* Not modeled */
+ case VST_SYNC: /* Not modeled */
+ case VST_ERQ: /* Not modeled */
+ break;
+
+ default:
+ g_assert_not_reached();
+ }
+}
+
+/*
+ * Both PC and VC sub-engines are configured as each use the Virtual
+ * Structure Tables
+ */
+static void pnv_xive2_vst_set_data(PnvXive2 *xive, uint64_t vsd)
+{
+ uint8_t mode = GETFIELD(VSD_MODE, vsd);
+ uint8_t type = GETFIELD(VC_VSD_TABLE_SELECT,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint8_t blk = GETFIELD(VC_VSD_TABLE_ADDRESS,
+ xive->vc_regs[VC_VSD_TABLE_ADDR >> 3]);
+ uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;
+
+ if (type > VST_ERQ) {
+ xive2_error(xive, "VST: invalid table type %d", type);
+ return;
+ }
+
+ if (blk >= vst_infos[type].max_blocks) {
+ xive2_error(xive, "VST: invalid block id %d for"
+ " %s table", blk, vst_infos[type].name);
+ return;
+ }
+
+ if (!vst_addr) {
+ xive2_error(xive, "VST: invalid %s table address",
+ vst_infos[type].name);
+ return;
+ }
+
+ switch (mode) {
+ case VSD_MODE_FORWARD:
+ xive->vsds[type][blk] = vsd;
+ break;
+
+ case VSD_MODE_EXCLUSIVE:
+ pnv_xive2_vst_set_exclusive(xive, type, blk, vsd);
+ break;
+
+ default:
+ xive2_error(xive, "VST: unsupported table mode %d", mode);
+ return;
+ }
+}
+
+/*
+ * MMIO handlers
+ */
+
+
+/*
+ * IC BAR layout
+ *
+ * Page 0: Internal CQ register accesses (reads & writes)
+ * Page 1: Internal PC register accesses (reads & writes)
+ * Page 2: Internal VC register accesses (reads & writes)
+ * Page 3: Internal TCTXT (TIMA) reg accesses (read & writes)
+ * Page 4: Notify Port page (writes only, w/data),
+ * Page 5: Reserved
+ * Page 6: Sync Poll page (writes only, dataless)
+ * Page 7: Sync Inject page (writes only, dataless)
+ * Page 8: LSI Trigger page (writes only, dataless)
+ * Page 9: LSI SB Management page (reads & writes dataless)
+ * Pages 10-255: Reserved
+ * Pages 256-383: Direct mapped Thread Context Area (reads & writes)
+ * covering the 128 threads in P10.
+ * Pages 384-511: Reserved
+ */
+typedef struct PnvXive2Region {
+ const char *name;
+ uint32_t pgoff;
+ uint32_t pgsize;
+ const MemoryRegionOps *ops;
+} PnvXive2Region;
+
+static const MemoryRegionOps pnv_xive2_ic_cq_ops;
+static const MemoryRegionOps pnv_xive2_ic_pc_ops;
+static const MemoryRegionOps pnv_xive2_ic_vc_ops;
+static const MemoryRegionOps pnv_xive2_ic_tctxt_ops;
+static const MemoryRegionOps pnv_xive2_ic_notify_ops;
+static const MemoryRegionOps pnv_xive2_ic_sync_ops;
+static const MemoryRegionOps pnv_xive2_ic_lsi_ops;
+static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops;
+
+/* 512 pages. 4K: 2M range, 64K: 32M range */
+static const PnvXive2Region pnv_xive2_ic_regions[] = {
+ { "xive-ic-cq", 0, 1, &pnv_xive2_ic_cq_ops },
+ { "xive-ic-vc", 1, 1, &pnv_xive2_ic_vc_ops },
+ { "xive-ic-pc", 2, 1, &pnv_xive2_ic_pc_ops },
+ { "xive-ic-tctxt", 3, 1, &pnv_xive2_ic_tctxt_ops },
+ { "xive-ic-notify", 4, 1, &pnv_xive2_ic_notify_ops },
+ /* page 5 reserved */
+ { "xive-ic-sync", 6, 2, &pnv_xive2_ic_sync_ops },
+ { "xive-ic-lsi", 8, 2, &pnv_xive2_ic_lsi_ops },
+ /* pages 10-255 reserved */
+ { "xive-ic-tm-indirect", 256, 128, &pnv_xive2_ic_tm_indirect_ops },
+ /* pages 384-511 reserved */
+};
+
+/*
+ * CQ operations
+ */
+
+static uint64_t pnv_xive2_ic_cq_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint32_t reg = offset >> 3;
+ uint64_t val = 0;
+
+ switch (offset) {
+ case CQ_XIVE_CAP: /* Set at reset */
+ case CQ_XIVE_CFG:
+ val = xive->cq_regs[reg];
+ break;
+ case CQ_MSGSND: /* TODO check the #cores of the machine */
+ val = 0xffffffff00000000;
+ break;
+ case CQ_CFG_PB_GEN:
+ val = CQ_CFG_PB_GEN_PB_INIT; /* TODO: fix CQ_CFG_PB_GEN default value */
+ break;
+ default:
+ xive2_error(xive, "CQ: invalid read @%"HWADDR_PRIx, offset);
+ }
+
+ return val;
+}
+
+static uint64_t pnv_xive2_bar_size(uint64_t val)
+{
+ return 1ull << (GETFIELD(CQ_BAR_RANGE, val) + 24);
+}
+
+static void pnv_xive2_ic_cq_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ MemoryRegion *sysmem = get_system_memory();
+ uint32_t reg = offset >> 3;
+ int i;
+
+ switch (offset) {
+ case CQ_XIVE_CFG:
+ case CQ_RST_CTL: /* TODO: reset all BARs */
+ break;
+
+ case CQ_IC_BAR:
+ xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
+ if (!(val & CQ_IC_BAR_VALID)) {
+ xive->ic_base = 0;
+ if (xive->cq_regs[reg] & CQ_IC_BAR_VALID) {
+ for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
+ memory_region_del_subregion(&xive->ic_mmio,
+ &xive->ic_mmios[i]);
+ }
+ memory_region_del_subregion(sysmem, &xive->ic_mmio);
+ }
+ } else {
+ xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
+ if (!(xive->cq_regs[reg] & CQ_IC_BAR_VALID)) {
+ for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
+ memory_region_add_subregion(&xive->ic_mmio,
+ pnv_xive2_ic_regions[i].pgoff << xive->ic_shift,
+ &xive->ic_mmios[i]);
+ }
+ memory_region_add_subregion(sysmem, xive->ic_base,
+ &xive->ic_mmio);
+ }
+ }
+ break;
+
+ case CQ_TM_BAR:
+ xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
+ if (!(val & CQ_TM_BAR_VALID)) {
+ xive->tm_base = 0;
+ if (xive->cq_regs[reg] & CQ_TM_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->tm_mmio);
+ }
+ } else {
+ xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
+ if (!(xive->cq_regs[reg] & CQ_TM_BAR_VALID)) {
+ memory_region_add_subregion(sysmem, xive->tm_base,
+ &xive->tm_mmio);
+ }
+ }
+ break;
+
+ case CQ_ESB_BAR:
+ xive->esb_shift = val & CQ_BAR_64K ? 16 : 12;
+ if (!(val & CQ_BAR_VALID)) {
+ xive->esb_base = 0;
+ if (xive->cq_regs[reg] & CQ_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->esb_mmio);
+ }
+ } else {
+ xive->esb_base = val & CQ_BAR_ADDR;
+ if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
+ memory_region_set_size(&xive->esb_mmio,
+ pnv_xive2_bar_size(val));
+ memory_region_add_subregion(sysmem, xive->esb_base,
+ &xive->esb_mmio);
+ }
+ }
+ break;
+
+ case CQ_END_BAR:
+ xive->end_shift = val & CQ_BAR_64K ? 16 : 12;
+ if (!(val & CQ_BAR_VALID)) {
+ xive->end_base = 0;
+ if (xive->cq_regs[reg] & CQ_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->end_mmio);
+ }
+ } else {
+ xive->end_base = val & CQ_BAR_ADDR;
+ if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
+ memory_region_set_size(&xive->end_mmio,
+ pnv_xive2_bar_size(val));
+ memory_region_add_subregion(sysmem, xive->end_base,
+ &xive->end_mmio);
+ }
+ }
+ break;
+
+ case CQ_NVC_BAR:
+ xive->nvc_shift = val & CQ_BAR_64K ? 16 : 12;
+ if (!(val & CQ_BAR_VALID)) {
+ xive->nvc_base = 0;
+ if (xive->cq_regs[reg] & CQ_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->nvc_mmio);
+ }
+ } else {
+ xive->nvc_base = val & CQ_BAR_ADDR;
+ if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
+ memory_region_set_size(&xive->nvc_mmio,
+ pnv_xive2_bar_size(val));
+ memory_region_add_subregion(sysmem, xive->nvc_base,
+ &xive->nvc_mmio);
+ }
+ }
+ break;
+
+ case CQ_NVPG_BAR:
+ xive->nvpg_shift = val & CQ_BAR_64K ? 16 : 12;
+ if (!(val & CQ_BAR_VALID)) {
+ xive->nvpg_base = 0;
+ if (xive->cq_regs[reg] & CQ_BAR_VALID) {
+ memory_region_del_subregion(sysmem, &xive->nvpg_mmio);
+ }
+ } else {
+ xive->nvpg_base = val & CQ_BAR_ADDR;
+ if (!(xive->cq_regs[reg] & CQ_BAR_VALID)) {
+ memory_region_set_size(&xive->nvpg_mmio,
+ pnv_xive2_bar_size(val));
+ memory_region_add_subregion(sysmem, xive->nvpg_base,
+ &xive->nvpg_mmio);
+ }
+ }
+ break;
+
+ case CQ_TAR: /* Set Translation Table Address */
+ break;
+ case CQ_TDR: /* Set Translation Table Data */
+ pnv_xive2_stt_set_data(xive, val);
+ break;
+ case CQ_FIRMASK_OR: /* FIR error reporting */
+ break;
+ default:
+ xive2_error(xive, "CQ: invalid write 0x%"HWADDR_PRIx, offset);
+ return;
+ }
+
+ xive->cq_regs[reg] = val;
+}
+
+static const MemoryRegionOps pnv_xive2_ic_cq_ops = {
+ .read = pnv_xive2_ic_cq_read,
+ .write = pnv_xive2_ic_cq_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
/*
 * VC (Virtualization Controller) sub-engine register reads.
 *
 * Several registers have read-to-clear semantics: reading them clears
 * a VALID/poll bit so that firmware polling for completion of a (not
 * modeled) cache operation succeeds on the first read.
 *
 * NOTE(review): an unrecognized offset returns 0 here, whereas the PC
 * handler below returns -1 — possibly an inconsistency to confirm
 * against the HW spec.
 */
static uint64_t pnv_xive2_ic_vc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;  /* 8-byte register index */

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
        val = xive->vc_regs[reg];
        break;

    /*
     * ESB cache updates (not modeled)
     */
    case VC_ESBC_FLUSH_CTRL:
        /* read-to-clear: flush is reported complete immediately */
        xive->vc_regs[reg] &= ~VC_ESBC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * EAS cache updates (not modeled)
     */
    case VC_EASC_FLUSH_CTRL:
        /* read-to-clear: flush is reported complete immediately */
        xive->vc_regs[reg] &= ~VC_EASC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        /* the watch engine never reports FULL or CONFLICT */
        xive->vc_regs[reg] &= ~(VC_ENDC_WATCH_FULL | VC_ENDC_WATCH_CONFLICT);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_end_cache_load(xive);
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        val = xive->vc_regs[reg];
        break;

    case VC_ENDC_FLUSH_CTRL:
        /* read-to-clear: flush is reported complete immediately */
        xive->vc_regs[reg] &= ~VC_ENDC_FLUSH_CTRL_POLL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL_MASK:
        val = xive->vc_regs[reg];
        break;

    case VC_AT_MACRO_KILL:
        /* read-to-clear: kill operation is reported complete */
        xive->vc_regs[reg] &= ~VC_AT_MACRO_KILL_VALID;
        val = xive->vc_regs[reg];
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        val = xive->vc_regs[reg];
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        /* syncs are always reported complete; stored value is ignored */
        val = VC_ENDC_SYNC_POLL_DONE;
        break;
    default:
        xive2_error(xive, "VC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

/*
 * VC sub-engine register writes. Unless a case returns early, the
 * (possibly adjusted) value is stored into the shadow register array
 * at the bottom of the function.
 */
static void pnv_xive2_ic_vc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;  /* 8-byte register index */

    switch (offset) {
    /*
     * VSD table settings.
     */
    case VC_VSD_TABLE_ADDR:
        break;
    case VC_VSD_TABLE_DATA:
        /* configures the virtualization structure table selected by
         * the previously written TABLE_ADDR */
        pnv_xive2_vst_set_data(xive, val);
        break;

    /*
     * ESB cache updates (not modeled)
     */
    /* case VC_ESBC_FLUSH_CTRL: */
    case VC_ESBC_FLUSH_POLL:
        /* arm the poll bit; the matching read clears it (see above) */
        xive->vc_regs[VC_ESBC_FLUSH_CTRL >> 3] |= VC_ESBC_FLUSH_CTRL_POLL_VALID;
        /* ESB update */
        break;

    /*
     * EAS cache updates (not modeled)
     */
    /* case VC_EASC_FLUSH_CTRL: */
    case VC_EASC_FLUSH_POLL:
        /* arm the poll bit; the matching read clears it (see above) */
        xive->vc_regs[VC_EASC_FLUSH_CTRL >> 3] |= VC_EASC_FLUSH_CTRL_POLL_VALID;
        /* EAS update */
        break;

    /*
     * END cache updates
     */
    case VC_ENDC_WATCH0_SPEC:
        val &= ~VC_ENDC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case VC_ENDC_WATCH0_DATA1 ... VC_ENDC_WATCH0_DATA3:
        break;
    case VC_ENDC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        /* (the store below is then redundant but harmless) */
        xive->vc_regs[reg] = val;
        pnv_xive2_end_update(xive);
        break;


    /* case VC_ENDC_FLUSH_CTRL: */
    case VC_ENDC_FLUSH_POLL:
        /* arm the poll bit; the matching read clears it (see above) */
        xive->vc_regs[VC_ENDC_FLUSH_CTRL >> 3] |= VC_ENDC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case VC_AT_MACRO_KILL:
    case VC_AT_MACRO_KILL_MASK:
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_QUEUES_CFG_REM0 ... VC_QUEUES_CFG_REM6:
        break;

    /*
     * Synchronisation
     */
    case VC_ENDC_SYNC_DONE:
        break;

    default:
        xive2_error(xive, "VC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->vc_regs[reg] = val;
}

/* VC register window: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_vc_ops = {
    .read = pnv_xive2_ic_vc_read,
    .write = pnv_xive2_ic_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
/*
 * PC (Presentation Controller) sub-engine register reads. Mirrors the
 * VC handler above: FLUSH/KILL registers are read-to-clear so polling
 * firmware sees the (not modeled) operations as complete.
 */
static uint64_t pnv_xive2_ic_pc_read(void *opaque, hwaddr offset,
                                     unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint64_t val = -1;
    uint32_t reg = offset >> 3;  /* 8-byte register index */

    switch (offset) {
    /*
     * VSD table settings.
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        val = xive->pc_regs[reg];
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        /* the watch engine never reports FULL or CONFLICT */
        xive->pc_regs[reg] &= ~(PC_NXC_WATCH_FULL | PC_NXC_WATCH_CONFLICT);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA0:
        /*
         * Load DATA registers from cache with data requested by the
         * SPEC register
         */
        pnv_xive2_nvp_cache_load(xive);
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        val = xive->pc_regs[reg];
        break;

    case PC_NXC_FLUSH_CTRL:
        /* read-to-clear: flush is reported complete immediately */
        xive->pc_regs[reg] &= ~PC_NXC_FLUSH_CTRL_POLL_VALID;
        val = xive->pc_regs[reg];
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
        /* read-to-clear: kill operation is reported complete */
        xive->pc_regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->pc_regs[reg];
        break;

    default:
        xive2_error(xive, "PC: invalid read @%"HWADDR_PRIx, offset);
    }

    return val;
}

/*
 * PC sub-engine register writes. Unless a case returns early, the
 * value is stored into the shadow register array at the bottom.
 */
static void pnv_xive2_ic_pc_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t reg = offset >> 3;  /* 8-byte register index */

    switch (offset) {

    /*
     * VSD table settings. Only taken into account in the VC
     * sub-engine because the Xive2Router model combines both VC and PC
     * sub-engines
     */
    case PC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_DATA:
        break;

    /*
     * cache updates
     */
    case PC_NXC_WATCH0_SPEC:
        val &= ~PC_NXC_WATCH_CONFLICT; /* HW will set this bit */
        break;

    case PC_NXC_WATCH0_DATA1 ... PC_NXC_WATCH0_DATA3:
        break;
    case PC_NXC_WATCH0_DATA0:
        /* writing to DATA0 triggers the cache write */
        /* (the store below is then redundant but harmless) */
        xive->pc_regs[reg] = val;
        pnv_xive2_nvp_update(xive);
        break;

    /* case PC_NXC_FLUSH_CTRL: */
    case PC_NXC_FLUSH_POLL:
        /* arm the poll bit; the matching read clears it (see above) */
        xive->pc_regs[PC_NXC_FLUSH_CTRL >> 3] |= PC_NXC_FLUSH_CTRL_POLL_VALID;
        break;

    /*
     * Indirect invalidation
     */
    case PC_AT_KILL:
    case PC_AT_KILL_MASK:
        break;

    default:
        xive2_error(xive, "PC: invalid write @%"HWADDR_PRIx, offset);
        return;
    }

    xive->pc_regs[reg] = val;
}

/* PC register window: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_pc_ops = {
    .read = pnv_xive2_ic_pc_read,
    .write = pnv_xive2_ic_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
+
+static uint64_t pnv_xive2_ic_tctxt_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint64_t val = -1;
+ uint32_t reg = offset >> 3;
+
+ switch (offset) {
+ /*
+ * XIVE2 hardware thread enablement
+ */
+ case TCTXT_EN0:
+ case TCTXT_EN1:
+ val = xive->tctxt_regs[reg];
+ break;
+
+ case TCTXT_EN0_SET:
+ case TCTXT_EN0_RESET:
+ val = xive->tctxt_regs[TCTXT_EN0 >> 3];
+ break;
+ case TCTXT_EN1_SET:
+ case TCTXT_EN1_RESET:
+ val = xive->tctxt_regs[TCTXT_EN1 >> 3];
+ break;
+ default:
+ xive2_error(xive, "TCTXT: invalid read @%"HWADDR_PRIx, offset);
+ }
+
+ return val;
+}
+
+static void pnv_xive2_ic_tctxt_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint32_t reg = offset >> 3;
+
+ switch (offset) {
+ /*
+ * XIVE2 hardware thread enablement
+ */
+ case TCTXT_EN0: /* Physical Thread Enable */
+ case TCTXT_EN1: /* Physical Thread Enable (fused core) */
+ break;
+
+ case TCTXT_EN0_SET:
+ xive->tctxt_regs[TCTXT_EN0 >> 3] |= val;
+ break;
+ case TCTXT_EN1_SET:
+ xive->tctxt_regs[TCTXT_EN1 >> 3] |= val;
+ break;
+ case TCTXT_EN0_RESET:
+ xive->tctxt_regs[TCTXT_EN0 >> 3] &= ~val;
+ break;
+ case TCTXT_EN1_RESET:
+ xive->tctxt_regs[TCTXT_EN1 >> 3] &= ~val;
+ break;
+
+ default:
+ xive2_error(xive, "TCTXT: invalid write @%"HWADDR_PRIx, offset);
+ return;
+ }
+
+ xive->pc_regs[reg] = val;
+}
+
/* TCTXT register window: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_tctxt_ops = {
    .read = pnv_xive2_ic_tctxt_read,
    .write = pnv_xive2_ic_tctxt_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
+/*
+ * Redirect XSCOM to MMIO handlers
+ */
+static uint64_t pnv_xive2_xscom_read(void *opaque, hwaddr offset,
+ unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint64_t val = -1;
+ uint32_t xscom_reg = offset >> 3;
+ uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
+
+ switch (xscom_reg) {
+ case 0x000 ... 0x0FF:
+ val = pnv_xive2_ic_cq_read(opaque, mmio_offset, size);
+ break;
+ case 0x100 ... 0x1FF:
+ val = pnv_xive2_ic_vc_read(opaque, mmio_offset, size);
+ break;
+ case 0x200 ... 0x2FF:
+ val = pnv_xive2_ic_pc_read(opaque, mmio_offset, size);
+ break;
+ case 0x300 ... 0x3FF:
+ val = pnv_xive2_ic_tctxt_read(opaque, mmio_offset, size);
+ break;
+ default:
+ xive2_error(xive, "XSCOM: invalid read @%"HWADDR_PRIx, offset);
+ }
+
+ return val;
+}
+
+static void pnv_xive2_xscom_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+ uint32_t xscom_reg = offset >> 3;
+ uint32_t mmio_offset = (xscom_reg & 0xFF) << 3;
+
+ switch (xscom_reg) {
+ case 0x000 ... 0x0FF:
+ pnv_xive2_ic_cq_write(opaque, mmio_offset, val, size);
+ break;
+ case 0x100 ... 0x1FF:
+ pnv_xive2_ic_vc_write(opaque, mmio_offset, val, size);
+ break;
+ case 0x200 ... 0x2FF:
+ pnv_xive2_ic_pc_write(opaque, mmio_offset, val, size);
+ break;
+ case 0x300 ... 0x3FF:
+ pnv_xive2_ic_tctxt_write(opaque, mmio_offset, val, size);
+ break;
+ default:
+ xive2_error(xive, "XSCOM: invalid write @%"HWADDR_PRIx, offset);
+ }
+}
+
/* XSCOM window covering all four IC sub-engines; 8-byte BE accesses */
static const MemoryRegionOps pnv_xive2_xscom_ops = {
    .read = pnv_xive2_xscom_read,
    .write = pnv_xive2_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
+/*
+ * Notify port page. The layout is compatible between 4K and 64K pages :
+ *
+ * Page 1 Notify page (writes only)
+ * 0x000 - 0x7FF IPI interrupt (NPU)
+ * 0x800 - 0xFFF HW interrupt triggers (PSI, PHB)
+ */
+
/*
 * Handle a store to the notify page: decode the event-notification
 * data word and forward it to the router. END triggers are not
 * supported through this path and are rejected with an error.
 */
static void pnv_xive2_ic_hw_trigger(PnvXive2 *xive, hwaddr addr,
                                    uint64_t val)
{
    uint8_t blk;
    uint32_t idx;

    if (val & XIVE_TRIGGER_END) {
        xive2_error(xive, "IC: END trigger at @0x%"HWADDR_PRIx" data 0x%"PRIx64,
                    addr, val);
        return;
    }

    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    blk = XIVE_EAS_BLOCK(val);
    idx = XIVE_EAS_INDEX(val);

    /* XIVE_TRIGGER_PQ indicates whether the source PQ bits were checked */
    xive2_router_notify(XIVE_NOTIFIER(xive), XIVE_EAS(blk, idx),
                        !!(val & XIVE_TRIGGER_PQ));
}
+
+static void pnv_xive2_ic_notify_write(void *opaque, hwaddr offset,
+ uint64_t val, unsigned size)
+{
+ PnvXive2 *xive = PNV_XIVE2(opaque);
+
+ /* VC: IPI triggers */
+ switch (offset) {
+ case 0x000 ... 0x7FF:
+ /* TODO: check IPI notify sub-page routing */
+ pnv_xive2_ic_hw_trigger(opaque, offset, val);
+ break;
+
+ /* VC: HW triggers */
+ case 0x800 ... 0xFFF:
+ pnv_xive2_ic_hw_trigger(opaque, offset, val);
+ break;
+
+ default:
+ xive2_error(xive, "NOTIFY: invalid write @%"HWADDR_PRIx, offset);
+ }
+}
+
/* The notify page is write-only: any load is an error */
static uint64_t pnv_xive2_ic_notify_read(void *opaque, hwaddr offset,
                                         unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "NOTIFY: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

/* Notify page: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_notify_ops = {
    .read = pnv_xive2_ic_notify_read,
    .write = pnv_xive2_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
/*
 * LSI page. Not modeled: any direct access is rejected with an error,
 * but the region is still mapped so guest accesses don't fault.
 */
static uint64_t pnv_xive2_ic_lsi_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_ic_lsi_write(void *opaque, hwaddr offset,
                                   uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "LSI: invalid write @%"HWADDR_PRIx, offset);
}

/* LSI page: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_lsi_ops = {
    .read = pnv_xive2_ic_lsi_read,
    .write = pnv_xive2_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
/*
 * Sync MMIO page (write only)
 *
 * Offsets of the per-class sync operations within the page.
 * NOTE(review): presumably firmware stores here to order in-flight
 * operations of the given class; the model accepts the stores and
 * treats all syncs as instantly complete.
 */
#define PNV_XIVE2_SYNC_IPI 0x000
#define PNV_XIVE2_SYNC_HW 0x080
#define PNV_XIVE2_SYNC_NxC 0x100
#define PNV_XIVE2_SYNC_INT 0x180
#define PNV_XIVE2_SYNC_OS_ESC 0x200
#define PNV_XIVE2_SYNC_POOL_ESC 0x280
#define PNV_XIVE2_SYNC_HARD_ESC 0x300
+
/* The sync page is write-only: any load is an error */
static uint64_t pnv_xive2_ic_sync_read(void *opaque, hwaddr offset,
                                       unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    /* loads are invalid */
    xive2_error(xive, "SYNC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

/*
 * Accept stores to the known sync offsets; syncs are treated as
 * instantly complete so there is nothing further to do.
 */
static void pnv_xive2_ic_sync_write(void *opaque, hwaddr offset,
                                    uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    switch (offset) {
    case PNV_XIVE2_SYNC_IPI:
    case PNV_XIVE2_SYNC_HW:
    case PNV_XIVE2_SYNC_NxC:
    case PNV_XIVE2_SYNC_INT:
    case PNV_XIVE2_SYNC_OS_ESC:
    case PNV_XIVE2_SYNC_POOL_ESC:
    case PNV_XIVE2_SYNC_HARD_ESC:
        break;
    default:
        xive2_error(xive, "SYNC: invalid write @%"HWADDR_PRIx, offset);
    }
}

/* Sync page: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_sync_ops = {
    .read = pnv_xive2_ic_sync_read,
    .write = pnv_xive2_ic_sync_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
+/*
+ * When the TM direct pages of the IC controller are accessed, the
+ * target HW thread is deduced from the page offset.
+ */
/*
 * Resolve a PIR (processor ID) to the thread's interrupt context.
 * Returns NULL when no CPU with that PIR exists on the chip.
 *
 * NOTE(review): when the CPU exists but is not enabled in TCTXT_EN0/1,
 * an error is logged yet the context is still returned — confirm this
 * is the intended behavior rather than a missing "return NULL".
 */
static XiveTCTX *pnv_xive2_get_indirect_tctx(PnvXive2 *xive, uint32_t pir)
{
    PnvChip *chip = xive->chip;
    PowerPCCPU *cpu = NULL;

    cpu = pnv_chip_find_cpu(chip, pir);
    if (!cpu) {
        xive2_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    if (!pnv_xive2_is_cpu_enabled(xive, cpu)) {
        xive2_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}
+
/*
 * Indirect TIMA access: the target hardware thread is encoded in the
 * page offset (offset >> ic_shift yields the PIR). Reads of an
 * unresolvable PIR return all ones.
 */
static uint64_t pnv_xive2_ic_tm_indirect_read(void *opaque, hwaddr offset,
                                              unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);
    uint64_t val = -1;

    if (tctx) {
        /* NULL presenter: no CAM matching on the indirect path */
        val = xive_tctx_tm_read(NULL, tctx, offset, size);
    }

    return val;
}

/* Indirect TIMA write; silently dropped when the PIR does not resolve */
static void pnv_xive2_ic_tm_indirect_write(void *opaque, hwaddr offset,
                                           uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);
    uint32_t pir = offset >> xive->ic_shift;
    XiveTCTX *tctx = pnv_xive2_get_indirect_tctx(xive, pir);

    if (tctx) {
        xive_tctx_tm_write(NULL, tctx, offset, val, size);
    }
}

/* Indirect TIMA pages: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_ic_tm_indirect_ops = {
    .read = pnv_xive2_ic_tm_indirect_read,
    .write = pnv_xive2_ic_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
+/*
+ * TIMA ops
+ */
+
+/*
+ * Special TIMA offsets to handle accesses in a POWER10 way.
+ *
+ * Only the CAM line updates done by the hypervisor should be handled
+ * specifically.
+ */
/* Byte offset of the HV page within the 4-page TIMA */
#define HV_PAGE_OFFSET         (XIVE_TM_HV_PAGE << TM_SHIFT)
/* HV-page offset of the OS CAM line word (context push) */
#define HV_PUSH_OS_CTX_OFFSET  (HV_PAGE_OFFSET | (TM_QW1_OS + TM_WORD2))
/* HV-page offset of the OS context pull special load */
#define HV_PULL_OS_CTX_OFFSET  (HV_PAGE_OFFSET | TM_SPC_PULL_OS_CTX)
+
/*
 * TIMA store. In XIVE2 (Gen2) mode, the hypervisor push of an OS
 * context is intercepted and handled by the XIVE2-specific routine;
 * everything else, and all of Gen1-compat (gen1_tima_os) mode, falls
 * through to the common XIVE1 TIMA handler.
 */
static void pnv_xive2_tm_write(void *opaque, hwaddr offset,
                               uint64_t value, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PUSH_OS_CTX_OFFSET) {
        xive2_tm_push_os_ctx(xptr, tctx, offset, value, size);
        return;
    }

    /* Other TM ops are the same as XIVE1 */
    xive_tctx_tm_write(xptr, tctx, offset, value, size);
}

/*
 * TIMA load. Symmetric to pnv_xive2_tm_write(): only the hypervisor
 * pull of an OS context is special-cased in Gen2 mode.
 */
static uint64_t pnv_xive2_tm_read(void *opaque, hwaddr offset, unsigned size)
{
    PowerPCCPU *cpu = POWERPC_CPU(current_cpu);
    PnvXive2 *xive = pnv_xive2_tm_get_xive(cpu);
    XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc);
    XivePresenter *xptr = XIVE_PRESENTER(xive);
    bool gen1_tima_os =
        xive->cq_regs[CQ_XIVE_CFG >> 3] & CQ_XIVE_CFG_GEN1_TIMA_OS;

    /* TODO: should we switch the TM ops table instead ? */
    if (!gen1_tima_os && offset == HV_PULL_OS_CTX_OFFSET) {
        return xive2_tm_pull_os_ctx(xptr, tctx, offset, size);
    }

    /* Other TM ops are the same as XIVE1 */
    return xive_tctx_tm_read(xptr, tctx, offset, size);
}

/* Direct TIMA: 1 to 8-byte accesses, big-endian */
static const MemoryRegionOps pnv_xive2_tm_ops = {
    .read = pnv_xive2_tm_read,
    .write = pnv_xive2_tm_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};
+
/*
 * NVC (crowd) page. Not modeled: direct accesses are rejected, the
 * region only exists so the BAR can be mapped.
 */
static uint64_t pnv_xive2_nvc_read(void *opaque, hwaddr offset,
                                   unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvc_write(void *opaque, hwaddr offset,
                                uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVC: invalid write @%"HWADDR_PRIx, offset);
}

/* NVC page: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_nvc_ops = {
    .read = pnv_xive2_nvc_read,
    .write = pnv_xive2_nvc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * NVPG page. Not modeled either: direct accesses are rejected.
 */
static uint64_t pnv_xive2_nvpg_read(void *opaque, hwaddr offset,
                                    unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid read @%"HWADDR_PRIx, offset);
    return -1;
}

static void pnv_xive2_nvpg_write(void *opaque, hwaddr offset,
                                 uint64_t val, unsigned size)
{
    PnvXive2 *xive = PNV_XIVE2(opaque);

    xive2_error(xive, "NVPG: invalid write @%"HWADDR_PRIx, offset);
}

/* NVPG page: 8-byte, big-endian accesses only */
static const MemoryRegionOps pnv_xive2_nvpg_ops = {
    .read = pnv_xive2_nvpg_read,
    .write = pnv_xive2_nvpg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};
+
/*
 * POWER10 default capabilities: 0x2000120076f000FC
 * (reset value of the CQ_XIVE_CAP register; overridable through the
 * "capabilities" property)
 */
#define PNV_XIVE2_CAPABILITIES  0x2000120076f000FC

/*
 * POWER10 default configuration: 0x0030000033000000
 * (reset value of the CQ_XIVE_CFG register; overridable through the
 * "config" property)
 *
 * 8bits thread id was dropped for P10
 */
#define PNV_XIVE2_CONFIGURATION 0x0030000033000000
+
/*
 * Machine reset handler (registered in realize): restore the CAP/CFG
 * registers to their property-defined defaults, reset the page-size
 * shifts, and unmap the source ESB regions so firmware can reconfigure
 * the BARs from scratch.
 */
static void pnv_xive2_reset(void *dev)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;

    xive->cq_regs[CQ_XIVE_CAP >> 3] = xive->capabilities;
    xive->cq_regs[CQ_XIVE_CFG >> 3] = xive->config;

    /* HW hardwires the #Topology of the chip in the block field */
    xive->cq_regs[CQ_XIVE_CFG >> 3] |=
        SETFIELD(CQ_XIVE_CFG_HYP_HARD_BLOCK_ID, 0ull, xive->chip->chip_id);

    /* Set default page size to 64k */
    xive->ic_shift = xive->esb_shift = xive->end_shift = 16;
    xive->nvc_shift = xive->nvpg_shift = xive->tm_shift = 16;

    /* Clear source MMIOs */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->esb_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &end_xsrc->esb_mmio);
    }
}
+
/*
 * Maximum number of IRQs and ENDs supported by HW. Will be tuned by
 * software. Derived from the ESB/END BAR sizes, with two 64K pages
 * per interrupt / END.
 */
#define PNV_XIVE2_NR_IRQS (PNV10_XIVE2_ESB_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE2_NR_ENDS (PNV10_XIVE2_END_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
+
/*
 * Realize: realize the embedded IPI and END sources at maximum HW
 * capacity, create the XSCOM and MMIO container regions, and register
 * the reset handler. The MMIO regions are only mapped into the system
 * address space later, when firmware programs the CQ BAR registers.
 */
static void pnv_xive2_realize(DeviceState *dev, Error **errp)
{
    PnvXive2 *xive = PNV_XIVE2(dev);
    PnvXive2Class *pxc = PNV_XIVE2_GET_CLASS(dev);
    XiveSource *xsrc = &xive->ipi_source;
    Xive2EndSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    int i;

    pxc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* "chip" is a mandatory link property set by the board */
    assert(xive->chip);

    /*
     * The XiveSource and Xive2EndSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), "flags", XIVE_SRC_STORE_EOI,
                            &error_fatal);
    object_property_set_int(OBJECT(xsrc), "nr-irqs", PNV_XIVE2_NR_IRQS,
                            &error_fatal);
    object_property_set_link(OBJECT(xsrc), "xive", OBJECT(xive),
                             &error_fatal);
    qdev_realize(DEVICE(xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), "nr-ends", PNV_XIVE2_NR_ENDS,
                            &error_fatal);
    object_property_set_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                             &error_abort);
    qdev_realize(DEVICE(end_xsrc), NULL, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev),
                          &pnv_xive2_xscom_ops, xive, "xscom-xive",
                          PNV10_XSCOM_XIVE2_SIZE << 3);

    /* Interrupt controller MMIO regions */
    xive->ic_shift = 16;
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV10_XIVE2_IC_SIZE);

    /* one sub-region per entry of the pnv_xive2_ic_regions table */
    for (i = 0; i < ARRAY_SIZE(xive->ic_mmios); i++) {
        memory_region_init_io(&xive->ic_mmios[i], OBJECT(dev),
                         pnv_xive2_ic_regions[i].ops, xive,
                         pnv_xive2_ic_regions[i].name,
                         pnv_xive2_ic_regions[i].pgsize << xive->ic_shift);
    }

    /*
     * VC MMIO regions.
     */
    xive->esb_shift = 16;
    xive->end_shift = 16;
    memory_region_init(&xive->esb_mmio, OBJECT(xive), "xive-esb",
                       PNV10_XIVE2_ESB_SIZE);
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-end",
                       PNV10_XIVE2_END_SIZE);

    /* Presenter Controller MMIO region (not modeled) */
    xive->nvc_shift = 16;
    xive->nvpg_shift = 16;
    memory_region_init_io(&xive->nvc_mmio, OBJECT(dev),
                          &pnv_xive2_nvc_ops, xive,
                          "xive-nvc", PNV10_XIVE2_NVC_SIZE);

    memory_region_init_io(&xive->nvpg_mmio, OBJECT(dev),
                          &pnv_xive2_nvpg_ops, xive,
                          "xive-nvpg", PNV10_XIVE2_NVPG_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    xive->tm_shift = 16;
    memory_region_init_io(&xive->tm_mmio, OBJECT(dev), &pnv_xive2_tm_ops,
                          xive, "xive-tima", PNV10_XIVE2_TM_SIZE);

    qemu_register_reset(pnv_xive2_reset, dev);
}
+
/*
 * Board-settable properties: the six BAR base addresses, the reset
 * values of the capability/configuration registers, and the link to
 * the owning chip.
 */
static Property pnv_xive2_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive2, ic_base, 0),
    DEFINE_PROP_UINT64("esb-bar", PnvXive2, esb_base, 0),
    DEFINE_PROP_UINT64("end-bar", PnvXive2, end_base, 0),
    DEFINE_PROP_UINT64("nvc-bar", PnvXive2, nvc_base, 0),
    DEFINE_PROP_UINT64("nvpg-bar", PnvXive2, nvpg_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive2, tm_base, 0),
    DEFINE_PROP_UINT64("capabilities", PnvXive2, capabilities,
                       PNV_XIVE2_CAPABILITIES),
    DEFINE_PROP_UINT64("config", PnvXive2, config,
                       PNV_XIVE2_CONFIGURATION),
    DEFINE_PROP_LINK("chip", PnvXive2, chip, TYPE_PNV_CHIP, PnvChip *),
    DEFINE_PROP_END_OF_LIST(),
};
+
/* Instance init: create the embedded IPI and END source children */
static void pnv_xive2_instance_init(Object *obj)
{
    PnvXive2 *xive = PNV_XIVE2(obj);

    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            TYPE_XIVE_SOURCE);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            TYPE_XIVE2_END_SOURCE);
}
+
+static int pnv_xive2_dt_xscom(PnvXScomInterface *dev, void *fdt,
+ int xscom_offset)
+{
+ const char compat_p10[] = "ibm,power10-xive-x";
+ char *name;
+ int offset;
+ uint32_t reg[] = {
+ cpu_to_be32(PNV10_XSCOM_XIVE2_BASE),
+ cpu_to_be32(PNV10_XSCOM_XIVE2_SIZE)
+ };
+
+ name = g_strdup_printf("xive@%x", PNV10_XSCOM_XIVE2_BASE);
+ offset = fdt_add_subnode(fdt, xscom_offset, name);
+ _FDT(offset);
+ g_free(name);
+
+ _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
+ _FDT(fdt_setprop(fdt, offset, "compatible", compat_p10,
+ sizeof(compat_p10)));
+ return 0;
+}
+
+static void pnv_xive2_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
+ Xive2RouterClass *xrc = XIVE2_ROUTER_CLASS(klass);
+ XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
+ XivePresenterClass *xpc = XIVE_PRESENTER_CLASS(klass);
+ PnvXive2Class *pxc = PNV_XIVE2_CLASS(klass);
+
+ xdc->dt_xscom = pnv_xive2_dt_xscom;
+
+ dc->desc = "PowerNV XIVE2 Interrupt Controller (POWER10)";
+ device_class_set_parent_realize(dc, pnv_xive2_realize,
+ &pxc->parent_realize);
+ device_class_set_props(dc, pnv_xive2_properties);
+
+ xrc->get_eas = pnv_xive2_get_eas;
+ xrc->get_pq = pnv_xive2_get_pq;
+ xrc->set_pq = pnv_xive2_set_pq;
+ xrc->get_end = pnv_xive2_get_end;
+ xrc->write_end = pnv_xive2_write_end;
+ xrc->get_nvp = pnv_xive2_get_nvp;
+ xrc->write_nvp = pnv_xive2_write_nvp;
+ xrc->get_config = pnv_xive2_get_config;
+ xrc->get_block_id = pnv_xive2_get_block_id;
+
+ xnc->notify = pnv_xive2_notify;
+
+ xpc->match_nvt = pnv_xive2_match_nvt;
+};
+
/* QOM type registration: PnvXive2 derives from Xive2Router and
 * implements the PnvXScomInterface for device-tree population. */
static const TypeInfo pnv_xive2_info = {
    .name          = TYPE_PNV_XIVE2,
    .parent        = TYPE_XIVE2_ROUTER,
    .instance_init = pnv_xive2_instance_init,
    .instance_size = sizeof(PnvXive2),
    .class_init    = pnv_xive2_class_init,
    .class_size    = sizeof(PnvXive2Class),
    .interfaces    = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive2_register_types(void)
{
    type_register_static(&pnv_xive2_info);
}

type_init(pnv_xive2_register_types)
+
/*
 * Print one NVP table entry on the monitor: its END back-pointer and
 * IPB, plus the CPPR and check-out thread id when the entry is under
 * HW control. Invalid entries print nothing.
 */
static void xive2_nvp_pic_print_info(Xive2Nvp *nvp, uint32_t nvp_idx,
                                     Monitor *mon)
{
    uint8_t  eq_blk = xive_get_field32(NVP2_W5_VP_END_BLOCK, nvp->w5);
    uint32_t eq_idx = xive_get_field32(NVP2_W5_VP_END_INDEX, nvp->w5);

    if (!xive2_nvp_is_valid(nvp)) {
        return;
    }

    monitor_printf(mon, "  %08x end:%02x/%04x IPB:%02x",
                   nvp_idx, eq_blk, eq_idx,
                   xive_get_field32(NVP2_W2_IPB, nvp->w2));
    /*
     * When the NVP is HW controlled, more fields are updated
     */
    if (xive2_nvp_is_hw(nvp)) {
        monitor_printf(mon, " CPPR:%02x",
                       xive_get_field32(NVP2_W2_CPPR, nvp->w2));
        if (xive2_nvp_is_co(nvp)) {
            monitor_printf(mon, " CO:%04x",
                           xive_get_field32(NVP2_W1_CO_THRID, nvp->w1));
        }
    }
    monitor_printf(mon, "\n");
}
+
+/*
+ * If the table is direct, we can compute the number of PQ entries
+ * provisioned by FW.
+ */
+static uint32_t pnv_xive2_nr_esbs(PnvXive2 *xive)
+{
+ uint8_t blk = pnv_xive2_block_id(xive);
+ uint64_t vsd = xive->vsds[VST_ESB][blk];
+ uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12);
+
+ return VSD_INDIRECT & vsd ? 0 : vst_tsize * SBE_PER_BYTE;
+}
+
+/*
+ * Compute the number of entries per indirect subpage.
+ */
/*
 * Number of entries per indirect-table subpage for a VST of the given
 * type. Returns 1 for direct tables (so callers can step entry by
 * entry) and 0 when the indirect descriptor is invalid.
 */
static uint64_t pnv_xive2_vst_per_subpage(PnvXive2 *xive, uint32_t type)
{
    uint8_t blk = pnv_xive2_block_id(xive);
    uint64_t vsd = xive->vsds[type][blk];
    const XiveVstInfo *info = &vst_infos[type];
    uint64_t vsd_addr;
    uint32_t page_shift;

    /* For direct tables, fake a valid value */
    if (!(VSD_INDIRECT & vsd)) {
        return 1;
    }

    /* Get the page size of the indirect table. */
    /* (first descriptor of the indirect table is read from guest RAM) */
    vsd_addr = vsd & VSD_ADDRESS_MASK;
    ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED);

    if (!(vsd & VSD_ADDRESS_MASK)) {
#ifdef XIVE2_DEBUG
        xive2_error(xive, "VST: invalid %s entry!?", info->name);
#endif
        return 0;
    }

    page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;

    if (!pnv_xive2_vst_page_size_allowed(page_shift)) {
        xive2_error(xive, "VST: invalid %s page shift %d", info->name,
                    page_shift);
        return 0;
    }

    return (1ull << page_shift) / info->size;
}
+
/*
 * Monitor command: dump this chip's XIVE2 state — sources, EAT,
 * escalation and END tables, and the NVP table. Table walks stop at
 * the first unreadable entry.
 */
void pnv_xive2_pic_print_info(PnvXive2 *xive, Monitor *mon)
{
    Xive2Router *xrtr = XIVE2_ROUTER(xive);
    uint8_t blk = pnv_xive2_block_id(xive);
    uint8_t chip_id = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_EAS(blk, 0);
    uint32_t nr_esbs = pnv_xive2_nr_esbs(xive);
    Xive2Eas eas;
    Xive2End end;
    Xive2Nvp nvp;
    int i;
    uint64_t xive_nvp_per_subpage;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_esbs - 1);
    for (i = 0; i < nr_esbs; i++) {
        if (xive2_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive2_eas_is_masked(&eas)) {
            xive2_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] #%d END Escalation EAT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_eas_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d ENDT\n", chip_id, blk);
    i = 0;
    while (!xive2_router_get_end(xrtr, blk, i, &end)) {
        xive2_end_pic_print_info(&end, i++, mon);
    }

    monitor_printf(mon, "XIVE[%x] #%d NVPT %08x .. %08x\n", chip_id, blk,
                   0, XIVE2_NVP_COUNT - 1);
    /* step over unmapped indirect subpages: the inner loop advances i
     * within a subpage, the outer loop jumps to the next subpage */
    xive_nvp_per_subpage = pnv_xive2_vst_per_subpage(xive, VST_NVP);
    for (i = 0; i < XIVE2_NVP_COUNT; i += xive_nvp_per_subpage) {
        while (!xive2_router_get_nvp(xrtr, blk, i, &nvp)) {
            xive2_nvp_pic_print_info(&nvp, i++, mon);
        }
    }
}
diff --git a/hw/intc/pnv_xive2_regs.h b/hw/intc/pnv_xive2_regs.h
new file mode 100644
index 0000000000..0c096e4adb
--- /dev/null
+++ b/hw/intc/pnv_xive2_regs.h
@@ -0,0 +1,442 @@
+/*
+ * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#ifndef PPC_PNV_XIVE2_REGS_H
+#define PPC_PNV_XIVE2_REGS_H
+
+/*
+ * CQ Common Queue (PowerBus bridge) Registers
+ */
+
+/* XIVE2 Capabilities */
+#define X_CQ_XIVE_CAP 0x02
+#define CQ_XIVE_CAP 0x010
+#define CQ_XIVE_CAP_VERSION PPC_BITMASK(0, 3)
+/* 4:6 reserved */
+#define CQ_XIVE_CAP_USER_INT_PRIO PPC_BITMASK(8, 9)
+#define CQ_XIVE_CAP_USER_INT_PRIO_1 0
+#define CQ_XIVE_CAP_USER_INT_PRIO_1_2 1
+#define CQ_XIVE_CAP_USER_INT_PRIO_1_4 2
+#define CQ_XIVE_CAP_USER_INT_PRIO_1_8 3
+#define CQ_XIVE_CAP_VP_INT_PRIO PPC_BITMASK(10, 11)
+#define CQ_XIVE_CAP_VP_INT_PRIO_1_8 0
+#define CQ_XIVE_CAP_VP_INT_PRIO_2_8 1
+#define CQ_XIVE_CAP_VP_INT_PRIO_4_8 2
+#define CQ_XIVE_CAP_VP_INT_PRIO_8 3
+#define CQ_XIVE_CAP_BLOCK_ID_WIDTH PPC_BITMASK(12, 13)
+#define CQ_XIVE_CAP_VP_SAVE_RESTORE PPC_BIT(38)
+
+#define CQ_XIVE_CAP_PHB_PQ_DISABLE PPC_BIT(56)
+#define CQ_XIVE_CAP_PHB_ABT PPC_BIT(57)
+#define CQ_XIVE_CAP_EXPLOITATION_MODE PPC_BIT(58)
+#define CQ_XIVE_CAP_STORE_EOI PPC_BIT(59)
+
+/* XIVE2 Configuration */
+#define X_CQ_XIVE_CFG 0x03
+#define CQ_XIVE_CFG 0x018
+
+/* 0:7 reserved */
+#define CQ_XIVE_CFG_USER_INT_PRIO PPC_BITMASK(8, 9)
+#define CQ_XIVE_CFG_VP_INT_PRIO PPC_BITMASK(10, 11)
+#define CQ_XIVE_CFG_INT_PRIO_1 0
+#define CQ_XIVE_CFG_INT_PRIO_2 1
+#define CQ_XIVE_CFG_INT_PRIO_4 2
+#define CQ_XIVE_CFG_INT_PRIO_8 3
+#define CQ_XIVE_CFG_BLOCK_ID_WIDTH PPC_BITMASK(12, 13)
+#define CQ_XIVE_CFG_BLOCK_ID_4BITS 0
+#define CQ_XIVE_CFG_BLOCK_ID_5BITS 1
+#define CQ_XIVE_CFG_BLOCK_ID_6BITS 2
+#define CQ_XIVE_CFG_BLOCK_ID_7BITS 3
+#define CQ_XIVE_CFG_HYP_HARD_RANGE PPC_BITMASK(14, 15)
+#define CQ_XIVE_CFG_THREADID_7BITS 0
+#define CQ_XIVE_CFG_THREADID_8BITS 1
+#define CQ_XIVE_CFG_THREADID_9BITS 2
+#define CQ_XIVE_CFG_THREADID_10BITs 3
+#define CQ_XIVE_CFG_HYP_HARD_BLKID_OVERRIDE PPC_BIT(16)
+#define CQ_XIVE_CFG_HYP_HARD_BLOCK_ID PPC_BITMASK(17, 23)
+
+#define CQ_XIVE_CFG_GEN1_TIMA_OS PPC_BIT(24)
+#define CQ_XIVE_CFG_GEN1_TIMA_HYP PPC_BIT(25)
+#define CQ_XIVE_CFG_GEN1_TIMA_HYP_BLK0 PPC_BIT(26) /* 0 if bit[25]=0 */
+#define CQ_XIVE_CFG_GEN1_TIMA_CROWD_DIS PPC_BIT(27) /* 0 if bit[25]=0 */
+#define CQ_XIVE_CFG_GEN1_END_ESX PPC_BIT(28)
+#define CQ_XIVE_CFG_EN_VP_SAVE_RESTORE PPC_BIT(38) /* 0 if bit[25]=1 */
+#define CQ_XIVE_CFG_EN_VP_SAVE_REST_STRICT PPC_BIT(39) /* 0 if bit[25]=1 */
+
+/* Interrupt Controller Base Address Register - 512 pages (32M) */
+#define X_CQ_IC_BAR 0x08
+#define CQ_IC_BAR 0x040
+#define CQ_IC_BAR_VALID PPC_BIT(0)
+#define CQ_IC_BAR_64K PPC_BIT(1)
+/* 2:7 reserved */
+#define CQ_IC_BAR_ADDR PPC_BITMASK(8, 42)
+/* 43:63 reserved */
+
+/* Thread Management Base Address Register - 4 pages */
+#define X_CQ_TM_BAR 0x09
+#define CQ_TM_BAR 0x048
+#define CQ_TM_BAR_VALID PPC_BIT(0)
+#define CQ_TM_BAR_64K PPC_BIT(1)
+#define CQ_TM_BAR_ADDR PPC_BITMASK(8, 49)
+
+/* ESB Base Address Register */
+#define X_CQ_ESB_BAR 0x0A
+#define CQ_ESB_BAR 0x050
+#define CQ_BAR_VALID PPC_BIT(0)
+#define CQ_BAR_64K PPC_BIT(1)
+/* 2:7 reserved */
+#define CQ_BAR_ADDR PPC_BITMASK(8, 39)
+#define CQ_BAR_SET_DIV PPC_BITMASK(56, 58)
+#define CQ_BAR_RANGE PPC_BITMASK(59, 63)
+ /* 0 (16M) - 16 (16T) */
+
+/* END Base Address Register */
+#define X_CQ_END_BAR 0x0B
+#define CQ_END_BAR 0x058
+
+/* NVPG Base Address Register */
+#define X_CQ_NVPG_BAR 0x0C
+#define CQ_NVPG_BAR 0x060
+
+/* NVC Base Address Register */
+#define X_CQ_NVC_BAR 0x0D
+#define CQ_NVC_BAR 0x068
+
+/* Table Address Register */
+#define X_CQ_TAR 0x0E
+#define CQ_TAR 0x070
+#define CQ_TAR_AUTOINC PPC_BIT(0)
+#define CQ_TAR_SELECT PPC_BITMASK(12, 15)
+#define CQ_TAR_ESB 0 /* 0 - 15 */
+#define CQ_TAR_END 2 /* 0 - 15 */
+#define CQ_TAR_NVPG 3 /* 0 - 15 */
+#define CQ_TAR_NVC 5 /* 0 - 15 */
+#define CQ_TAR_ENTRY_SELECT PPC_BITMASK(28, 31)
+
+/* Table Data Register */
+#define X_CQ_TDR 0x0F
+#define CQ_TDR 0x078
+/* for the NVPG, NVC, ESB, END Set Translation Tables */
+#define CQ_TDR_VALID PPC_BIT(0)
+#define CQ_TDR_BLOCK_ID PPC_BITMASK(60, 63)
+
+/*
+ * Processor Cores Enabled for MsgSnd
+ * Identifies which of the 32 possible core chiplets are enabled and
+ * available to receive the MsgSnd command
+ */
+#define X_CQ_MSGSND 0x10
+#define CQ_MSGSND 0x080
+
+/* Interrupt Unit Reset Control */
+#define X_CQ_RST_CTL 0x12
+#define CQ_RST_CTL 0x090
+#define CQ_RST_SYNC_RESET PPC_BIT(0) /* Write Only */
+#define CQ_RST_QUIESCE_PB PPC_BIT(1) /* RW */
+#define CQ_RST_MASTER_IDLE PPC_BIT(2) /* Read Only */
+#define CQ_RST_SAVE_IDLE PPC_BIT(3) /* Read Only */
+#define CQ_RST_PB_BAR_RESET PPC_BIT(4) /* Write Only */
+
+/* PowerBus General Configuration */
+#define X_CQ_CFG_PB_GEN 0x14
+#define CQ_CFG_PB_GEN 0x0A0
+#define CQ_CFG_PB_GEN_PB_INIT PPC_BIT(45)
+
+/*
+ * FIR
+ * (And-Mask)
+ * (Or-Mask)
+ */
+#define X_CQ_FIR 0x30
+#define X_CQ_FIR_AND 0x31
+#define X_CQ_FIR_OR 0x32
+#define CQ_FIR 0x180
+#define CQ_FIR_AND 0x188
+#define CQ_FIR_OR 0x190
+#define CQ_FIR_PB_RCMDX_CI_ERR1 PPC_BIT(19)
+#define CQ_FIR_VC_INFO_ERROR_0_2 PPC_BITMASK(61, 63)
+
+/*
+ * FIR Mask
+ * (And-Mask)
+ * (Or-Mask)
+ */
+#define X_CQ_FIRMASK 0x33
+#define X_CQ_FIRMASK_AND 0x34
+#define X_CQ_FIRMASK_OR 0x35
+#define CQ_FIRMASK 0x198
+#define CQ_FIRMASK_AND 0x1A0
+#define CQ_FIRMASK_OR 0x1A8
+
+/*
+ * VC0
+ */
+
+/* VSD table address */
+#define X_VC_VSD_TABLE_ADDR 0x100
+#define VC_VSD_TABLE_ADDR 0x000
+#define VC_VSD_TABLE_AUTOINC PPC_BIT(0)
+#define VC_VSD_TABLE_SELECT PPC_BITMASK(12, 15)
+#define VC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31)
+
+/* VSD table data */
+#define X_VC_VSD_TABLE_DATA 0x101
+#define VC_VSD_TABLE_DATA 0x008
+
+/* AIB AT macro indirect kill */
+#define X_VC_AT_MACRO_KILL 0x102
+#define VC_AT_MACRO_KILL 0x010
+#define VC_AT_MACRO_KILL_VALID PPC_BIT(0)
+#define VC_AT_MACRO_KILL_VSD PPC_BITMASK(12, 15)
+#define VC_AT_MACRO_KILL_BLOCK_ID PPC_BITMASK(28, 31)
+#define VC_AT_MACRO_KILL_OFFSET PPC_BITMASK(48, 60)
+
+/* AIB AT macro indirect kill mask (same bit definitions) */
+#define X_VC_AT_MACRO_KILL_MASK 0x103
+#define VC_AT_MACRO_KILL_MASK 0x018
+
+/* Remote IRQs and ERQs configuration [n] (n = 0:6) */
+#define X_VC_QUEUES_CFG_REM0 0x117
+
+#define VC_QUEUES_CFG_REM0 0x0B8
+#define VC_QUEUES_CFG_REM1 0x0C0
+#define VC_QUEUES_CFG_REM2 0x0C8
+#define VC_QUEUES_CFG_REM3 0x0D0
+#define VC_QUEUES_CFG_REM4 0x0D8
+#define VC_QUEUES_CFG_REM5 0x0E0
+#define VC_QUEUES_CFG_REM6 0x0E8
+#define VC_QUEUES_CFG_MEMB_EN PPC_BIT(38)
+#define VC_QUEUES_CFG_MEMB_SZ PPC_BITMASK(42, 47)
+
+/*
+ * VC1
+ */
+
+/* ESBC cache flush control trigger */
+#define X_VC_ESBC_FLUSH_CTRL 0x140
+#define VC_ESBC_FLUSH_CTRL 0x200
+#define VC_ESBC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
+#define VC_ESBC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
+
+/* ESBC cache flush poll trigger */
+#define X_VC_ESBC_FLUSH_POLL 0x141
+#define VC_ESBC_FLUSH_POLL 0x208
+#define VC_ESBC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3)
+#define VC_ESBC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */
+#define VC_ESBC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
+#define VC_ESBC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+
+/* EASC flush control register */
+#define X_VC_EASC_FLUSH_CTRL 0x160
+#define VC_EASC_FLUSH_CTRL 0x300
+#define VC_EASC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
+#define VC_EASC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
+
+/* EASC flush poll register */
+#define X_VC_EASC_FLUSH_POLL 0x161
+#define VC_EASC_FLUSH_POLL 0x308
+#define VC_EASC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(0, 3)
+#define VC_EASC_FLUSH_POLL_OFFSET PPC_BITMASK(4, 31) /* 28-bit */
+#define VC_EASC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(32, 35)
+#define VC_EASC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(36, 63) /* 28-bit */
+
+/*
+ * VC2
+ */
+
+/* ENDC flush control register */
+#define X_VC_ENDC_FLUSH_CTRL 0x180
+#define VC_ENDC_FLUSH_CTRL 0x400
+#define VC_ENDC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
+#define VC_ENDC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
+#define VC_ENDC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3)
+#define VC_ENDC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7)
+
+/* ENDC flush poll register */
+#define X_VC_ENDC_FLUSH_POLL 0x181
+#define VC_ENDC_FLUSH_POLL 0x408
+#define VC_ENDC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7)
+#define VC_ENDC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */
+#define VC_ENDC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
+#define VC_ENDC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+
+/* ENDC Sync done */
+#define X_VC_ENDC_SYNC_DONE 0x184
+#define VC_ENDC_SYNC_DONE 0x420
+#define VC_ENDC_SYNC_POLL_DONE PPC_BITMASK(0, 6)
+#define VC_ENDC_SYNC_QUEUE_IPI PPC_BIT(0)
+#define VC_ENDC_SYNC_QUEUE_HWD PPC_BIT(1)
+#define VC_ENDC_SYNC_QUEUE_NXC PPC_BIT(2)
+#define VC_ENDC_SYNC_QUEUE_INT PPC_BIT(3)
+#define VC_ENDC_SYNC_QUEUE_OS PPC_BIT(4)
+#define VC_ENDC_SYNC_QUEUE_POOL PPC_BIT(5)
+#define VC_ENDC_SYNC_QUEUE_HARD PPC_BIT(6)
+#define VC_QUEUE_COUNT 7
+
+/* ENDC cache watch specification 0 */
+#define X_VC_ENDC_WATCH0_SPEC 0x1A0
+#define VC_ENDC_WATCH0_SPEC 0x500
+#define VC_ENDC_WATCH_CONFLICT PPC_BIT(0)
+#define VC_ENDC_WATCH_FULL PPC_BIT(8)
+#define VC_ENDC_WATCH_BLOCK_ID PPC_BITMASK(28, 31)
+#define VC_ENDC_WATCH_INDEX PPC_BITMASK(40, 63)
+
+/* ENDC cache watch data 0 */
+#define X_VC_ENDC_WATCH0_DATA0 0x1A4
+#define X_VC_ENDC_WATCH0_DATA1 0x1A5
+#define X_VC_ENDC_WATCH0_DATA2 0x1A6
+#define X_VC_ENDC_WATCH0_DATA3 0x1A7
+
+#define VC_ENDC_WATCH0_DATA0 0x520
+#define VC_ENDC_WATCH0_DATA1 0x528
+#define VC_ENDC_WATCH0_DATA2 0x530
+#define VC_ENDC_WATCH0_DATA3 0x538
+
+/*
+ * PC LSB1
+ */
+
+/* VSD table address register */
+#define X_PC_VSD_TABLE_ADDR 0x200
+#define PC_VSD_TABLE_ADDR 0x000
+#define PC_VSD_TABLE_AUTOINC PPC_BIT(0)
+#define PC_VSD_TABLE_SELECT PPC_BITMASK(12, 15)
+#define PC_VSD_TABLE_ADDRESS PPC_BITMASK(28, 31)
+
+/* VSD table data register */
+#define X_PC_VSD_TABLE_DATA 0x201
+#define PC_VSD_TABLE_DATA 0x008
+
+/* AT indirect kill register */
+#define X_PC_AT_KILL 0x202
+#define PC_AT_KILL 0x010
+#define PC_AT_KILL_VALID PPC_BIT(0)
+#define PC_AT_KILL_VSD_TYPE PPC_BITMASK(24, 27)
+/* Only NVP, NVG, NVC */
+#define PC_AT_KILL_BLOCK_ID PPC_BITMASK(28, 31)
+#define PC_AT_KILL_OFFSET PPC_BITMASK(48, 60)
+
+/* AT indirect kill mask register */
+#define X_PC_AT_KILL_MASK 0x203
+#define PC_AT_KILL_MASK 0x018
+#define PC_AT_KILL_MASK_VSD_TYPE PPC_BITMASK(24, 27)
+#define PC_AT_KILL_MASK_BLOCK_ID PPC_BITMASK(28, 31)
+#define PC_AT_KILL_MASK_OFFSET PPC_BITMASK(48, 60)
+
+/*
+ * PC LSB2
+ */
+
+/* NxC Cache flush control */
+#define X_PC_NXC_FLUSH_CTRL 0x280
+#define PC_NXC_FLUSH_CTRL 0x400
+#define PC_NXC_FLUSH_CTRL_POLL_VALID PPC_BIT(0)
+#define PC_NXC_FLUSH_CTRL_WANT_CACHE_DISABLE PPC_BIT(2)
+#define PC_NXC_FLUSH_CTRL_WANT_INVALIDATE PPC_BIT(3)
+#define PC_NXC_FLUSH_CTRL_INJECT_INVALIDATE PPC_BIT(7)
+
+/* NxC Cache flush poll */
+#define X_PC_NXC_FLUSH_POLL 0x281
+#define PC_NXC_FLUSH_POLL 0x408
+#define PC_NXC_FLUSH_POLL_NXC_TYPE PPC_BITMASK(2, 3)
+#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVP 0
+#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVG 2
+#define PC_NXC_FLUSH_POLL_NXC_TYPE_NVC 3
+#define PC_NXC_FLUSH_POLL_BLOCK_ID PPC_BITMASK(4, 7)
+#define PC_NXC_FLUSH_POLL_OFFSET PPC_BITMASK(8, 31) /* 24-bit */
+#define PC_NXC_FLUSH_POLL_NXC_TYPE_MASK PPC_BITMASK(34, 35) /* 0: Ign */
+#define PC_NXC_FLUSH_POLL_BLOCK_ID_MASK PPC_BITMASK(36, 39)
+#define PC_NXC_FLUSH_POLL_OFFSET_MASK PPC_BITMASK(40, 63) /* 24-bit */
+
+/* NxC Cache Watch 0 Specification */
+#define X_PC_NXC_WATCH0_SPEC 0x2A0
+#define PC_NXC_WATCH0_SPEC 0x500
+#define PC_NXC_WATCH_CONFLICT PPC_BIT(0)
+#define PC_NXC_WATCH_FULL PPC_BIT(8)
+#define PC_NXC_WATCH_NXC_TYPE PPC_BITMASK(26, 27)
+#define PC_NXC_WATCH_NXC_NVP 0
+#define PC_NXC_WATCH_NXC_NVG 2
+#define PC_NXC_WATCH_NXC_NVC 3
+#define PC_NXC_WATCH_BLOCK_ID PPC_BITMASK(28, 31)
+#define PC_NXC_WATCH_INDEX PPC_BITMASK(40, 63)
+
+/* NxC Cache Watch 0 Data */
+#define X_PC_NXC_WATCH0_DATA0 0x2A4
+#define X_PC_NXC_WATCH0_DATA1 0x2A5
+#define X_PC_NXC_WATCH0_DATA2 0x2A6
+#define X_PC_NXC_WATCH0_DATA3 0x2A7
+
+#define PC_NXC_WATCH0_DATA0 0x520
+#define PC_NXC_WATCH0_DATA1 0x528
+#define PC_NXC_WATCH0_DATA2 0x530
+#define PC_NXC_WATCH0_DATA3 0x538
+
+/*
+ * TCTXT Registers
+ */
+
+/* Physical Thread Enable0 register */
+#define X_TCTXT_EN0 0x300
+#define TCTXT_EN0 0x000
+
+/* Physical Thread Enable0 Set register */
+#define X_TCTXT_EN0_SET 0x302
+#define TCTXT_EN0_SET 0x010
+
+/* Physical Thread Enable0 Reset register */
+#define X_TCTXT_EN0_RESET 0x303
+#define TCTXT_EN0_RESET 0x018
+
+/* Physical Thread Enable1 register */
+#define X_TCTXT_EN1 0x304
+#define TCTXT_EN1 0x020
+
+/* Physical Thread Enable1 Set register */
+#define X_TCTXT_EN1_SET 0x306
+#define TCTXT_EN1_SET 0x030
+
+/* Physical Thread Enable1 Reset register */
+#define X_TCTXT_EN1_RESET 0x307
+#define TCTXT_EN1_RESET 0x038
+
+/*
+ * VSD Tables
+ */
+#define VST_ESB 0
+#define VST_EAS 1 /* Not used by PC */
+#define VST_END 2
+#define VST_NVP 3
+#define VST_NVG 4
+#define VST_NVC 5
+#define VST_IC 6 /* Not used by PC */
+#define VST_SYNC 7
+#define VST_ERQ 8 /* Not used by PC */
+
+/*
+ * Bits in a VSD entry.
+ *
+ * Note: the address is naturally aligned, we don't use a PPC_BITMASK,
+ * but just a mask to apply to the address before OR'ing it in.
+ *
+ * Note: VSD_FIRMWARE is a SW bit ! It hijacks an unused bit in the
+ * VSD and is only meant to be used in indirect mode !
+ */
+#define VSD_MODE PPC_BITMASK(0, 1)
+#define VSD_MODE_SHARED 1
+#define VSD_MODE_EXCLUSIVE 2
+#define VSD_MODE_FORWARD 3
+#define VSD_FIRMWARE PPC_BIT(2) /* Read warning */
+#define VSD_FIRMWARE2 PPC_BIT(3) /* unused */
+#define VSD_RESERVED PPC_BITMASK(4, 7) /* P10 reserved */
+#define VSD_ADDRESS_MASK 0x00fffffffffff000ull
+#define VSD_MIGRATION_REG PPC_BITMASK(52, 55)
+#define VSD_INDIRECT PPC_BIT(56)
+#define VSD_TSIZE PPC_BITMASK(59, 63)
+
+#endif /* PPC_PNV_XIVE2_REGS_H */
diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c
index eae95c716f..dc641cc604 100644
--- a/hw/intc/spapr_xive.c
+++ b/hw/intc/spapr_xive.c
@@ -480,6 +480,29 @@ static uint8_t spapr_xive_get_block_id(XiveRouter *xrtr)
return SPAPR_XIVE_BLOCK_ID;
}
+static int spapr_xive_get_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+ assert(SPAPR_XIVE_BLOCK_ID == blk);
+
+ *pq = xive_source_esb_get(&xive->source, idx);
+ return 0;
+}
+
+static int spapr_xive_set_pq(XiveRouter *xrtr, uint8_t blk, uint32_t idx,
+ uint8_t *pq)
+{
+ SpaprXive *xive = SPAPR_XIVE(xrtr);
+
+ assert(SPAPR_XIVE_BLOCK_ID == blk);
+
+ *pq = xive_source_esb_set(&xive->source, idx, *pq);
+ return 0;
+}
+
+
static const VMStateDescription vmstate_spapr_xive_end = {
.name = TYPE_SPAPR_XIVE "/end",
.version_id = 1,
@@ -788,6 +811,8 @@ static void spapr_xive_class_init(ObjectClass *klass, void *data)
dc->vmsd = &vmstate_spapr_xive;
xrc->get_eas = spapr_xive_get_eas;
+ xrc->get_pq = spapr_xive_get_pq;
+ xrc->set_pq = spapr_xive_set_pq;
xrc->get_end = spapr_xive_get_end;
xrc->write_end = spapr_xive_write_end;
xrc->get_nvt = spapr_xive_get_nvt;
diff --git a/hw/intc/xive.c b/hw/intc/xive.c
index f15f98588a..b8e4c7294d 100644
--- a/hw/intc/xive.c
+++ b/hw/intc/xive.c
@@ -887,6 +887,16 @@ static bool xive_source_lsi_trigger(XiveSource *xsrc, uint32_t srcno)
}
/*
+ * Sources can be configured with PQ offloading in which case the check
+ * on the PQ state bits of MSIs is disabled
+ */
+static bool xive_source_esb_disabled(XiveSource *xsrc, uint32_t srcno)
+{
+ return (xsrc->esb_flags & XIVE_SRC_PQ_DISABLE) &&
+ !xive_source_irq_is_lsi(xsrc, srcno);
+}
+
+/*
* Returns whether the event notification should be forwarded.
*/
static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
@@ -895,6 +905,10 @@ static bool xive_source_esb_trigger(XiveSource *xsrc, uint32_t srcno)
assert(srcno < xsrc->nr_irqs);
+ if (xive_source_esb_disabled(xsrc, srcno)) {
+ return true;
+ }
+
ret = xive_esb_trigger(&xsrc->status[srcno]);
if (xive_source_irq_is_lsi(xsrc, srcno) &&
@@ -915,6 +929,11 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
assert(srcno < xsrc->nr_irqs);
+ if (xive_source_esb_disabled(xsrc, srcno)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid EOI for IRQ %d\n", srcno);
+ return false;
+ }
+
ret = xive_esb_eoi(&xsrc->status[srcno]);
/*
@@ -936,9 +955,10 @@ static bool xive_source_esb_eoi(XiveSource *xsrc, uint32_t srcno)
static void xive_source_notify(XiveSource *xsrc, int srcno)
{
XiveNotifierClass *xnc = XIVE_NOTIFIER_GET_CLASS(xsrc->xive);
+ bool pq_checked = !xive_source_esb_disabled(xsrc, srcno);
if (xnc->notify) {
- xnc->notify(xsrc->xive, srcno);
+ xnc->notify(xsrc->xive, srcno, pq_checked);
}
}
@@ -1061,6 +1081,15 @@ static void xive_source_esb_write(void *opaque, hwaddr addr,
notify = xive_source_esb_eoi(xsrc, srcno);
break;
+ /*
+ * This is an internal offset used to inject triggers when the PQ
+ * state bits are not controlled locally. Such as for LSIs when
+ * under ABT mode.
+ */
+ case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
+ notify = true;
+ break;
+
case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
@@ -1361,6 +1390,24 @@ int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
}
+static
+int xive_router_get_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
+}
+
+static
+int xive_router_set_pq(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq)
+{
+ XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
+}
+
int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
XiveEND *end)
{
@@ -1712,7 +1759,7 @@ do_escalation:
xive_get_field32(END_W5_ESC_END_DATA, end.w5));
}
-void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
+void xive_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
{
XiveRouter *xrtr = XIVE_ROUTER(xn);
uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
@@ -1725,11 +1772,27 @@ void xive_router_notify(XiveNotifier *xn, uint32_t lisn)
return;
}
- /*
- * The IVRE checks the State Bit Cache at this point. We skip the
- * SBC lookup because the state bits of the sources are modeled
- * internally in QEMU.
- */
+ if (!pq_checked) {
+ bool notify;
+ uint8_t pq;
+
+ /* PQ cache lookup */
+ if (xive_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
+ /* Set FIR */
+ g_assert_not_reached();
+ }
+
+ notify = xive_esb_trigger(&pq);
+
+ if (xive_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
+ /* Set FIR */
+ g_assert_not_reached();
+ }
+
+ if (!notify) {
+ return;
+ }
+ }
if (!xive_eas_is_valid(&eas)) {
qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid LISN %x\n", lisn);
diff --git a/hw/intc/xive2.c b/hw/intc/xive2.c
new file mode 100644
index 0000000000..b6452f1478
--- /dev/null
+++ b/hw/intc/xive2.c
@@ -0,0 +1,1018 @@
+/*
+ * QEMU PowerPC XIVE2 interrupt controller model (POWER10)
+ *
+ * Copyright (c) 2019-2022, IBM Corporation.
+ *
+ * This code is licensed under the GPL version 2 or later. See the
+ * COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/log.h"
+#include "qemu/module.h"
+#include "qapi/error.h"
+#include "target/ppc/cpu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/dma.h"
+#include "hw/qdev-properties.h"
+#include "monitor/monitor.h"
+#include "hw/ppc/xive.h"
+#include "hw/ppc/xive2.h"
+#include "hw/ppc/xive2_regs.h"
+
+uint32_t xive2_router_get_config(Xive2Router *xrtr)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_config(xrtr);
+}
+
+void xive2_eas_pic_print_info(Xive2Eas *eas, uint32_t lisn, Monitor *mon)
+{
+ if (!xive2_eas_is_valid(eas)) {
+ return;
+ }
+
+ monitor_printf(mon, " %08x %s end:%02x/%04x data:%08x\n",
+ lisn, xive2_eas_is_masked(eas) ? "M" : " ",
+ (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
+ (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
+ (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
+}
+
+void xive2_end_queue_pic_print_info(Xive2End *end, uint32_t width,
+ Monitor *mon)
+{
+ uint64_t qaddr_base = xive2_end_qaddr(end);
+ uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
+ uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
+ uint32_t qentries = 1 << (qsize + 10);
+ int i;
+
+ /*
+ * print out the [ (qindex - (width - 1)) .. (qindex + 1)] window
+ */
+ monitor_printf(mon, " [ ");
+ qindex = (qindex - (width - 1)) & (qentries - 1);
+ for (i = 0; i < width; i++) {
+ uint64_t qaddr = qaddr_base + (qindex << 2);
+ uint32_t qdata = -1;
+
+ if (dma_memory_read(&address_space_memory, qaddr, &qdata,
+ sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+ }
+ monitor_printf(mon, "%s%08x ", i == width - 1 ? "^" : "",
+ be32_to_cpu(qdata));
+ qindex = (qindex + 1) & (qentries - 1);
+ }
+ monitor_printf(mon, "]");
+}
+
+void xive2_end_pic_print_info(Xive2End *end, uint32_t end_idx, Monitor *mon)
+{
+ uint64_t qaddr_base = xive2_end_qaddr(end);
+ uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
+ uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
+ uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
+ uint32_t qentries = 1 << (qsize + 10);
+
+ uint32_t nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end->w6);
+ uint32_t nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end->w6);
+ uint8_t priority = xive_get_field32(END2_W7_F0_PRIORITY, end->w7);
+ uint8_t pq;
+
+ if (!xive2_end_is_valid(end)) {
+ return;
+ }
+
+ pq = xive_get_field32(END2_W1_ESn, end->w1);
+
+ monitor_printf(mon,
+ " %08x %c%c %c%c%c%c%c%c%c%c%c%c prio:%d nvp:%02x/%04x",
+ end_idx,
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive2_end_is_valid(end) ? 'v' : '-',
+ xive2_end_is_enqueue(end) ? 'q' : '-',
+ xive2_end_is_notify(end) ? 'n' : '-',
+ xive2_end_is_backlog(end) ? 'b' : '-',
+ xive2_end_is_escalate(end) ? 'e' : '-',
+ xive2_end_is_escalate_end(end) ? 'N' : '-',
+ xive2_end_is_uncond_escalation(end) ? 'u' : '-',
+ xive2_end_is_silent_escalation(end) ? 's' : '-',
+ xive2_end_is_firmware1(end) ? 'f' : '-',
+ xive2_end_is_firmware2(end) ? 'F' : '-',
+ priority, nvp_blk, nvp_idx);
+
+ if (qaddr_base) {
+ monitor_printf(mon, " eq:@%08"PRIx64"% 6d/%5d ^%d",
+ qaddr_base, qindex, qentries, qgen);
+ xive2_end_queue_pic_print_info(end, 6, mon);
+ }
+ monitor_printf(mon, "\n");
+}
+
+void xive2_end_eas_pic_print_info(Xive2End *end, uint32_t end_idx,
+ Monitor *mon)
+{
+ Xive2Eas *eas = (Xive2Eas *) &end->w4;
+ uint8_t pq;
+
+ if (!xive2_end_is_escalate(end)) {
+ return;
+ }
+
+ pq = xive_get_field32(END2_W1_ESe, end->w1);
+
+ monitor_printf(mon, " %08x %c%c %c%c end:%02x/%04x data:%08x\n",
+ end_idx,
+ pq & XIVE_ESB_VAL_P ? 'P' : '-',
+ pq & XIVE_ESB_VAL_Q ? 'Q' : '-',
+ xive2_eas_is_valid(eas) ? 'v' : ' ',
+ xive2_eas_is_masked(eas) ? 'M' : ' ',
+ (uint8_t) xive_get_field64(EAS2_END_BLOCK, eas->w),
+ (uint32_t) xive_get_field64(EAS2_END_INDEX, eas->w),
+ (uint32_t) xive_get_field64(EAS2_END_DATA, eas->w));
+}
+
+static void xive2_end_enqueue(Xive2End *end, uint32_t data)
+{
+ uint64_t qaddr_base = xive2_end_qaddr(end);
+ uint32_t qsize = xive_get_field32(END2_W3_QSIZE, end->w3);
+ uint32_t qindex = xive_get_field32(END2_W1_PAGE_OFF, end->w1);
+ uint32_t qgen = xive_get_field32(END2_W1_GENERATION, end->w1);
+
+ uint64_t qaddr = qaddr_base + (qindex << 2);
+ uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff));
+ uint32_t qentries = 1 << (qsize + 10);
+
+ if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata),
+ MEMTXATTRS_UNSPECIFIED)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%"
+ HWADDR_PRIx "\n", qaddr);
+ return;
+ }
+
+ qindex = (qindex + 1) & (qentries - 1);
+ if (qindex == 0) {
+ qgen ^= 1;
+ end->w1 = xive_set_field32(END2_W1_GENERATION, end->w1, qgen);
+
+ /* TODO(PowerNV): reset GF bit on a cache watch operation */
+ end->w1 = xive_set_field32(END2_W1_GEN_FLIPPED, end->w1, qgen);
+ }
+ end->w1 = xive_set_field32(END2_W1_PAGE_OFF, end->w1, qindex);
+}
+
+/*
+ * XIVE Thread Interrupt Management Area (TIMA) - Gen2 mode
+ *
+ * TIMA Gen2 VP “save & restore” (S&R) indicated by H bit next to V bit
+ *
+ * - if a context is enabled with the H bit set, the VP context
+ * information is retrieved from the NVP structure (“check out”)
+ * and stored back on a context pull (“check in”), the SW receives
+ * the same context pull information as on P9
+ *
+ * - the H bit cannot be changed while the V bit is set, i.e. a
+ * context cannot be set up in the TIMA and then be “pushed” into
+ * the NVP by changing the H bit while the context is enabled
+ */
+
+static void xive2_tctx_save_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx)
+{
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ Xive2Nvp nvp;
+ uint8_t *regs = &tctx->regs[TM_QW1_OS];
+
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_hw(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_co(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not checkout\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (xive_get_field32(NVP2_W1_CO_THRID_VALID, nvp.w1) &&
+ xive_get_field32(NVP2_W1_CO_THRID, nvp.w1) != pir) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: NVP %x/%x invalid checkout Thread %x\n",
+ nvp_blk, nvp_idx, pir);
+ return;
+ }
+
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, regs[TM_IPB]);
+ nvp.w2 = xive_set_field32(NVP2_W2_CPPR, nvp.w2, regs[TM_CPPR]);
+ nvp.w2 = xive_set_field32(NVP2_W2_LSMFB, nvp.w2, regs[TM_LSMFB]);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+
+ nvp.w1 = xive_set_field32(NVP2_W1_CO, nvp.w1, 0);
+ /* NVP2_W1_CO_THRID_VALID only set once */
+ nvp.w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp.w1, 0xFFFF);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 1);
+}
+
+static void xive2_os_cam_decode(uint32_t cam, uint8_t *nvp_blk,
+ uint32_t *nvp_idx, bool *vo, bool *ho)
+{
+ *nvp_blk = xive2_nvp_blk(cam);
+ *nvp_idx = xive2_nvp_idx(cam);
+ *vo = !!(cam & TM2_QW1W2_VO);
+ *ho = !!(cam & TM2_QW1W2_HO);
+}
+
+uint64_t xive2_tm_pull_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, unsigned size)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
+ uint32_t qw1w2_new;
+ uint32_t cam = be32_to_cpu(qw1w2);
+ uint8_t nvp_blk;
+ uint32_t nvp_idx;
+ bool vo;
+ bool do_save;
+
+ xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_save);
+
+ if (!vo) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: pulling invalid NVP %x/%x !?\n",
+ nvp_blk, nvp_idx);
+ }
+
+ /* Invalidate CAM line */
+ qw1w2_new = xive_set_field32(TM2_QW1W2_VO, qw1w2, 0);
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2_new, 4);
+
+ if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE && do_save) {
+ xive2_tctx_save_os_ctx(xrtr, tctx, nvp_blk, nvp_idx);
+ }
+
+ return qw1w2;
+}
+
+static uint8_t xive2_tctx_restore_os_ctx(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp)
+{
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ uint8_t cppr;
+
+ if (!xive2_nvp_is_hw(nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is not HW owned\n",
+ nvp_blk, nvp_idx);
+ return 0;
+ }
+
+ cppr = xive_get_field32(NVP2_W2_CPPR, nvp->w2);
+ nvp->w2 = xive_set_field32(NVP2_W2_CPPR, nvp->w2, 0);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 2);
+
+ tctx->regs[TM_QW1_OS + TM_CPPR] = cppr;
+ /* we don't model LSMFB */
+
+ nvp->w1 = xive_set_field32(NVP2_W1_CO, nvp->w1, 1);
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID_VALID, nvp->w1, 1);
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_THRID, nvp->w1, pir);
+
+ /*
+ * Checkout privilege: 0:OS, 1:Pool, 2:Hard
+ *
+ * TODO: we only support OS push/pull
+ */
+ nvp->w1 = xive_set_field32(NVP2_W1_CO_PRIV, nvp->w1, 0);
+
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, nvp, 1);
+
+ /* return restored CPPR to generate a CPU exception if needed */
+ return cppr;
+}
+
+static void xive2_tctx_need_resend(Xive2Router *xrtr, XiveTCTX *tctx,
+ uint8_t nvp_blk, uint32_t nvp_idx,
+ bool do_restore)
+{
+ Xive2Nvp nvp;
+ uint8_t ipb;
+ uint8_t cppr = 0;
+
+ /*
+ * Grab the associated thread interrupt context registers in the
+ * associated NVP
+ */
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ /* Automatically restore thread context registers */
+ if (xive2_router_get_config(xrtr) & XIVE2_VP_SAVE_RESTORE &&
+ do_restore) {
+ cppr = xive2_tctx_restore_os_ctx(xrtr, tctx, nvp_blk, nvp_idx, &nvp);
+ }
+
+ ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2);
+ if (ipb) {
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, 0);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+ }
+
+ /* An IPB or CPPR change can trigger a resend */
+ if (ipb || cppr) {
+ xive_tctx_ipb_update(tctx, TM_QW1_OS, ipb);
+ }
+}
+
+/*
+ * Updating the OS CAM line can trigger a resend of interrupt
+ */
+void xive2_tm_push_os_ctx(XivePresenter *xptr, XiveTCTX *tctx,
+ hwaddr offset, uint64_t value, unsigned size)
+{
+ uint32_t cam = value;
+ uint32_t qw1w2 = cpu_to_be32(cam);
+ uint8_t nvp_blk;
+ uint32_t nvp_idx;
+ bool vo;
+ bool do_restore;
+
+ xive2_os_cam_decode(cam, &nvp_blk, &nvp_idx, &vo, &do_restore);
+
+ /* First update the thread context */
+ memcpy(&tctx->regs[TM_QW1_OS + TM_WORD2], &qw1w2, 4);
+
+ /* Check the interrupt pending bits */
+ if (vo) {
+ xive2_tctx_need_resend(XIVE2_ROUTER(xptr), tctx, nvp_blk, nvp_idx,
+ do_restore);
+ }
+}
+
+/*
+ * XIVE Router (aka. Virtualization Controller or IVRE)
+ */
+
+int xive2_router_get_eas(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ Xive2Eas *eas)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_eas(xrtr, eas_blk, eas_idx, eas);
+}
+
+static
+int xive2_router_get_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_pq(xrtr, eas_blk, eas_idx, pq);
+}
+
+static
+int xive2_router_set_pq(Xive2Router *xrtr, uint8_t eas_blk, uint32_t eas_idx,
+ uint8_t *pq)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->set_pq(xrtr, eas_blk, eas_idx, pq);
+}
+
+int xive2_router_get_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_end(xrtr, end_blk, end_idx, end);
+}
+
+int xive2_router_write_end(Xive2Router *xrtr, uint8_t end_blk, uint32_t end_idx,
+ Xive2End *end, uint8_t word_number)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
+}
+
+int xive2_router_get_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_nvp(xrtr, nvp_blk, nvp_idx, nvp);
+}
+
+int xive2_router_write_nvp(Xive2Router *xrtr, uint8_t nvp_blk, uint32_t nvp_idx,
+ Xive2Nvp *nvp, uint8_t word_number)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->write_nvp(xrtr, nvp_blk, nvp_idx, nvp, word_number);
+}
+
+static int xive2_router_get_block_id(Xive2Router *xrtr)
+{
+ Xive2RouterClass *xrc = XIVE2_ROUTER_GET_CLASS(xrtr);
+
+ return xrc->get_block_id(xrtr);
+}
+
+/*
+ * Encode the HW CAM line with 7bit or 8bit thread id. The thread id
+ * width and block id width is configurable at the IC level.
+ *
+ * chipid << 24 | 0000 0000 0000 0000 1 threadid (7Bit)
+ * chipid << 24 | 0000 0000 0000 0001 threadid (8Bit)
+ */
+static uint32_t xive2_tctx_hw_cam_line(XivePresenter *xptr, XiveTCTX *tctx)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xptr);
+ CPUPPCState *env = &POWERPC_CPU(tctx->cs)->env;
+ uint32_t pir = env->spr_cb[SPR_PIR].default_value;
+ uint8_t blk = xive2_router_get_block_id(xrtr);
+ uint8_t tid_shift =
+ xive2_router_get_config(xrtr) & XIVE2_THREADID_8BITS ? 8 : 7;
+ uint8_t tid_mask = (1 << tid_shift) - 1;
+
+ return xive2_nvp_cam_line(blk, 1 << tid_shift | (pir & tid_mask));
+}
+
+/*
+ * The thread context register words are in big-endian format.
+ */
+int xive2_presenter_tctx_match(XivePresenter *xptr, XiveTCTX *tctx,
+ uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint32_t logic_serv)
+{
+ uint32_t cam = xive2_nvp_cam_line(nvt_blk, nvt_idx);
+ uint32_t qw3w2 = xive_tctx_word2(&tctx->regs[TM_QW3_HV_PHYS]);
+ uint32_t qw2w2 = xive_tctx_word2(&tctx->regs[TM_QW2_HV_POOL]);
+ uint32_t qw1w2 = xive_tctx_word2(&tctx->regs[TM_QW1_OS]);
+ uint32_t qw0w2 = xive_tctx_word2(&tctx->regs[TM_QW0_USER]);
+
+ /*
+ * TODO (PowerNV): ignore mode. The low order bits of the NVT
+ * identifier are ignored in the "CAM" match.
+ */
+
+ if (format == 0) {
+ if (cam_ignore == true) {
+ /*
+ * F=0 & i=1: Logical server notification (bits ignored at
+ * the end of the NVT identifier)
+ */
+ qemu_log_mask(LOG_UNIMP, "XIVE: no support for LS NVT %x/%x\n",
+ nvt_blk, nvt_idx);
+ return -1;
+ }
+
+ /* F=0 & i=0: Specific NVT notification */
+
+ /* PHYS ring */
+ if ((be32_to_cpu(qw3w2) & TM2_QW3W2_VT) &&
+ cam == xive2_tctx_hw_cam_line(xptr, tctx)) {
+ return TM_QW3_HV_PHYS;
+ }
+
+ /* HV POOL ring */
+ if ((be32_to_cpu(qw2w2) & TM2_QW2W2_VP) &&
+ cam == xive_get_field32(TM2_QW2W2_POOL_CAM, qw2w2)) {
+ return TM_QW2_HV_POOL;
+ }
+
+ /* OS ring */
+ if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
+ cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) {
+ return TM_QW1_OS;
+ }
+ } else {
+ /* F=1 : User level Event-Based Branch (EBB) notification */
+
+ /* USER ring */
+ if ((be32_to_cpu(qw1w2) & TM2_QW1W2_VO) &&
+ (cam == xive_get_field32(TM2_QW1W2_OS_CAM, qw1w2)) &&
+ (be32_to_cpu(qw0w2) & TM2_QW0W2_VU) &&
+ (logic_serv == xive_get_field32(TM2_QW0W2_LOGIC_SERV, qw0w2))) {
+ return TM_QW0_USER;
+ }
+ }
+ return -1;
+}
+
+static void xive2_router_realize(DeviceState *dev, Error **errp)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(dev);
+
+ assert(xrtr->xfb);
+}
+
+/*
+ * Notification using the END ESe/ESn bit (Event State Buffer for
+ * escalation and notification). Provide further coalescing in the
+ * Router.
+ */
+static bool xive2_router_end_es_notify(Xive2Router *xrtr, uint8_t end_blk,
+ uint32_t end_idx, Xive2End *end,
+ uint32_t end_esmask)
+{
+ uint8_t pq = xive_get_field32(end_esmask, end->w1);
+ bool notify = xive_esb_trigger(&pq);
+
+ if (pq != xive_get_field32(end_esmask, end->w1)) {
+ end->w1 = xive_set_field32(end_esmask, end->w1, pq);
+ xive2_router_write_end(xrtr, end_blk, end_idx, end, 1);
+ }
+
+ /* ESe/n[Q]=1 : end of notification */
+ return notify;
+}
+
+/*
+ * An END trigger can come from an event trigger (IPI or HW) or from
+ * another chip. We don't model the PowerBus but the END trigger
+ * message has the same parameters as the function below.
+ */
+static void xive2_router_end_notify(Xive2Router *xrtr, uint8_t end_blk,
+ uint32_t end_idx, uint32_t end_data)
+{
+ Xive2End end;
+ uint8_t priority;
+ uint8_t format;
+ bool found;
+ Xive2Nvp nvp;
+ uint8_t nvp_blk;
+ uint32_t nvp_idx;
+
+ /* END cache lookup */
+ if (xive2_router_get_end(xrtr, end_blk, end_idx, &end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+ end_idx);
+ return;
+ }
+
+ if (!xive2_end_is_valid(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+ end_blk, end_idx);
+ return;
+ }
+
+ if (xive2_end_is_enqueue(&end)) {
+ xive2_end_enqueue(&end, end_data);
+ /* Enqueuing event data modifies the EQ toggle and index */
+ xive2_router_write_end(xrtr, end_blk, end_idx, &end, 1);
+ }
+
+ /*
+ * When the END is silent, we skip the notification part.
+ */
+ if (xive2_end_is_silent_escalation(&end)) {
+ goto do_escalation;
+ }
+
+ /*
+ * The W7 format depends on the F bit in W6. It defines the type
+ * of the notification :
+ *
+ * F=0 : single or multiple NVP notification
+ * F=1 : User level Event-Based Branch (EBB) notification, no
+ * priority
+ */
+ format = xive_get_field32(END2_W6_FORMAT_BIT, end.w6);
+ priority = xive_get_field32(END2_W7_F0_PRIORITY, end.w7);
+
+ /* The END is masked */
+ if (format == 0 && priority == 0xff) {
+ return;
+ }
+
+ /*
+ * Check the END ESn (Event State Buffer for notification) for
+ * even further coalescing in the Router
+ */
+ if (!xive2_end_is_notify(&end)) {
+ /* ESn[Q]=1 : end of notification */
+ if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
+ &end, END2_W1_ESn)) {
+ return;
+ }
+ }
+
+ /*
+ * Follows IVPE notification
+ */
+ nvp_blk = xive_get_field32(END2_W6_VP_BLOCK, end.w6);
+ nvp_idx = xive_get_field32(END2_W6_VP_OFFSET, end.w6);
+
+ /* NVP cache lookup */
+ if (xive2_router_get_nvp(xrtr, nvp_blk, nvp_idx, &nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: no NVP %x/%x\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ if (!xive2_nvp_is_valid(&nvp)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: NVP %x/%x is invalid\n",
+ nvp_blk, nvp_idx);
+ return;
+ }
+
+ found = xive_presenter_notify(xrtr->xfb, format, nvp_blk, nvp_idx,
+ xive_get_field32(END2_W6_IGNORE, end.w7),
+ priority,
+ xive_get_field32(END2_W7_F1_LOG_SERVER_ID, end.w7));
+
+ /* TODO: Auto EOI. */
+
+ if (found) {
+ return;
+ }
+
+ /*
+ * If no matching NVP is dispatched on a HW thread :
+ * - specific VP: update the NVP structure if backlog is activated
+ * - logical server : forward request to IVPE (not supported)
+ */
+ if (xive2_end_is_backlog(&end)) {
+ uint8_t ipb;
+
+ if (format == 1) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: END %x/%x invalid config: F1 & backlog\n",
+ end_blk, end_idx);
+ return;
+ }
+
+ /*
+ * Record the IPB in the associated NVP structure for later
+ * use. The presenter will resend the interrupt when the vCPU
+ * is dispatched again on a HW thread.
+ */
+ ipb = xive_get_field32(NVP2_W2_IPB, nvp.w2) |
+ xive_priority_to_ipb(priority);
+ nvp.w2 = xive_set_field32(NVP2_W2_IPB, nvp.w2, ipb);
+ xive2_router_write_nvp(xrtr, nvp_blk, nvp_idx, &nvp, 2);
+
+ /*
+ * On HW, follows a "Broadcast Backlog" to IVPEs
+ */
+ }
+
+do_escalation:
+ /*
+ * If activated, escalate notification using the ESe PQ bits and
+ * the EAS in w4-5
+ */
+ if (!xive2_end_is_escalate(&end)) {
+ return;
+ }
+
+ /*
+ * Check the END ESe (Event State Buffer for escalation) for even
+ * further coalescing in the Router
+ */
+ if (!xive2_end_is_uncond_escalation(&end)) {
+ /* ESe[Q]=1 : end of escalation notification */
+ if (!xive2_router_end_es_notify(xrtr, end_blk, end_idx,
+ &end, END2_W1_ESe)) {
+ return;
+ }
+ }
+
+ /*
+ * The END trigger becomes an Escalation trigger
+ */
+ xive2_router_end_notify(xrtr,
+ xive_get_field32(END2_W4_END_BLOCK, end.w4),
+ xive_get_field32(END2_W4_ESC_END_INDEX, end.w4),
+ xive_get_field32(END2_W5_ESC_END_DATA, end.w5));
+}
+
+void xive2_router_notify(XiveNotifier *xn, uint32_t lisn, bool pq_checked)
+{
+ Xive2Router *xrtr = XIVE2_ROUTER(xn);
+ uint8_t eas_blk = XIVE_EAS_BLOCK(lisn);
+ uint32_t eas_idx = XIVE_EAS_INDEX(lisn);
+ Xive2Eas eas;
+
+ /* EAS cache lookup */
+ if (xive2_router_get_eas(xrtr, eas_blk, eas_idx, &eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Unknown LISN %x\n", lisn);
+ return;
+ }
+
+ if (!pq_checked) {
+ bool notify;
+ uint8_t pq;
+
+ /* PQ cache lookup */
+ if (xive2_router_get_pq(xrtr, eas_blk, eas_idx, &pq)) {
+ /* Set FIR */
+ g_assert_not_reached();
+ }
+
+ notify = xive_esb_trigger(&pq);
+
+ if (xive2_router_set_pq(xrtr, eas_blk, eas_idx, &pq)) {
+ /* Set FIR */
+ g_assert_not_reached();
+ }
+
+ if (!notify) {
+ return;
+ }
+ }
+
+ if (!xive2_eas_is_valid(&eas)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: Invalid LISN %x\n", lisn);
+ return;
+ }
+
+ if (xive2_eas_is_masked(&eas)) {
+ /* Notification completed */
+ return;
+ }
+
+ /*
+ * The event trigger becomes an END trigger
+ */
+ xive2_router_end_notify(xrtr,
+ xive_get_field64(EAS2_END_BLOCK, eas.w),
+ xive_get_field64(EAS2_END_INDEX, eas.w),
+ xive_get_field64(EAS2_END_DATA, eas.w));
+}
+
+static Property xive2_router_properties[] = {
+ DEFINE_PROP_LINK("xive-fabric", Xive2Router, xfb,
+ TYPE_XIVE_FABRIC, XiveFabric *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive2_router_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);
+
+ dc->desc = "XIVE2 Router Engine";
+ device_class_set_props(dc, xive2_router_properties);
+ /* Parent is SysBusDeviceClass. No need to call its realize hook */
+ dc->realize = xive2_router_realize;
+ xnc->notify = xive2_router_notify;
+}
+
+static const TypeInfo xive2_router_info = {
+ .name = TYPE_XIVE2_ROUTER,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .abstract = true,
+ .instance_size = sizeof(Xive2Router),
+ .class_size = sizeof(Xive2RouterClass),
+ .class_init = xive2_router_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_XIVE_NOTIFIER },
+ { TYPE_XIVE_PRESENTER },
+ { }
+ }
+};
+
+static inline bool addr_is_even(hwaddr addr, uint32_t shift)
+{
+ return !((addr >> shift) & 1);
+}
+
+static uint64_t xive2_end_source_read(void *opaque, hwaddr addr, unsigned size)
+{
+ Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
+ uint32_t offset = addr & 0xFFF;
+ uint8_t end_blk;
+ uint32_t end_idx;
+ Xive2End end;
+ uint32_t end_esmask;
+ uint8_t pq;
+ uint64_t ret;
+
+ /*
+ * The block id should be deduced from the load address on the END
+ * ESB MMIO but our model only supports a single block per XIVE chip.
+ */
+ end_blk = xive2_router_get_block_id(xsrc->xrtr);
+ end_idx = addr >> (xsrc->esb_shift + 1);
+
+ if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+ end_idx);
+ return -1;
+ }
+
+ if (!xive2_end_is_valid(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+ end_blk, end_idx);
+ return -1;
+ }
+
+ end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
+ END2_W1_ESe;
+ pq = xive_get_field32(end_esmask, end.w1);
+
+ switch (offset) {
+ case XIVE_ESB_LOAD_EOI ... XIVE_ESB_LOAD_EOI + 0x7FF:
+ ret = xive_esb_eoi(&pq);
+
+ /* Forward the source event notification for routing ?? */
+ break;
+
+ case XIVE_ESB_GET ... XIVE_ESB_GET + 0x3FF:
+ ret = pq;
+ break;
+
+ case XIVE_ESB_SET_PQ_00 ... XIVE_ESB_SET_PQ_00 + 0x0FF:
+ case XIVE_ESB_SET_PQ_01 ... XIVE_ESB_SET_PQ_01 + 0x0FF:
+ case XIVE_ESB_SET_PQ_10 ... XIVE_ESB_SET_PQ_10 + 0x0FF:
+ case XIVE_ESB_SET_PQ_11 ... XIVE_ESB_SET_PQ_11 + 0x0FF:
+ ret = xive_esb_set(&pq, (offset >> 8) & 0x3);
+ break;
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB load addr %d\n",
+ offset);
+ return -1;
+ }
+
+ if (pq != xive_get_field32(end_esmask, end.w1)) {
+ end.w1 = xive_set_field32(end_esmask, end.w1, pq);
+ xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
+ }
+
+ return ret;
+}
+
+static void xive2_end_source_write(void *opaque, hwaddr addr,
+ uint64_t value, unsigned size)
+{
+ Xive2EndSource *xsrc = XIVE2_END_SOURCE(opaque);
+ uint32_t offset = addr & 0xFFF;
+ uint8_t end_blk;
+ uint32_t end_idx;
+ Xive2End end;
+ uint32_t end_esmask;
+ uint8_t pq;
+ bool notify = false;
+
+ /*
+ * The block id should be deduced from the load address on the END
+ * ESB MMIO but our model only supports a single block per XIVE chip.
+ */
+ end_blk = xive2_router_get_block_id(xsrc->xrtr);
+ end_idx = addr >> (xsrc->esb_shift + 1);
+
+ if (xive2_router_get_end(xsrc->xrtr, end_blk, end_idx, &end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: No END %x/%x\n", end_blk,
+ end_idx);
+ return;
+ }
+
+ if (!xive2_end_is_valid(&end)) {
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: END %x/%x is invalid\n",
+ end_blk, end_idx);
+ return;
+ }
+
+ end_esmask = addr_is_even(addr, xsrc->esb_shift) ? END2_W1_ESn :
+ END2_W1_ESe;
+ pq = xive_get_field32(end_esmask, end.w1);
+
+ switch (offset) {
+ case 0 ... 0x3FF:
+ notify = xive_esb_trigger(&pq);
+ break;
+
+ case XIVE_ESB_STORE_EOI ... XIVE_ESB_STORE_EOI + 0x3FF:
+ /* TODO: can we check StoreEOI availability from the router ? */
+ notify = xive_esb_eoi(&pq);
+ break;
+
+ case XIVE_ESB_INJECT ... XIVE_ESB_INJECT + 0x3FF:
+ if (end_esmask == END2_W1_ESe) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "XIVE: END %x/%x can not EQ inject on ESe\n",
+ end_blk, end_idx);
+ return;
+ }
+ notify = true;
+ break;
+
+ default:
+ qemu_log_mask(LOG_GUEST_ERROR, "XIVE: invalid END ESB write addr %d\n",
+ offset);
+ return;
+ }
+
+ if (pq != xive_get_field32(end_esmask, end.w1)) {
+ end.w1 = xive_set_field32(end_esmask, end.w1, pq);
+ xive2_router_write_end(xsrc->xrtr, end_blk, end_idx, &end, 1);
+ }
+
+ /* TODO: Forward the source event notification for routing */
+ if (notify) {
+ ;
+ }
+}
+
+static const MemoryRegionOps xive2_end_source_ops = {
+ .read = xive2_end_source_read,
+ .write = xive2_end_source_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+ .valid = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+ .impl = {
+ .min_access_size = 8,
+ .max_access_size = 8,
+ },
+};
+
+static void xive2_end_source_realize(DeviceState *dev, Error **errp)
+{
+ Xive2EndSource *xsrc = XIVE2_END_SOURCE(dev);
+
+ assert(xsrc->xrtr);
+
+ if (!xsrc->nr_ends) {
+ error_setg(errp, "Number of interrupt needs to be greater than 0");
+ return;
+ }
+
+ if (xsrc->esb_shift != XIVE_ESB_4K &&
+ xsrc->esb_shift != XIVE_ESB_64K) {
+ error_setg(errp, "Invalid ESB shift setting");
+ return;
+ }
+
+ /*
+ * Each END is assigned an even/odd pair of MMIO pages, the even page
+ * manages the ESn field while the odd page manages the ESe field.
+ */
+ memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc),
+ &xive2_end_source_ops, xsrc, "xive.end",
+ (1ull << (xsrc->esb_shift + 1)) * xsrc->nr_ends);
+}
+
+static Property xive2_end_source_properties[] = {
+ DEFINE_PROP_UINT32("nr-ends", Xive2EndSource, nr_ends, 0),
+ DEFINE_PROP_UINT32("shift", Xive2EndSource, esb_shift, XIVE_ESB_64K),
+ DEFINE_PROP_LINK("xive", Xive2EndSource, xrtr, TYPE_XIVE2_ROUTER,
+ Xive2Router *),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void xive2_end_source_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "XIVE END Source";
+ device_class_set_props(dc, xive2_end_source_properties);
+ dc->realize = xive2_end_source_realize;
+}
+
+static const TypeInfo xive2_end_source_info = {
+ .name = TYPE_XIVE2_END_SOURCE,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(Xive2EndSource),
+ .class_init = xive2_end_source_class_init,
+};
+
+static void xive2_register_types(void)
+{
+ type_register_static(&xive2_router_info);
+ type_register_static(&xive2_end_source_info);
+}
+
+type_init(xive2_register_types)
diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c
index e91249ef64..b5b384e9ee 100644
--- a/hw/pci-host/pnv_phb4.c
+++ b/hw/pci-host/pnv_phb4.c
@@ -485,6 +485,15 @@ static void pnv_phb4_update_xsrc(PnvPHB4 *phb)
flags = 0;
}
+ /*
+ * When the PQ disable configuration bit is set, the check on the
+ * PQ state bits is disabled on the PHB side (for MSI only) and it
+ * is performed on the IC side instead.
+ */
+ if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_PQ_DISABLE) {
+ flags |= XIVE_SRC_PQ_DISABLE;
+ }
+
phb->xsrc.esb_shift = shift;
phb->xsrc.esb_flags = flags;
@@ -1568,40 +1577,36 @@ static PnvPhb4PecState *pnv_phb4_get_pec(PnvChip *chip, PnvPHB4 *phb,
static void pnv_phb4_realize(DeviceState *dev, Error **errp)
{
PnvPHB4 *phb = PNV_PHB4(dev);
+ PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
+ PnvChip *chip = pnv_get_chip(pnv, phb->chip_id);
PCIHostState *pci = PCI_HOST_BRIDGE(dev);
XiveSource *xsrc = &phb->xsrc;
+ BusState *s;
Error *local_err = NULL;
int nr_irqs;
char name[32];
- /* User created PHB */
- if (!phb->pec) {
- PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine());
- PnvChip *chip = pnv_get_chip(pnv, phb->chip_id);
- BusState *s;
-
- if (!chip) {
- error_setg(errp, "invalid chip id: %d", phb->chip_id);
- return;
- }
+ if (!chip) {
+ error_setg(errp, "invalid chip id: %d", phb->chip_id);
+ return;
+ }
+ /* User created PHBs need to be assigned to a PEC */
+ if (!phb->pec) {
phb->pec = pnv_phb4_get_pec(chip, phb, &local_err);
if (local_err) {
error_propagate(errp, local_err);
return;
}
+ }
- /*
- * Reparent user created devices to the chip to build
- * correctly the device tree.
- */
- pnv_chip_parent_fixup(chip, OBJECT(phb), phb->phb_id);
+ /* Reparent the PHB to the chip to build the device tree */
+ pnv_chip_parent_fixup(chip, OBJECT(phb), phb->phb_id);
- s = qdev_get_parent_bus(DEVICE(chip));
- if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
- error_propagate(errp, local_err);
- return;
- }
+ s = qdev_get_parent_bus(DEVICE(chip));
+ if (!qdev_set_parent_bus(DEVICE(phb), s, &local_err)) {
+ error_propagate(errp, local_err);
+ return;
}
/* Set the "big_phb" flag */
@@ -1664,15 +1669,64 @@ static const char *pnv_phb4_root_bus_path(PCIHostState *host_bridge,
return phb->bus_path;
}
-static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
+/*
+ * Address base trigger mode (POWER10)
+ *
+ * Trigger directly the IC ESB page
+ */
+static void pnv_phb4_xive_notify_abt(PnvPHB4 *phb, uint32_t srcno,
+ bool pq_checked)
+{
+ uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
+ uint64_t data = 0; /* trigger data : don't care */
+ hwaddr addr;
+ MemTxResult result;
+ int esb_shift;
+
+ if (notif_port & PHB_INT_NOTIFY_ADDR_64K) {
+ esb_shift = 16;
+ } else {
+ esb_shift = 12;
+ }
+
+ /* Compute the address of the IC ESB management page */
+ addr = (notif_port & ~PHB_INT_NOTIFY_ADDR_64K);
+ addr |= (1ull << (esb_shift + 1)) * srcno;
+ addr |= (1ull << esb_shift);
+
+ /*
+ * When the PQ state bits are checked on the PHB, the associated
+ * PQ state bits on the IC should be ignored. Use the unconditional
+ * trigger offset to inject a trigger on the IC. This is always
+ * the case for LSIs
+ */
+ if (pq_checked) {
+ addr |= XIVE_ESB_INJECT;
+ }
+
+ trace_pnv_phb4_xive_notify_ic(addr, data);
+
+ address_space_stq_be(&address_space_memory, addr, data,
+ MEMTXATTRS_UNSPECIFIED, &result);
+ if (result != MEMTX_OK) {
+ phb_error(phb, "trigger failed @%"HWADDR_PRIx "\n", addr);
+ return;
+ }
+}
+
+static void pnv_phb4_xive_notify_ic(PnvPHB4 *phb, uint32_t srcno,
+ bool pq_checked)
{
- PnvPHB4 *phb = PNV_PHB4(xf);
uint64_t notif_port = phb->regs[PHB_INT_NOTIFY_ADDR >> 3];
uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
- uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
+ uint64_t data = offset | srcno;
MemTxResult result;
- trace_pnv_phb4_xive_notify(notif_port, data);
+ if (pq_checked) {
+ data |= XIVE_TRIGGER_PQ;
+ }
+
+ trace_pnv_phb4_xive_notify_ic(notif_port, data);
address_space_stq_be(&address_space_memory, notif_port, data,
MEMTXATTRS_UNSPECIFIED, &result);
@@ -1682,6 +1736,18 @@ static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno)
}
}
+static void pnv_phb4_xive_notify(XiveNotifier *xf, uint32_t srcno,
+ bool pq_checked)
+{
+ PnvPHB4 *phb = PNV_PHB4(xf);
+
+ if (phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE) {
+ pnv_phb4_xive_notify_abt(phb, srcno, pq_checked);
+ } else {
+ pnv_phb4_xive_notify_ic(phb, srcno, pq_checked);
+ }
+}
+
static Property pnv_phb4_properties[] = {
DEFINE_PROP_UINT32("index", PnvPHB4, phb_id, 0),
DEFINE_PROP_UINT32("chip-id", PnvPHB4, chip_id, 0),
@@ -1816,9 +1882,29 @@ static const TypeInfo pnv_phb4_root_port_info = {
.class_init = pnv_phb4_root_port_class_init,
};
+static void pnv_phb5_root_port_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
+
+ dc->desc = "IBM PHB5 PCIE Root Port";
+ dc->user_creatable = true;
+
+ k->vendor_id = PCI_VENDOR_ID_IBM;
+ k->device_id = PNV_PHB5_DEVICE_ID;
+}
+
+static const TypeInfo pnv_phb5_root_port_info = {
+ .name = TYPE_PNV_PHB5_ROOT_PORT,
+ .parent = TYPE_PNV_PHB4_ROOT_PORT,
+ .instance_size = sizeof(PnvPHB4RootPort),
+ .class_init = pnv_phb5_root_port_class_init,
+};
+
static void pnv_phb4_register_types(void)
{
type_register_static(&pnv_phb4_root_bus_info);
+ type_register_static(&pnv_phb5_root_port_info);
type_register_static(&pnv_phb4_root_port_info);
type_register_static(&pnv_phb4_type_info);
type_register_static(&pnv_phb4_iommu_memory_region_info);
@@ -1828,10 +1914,15 @@ type_init(pnv_phb4_register_types);
void pnv_phb4_pic_print_info(PnvPHB4 *phb, Monitor *mon)
{
+ uint64_t notif_port =
+ phb->regs[PHB_INT_NOTIFY_ADDR >> 3] & ~PHB_INT_NOTIFY_ADDR_64K;
uint32_t offset = phb->regs[PHB_INT_NOTIFY_INDEX >> 3];
+ bool abt = !!(phb->regs[PHB_CTRLR >> 3] & PHB_CTRLR_IRQ_ABT_MODE);
- monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x\n",
+ monitor_printf(mon, "PHB4[%x:%x] Source %08x .. %08x %s @%"HWADDR_PRIx"\n",
phb->chip_id, phb->phb_id,
- offset, offset + phb->xsrc.nr_irqs - 1);
+ offset, offset + phb->xsrc.nr_irqs - 1,
+ abt ? "ABT" : "",
+ notif_port);
xive_source_pic_print_info(&phb->xsrc, 0, mon);
}
diff --git a/hw/pci-host/pnv_phb4_pec.c b/hw/pci-host/pnv_phb4_pec.c
index 40d89fda56..0ab36e9c8f 100644
--- a/hw/pci-host/pnv_phb4_pec.c
+++ b/hw/pci-host/pnv_phb4_pec.c
@@ -281,9 +281,62 @@ static const TypeInfo pnv_pec_type_info = {
}
};
+/*
+ * POWER10 definitions
+ */
+
+static uint32_t pnv_phb5_pec_xscom_pci_base(PnvPhb4PecState *pec)
+{
+ return PNV10_XSCOM_PEC_PCI_BASE + 0x1000000 * pec->index;
+}
+
+static uint32_t pnv_phb5_pec_xscom_nest_base(PnvPhb4PecState *pec)
+{
+ /* index goes down ... */
+ return PNV10_XSCOM_PEC_NEST_BASE - 0x1000000 * pec->index;
+}
+
+/*
+ * PEC0 -> 3 stacks
+ * PEC1 -> 3 stacks
+ */
+static const uint32_t pnv_phb5_pec_num_stacks[] = { 3, 3 };
+
+static void pnv_phb5_pec_class_init(ObjectClass *klass, void *data)
+{
+ PnvPhb4PecClass *pecc = PNV_PHB4_PEC_CLASS(klass);
+ static const char compat[] = "ibm,power10-pbcq";
+ static const char stk_compat[] = "ibm,power10-phb-stack";
+
+ pecc->xscom_nest_base = pnv_phb5_pec_xscom_nest_base;
+ pecc->xscom_pci_base = pnv_phb5_pec_xscom_pci_base;
+ pecc->xscom_nest_size = PNV10_XSCOM_PEC_NEST_SIZE;
+ pecc->xscom_pci_size = PNV10_XSCOM_PEC_PCI_SIZE;
+ pecc->compat = compat;
+ pecc->compat_size = sizeof(compat);
+ pecc->stk_compat = stk_compat;
+ pecc->stk_compat_size = sizeof(stk_compat);
+ pecc->version = PNV_PHB5_VERSION;
+ pecc->num_phbs = pnv_phb5_pec_num_stacks;
+ pecc->rp_model = TYPE_PNV_PHB5_ROOT_PORT;
+}
+
+static const TypeInfo pnv_phb5_pec_type_info = {
+ .name = TYPE_PNV_PHB5_PEC,
+ .parent = TYPE_PNV_PHB4_PEC,
+ .instance_size = sizeof(PnvPhb4PecState),
+ .class_init = pnv_phb5_pec_class_init,
+ .class_size = sizeof(PnvPhb4PecClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_PNV_XSCOM_INTERFACE },
+ { }
+ }
+};
+
static void pnv_pec_register_types(void)
{
type_register_static(&pnv_pec_type_info);
+ type_register_static(&pnv_phb5_pec_type_info);
}
type_init(pnv_pec_register_types);
diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events
index 630e9fcc5e..6e5d8d3355 100644
--- a/hw/pci-host/trace-events
+++ b/hw/pci-host/trace-events
@@ -32,3 +32,5 @@ unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
# pnv_phb4.c
pnv_phb4_xive_notify(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64
+pnv_phb4_xive_notify_ic(uint64_t addr, uint64_t data) "addr=@0x%"PRIx64" data=0x%"PRIx64
+pnv_phb4_xive_notify_abt(uint64_t notif_port, uint64_t data) "notif=@0x%"PRIx64" data=0x%"PRIx64
diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c
index 837146a2fb..0ac86e104f 100644
--- a/hw/ppc/pnv.c
+++ b/hw/ppc/pnv.c
@@ -380,9 +380,12 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off)
cpu_to_be32(io_base),
cpu_to_be32(8)
};
+ uint32_t irq;
char *name;
int node;
+ irq = object_property_get_uint(OBJECT(d), "irq", &error_fatal);
+
name = g_strdup_printf("%s@i%x", qdev_fw_name(DEVICE(d)), io_base);
node = fdt_add_subnode(fdt, lpc_off, name);
_FDT(node);
@@ -394,7 +397,7 @@ static void pnv_dt_serial(ISADevice *d, void *fdt, int lpc_off)
_FDT((fdt_setprop_cell(fdt, node, "clock-frequency", 1843200)));
_FDT((fdt_setprop_cell(fdt, node, "current-speed", 115200)));
- _FDT((fdt_setprop_cell(fdt, node, "interrupts", d->isairq[0])));
+ _FDT((fdt_setprop_cell(fdt, node, "interrupts", irq)));
_FDT((fdt_setprop_cell(fdt, node, "interrupt-parent",
fdt_get_phandle(fdt, lpc_off))));
@@ -722,7 +725,11 @@ static void pnv_chip_power10_pic_print_info(PnvChip *chip, Monitor *mon)
{
Pnv10Chip *chip10 = PNV10_CHIP(chip);
+ pnv_xive2_pic_print_info(&chip10->xive, mon);
pnv_psi_pic_print_info(&chip10->psi, mon);
+
+ object_child_foreach_recursive(OBJECT(chip),
+ pnv_chip_power9_pic_print_info_child, mon);
}
/* Always give the first 1GB to chip 0 else we won't boot */
@@ -1044,27 +1051,45 @@ static void pnv_chip_power9_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
static void pnv_chip_power10_intc_create(PnvChip *chip, PowerPCCPU *cpu,
Error **errp)
{
+ Pnv10Chip *chip10 = PNV10_CHIP(chip);
+ Error *local_err = NULL;
+ Object *obj;
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
- /* Will be defined when the interrupt controller is */
- pnv_cpu->intc = NULL;
+ /*
+ * The core creates its interrupt presenter but the XIVE2 interrupt
+ * controller object is initialized afterwards. Hopefully, it's
+ * only used at runtime.
+ */
+ obj = xive_tctx_create(OBJECT(cpu), XIVE_PRESENTER(&chip10->xive),
+ &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ pnv_cpu->intc = obj;
}
static void pnv_chip_power10_intc_reset(PnvChip *chip, PowerPCCPU *cpu)
{
- ;
+ PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
+
+ xive_tctx_reset(XIVE_TCTX(pnv_cpu->intc));
}
static void pnv_chip_power10_intc_destroy(PnvChip *chip, PowerPCCPU *cpu)
{
PnvCPUState *pnv_cpu = pnv_cpu_state(cpu);
+ xive_tctx_destroy(XIVE_TCTX(pnv_cpu->intc));
pnv_cpu->intc = NULL;
}
static void pnv_chip_power10_intc_print_info(PnvChip *chip, PowerPCCPU *cpu,
Monitor *mon)
{
+ xive_tctx_pic_print_info(XIVE_TCTX(pnv_cpu_state(cpu)->intc), mon);
}
/*
@@ -1366,6 +1391,21 @@ static void pnv_chip_power9_instance_init(Object *obj)
}
}
+static void pnv_chip_quad_realize_one(PnvChip *chip, PnvQuad *eq,
+ PnvCore *pnv_core)
+{
+ char eq_name[32];
+ int core_id = CPU_CORE(pnv_core)->core_id;
+
+ snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id);
+ object_initialize_child_with_props(OBJECT(chip), eq_name, eq,
+ sizeof(*eq), TYPE_PNV_QUAD,
+ &error_fatal, NULL);
+
+ object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal);
+ qdev_realize(DEVICE(eq), NULL, &error_fatal);
+}
+
static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp)
{
PnvChip *chip = PNV_CHIP(chip9);
@@ -1375,18 +1415,9 @@ static void pnv_chip_quad_realize(Pnv9Chip *chip9, Error **errp)
chip9->quads = g_new0(PnvQuad, chip9->nr_quads);
for (i = 0; i < chip9->nr_quads; i++) {
- char eq_name[32];
PnvQuad *eq = &chip9->quads[i];
- PnvCore *pnv_core = chip->cores[i * 4];
- int core_id = CPU_CORE(pnv_core)->core_id;
- snprintf(eq_name, sizeof(eq_name), "eq[%d]", core_id);
- object_initialize_child_with_props(OBJECT(chip), eq_name, eq,
- sizeof(*eq), TYPE_PNV_QUAD,
- &error_fatal, NULL);
-
- object_property_set_int(OBJECT(eq), "quad-id", core_id, &error_fatal);
- qdev_realize(DEVICE(eq), NULL, &error_fatal);
+ pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]);
pnv_xscom_add_subregion(chip, PNV9_XSCOM_EQ_BASE(eq->quad_id),
&eq->xscom_regs);
@@ -1469,6 +1500,9 @@ static void pnv_chip_power9_realize(DeviceState *dev, Error **errp)
/* Processor Service Interface (PSI) Host Bridge */
object_property_set_int(OBJECT(&chip9->psi), "bar", PNV9_PSIHB_BASE(chip),
&error_fatal);
+ /* This is the only device with 4k ESB pages */
+ object_property_set_int(OBJECT(&chip9->psi), "shift", XIVE_ESB_4K,
+ &error_fatal);
if (!qdev_realize(DEVICE(&chip9->psi), NULL, errp)) {
return;
}
@@ -1553,10 +1587,73 @@ static void pnv_chip_power9_class_init(ObjectClass *klass, void *data)
static void pnv_chip_power10_instance_init(Object *obj)
{
+ PnvChip *chip = PNV_CHIP(obj);
Pnv10Chip *chip10 = PNV10_CHIP(obj);
+ PnvChipClass *pcc = PNV_CHIP_GET_CLASS(obj);
+ int i;
+ object_initialize_child(obj, "xive", &chip10->xive, TYPE_PNV_XIVE2);
+ object_property_add_alias(obj, "xive-fabric", OBJECT(&chip10->xive),
+ "xive-fabric");
object_initialize_child(obj, "psi", &chip10->psi, TYPE_PNV10_PSI);
object_initialize_child(obj, "lpc", &chip10->lpc, TYPE_PNV10_LPC);
+ object_initialize_child(obj, "occ", &chip10->occ, TYPE_PNV10_OCC);
+ object_initialize_child(obj, "homer", &chip10->homer, TYPE_PNV10_HOMER);
+
+ if (defaults_enabled()) {
+ chip->num_pecs = pcc->num_pecs;
+ }
+
+ for (i = 0; i < chip->num_pecs; i++) {
+ object_initialize_child(obj, "pec[*]", &chip10->pecs[i],
+ TYPE_PNV_PHB5_PEC);
+ }
+}
+
+static void pnv_chip_power10_quad_realize(Pnv10Chip *chip10, Error **errp)
+{
+ PnvChip *chip = PNV_CHIP(chip10);
+ int i;
+
+ chip10->nr_quads = DIV_ROUND_UP(chip->nr_cores, 4);
+ chip10->quads = g_new0(PnvQuad, chip10->nr_quads);
+
+ for (i = 0; i < chip10->nr_quads; i++) {
+ PnvQuad *eq = &chip10->quads[i];
+
+ pnv_chip_quad_realize_one(chip, eq, chip->cores[i * 4]);
+
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_EQ_BASE(eq->quad_id),
+ &eq->xscom_regs);
+ }
+}
+
+static void pnv_chip_power10_phb_realize(PnvChip *chip, Error **errp)
+{
+ Pnv10Chip *chip10 = PNV10_CHIP(chip);
+ int i;
+
+ for (i = 0; i < chip->num_pecs; i++) {
+ PnvPhb4PecState *pec = &chip10->pecs[i];
+ PnvPhb4PecClass *pecc = PNV_PHB4_PEC_GET_CLASS(pec);
+ uint32_t pec_nest_base;
+ uint32_t pec_pci_base;
+
+ object_property_set_int(OBJECT(pec), "index", i, &error_fatal);
+ object_property_set_int(OBJECT(pec), "chip-id", chip->chip_id,
+ &error_fatal);
+ object_property_set_link(OBJECT(pec), "chip", OBJECT(chip),
+ &error_fatal);
+ if (!qdev_realize(DEVICE(pec), NULL, errp)) {
+ return;
+ }
+
+ pec_nest_base = pecc->xscom_nest_base(pec);
+ pec_pci_base = pecc->xscom_pci_base(pec);
+
+ pnv_xscom_add_subregion(chip, pec_nest_base, &pec->nest_regs_mr);
+ pnv_xscom_add_subregion(chip, pec_pci_base, &pec->pci_regs_mr);
+ }
}
static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
@@ -1580,9 +1677,39 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
return;
}
+ pnv_chip_power10_quad_realize(chip10, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+
+ /* XIVE2 interrupt controller (POWER10) */
+ object_property_set_int(OBJECT(&chip10->xive), "ic-bar",
+ PNV10_XIVE2_IC_BASE(chip), &error_fatal);
+ object_property_set_int(OBJECT(&chip10->xive), "esb-bar",
+ PNV10_XIVE2_ESB_BASE(chip), &error_fatal);
+ object_property_set_int(OBJECT(&chip10->xive), "end-bar",
+ PNV10_XIVE2_END_BASE(chip), &error_fatal);
+ object_property_set_int(OBJECT(&chip10->xive), "nvpg-bar",
+ PNV10_XIVE2_NVPG_BASE(chip), &error_fatal);
+ object_property_set_int(OBJECT(&chip10->xive), "nvc-bar",
+ PNV10_XIVE2_NVC_BASE(chip), &error_fatal);
+ object_property_set_int(OBJECT(&chip10->xive), "tm-bar",
+ PNV10_XIVE2_TM_BASE(chip), &error_fatal);
+ object_property_set_link(OBJECT(&chip10->xive), "chip", OBJECT(chip),
+ &error_abort);
+ if (!sysbus_realize(SYS_BUS_DEVICE(&chip10->xive), errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_XIVE2_BASE,
+ &chip10->xive.xscom_regs);
+
/* Processor Service Interface (PSI) Host Bridge */
object_property_set_int(OBJECT(&chip10->psi), "bar",
PNV10_PSIHB_BASE(chip), &error_fatal);
+ /* PSI can now be configured to use 64k ESB pages on POWER10 */
+ object_property_set_int(OBJECT(&chip10->psi), "shift", XIVE_ESB_64K,
+ &error_fatal);
if (!qdev_realize(DEVICE(&chip10->psi), NULL, errp)) {
return;
}
@@ -1601,6 +1728,41 @@ static void pnv_chip_power10_realize(DeviceState *dev, Error **errp)
chip->fw_mr = &chip10->lpc.isa_fw;
chip->dt_isa_nodename = g_strdup_printf("/lpcm-opb@%" PRIx64 "/lpc@0",
(uint64_t) PNV10_LPCM_BASE(chip));
+
+ /* Create the simplified OCC model */
+ object_property_set_link(OBJECT(&chip10->occ), "psi", OBJECT(&chip10->psi),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->occ), NULL, errp)) {
+ return;
+ }
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_OCC_BASE,
+ &chip10->occ.xscom_regs);
+
+ /* OCC SRAM model */
+ memory_region_add_subregion(get_system_memory(),
+ PNV10_OCC_SENSOR_BASE(chip),
+ &chip10->occ.sram_regs);
+
+ /* HOMER */
+ object_property_set_link(OBJECT(&chip10->homer), "chip", OBJECT(chip),
+ &error_abort);
+ if (!qdev_realize(DEVICE(&chip10->homer), NULL, errp)) {
+ return;
+ }
+ /* Homer Xscom region */
+ pnv_xscom_add_subregion(chip, PNV10_XSCOM_PBA_BASE,
+ &chip10->homer.pba_regs);
+
+ /* Homer mmio region */
+ memory_region_add_subregion(get_system_memory(), PNV10_HOMER_BASE(chip),
+ &chip10->homer.regs);
+
+ /* PHBs */
+ pnv_chip_power10_phb_realize(chip, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
}
static uint32_t pnv_chip_power10_xscom_pcba(PnvChip *chip, uint64_t addr)
@@ -1627,6 +1789,7 @@ static void pnv_chip_power10_class_init(ObjectClass *klass, void *data)
k->xscom_core_base = pnv_chip_power10_xscom_core_base;
k->xscom_pcba = pnv_chip_power10_xscom_pcba;
dc->desc = "PowerNV Chip POWER10";
+ k->num_pecs = PNV10_CHIP_MAX_PEC;
device_class_set_parent_realize(dc, pnv_chip_power10_realize,
&k->parent_realize);
@@ -1924,6 +2087,35 @@ static int pnv_match_nvt(XiveFabric *xfb, uint8_t format,
return total_count;
}
+static int pnv10_xive_match_nvt(XiveFabric *xfb, uint8_t format,
+ uint8_t nvt_blk, uint32_t nvt_idx,
+ bool cam_ignore, uint8_t priority,
+ uint32_t logic_serv,
+ XiveTCTXMatch *match)
+{
+ PnvMachineState *pnv = PNV_MACHINE(xfb);
+ int total_count = 0;
+ int i;
+
+ for (i = 0; i < pnv->num_chips; i++) {
+ Pnv10Chip *chip10 = PNV10_CHIP(pnv->chips[i]);
+ XivePresenter *xptr = XIVE_PRESENTER(&chip10->xive);
+ XivePresenterClass *xpc = XIVE_PRESENTER_GET_CLASS(xptr);
+ int count;
+
+ count = xpc->match_nvt(xptr, format, nvt_blk, nvt_idx, cam_ignore,
+ priority, logic_serv, match);
+
+ if (count < 0) {
+ return count;
+ }
+
+ total_count += count;
+ }
+
+ return total_count;
+}
+
static void pnv_machine_power8_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
@@ -1968,6 +2160,7 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
{
MachineClass *mc = MACHINE_CLASS(oc);
PnvMachineClass *pmc = PNV_MACHINE_CLASS(oc);
+ XiveFabricClass *xfc = XIVE_FABRIC_CLASS(oc);
static const char compat[] = "qemu,powernv10\0ibm,powernv";
mc->desc = "IBM PowerNV (Non-Virtualized) POWER10";
@@ -1976,6 +2169,8 @@ static void pnv_machine_power10_class_init(ObjectClass *oc, void *data)
pmc->compat = compat;
pmc->compat_size = sizeof(compat);
pmc->dt_power_mgt = pnv_dt_power_mgt;
+
+ xfc->match_nvt = pnv10_xive_match_nvt;
}
static bool pnv_machine_get_hb(Object *obj, Error **errp)
@@ -2087,6 +2282,10 @@ static const TypeInfo types[] = {
.name = MACHINE_TYPE_NAME("powernv10"),
.parent = TYPE_PNV_MACHINE,
.class_init = pnv_machine_power10_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_XIVE_FABRIC },
+ { },
+ },
},
{
.name = MACHINE_TYPE_NAME("powernv9"),
diff --git a/hw/ppc/pnv_homer.c b/hw/ppc/pnv_homer.c
index 9a262629b7..ea73919e54 100644
--- a/hw/ppc/pnv_homer.c
+++ b/hw/ppc/pnv_homer.c
@@ -332,6 +332,69 @@ static const TypeInfo pnv_homer_power9_type_info = {
.class_init = pnv_homer_power9_class_init,
};
+static uint64_t pnv_homer_power10_pba_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ PnvHomer *homer = PNV_HOMER(opaque);
+ PnvChip *chip = homer->chip;
+ uint32_t reg = addr >> 3;
+ uint64_t val = 0;
+
+ switch (reg) {
+ case PBA_BAR0:
+ val = PNV10_HOMER_BASE(chip);
+ break;
+ case PBA_BARMASK0: /* P10 homer region mask */
+ val = (PNV10_HOMER_SIZE - 1) & 0x300000;
+ break;
+ case PBA_BAR2: /* P10 occ common area */
+ val = PNV10_OCC_COMMON_AREA_BASE;
+ break;
+ case PBA_BARMASK2: /* P10 occ common area size */
+ val = (PNV10_OCC_COMMON_AREA_SIZE - 1) & 0x700000;
+ break;
+ default:
+ qemu_log_mask(LOG_UNIMP, "PBA: read to unimplemented register: 0x%"
+ HWADDR_PRIx "\n", addr >> 3);
+ }
+ return val;
+}
+
+static void pnv_homer_power10_pba_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ qemu_log_mask(LOG_UNIMP, "PBA: write to unimplemented register: 0x%"
+ HWADDR_PRIx "\n", addr >> 3);
+}
+
+static const MemoryRegionOps pnv_homer_power10_pba_ops = {
+ .read = pnv_homer_power10_pba_read,
+ .write = pnv_homer_power10_pba_write,
+ .valid.min_access_size = 8,
+ .valid.max_access_size = 8,
+ .impl.min_access_size = 8,
+ .impl.max_access_size = 8,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void pnv_homer_power10_class_init(ObjectClass *klass, void *data)
+{
+ PnvHomerClass *homer = PNV_HOMER_CLASS(klass);
+
+ homer->pba_size = PNV10_XSCOM_PBA_SIZE;
+ homer->pba_ops = &pnv_homer_power10_pba_ops;
+ homer->homer_size = PNV10_HOMER_SIZE;
+ homer->homer_ops = &pnv_power9_homer_ops; /* TODO */
+ homer->core_max_base = PNV9_CORE_MAX_BASE;
+}
+
+static const TypeInfo pnv_homer_power10_type_info = {
+ .name = TYPE_PNV10_HOMER,
+ .parent = TYPE_PNV_HOMER,
+ .instance_size = sizeof(PnvHomer),
+ .class_init = pnv_homer_power10_class_init,
+};
+
static void pnv_homer_realize(DeviceState *dev, Error **errp)
{
PnvHomer *homer = PNV_HOMER(dev);
@@ -377,6 +440,7 @@ static void pnv_homer_register_types(void)
type_register_static(&pnv_homer_type_info);
type_register_static(&pnv_homer_power8_type_info);
type_register_static(&pnv_homer_power9_type_info);
+ type_register_static(&pnv_homer_power10_type_info);
}
type_init(pnv_homer_register_types);
diff --git a/hw/ppc/pnv_occ.c b/hw/ppc/pnv_occ.c
index 5a716c256e..4ed66f5e1f 100644
--- a/hw/ppc/pnv_occ.c
+++ b/hw/ppc/pnv_occ.c
@@ -236,7 +236,9 @@ static const MemoryRegionOps pnv_occ_power9_xscom_ops = {
static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
{
PnvOCCClass *poc = PNV_OCC_CLASS(klass);
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->desc = "PowerNV OCC Controller (POWER9)";
poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
poc->xscom_ops = &pnv_occ_power9_xscom_ops;
poc->psi_irq = PSIHB9_IRQ_OCC;
@@ -249,6 +251,19 @@ static const TypeInfo pnv_occ_power9_type_info = {
.class_init = pnv_occ_power9_class_init,
};
+static void pnv_occ_power10_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->desc = "PowerNV OCC Controller (POWER10)";
+}
+
+static const TypeInfo pnv_occ_power10_type_info = {
+ .name = TYPE_PNV10_OCC,
+ .parent = TYPE_PNV9_OCC,
+ .class_init = pnv_occ_power10_class_init,
+};
+
static void pnv_occ_realize(DeviceState *dev, Error **errp)
{
PnvOCC *occ = PNV_OCC(dev);
@@ -297,6 +312,7 @@ static void pnv_occ_register_types(void)
type_register_static(&pnv_occ_type_info);
type_register_static(&pnv_occ_power8_type_info);
type_register_static(&pnv_occ_power9_type_info);
+ type_register_static(&pnv_occ_power10_type_info);
}
type_init(pnv_occ_register_types);
diff --git a/hw/ppc/pnv_psi.c b/hw/ppc/pnv_psi.c
index cd9a2c5952..466fb79798 100644
--- a/hw/ppc/pnv_psi.c
+++ b/hw/ppc/pnv_psi.c
@@ -601,7 +601,6 @@ static const TypeInfo pnv_psi_power8_info = {
#define PSIHB9_IRQ_METHOD PPC_BIT(0)
#define PSIHB9_IRQ_RESET PPC_BIT(1)
#define PSIHB9_ESB_CI_BASE 0x60
-#define PSIHB9_ESB_CI_64K PPC_BIT(1)
#define PSIHB9_ESB_CI_ADDR_MASK PPC_BITMASK(8, 47)
#define PSIHB9_ESB_CI_VALID PPC_BIT(63)
#define PSIHB9_ESB_NOTIF_ADDR 0x68
@@ -646,7 +645,15 @@ static const TypeInfo pnv_psi_power8_info = {
#define PSIHB9_IRQ_STAT_DIO PPC_BIT(12)
#define PSIHB9_IRQ_STAT_PSU PPC_BIT(13)
-static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)
+/* P10 register extensions */
+
+#define PSIHB10_CR PSIHB9_CR
+#define PSIHB10_CR_STORE_EOI PPC_BIT(12)
+
+#define PSIHB10_ESB_CI_BASE PSIHB9_ESB_CI_BASE
+#define PSIHB10_ESB_CI_64K PPC_BIT(1)
+
+static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno, bool pq_checked)
{
PnvPsi *psi = PNV_PSI(xf);
uint64_t notif_port = psi->regs[PSIHB_REG(PSIHB9_ESB_NOTIF_ADDR)];
@@ -655,9 +662,13 @@ static void pnv_psi_notify(XiveNotifier *xf, uint32_t srcno)
uint32_t offset =
(psi->regs[PSIHB_REG(PSIHB9_IVT_OFFSET)] >> PSIHB9_IVT_OFF_SHIFT);
- uint64_t data = XIVE_TRIGGER_PQ | offset | srcno;
+ uint64_t data = offset | srcno;
MemTxResult result;
+ if (pq_checked) {
+ data |= XIVE_TRIGGER_PQ;
+ }
+
if (!valid) {
return;
}
@@ -704,6 +715,13 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
switch (addr) {
case PSIHB9_CR:
+ if (val & PSIHB10_CR_STORE_EOI) {
+ psi9->source.esb_flags |= XIVE_SRC_STORE_EOI;
+ } else {
+ psi9->source.esb_flags &= ~XIVE_SRC_STORE_EOI;
+ }
+ break;
+
case PSIHB9_SEMR:
/* FSP stuff */
break;
@@ -715,15 +733,20 @@ static void pnv_psi_p9_mmio_write(void *opaque, hwaddr addr,
break;
case PSIHB9_ESB_CI_BASE:
+ if (val & PSIHB10_ESB_CI_64K) {
+ psi9->source.esb_shift = XIVE_ESB_64K;
+ } else {
+ psi9->source.esb_shift = XIVE_ESB_4K;
+ }
if (!(val & PSIHB9_ESB_CI_VALID)) {
if (psi->regs[reg] & PSIHB9_ESB_CI_VALID) {
memory_region_del_subregion(sysmem, &psi9->source.esb_mmio);
}
} else {
if (!(psi->regs[reg] & PSIHB9_ESB_CI_VALID)) {
- memory_region_add_subregion(sysmem,
- val & ~PSIHB9_ESB_CI_VALID,
- &psi9->source.esb_mmio);
+ hwaddr addr = val & ~(PSIHB9_ESB_CI_VALID | PSIHB10_ESB_CI_64K);
+ memory_region_add_subregion(sysmem, addr,
+ &psi9->source.esb_mmio);
}
}
psi->regs[reg] = val;
@@ -831,6 +854,7 @@ static void pnv_psi_power9_instance_init(Object *obj)
Pnv9Psi *psi = PNV9_PSI(obj);
object_initialize_child(obj, "source", &psi->source, TYPE_XIVE_SOURCE);
+ object_property_add_alias(obj, "shift", OBJECT(&psi->source), "shift");
}
static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
@@ -839,8 +863,6 @@ static void pnv_psi_power9_realize(DeviceState *dev, Error **errp)
XiveSource *xsrc = &PNV9_PSI(psi)->source;
int i;
- /* This is the only device with 4k ESB pages */
- object_property_set_int(OBJECT(xsrc), "shift", XIVE_ESB_4K, &error_fatal);
object_property_set_int(OBJECT(xsrc), "nr-irqs", PSIHB9_NUM_IRQS,
&error_fatal);
object_property_set_link(OBJECT(xsrc), "xive", OBJECT(psi), &error_abort);
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index f0b75b22bb..4cc204f90d 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1018,9 +1018,9 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
if (reset) {
const char *boot_device = spapr->boot_device;
- char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
+ g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus);
size_t cb = 0;
- char *bootlist = get_boot_devices_list(&cb);
+ g_autofree char *bootlist = get_boot_devices_list(&cb);
if (machine->kernel_cmdline && machine->kernel_cmdline[0]) {
_FDT(fdt_setprop_string(fdt, chosen, "bootargs",
@@ -1087,9 +1087,6 @@ static void spapr_dt_chosen(SpaprMachineState *spapr, void *fdt, bool reset)
}
spapr_dt_ov5_platform_support(spapr, fdt, chosen);
-
- g_free(stdout_path);
- g_free(bootlist);
}
_FDT(spapr_dt_ovec(fdt, chosen, spapr->ov5_cas, "ibm,architecture-vec-5"));
@@ -2710,15 +2707,25 @@ static void spapr_machine_init(MachineState *machine)
MachineClass *mc = MACHINE_GET_CLASS(machine);
const char *bios_default = spapr->vof ? FW_FILE_NAME_VOF : FW_FILE_NAME;
const char *bios_name = machine->firmware ?: bios_default;
+ g_autofree char *filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
const char *kernel_filename = machine->kernel_filename;
const char *initrd_filename = machine->initrd_filename;
PCIHostState *phb;
int i;
MemoryRegion *sysmem = get_system_memory();
long load_limit, fw_size;
- char *filename;
Error *resize_hpt_err = NULL;
+ if (!filename) {
+ error_report("Could not find LPAR firmware '%s'", bios_name);
+ exit(1);
+ }
+ fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
+ if (fw_size <= 0) {
+ error_report("Could not load LPAR firmware '%s'", filename);
+ exit(1);
+ }
+
/*
* if Secure VM (PEF) support is configured, then initialize it
*/
@@ -2999,18 +3006,6 @@ static void spapr_machine_init(MachineState *machine)
}
}
- filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
- if (!filename) {
- error_report("Could not find LPAR firmware '%s'", bios_name);
- exit(1);
- }
- fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
- if (fw_size <= 0) {
- error_report("Could not load LPAR firmware '%s'", filename);
- exit(1);
- }
- g_free(filename);
-
/* FIXME: Should register things through the MachineState's qdev
* interface, this is a legacy from the sPAPREnvironment structure
* which predated MachineState but had a similar function */
diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c
index 6167431271..655ab856a0 100644
--- a/hw/ppc/spapr_caps.c
+++ b/hw/ppc/spapr_caps.c
@@ -95,12 +95,12 @@ static void spapr_cap_set_bool(Object *obj, Visitor *v, const char *name,
}
-static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
- void *opaque, Error **errp)
+static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
+ void *opaque, Error **errp)
{
SpaprCapabilityInfo *cap = opaque;
SpaprMachineState *spapr = SPAPR_MACHINE(obj);
- char *val = NULL;
+ g_autofree char *val = NULL;
uint8_t value = spapr_get_cap(spapr, cap->index);
if (value >= cap->possible->num) {
@@ -111,7 +111,6 @@ static void spapr_cap_get_string(Object *obj, Visitor *v, const char *name,
val = g_strdup(cap->possible->vals[value]);
visit_type_str(v, name, &val, errp);
- g_free(val);
}
static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
@@ -120,7 +119,7 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
SpaprCapabilityInfo *cap = opaque;
SpaprMachineState *spapr = SPAPR_MACHINE(obj);
uint8_t i;
- char *val;
+ g_autofree char *val = NULL;
if (!visit_type_str(v, name, &val, errp)) {
return;
@@ -128,20 +127,18 @@ static void spapr_cap_set_string(Object *obj, Visitor *v, const char *name,
if (!strcmp(val, "?")) {
error_setg(errp, "%s", cap->possible->help);
- goto out;
+ return;
}
for (i = 0; i < cap->possible->num; i++) {
if (!strcasecmp(val, cap->possible->vals[i])) {
spapr->cmd_line_caps[cap->index] = true;
spapr->eff.caps[cap->index] = i;
- goto out;
+ return;
}
}
error_setg(errp, "Invalid capability mode \"%s\" for cap-%s", val,
cap->name);
-out:
- g_free(val);
}
static void spapr_cap_get_pagesize(Object *obj, Visitor *v, const char *name,
@@ -933,16 +930,13 @@ void spapr_caps_add_properties(SpaprMachineClass *smc)
for (i = 0; i < ARRAY_SIZE(capability_table); i++) {
SpaprCapabilityInfo *cap = &capability_table[i];
- char *name = g_strdup_printf("cap-%s", cap->name);
- char *desc;
+ g_autofree char *name = g_strdup_printf("cap-%s", cap->name);
+ g_autofree char *desc = g_strdup_printf("%s", cap->description);
object_class_property_add(klass, name, cap->type,
cap->get, cap->set,
NULL, cap);
- desc = g_strdup_printf("%s", cap->description);
object_class_property_set_description(klass, name, desc);
- g_free(name);
- g_free(desc);
}
}
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
index f8ac0a10df..76bc5d42a0 100644
--- a/hw/ppc/spapr_drc.c
+++ b/hw/ppc/spapr_drc.c
@@ -519,8 +519,8 @@ static const VMStateDescription vmstate_spapr_drc = {
static void drc_realize(DeviceState *d, Error **errp)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
+ g_autofree gchar *link_name = g_strdup_printf("%x", spapr_drc_index(drc));
Object *root_container;
- gchar *link_name;
const char *child_name;
trace_spapr_drc_realize(spapr_drc_index(drc));
@@ -532,12 +532,10 @@ static void drc_realize(DeviceState *d, Error **errp)
* existing in the composition tree
*/
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
- link_name = g_strdup_printf("%x", spapr_drc_index(drc));
child_name = object_get_canonical_path_component(OBJECT(drc));
trace_spapr_drc_realize_child(spapr_drc_index(drc), child_name);
object_property_add_alias(root_container, link_name,
drc->owner, child_name);
- g_free(link_name);
vmstate_register(VMSTATE_IF(drc), spapr_drc_index(drc), &vmstate_spapr_drc,
drc);
trace_spapr_drc_realize_complete(spapr_drc_index(drc));
@@ -546,22 +544,20 @@ static void drc_realize(DeviceState *d, Error **errp)
static void drc_unrealize(DeviceState *d)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(d);
+ g_autofree gchar *name = g_strdup_printf("%x", spapr_drc_index(drc));
Object *root_container;
- gchar *name;
trace_spapr_drc_unrealize(spapr_drc_index(drc));
vmstate_unregister(VMSTATE_IF(drc), &vmstate_spapr_drc, drc);
root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
- name = g_strdup_printf("%x", spapr_drc_index(drc));
object_property_del(root_container, name);
- g_free(name);
}
SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
uint32_t id)
{
SpaprDrc *drc = SPAPR_DR_CONNECTOR(object_new(type));
- char *prop_name;
+ g_autofree char *prop_name = NULL;
drc->id = id;
drc->owner = owner;
@@ -570,7 +566,6 @@ SpaprDrc *spapr_dr_connector_new(Object *owner, const char *type,
object_property_add_child(owner, prop_name, OBJECT(drc));
object_unref(OBJECT(drc));
qdev_realize(DEVICE(drc), NULL, NULL);
- g_free(prop_name);
return drc;
}
@@ -803,11 +798,9 @@ static const TypeInfo spapr_drc_pmem_info = {
SpaprDrc *spapr_drc_by_index(uint32_t index)
{
Object *obj;
- gchar *name;
-
- name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH, index);
+ g_autofree gchar *name = g_strdup_printf("%s/%x", DRC_CONTAINER_PATH,
+ index);
obj = object_resolve_path(name, NULL);
- g_free(name);
return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
}
@@ -841,8 +834,14 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
ObjectProperty *prop;
ObjectPropertyIterator iter;
uint32_t drc_count = 0;
- GArray *drc_indexes, *drc_power_domains;
- GString *drc_names, *drc_types;
+ g_autoptr(GArray) drc_indexes = g_array_new(false, true,
+ sizeof(uint32_t));
+ g_autoptr(GArray) drc_power_domains = g_array_new(false, true,
+ sizeof(uint32_t));
+ g_autoptr(GString) drc_names = g_string_set_size(g_string_new(NULL),
+ sizeof(uint32_t));
+ g_autoptr(GString) drc_types = g_string_set_size(g_string_new(NULL),
+ sizeof(uint32_t));
int ret;
/*
@@ -857,12 +856,8 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
* reserve the space now and set the offsets accordingly so we
* can fill them in later.
*/
- drc_indexes = g_array_new(false, true, sizeof(uint32_t));
drc_indexes = g_array_set_size(drc_indexes, 1);
- drc_power_domains = g_array_new(false, true, sizeof(uint32_t));
drc_power_domains = g_array_set_size(drc_power_domains, 1);
- drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
- drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
/* aliases for all DRConnector objects will be rooted in QOM
* composition tree at DRC_CONTAINER_PATH
@@ -874,7 +869,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
Object *obj;
SpaprDrc *drc;
SpaprDrcClass *drck;
- char *drc_name = NULL;
+ g_autofree char *drc_name = NULL;
uint32_t drc_index, drc_power_domain;
if (!strstart(prop->type, "link<", NULL)) {
@@ -908,7 +903,6 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_name = spapr_drc_name(drc);
drc_names = g_string_append(drc_names, drc_name);
drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
- g_free(drc_name);
/* ibm,drc-types */
drc_types = g_string_append(drc_types, drck->typename);
@@ -928,7 +922,7 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_indexes->len * sizeof(uint32_t));
if (ret) {
error_report("Couldn't create ibm,drc-indexes property");
- goto out;
+ return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-power-domains",
@@ -936,29 +930,22 @@ int spapr_dt_drc(void *fdt, int offset, Object *owner, uint32_t drc_type_mask)
drc_power_domains->len * sizeof(uint32_t));
if (ret) {
error_report("Couldn't finalize ibm,drc-power-domains property");
- goto out;
+ return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-names",
drc_names->str, drc_names->len);
if (ret) {
error_report("Couldn't finalize ibm,drc-names property");
- goto out;
+ return ret;
}
ret = fdt_setprop(fdt, offset, "ibm,drc-types",
drc_types->str, drc_types->len);
if (ret) {
error_report("Couldn't finalize ibm,drc-types property");
- goto out;
}
-out:
- g_array_free(drc_indexes, true);
- g_array_free(drc_power_domains, true);
- g_string_free(drc_names, true);
- g_string_free(drc_types, true);
-
return ret;
}
diff --git a/hw/ppc/spapr_numa.c b/hw/ppc/spapr_numa.c
index e9ef7e7646..4f93bdefec 100644
--- a/hw/ppc/spapr_numa.c
+++ b/hw/ppc/spapr_numa.c
@@ -431,12 +431,14 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
int max_distance_ref_points = get_max_dist_ref_points(spapr);
int nb_numa_nodes = machine->numa_state->num_nodes;
int nr_nodes = nb_numa_nodes ? nb_numa_nodes : 1;
- uint32_t *int_buf, *cur_index, buf_len;
- int ret, i;
+ g_autofree uint32_t *int_buf = NULL;
+ uint32_t *cur_index;
+ int i;
/* ibm,associativity-lookup-arrays */
- buf_len = (nr_nodes * max_distance_ref_points + 2) * sizeof(uint32_t);
- cur_index = int_buf = g_malloc0(buf_len);
+ int_buf = g_malloc0((nr_nodes * max_distance_ref_points + 2) *
+ sizeof(uint32_t));
+ cur_index = int_buf;
int_buf[0] = cpu_to_be32(nr_nodes);
/* Number of entries per associativity list */
int_buf[1] = cpu_to_be32(max_distance_ref_points);
@@ -451,11 +453,9 @@ int spapr_numa_write_assoc_lookup_arrays(SpaprMachineState *spapr, void *fdt,
sizeof(uint32_t) * max_distance_ref_points);
cur_index += max_distance_ref_points;
}
- ret = fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays", int_buf,
- (cur_index - int_buf) * sizeof(uint32_t));
- g_free(int_buf);
- return ret;
+ return fdt_setprop(fdt, offset, "ibm,associativity-lookup-arrays",
+ int_buf, (cur_index - int_buf) * sizeof(uint32_t));
}
static void spapr_numa_FORM1_write_rtas_dt(SpaprMachineState *spapr,
diff --git a/hw/ppc/spapr_pci_nvlink2.c b/hw/ppc/spapr_pci_nvlink2.c
index 7fb0cf4d04..4678c79235 100644
--- a/hw/ppc/spapr_pci_nvlink2.c
+++ b/hw/ppc/spapr_pci_nvlink2.c
@@ -320,7 +320,7 @@ void spapr_phb_nvgpu_populate_dt(SpaprPhbState *sphb, void *fdt, int bus_off,
void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
{
int i, j, linkidx, npuoff;
- char *npuname;
+ g_autofree char *npuname = NULL;
if (!sphb->nvgpus) {
return;
@@ -333,11 +333,10 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
_FDT(fdt_setprop_cell(fdt, npuoff, "#size-cells", 0));
/* Advertise NPU as POWER9 so the guest can enable NPU2 contexts */
_FDT((fdt_setprop_string(fdt, npuoff, "compatible", "ibm,power9-npu")));
- g_free(npuname);
for (i = 0, linkidx = 0; i < sphb->nvgpus->num; ++i) {
for (j = 0; j < sphb->nvgpus->slots[i].linknum; ++j) {
- char *linkname = g_strdup_printf("link@%d", linkidx);
+ g_autofree char *linkname = g_strdup_printf("link@%d", linkidx);
int off = fdt_add_subnode(fdt, npuoff, linkname);
_FDT(off);
@@ -347,7 +346,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_NVLINK(sphb, i, j))));
_FDT((fdt_setprop_cell(fdt, off, "ibm,npu-link-index", linkidx)));
- g_free(linkname);
++linkidx;
}
}
@@ -360,7 +358,8 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
&error_abort);
uint64_t size = object_property_get_uint(nv_mrobj, "size", NULL);
uint64_t mem_reg[2] = { cpu_to_be64(nvslot->gpa), cpu_to_be64(size) };
- char *mem_name = g_strdup_printf("memory@%"PRIx64, nvslot->gpa);
+ g_autofree char *mem_name = g_strdup_printf("memory@%"PRIx64,
+ nvslot->gpa);
int off = fdt_add_subnode(fdt, 0, mem_name);
_FDT(off);
@@ -378,7 +377,6 @@ void spapr_phb_nvgpu_ram_populate_dt(SpaprPhbState *sphb, void *fdt)
sizeof(mem_reg))));
_FDT((fdt_setprop_cell(fdt, off, "phandle",
PHANDLE_GPURAM(sphb, i))));
- g_free(mem_name);
}
}
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index b476382ae6..d7c04237fe 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -279,30 +279,29 @@ static void rtas_ibm_get_system_parameter(PowerPCCPU *cpu,
switch (parameter) {
case RTAS_SYSPARM_SPLPAR_CHARACTERISTICS: {
- char *param_val = g_strdup_printf("MaxEntCap=%d,"
- "DesMem=%" PRIu64 ","
- "DesProcs=%d,"
- "MaxPlatProcs=%d",
- ms->smp.max_cpus,
- ms->ram_size / MiB,
- ms->smp.cpus,
- ms->smp.max_cpus);
+ g_autofree char *param_val = g_strdup_printf("MaxEntCap=%d,"
+ "DesMem=%" PRIu64 ","
+ "DesProcs=%d,"
+ "MaxPlatProcs=%d",
+ ms->smp.max_cpus,
+ ms->ram_size / MiB,
+ ms->smp.cpus,
+ ms->smp.max_cpus);
if (pcc->n_host_threads > 0) {
- char *hostthr_val, *old = param_val;
-
/*
* Add HostThrs property. This property is not present in PAPR but
* is expected by some guests to communicate the number of physical
* host threads per core on the system so that they can scale
* information which varies based on the thread configuration.
*/
- hostthr_val = g_strdup_printf(",HostThrs=%d", pcc->n_host_threads);
+ g_autofree char *hostthr_val = g_strdup_printf(",HostThrs=%d",
+ pcc->n_host_threads);
+ char *old = param_val;
+
param_val = g_strconcat(param_val, hostthr_val, NULL);
- g_free(hostthr_val);
g_free(old);
}
ret = sysparm_st(buffer, length, param_val, strlen(param_val) + 1);
- g_free(param_val);
break;
}
case RTAS_SYSPARM_DIAGNOSTICS_RUN_MODE: {
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index b975ed29ca..9d4fec2c04 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -726,7 +726,7 @@ void spapr_dt_vdevice(SpaprVioBus *bus, void *fdt)
gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
{
SpaprVioDevice *dev;
- char *name, *path;
+ g_autofree char *name = NULL;
dev = spapr_vty_get_default(bus);
if (!dev) {
@@ -734,8 +734,6 @@ gchar *spapr_vio_stdout_path(SpaprVioBus *bus)
}
name = spapr_vio_get_dev_name(DEVICE(dev));
- path = g_strdup_printf("/vdevice/%s", name);
- g_free(name);
- return path;
+ return g_strdup_printf("/vdevice/%s", name);
}