author     Cédric Le Goater <clg@kaod.org>             2018-12-06 00:22:19 +0100
committer  David Gibson <david@gibson.dropbear.id.au>  2018-12-21 09:26:42 +1100
commit     e4ddaac67f1fbdeea207fe28d71dca744832377b (patch)
tree       6cf80af4501e214c30343360546b5265244ac9df /include
parent     7ff7ea928039e418dfa584c91f3f78512284a79a (diff)
ppc/xive: introduce the XIVE Event Notification Descriptors
To complete the event routing, the IVRE sub-engine uses a second table containing Event Notification Descriptor (END) structures.

An END specifies on which Event Queue (EQ) the event notification data, defined in the associated EAS, should be posted when an exception occurs. It also defines which Notification Virtual Target (NVT) should be notified.

The Event Queue is a memory page provided by the O/S defining a circular buffer, one per (server, priority) couple, containing Event Queue entries. These are 4 bytes long; the first bit is a 'generation' bit and the 31 following bits are the END Data field. They are pulled by the O/S when the exception occurs.

The END Data field is a way to set an invariant logical event source number for an IRQ. On sPAPR machines, it is set with the H_INT_SET_SOURCE_CONFIG hcall when the EISN flag is used.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
[dwg: Fold in a later fix from Cédric fixing field accessors]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
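As a rough, hypothetical illustration of the Event Queue entry layout described above (not part of this patch), an O/S-side consumer could decode one 4-byte entry into its 'generation' bit and 31-bit END Data field. The mask names, the big-endian-to-host conversion step, and the generation-matching convention below are assumptions made for this sketch.

    #include <stdbool.h>
    #include <stdint.h>

    /* Assumed layout, following the description above: bit 0 is the
     * 'generation' bit, bits 1..31 are the END Data field. */
    #define EQ_ENTRY_GEN_BIT    0x80000000u
    #define EQ_ENTRY_DATA_MASK  0x7fffffffu

    /*
     * 'entry' is one 4-byte Event Queue entry already converted from its
     * big-endian in-memory form to host order (e.g. with be32_to_cpu()).
     * 'queue_gen' is the generation the O/S currently expects; an entry
     * whose generation bit differs is treated as not yet written.
     */
    static bool eq_entry_decode(uint32_t entry, bool queue_gen,
                                uint32_t *end_data)
    {
        if (!!(entry & EQ_ENTRY_GEN_BIT) != queue_gen) {
            return false;
        }
        *end_data = entry & EQ_ENTRY_DATA_MASK;
        return true;
    }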
Diffstat (limited to 'include')
-rw-r--r--  include/hw/ppc/xive.h       | 18
-rw-r--r--  include/hw/ppc/xive_regs.h  | 67
2 files changed, 85 insertions, 0 deletions
diff --git a/include/hw/ppc/xive.h b/include/hw/ppc/xive.h
index 527aa73366..4851d3b3a4 100644
--- a/include/hw/ppc/xive.h
+++ b/include/hw/ppc/xive.h
@@ -321,11 +321,29 @@ typedef struct XiveRouterClass {
     /* XIVE table accessors */
     int (*get_eas)(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                    XiveEAS *eas);
+    int (*get_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+                   XiveEND *end);
+    int (*write_end)(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+                     XiveEND *end, uint8_t word_number);
 } XiveRouterClass;
 
 void xive_eas_pic_print_info(XiveEAS *eas, uint32_t lisn, Monitor *mon);
 
 int xive_router_get_eas(XiveRouter *xrtr, uint8_t eas_blk, uint32_t eas_idx,
                         XiveEAS *eas);
+int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+                        XiveEND *end);
+int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
+                          XiveEND *end, uint8_t word_number);
+
+/*
+ * For legacy compatibility, the exceptions define up to 256 different
+ * priorities. P9 implements only 9 levels : 8 active levels [0 - 7]
+ * and the least favored level 0xFF.
+ */
+#define XIVE_PRIORITY_MAX 7
+
+void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon);
+void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon);
 
 #endif /* PPC_XIVE_H */
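The matching hw/intc/xive.c changes fall outside this 'include'-limited diffstat, but the new xive_router_get_end()/xive_router_write_end() wrappers presumably just dispatch to the XiveRouterClass hooks declared above, along the lines of this sketch (it assumes the usual QOM XIVE_ROUTER_GET_CLASS() accessor; the actual implementation is not shown on this page):

    /* Sketch only: dispatch the table accessors to the router class hooks. */
    int xive_router_get_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                            XiveEND *end)
    {
        XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

        return xrc->get_end(xrtr, end_blk, end_idx, end);
    }

    int xive_router_write_end(XiveRouter *xrtr, uint8_t end_blk, uint32_t end_idx,
                              XiveEND *end, uint8_t word_number)
    {
        XiveRouterClass *xrc = XIVE_ROUTER_GET_CLASS(xrtr);

        return xrc->write_end(xrtr, end_blk, end_idx, end, word_number);
    }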
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index e1193fb3e4..b1d55ecf94 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -59,4 +59,71 @@ static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word,
     return cpu_to_be64(tmp);
 }
 
+static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word)
+{
+    return (be32_to_cpu(word) & mask) >> ctz32(mask);
+}
+
+static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word,
+                                        uint32_t value)
+{
+    uint32_t tmp =
+        (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask);
+    return cpu_to_be32(tmp);
+}
+
+/* Event Notification Descriptor (END) */
+typedef struct XiveEND {
+ uint32_t w0;
+#define END_W0_VALID PPC_BIT32(0) /* "v" bit */
+#define END_W0_ENQUEUE PPC_BIT32(1) /* "q" bit */
+#define END_W0_UCOND_NOTIFY PPC_BIT32(2) /* "n" bit */
+#define END_W0_BACKLOG PPC_BIT32(3) /* "b" bit */
+#define END_W0_PRECL_ESC_CTL PPC_BIT32(4) /* "p" bit */
+#define END_W0_ESCALATE_CTL PPC_BIT32(5) /* "e" bit */
+#define END_W0_UNCOND_ESCALATE PPC_BIT32(6) /* "u" bit - DD2.0 */
+#define END_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit - DD2.0 */
+#define END_W0_QSIZE PPC_BITMASK32(12, 15)
+#define END_W0_SW0 PPC_BIT32(16)
+#define END_W0_FIRMWARE END_W0_SW0 /* Owned by FW */
+#define END_QSIZE_4K 0
+#define END_QSIZE_64K 4
+#define END_W0_HWDEP PPC_BITMASK32(24, 31)
+ uint32_t w1;
+#define END_W1_ESn PPC_BITMASK32(0, 1)
+#define END_W1_ESn_P PPC_BIT32(0)
+#define END_W1_ESn_Q PPC_BIT32(1)
+#define END_W1_ESe PPC_BITMASK32(2, 3)
+#define END_W1_ESe_P PPC_BIT32(2)
+#define END_W1_ESe_Q PPC_BIT32(3)
+#define END_W1_GENERATION PPC_BIT32(9)
+#define END_W1_PAGE_OFF PPC_BITMASK32(10, 31)
+ uint32_t w2;
+#define END_W2_MIGRATION_REG PPC_BITMASK32(0, 3)
+#define END_W2_OP_DESC_HI PPC_BITMASK32(4, 31)
+ uint32_t w3;
+#define END_W3_OP_DESC_LO PPC_BITMASK32(0, 31)
+ uint32_t w4;
+#define END_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7)
+#define END_W4_ESC_END_INDEX PPC_BITMASK32(8, 31)
+ uint32_t w5;
+#define END_W5_ESC_END_DATA PPC_BITMASK32(1, 31)
+ uint32_t w6;
+#define END_W6_FORMAT_BIT PPC_BIT32(8)
+#define END_W6_NVT_BLOCK PPC_BITMASK32(9, 12)
+#define END_W6_NVT_INDEX PPC_BITMASK32(13, 31)
+ uint32_t w7;
+#define END_W7_F0_IGNORE PPC_BIT32(0)
+#define END_W7_F0_BLK_GROUPING PPC_BIT32(1)
+#define END_W7_F0_PRIORITY PPC_BITMASK32(8, 15)
+#define END_W7_F1_WAKEZ PPC_BIT32(0)
+#define END_W7_F1_LOG_SERVER_ID PPC_BITMASK32(1, 31)
+} XiveEND;
+
+#define xive_end_is_valid(end) (be32_to_cpu((end)->w0) & END_W0_VALID)
+#define xive_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END_W0_ENQUEUE)
+#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY)
+#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
+#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
+
 #endif /* PPC_XIVE_REGS_H */
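As a closing illustration (a sketch, not code from this patch), a router-side helper could combine the xive_end_is_*() accessors with xive_get_field32() and the word 6/7 masks above to pull the notification target and priority out of an END. The helper name and its skip-on-invalid policy are assumptions for this example.

    /*
     * Sketch only: extract the NVT identifier and priority from an END
     * using the accessors defined above.  Assumes "hw/ppc/xive_regs.h"
     * and "hw/ppc/xive.h" are included.
     */
    static bool xive_end_pick_target(XiveEND *end, uint8_t *nvt_blk,
                                     uint32_t *nvt_idx, uint8_t *priority)
    {
        if (!xive_end_is_valid(end)) {
            return false;           /* nothing is routed through this END */
        }

        /* Word 6 identifies the Notification Virtual Target ... */
        *nvt_blk = xive_get_field32(END_W6_NVT_BLOCK, end->w6);
        *nvt_idx = xive_get_field32(END_W6_NVT_INDEX, end->w6);

        /* ... and word 7 (format 0) carries the priority, within
         * 0 .. XIVE_PRIORITY_MAX for the active levels. */
        *priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7);

        return true;
    }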