author     Cédric Le Goater <clg@kaod.org>              2018-12-06 00:22:19 +0100
committer  David Gibson <david@gibson.dropbear.id.au>   2018-12-21 09:26:42 +1100
commit     e4ddaac67f1fbdeea207fe28d71dca744832377b (patch)
tree       6cf80af4501e214c30343360546b5265244ac9df /include/hw/ppc/xive_regs.h
parent     7ff7ea928039e418dfa584c91f3f78512284a79a (diff)
ppc/xive: introduce the XIVE Event Notification Descriptors
To complete the event routing, the IVRE sub-engine uses a second table containing Event Notification Descriptor (END) structures. An END specifies on which Event Queue (EQ) the event notification data, defined in the associated EAS, should be posted when an exception occurs. It also defines which Notification Virtual Target (NVT) should be notified.

The Event Queue is a memory page provided by the O/S defining a circular buffer, one per (server, priority) couple, containing Event Queue entries. Each entry is 4 bytes long: the first bit is a 'generation' bit and the following 31 bits hold the END Data field. Entries are pulled by the O/S when the exception occurs.

The END Data field is a way to set an invariant logical event source number for an IRQ. On sPAPR machines, it is set with the H_INT_SET_SOURCE_CONFIG hcall when the EISN flag is used.

Signed-off-by: Cédric Le Goater <clg@kaod.org>
[dwg: Fold in a later fix from Cédric fixing field accessors]
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
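As an illustration of the EQ entry layout described above, here is a minimal, hypothetical C sketch (not part of this patch) of how a guest O/S could decode a 4-byte Event Queue entry: the most significant bit of the big-endian word is the 'generation' bit and the remaining 31 bits carry the END Data. The helper names and the use of ntohl()/htonl() for byte swapping are assumptions made for the example.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <arpa/inet.h>                    /* ntohl()/htonl() for BE conversion */

#define EQ_ENTRY_GEN   0x80000000u        /* 'generation' bit (MSB)            */
#define EQ_ENTRY_DATA  0x7fffffffu        /* 31-bit END Data field             */

/* Decode one EQ entry; returns false when the generation bit does not
 * match the O/S view of the current queue generation (queue empty). */
static bool eq_entry_decode(uint32_t be_entry, bool expected_gen, uint32_t *data)
{
    uint32_t entry = ntohl(be_entry);     /* EQ entries are stored big-endian  */
    bool gen = !!(entry & EQ_ENTRY_GEN);

    if (gen != expected_gen) {
        return false;
    }
    *data = entry & EQ_ENTRY_DATA;        /* invariant logical source number   */
    return true;
}

int main(void)
{
    /* An entry as it would sit in the EQ page: generation bit set, data 0x42 */
    uint32_t be_entry = htonl(EQ_ENTRY_GEN | 0x42);
    uint32_t data;

    if (eq_entry_decode(be_entry, true, &data)) {
        printf("END Data: 0x%x\n", data);
    }
    return 0;
}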
Diffstat (limited to 'include/hw/ppc/xive_regs.h')
-rw-r--r--  include/hw/ppc/xive_regs.h  67
1 file changed, 67 insertions, 0 deletions
diff --git a/include/hw/ppc/xive_regs.h b/include/hw/ppc/xive_regs.h
index e1193fb3e4..b1d55ecf94 100644
--- a/include/hw/ppc/xive_regs.h
+++ b/include/hw/ppc/xive_regs.h
@@ -59,4 +59,71 @@ static inline uint64_t xive_set_field64(uint64_t mask, uint64_t word,
return cpu_to_be64(tmp);
}
+static inline uint32_t xive_get_field32(uint32_t mask, uint32_t word)
+{
+ return (be32_to_cpu(word) & mask) >> ctz32(mask);
+}
+
+static inline uint32_t xive_set_field32(uint32_t mask, uint32_t word,
+ uint32_t value)
+{
+ uint32_t tmp =
+ (be32_to_cpu(word) & ~mask) | ((value << ctz32(mask)) & mask);
+ return cpu_to_be32(tmp);
+}
+
+/* Event Notification Descriptor (END) */
+typedef struct XiveEND {
+ uint32_t w0;
+#define END_W0_VALID PPC_BIT32(0) /* "v" bit */
+#define END_W0_ENQUEUE PPC_BIT32(1) /* "q" bit */
+#define END_W0_UCOND_NOTIFY PPC_BIT32(2) /* "n" bit */
+#define END_W0_BACKLOG PPC_BIT32(3) /* "b" bit */
+#define END_W0_PRECL_ESC_CTL PPC_BIT32(4) /* "p" bit */
+#define END_W0_ESCALATE_CTL PPC_BIT32(5) /* "e" bit */
+#define END_W0_UNCOND_ESCALATE PPC_BIT32(6) /* "u" bit - DD2.0 */
+#define END_W0_SILENT_ESCALATE PPC_BIT32(7) /* "s" bit - DD2.0 */
+#define END_W0_QSIZE PPC_BITMASK32(12, 15)
+#define END_W0_SW0 PPC_BIT32(16)
+#define END_W0_FIRMWARE END_W0_SW0 /* Owned by FW */
+#define END_QSIZE_4K 0
+#define END_QSIZE_64K 4
+#define END_W0_HWDEP PPC_BITMASK32(24, 31)
+ uint32_t w1;
+#define END_W1_ESn PPC_BITMASK32(0, 1)
+#define END_W1_ESn_P PPC_BIT32(0)
+#define END_W1_ESn_Q PPC_BIT32(1)
+#define END_W1_ESe PPC_BITMASK32(2, 3)
+#define END_W1_ESe_P PPC_BIT32(2)
+#define END_W1_ESe_Q PPC_BIT32(3)
+#define END_W1_GENERATION PPC_BIT32(9)
+#define END_W1_PAGE_OFF PPC_BITMASK32(10, 31)
+ uint32_t w2;
+#define END_W2_MIGRATION_REG PPC_BITMASK32(0, 3)
+#define END_W2_OP_DESC_HI PPC_BITMASK32(4, 31)
+ uint32_t w3;
+#define END_W3_OP_DESC_LO PPC_BITMASK32(0, 31)
+ uint32_t w4;
+#define END_W4_ESC_END_BLOCK PPC_BITMASK32(4, 7)
+#define END_W4_ESC_END_INDEX PPC_BITMASK32(8, 31)
+ uint32_t w5;
+#define END_W5_ESC_END_DATA PPC_BITMASK32(1, 31)
+ uint32_t w6;
+#define END_W6_FORMAT_BIT PPC_BIT32(8)
+#define END_W6_NVT_BLOCK PPC_BITMASK32(9, 12)
+#define END_W6_NVT_INDEX PPC_BITMASK32(13, 31)
+ uint32_t w7;
+#define END_W7_F0_IGNORE PPC_BIT32(0)
+#define END_W7_F0_BLK_GROUPING PPC_BIT32(1)
+#define END_W7_F0_PRIORITY PPC_BITMASK32(8, 15)
+#define END_W7_F1_WAKEZ PPC_BIT32(0)
+#define END_W7_F1_LOG_SERVER_ID PPC_BITMASK32(1, 31)
+} XiveEND;
+
+#define xive_end_is_valid(end) (be32_to_cpu((end)->w0) & END_W0_VALID)
+#define xive_end_is_enqueue(end) (be32_to_cpu((end)->w0) & END_W0_ENQUEUE)
+#define xive_end_is_notify(end) (be32_to_cpu((end)->w0) & END_W0_UCOND_NOTIFY)
+#define xive_end_is_backlog(end) (be32_to_cpu((end)->w0) & END_W0_BACKLOG)
+#define xive_end_is_escalate(end) (be32_to_cpu((end)->w0) & END_W0_ESCALATE_CTL)
+
#endif /* PPC_XIVE_REGS_H */
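
For reference, a hypothetical usage sketch (not part of this commit) of the accessors added above: it builds a valid END with a 64K Event Queue and reads the queue size back. It assumes the usual QEMU definitions of cpu_to_be32(), PPC_BIT32()/PPC_BITMASK32() and ctz32() are in scope.

static void xive_end_example(XiveEND *end)
{
    /* Mark the END valid and enqueuing (w0 is stored big-endian) */
    end->w0 = cpu_to_be32(END_W0_VALID | END_W0_ENQUEUE);

    /* Select a 64K Event Queue page */
    end->w0 = xive_set_field32(END_W0_QSIZE, end->w0, END_QSIZE_64K);

    if (xive_end_is_valid(end) && xive_end_is_enqueue(end)) {
        uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0);

        /* EQ page size is 4K << qsize, i.e. 64K here */
        (void)qsize;
    }
}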