Diffstat (limited to 'hw/ppc')
-rw-r--r--  hw/ppc/Makefile.objs     2
-rw-r--r--  hw/ppc/spapr.c          49
-rw-r--r--  hw/ppc/spapr_drc.c     744
-rw-r--r--  hw/ppc/spapr_events.c  338
-rw-r--r--  hw/ppc/spapr_iommu.c    46
-rw-r--r--  hw/ppc/spapr_pci.c     513
-rw-r--r--  hw/ppc/spapr_rtas.c    361
-rw-r--r--  hw/ppc/spapr_vio.c       2
8 files changed, 1899 insertions, 156 deletions
diff --git a/hw/ppc/Makefile.objs b/hw/ppc/Makefile.objs
index 437955d1d5..c8ab06e7f3 100644
--- a/hw/ppc/Makefile.objs
+++ b/hw/ppc/Makefile.objs
@@ -3,7 +3,7 @@ obj-y += ppc.o ppc_booke.o
# IBM pSeries (sPAPR)
obj-$(CONFIG_PSERIES) += spapr.o spapr_vio.o spapr_events.o
obj-$(CONFIG_PSERIES) += spapr_hcall.o spapr_iommu.o spapr_rtas.o
-obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o
+obj-$(CONFIG_PSERIES) += spapr_pci.o spapr_rtc.o spapr_drc.o
ifeq ($(CONFIG_PCI)$(CONFIG_PSERIES)$(CONFIG_LINUX), yyy)
obj-y += spapr_pci_vfio.o
endif
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index a15fa3c965..f174e5a0f3 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -533,6 +533,8 @@ static void *spapr_create_fdt_skel(hwaddr initrd_base,
refpoints, sizeof(refpoints))));
_FDT((fdt_property_cell(fdt, "rtas-error-log-max", RTAS_ERROR_LOG_MAX)));
+ _FDT((fdt_property_cell(fdt, "rtas-event-scan-rate",
+ RTAS_EVENT_SCAN_RATE)));
/*
* According to PAPR, rtas ibm,os-term does not guarantee a return
@@ -794,8 +796,8 @@ static void spapr_finalize_fdt(sPAPREnvironment *spapr,
_FDT((fdt_pack(fdt)));
if (fdt_totalsize(fdt) > FDT_MAX_SIZE) {
- hw_error("FDT too big ! 0x%x bytes (max is 0x%x)\n",
- fdt_totalsize(fdt), FDT_MAX_SIZE);
+ error_report("FDT too big ! 0x%x bytes (max is 0x%x)",
+ fdt_totalsize(fdt), FDT_MAX_SIZE);
exit(1);
}
@@ -899,7 +901,7 @@ static int spapr_check_htab_fd(sPAPREnvironment *spapr)
spapr->htab_fd = kvmppc_get_htab_fd(false);
if (spapr->htab_fd < 0) {
error_report("Unable to open fd for reading hash table from KVM: "
- "%s", strerror(errno));
+ "%s", strerror(errno));
rc = -1;
}
spapr->htab_fd_stale = false;
@@ -1419,7 +1421,7 @@ static void ppc_spapr_init(MachineState *machine)
rma_alloc_size = kvmppc_alloc_rma(&rma);
if (rma_alloc_size == -1) {
- hw_error("qemu: Unable to create RMA\n");
+ error_report("Unable to create RMA");
exit(1);
}
@@ -1504,6 +1506,11 @@ static void ppc_spapr_init(MachineState *machine)
qemu_register_reset(spapr_cpu_reset, cpu);
}
+ if (kvm_enabled()) {
+ /* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
+ kvmppc_enable_logical_ci_hcalls();
+ }
+
/* allocate RAM */
spapr->ram_limit = ram_size;
memory_region_allocate_system_memory(ram, NULL, "ppc_spapr.ram",
@@ -1520,18 +1527,18 @@ static void ppc_spapr_init(MachineState *machine)
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "spapr-rtas.bin");
if (!filename) {
- hw_error("Could not find LPAR rtas '%s'\n", "spapr-rtas.bin");
+ error_report("Could not find LPAR rtas '%s'", "spapr-rtas.bin");
exit(1);
}
spapr->rtas_size = get_image_size(filename);
spapr->rtas_blob = g_malloc(spapr->rtas_size);
if (load_image_size(filename, spapr->rtas_blob, spapr->rtas_size) < 0) {
- hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
+ error_report("Could not load LPAR rtas '%s'", filename);
exit(1);
}
if (spapr->rtas_size > RTAS_MAX_SIZE) {
- hw_error("RTAS too big ! 0x%zx bytes (max is 0x%x)\n",
- (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
+ error_report("RTAS too big ! 0x%zx bytes (max is 0x%x)",
+ (size_t)spapr->rtas_size, RTAS_MAX_SIZE);
exit(1);
}
g_free(filename);
@@ -1641,12 +1648,12 @@ static void ppc_spapr_init(MachineState *machine)
}
filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
if (!filename) {
- hw_error("Could not find LPAR rtas '%s'\n", bios_name);
+ error_report("Could not find LPAR firmware '%s'", bios_name);
exit(1);
}
fw_size = load_image_targphys(filename, 0, FW_MAX_SIZE);
- if (fw_size < 0) {
- hw_error("qemu: could not load LPAR rtas '%s'\n", filename);
+ if (fw_size <= 0) {
+ error_report("Could not load LPAR firmware '%s'", filename);
exit(1);
}
g_free(filename);
@@ -1660,9 +1667,14 @@ static void ppc_spapr_init(MachineState *machine)
/* Prepare the device tree */
spapr->fdt_skel = spapr_create_fdt_skel(initrd_base, initrd_size,
kernel_size, kernel_le,
- kernel_cmdline, spapr->epow_irq);
+ kernel_cmdline,
+ spapr->check_exception_irq);
assert(spapr->fdt_skel != NULL);
+ /* used by RTAS */
+ QTAILQ_INIT(&spapr->ccs_list);
+ qemu_register_reset(spapr_ccs_reset_hook, spapr);
+
qemu_register_boot_set(spapr_boot_set, spapr);
}
@@ -1794,6 +1806,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
mc->max_cpus = MAX_CPUS;
mc->no_parallel = 1;
mc->default_boot_order = "";
+ mc->default_ram_size = 512 * M_BYTE;
mc->kvm_type = spapr_kvm_type;
mc->has_dynamic_sysbus = true;
@@ -1816,7 +1829,12 @@ static const TypeInfo spapr_machine_info = {
};
#define SPAPR_COMPAT_2_3 \
- HW_COMPAT_2_3
+ HW_COMPAT_2_3 \
+ {\
+ .driver = "spapr-pci-host-bridge",\
+ .property = "dynamic-reconfiguration",\
+ .value = "off",\
+ },
#define SPAPR_COMPAT_2_2 \
SPAPR_COMPAT_2_3 \
@@ -1905,10 +1923,15 @@ static const TypeInfo spapr_machine_2_2_info = {
static void spapr_machine_2_3_class_init(ObjectClass *oc, void *data)
{
+ static GlobalProperty compat_props[] = {
+ SPAPR_COMPAT_2_3
+ { /* end of list */ }
+ };
MachineClass *mc = MACHINE_CLASS(oc);
mc->name = "pseries-2.3";
mc->desc = "pSeries Logical Partition (PAPR compliant) v2.3";
+ mc->compat_props = compat_props;
}
static const TypeInfo spapr_machine_2_3_info = {
diff --git a/hw/ppc/spapr_drc.c b/hw/ppc/spapr_drc.c
new file mode 100644
index 0000000000..ef985381cb
--- /dev/null
+++ b/hw/ppc/spapr_drc.c
@@ -0,0 +1,744 @@
+/*
+ * QEMU SPAPR Dynamic Reconfiguration Connector Implementation
+ *
+ * Copyright IBM Corp. 2014
+ *
+ * Authors:
+ * Michael Roth <mdroth@linux.vnet.ibm.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "hw/ppc/spapr_drc.h"
+#include "qom/object.h"
+#include "hw/qdev.h"
+#include "qapi/visitor.h"
+#include "qemu/error-report.h"
+
+/* #define DEBUG_SPAPR_DRC */
+
+#ifdef DEBUG_SPAPR_DRC
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { DPRINTF(fmt, ## __VA_ARGS__); fprintf(stderr, "\n"); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#define DPRINTFN(fmt, ...) \
+ do { } while (0)
+#endif
+
+#define DRC_CONTAINER_PATH "/dr-connector"
+#define DRC_INDEX_TYPE_SHIFT 28
+#define DRC_INDEX_ID_MASK (~(~0 << DRC_INDEX_TYPE_SHIFT))
+
+static sPAPRDRConnectorTypeShift get_type_shift(sPAPRDRConnectorType type)
+{
+ uint32_t shift = 0;
+
+ /* make sure this isn't SPAPR_DR_CONNECTOR_TYPE_ANY, or some
+ * other wonky value.
+ */
+ g_assert(is_power_of_2(type));
+
+ while (type != (1 << shift)) {
+ shift++;
+ }
+ return shift;
+}
+
+static uint32_t get_index(sPAPRDRConnector *drc)
+{
+ /* no set format for a drc index: it only needs to be globally
+ * unique. this is how we encode the DRC type on bare-metal
+ * however, so might as well do that here
+ */
+ return (get_type_shift(drc->type) << DRC_INDEX_TYPE_SHIFT) |
+ (drc->id & DRC_INDEX_ID_MASK);
+}
+
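To make the index encoding above concrete, here is a minimal standalone sketch (not part of the patch itself; the PCI type shift of 4 is assumed to match SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI from spapr_drc.h, which this hunk does not define):

#include <stdio.h>
#include <stdint.h>

#define DRC_INDEX_TYPE_SHIFT 28
#define DRC_INDEX_ID_MASK    (~(~0U << DRC_INDEX_TYPE_SHIFT))

int main(void)
{
    uint32_t pci_type_shift = 4;  /* assumed SPAPR_DR_CONNECTOR_TYPE_SHIFT_PCI */
    uint32_t id = 0x021a;         /* arbitrary connector id */
    uint32_t index = (pci_type_shift << DRC_INDEX_TYPE_SHIFT) |
                     (id & DRC_INDEX_ID_MASK);

    printf("drc index: 0x%08x\n", index);  /* prints 0x4000021a */
    return 0;
}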
+static int set_isolation_state(sPAPRDRConnector *drc,
+ sPAPRDRIsolationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_isolation_state: %x", get_index(drc), state);
+
+ drc->isolation_state = state;
+
+ if (drc->isolation_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ /* if we're awaiting release, but still in an unconfigured state,
+ * it's likely the guest is still in the process of configuring
+ * the device and is transitioning the devices to an ISOLATED
+ * state as a part of that process. so we only complete the
+ * removal when this transition happens for a device in a
+ * configured state, as suggested by the state diagram from
+ * PAPR+ 2.7, 13.4
+ */
+ if (drc->awaiting_release) {
+ if (drc->configured) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ } else {
+ DPRINTFN("deferring device removal on unconfigured device\n");
+ }
+ }
+ drc->configured = false;
+ }
+
+ return 0;
+}
+
+static int set_indicator_state(sPAPRDRConnector *drc,
+ sPAPRDRIndicatorState state)
+{
+ DPRINTFN("drc: %x, set_indicator_state: %x", get_index(drc), state);
+ drc->indicator_state = state;
+ return 0;
+}
+
+static int set_allocation_state(sPAPRDRConnector *drc,
+ sPAPRDRAllocationState state)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc: %x, set_allocation_state: %x", get_index(drc), state);
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = state;
+ if (drc->awaiting_release &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("finalizing device removal");
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+ }
+ return 0;
+}
+
+static uint32_t get_type(sPAPRDRConnector *drc)
+{
+ return drc->type;
+}
+
+static const char *get_name(sPAPRDRConnector *drc)
+{
+ return drc->name;
+}
+
+static const void *get_fdt(sPAPRDRConnector *drc, int *fdt_start_offset)
+{
+ if (fdt_start_offset) {
+ *fdt_start_offset = drc->fdt_start_offset;
+ }
+ return drc->fdt;
+}
+
+static void set_configured(sPAPRDRConnector *drc)
+{
+ DPRINTFN("drc: %x, set_configured", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_UNISOLATED) {
+ /* guest should not be configuring an isolated device */
+ DPRINTFN("drc: %x, set_configured: skipping isolated device",
+ get_index(drc));
+ return;
+ }
+ drc->configured = true;
+}
+
+/*
+ * dr-entity-sense sensor value
+ * returned via get-sensor-state RTAS calls
+ * as expected by state diagram in PAPR+ 2.7, 13.4
+ * based on the current allocation/indicator/power states
+ * for the DR connector.
+ */
+static sPAPRDREntitySense entity_sense(sPAPRDRConnector *drc)
+{
+ sPAPRDREntitySense state;
+
+ if (drc->dev) {
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ /* for logical DR, we return a state of UNUSABLE
+ * iff the allocation state is UNUSABLE.
+ * Otherwise, report the state as USABLE/PRESENT,
+ * as we would for PCI.
+ */
+ state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ } else {
+ /* this assumes all PCI devices are assigned to
+ * a 'live insertion' power domain, where QEMU
+ * manages power state automatically as opposed
+ * to the guest. present, non-PCI resources are
+ * unaffected by power state.
+ */
+ state = SPAPR_DR_ENTITY_SENSE_PRESENT;
+ }
+ } else {
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ /* PCI devices, and only PCI devices, use EMPTY
+ * in cases where we'd otherwise use UNUSABLE
+ */
+ state = SPAPR_DR_ENTITY_SENSE_EMPTY;
+ } else {
+ state = SPAPR_DR_ENTITY_SENSE_UNUSABLE;
+ }
+ }
+
+ DPRINTFN("drc: %x, entity_sense: %x", get_index(drc), state);
+ return state;
+}
+
+static void prop_get_index(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_index(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_type(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->get_type(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static char *prop_get_name(Object *obj, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ return g_strdup(drck->get_name(drc));
+}
+
+static void prop_get_entity_sense(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ uint32_t value = (uint32_t)drck->entity_sense(drc);
+ visit_type_uint32(v, &value, name, errp);
+}
+
+static void prop_get_fdt(Object *obj, Visitor *v, void *opaque,
+ const char *name, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+ int fdt_offset_next, fdt_offset, fdt_depth;
+ void *fdt;
+
+ if (!drc->fdt) {
+ return;
+ }
+
+ fdt = drc->fdt;
+ fdt_offset = drc->fdt_start_offset;
+ fdt_depth = 0;
+
+ do {
+ const char *name = NULL;
+ const struct fdt_property *prop = NULL;
+ int prop_len = 0, name_len = 0;
+ uint32_t tag;
+
+ tag = fdt_next_tag(fdt, fdt_offset, &fdt_offset_next);
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ fdt_depth++;
+ name = fdt_get_name(fdt, fdt_offset, &name_len);
+ visit_start_struct(v, NULL, NULL, name, 0, NULL);
+ break;
+ case FDT_END_NODE:
+ /* shouldn't ever see an FDT_END_NODE before FDT_BEGIN_NODE */
+ g_assert(fdt_depth > 0);
+ visit_end_struct(v, NULL);
+ fdt_depth--;
+ break;
+ case FDT_PROP: {
+ int i;
+ prop = fdt_get_property_by_offset(fdt, fdt_offset, &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+ visit_start_list(v, name, NULL);
+ for (i = 0; i < prop_len; i++) {
+ visit_type_uint8(v, (uint8_t *)&prop->data[i], NULL, NULL);
+
+ }
+ visit_end_list(v, NULL);
+ break;
+ }
+ default:
+ error_setg(&error_abort, "device FDT in unexpected state: %d", tag);
+ }
+ fdt_offset = fdt_offset_next;
+ } while (fdt_depth != 0);
+}
+
+static void attach(sPAPRDRConnector *drc, DeviceState *d, void *fdt,
+ int fdt_start_offset, bool coldplug, Error **errp)
+{
+ DPRINTFN("drc: %x, attach", get_index(drc));
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ error_setg(errp, "an attached device is still awaiting release");
+ return;
+ }
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ g_assert(drc->allocation_state == SPAPR_DR_ALLOCATION_STATE_USABLE);
+ }
+ g_assert(fdt || coldplug);
+
+ /* NOTE: setting initial isolation state to UNISOLATED means we can't
+ * detach unless the guest has a userspace/kernel that moves this state
+ * back to ISOLATED in response to an unplug event, or this is done
+ * manually by the admin beforehand. if we force things while the guest
+ * may be accessing the device, we can easily crash the guest, so we
+ * defer completion of removal in such cases to the reset() hook.
+ */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->isolation_state = SPAPR_DR_ISOLATION_STATE_UNISOLATED;
+ }
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_ACTIVE;
+
+ drc->dev = d;
+ drc->fdt = fdt;
+ drc->fdt_start_offset = fdt_start_offset;
+ drc->configured = false;
+
+ object_property_add_link(OBJECT(drc), "device",
+ object_get_typename(OBJECT(drc->dev)),
+ (Object **)(&drc->dev),
+ NULL, 0, NULL);
+}
+
+static void detach(sPAPRDRConnector *drc, DeviceState *d,
+ spapr_drc_detach_cb *detach_cb,
+ void *detach_cb_opaque, Error **errp)
+{
+ DPRINTFN("drc: %x, detach", get_index(drc));
+
+ drc->detach_cb = detach_cb;
+ drc->detach_cb_opaque = detach_cb_opaque;
+
+ if (drc->isolation_state != SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ DPRINTFN("awaiting transition to isolated state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->allocation_state != SPAPR_DR_ALLOCATION_STATE_UNUSABLE) {
+ DPRINTFN("awaiting transition to unusable state before removal");
+ drc->awaiting_release = true;
+ return;
+ }
+
+ drc->indicator_state = SPAPR_DR_INDICATOR_STATE_INACTIVE;
+
+ if (drc->detach_cb) {
+ drc->detach_cb(drc->dev, drc->detach_cb_opaque);
+ }
+
+ drc->awaiting_release = false;
+ g_free(drc->fdt);
+ drc->fdt = NULL;
+ drc->fdt_start_offset = 0;
+ object_property_del(OBJECT(drc), "device", NULL);
+ drc->dev = NULL;
+ drc->detach_cb = NULL;
+ drc->detach_cb_opaque = NULL;
+}
+
+static bool release_pending(sPAPRDRConnector *drc)
+{
+ return drc->awaiting_release;
+}
+
+static void reset(DeviceState *d)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ DPRINTFN("drc reset: %x", drck->get_index(drc));
+ /* immediately upon reset we can assume that DRCs whose devices
+ * are pending removal can be safely removed, and that they will
+ * subsequently be left in an ISOLATED state. move the DRC to this
+ * state in these cases (which will in turn complete any pending
+ * device removals)
+ */
+ if (drc->awaiting_release) {
+ drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_ISOLATED);
+ /* generally this should also finalize the removal, but if the device
+ * hasn't yet been configured we normally defer removal under the
+ * assumption that this transition is taking place as part of device
+ * configuration. so check if we're still waiting after this, and
+ * force removal if we are
+ */
+ if (drc->awaiting_release) {
+ drck->detach(drc, DEVICE(drc->dev), drc->detach_cb,
+ drc->detach_cb_opaque, NULL);
+ }
+
+ /* non-PCI devices may be awaiting a transition to UNUSABLE */
+ if (drc->type != SPAPR_DR_CONNECTOR_TYPE_PCI &&
+ drc->awaiting_release) {
+ drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_UNUSABLE);
+ }
+ }
+}
+
+static void realize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char link_name[256];
+ gchar *child_name;
+ Error *err = NULL;
+
+ DPRINTFN("drc realize: %x", drck->get_index(drc));
+ /* NOTE: we do this as part of realize/unrealize due to the fact
+ * that the guest will communicate with the DRC via RTAS calls
+ * referencing the global DRC index. By unlinking the DRC
+ * from DRC_CONTAINER_PATH/<drc_index> we effectively make it
+ * inaccessible by the guest, since lookups rely on this path
+ * existing in the composition tree
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(link_name, sizeof(link_name), "%x", drck->get_index(drc));
+ child_name = object_get_canonical_path_component(OBJECT(drc));
+ DPRINTFN("drc child name: %s", child_name);
+ object_property_add_alias(root_container, link_name,
+ drc->owner, child_name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+ DPRINTFN("drc realize complete");
+}
+
+static void unrealize(DeviceState *d, Error **errp)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(d);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ Object *root_container;
+ char name[256];
+ Error *err = NULL;
+
+ DPRINTFN("drc unrealize: %x", drck->get_index(drc));
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+ snprintf(name, sizeof(name), "%x", drck->get_index(drc));
+ object_property_del(root_container, name, &err);
+ if (err) {
+ error_report("%s", error_get_pretty(err));
+ error_free(err);
+ object_unref(OBJECT(drc));
+ }
+}
+
+sPAPRDRConnector *spapr_dr_connector_new(Object *owner,
+ sPAPRDRConnectorType type,
+ uint32_t id)
+{
+ sPAPRDRConnector *drc =
+ SPAPR_DR_CONNECTOR(object_new(TYPE_SPAPR_DR_CONNECTOR));
+
+ g_assert(type);
+
+ drc->type = type;
+ drc->id = id;
+ drc->owner = owner;
+ object_property_add_child(owner, "dr-connector[*]", OBJECT(drc), NULL);
+ object_property_set_bool(OBJECT(drc), true, "realized", NULL);
+
+ /* human-readable name for a DRC to encode into the DT
+ * description. this is mainly used within a guest in place
+ * of the unique DRC index.
+ *
+ * in the case of VIO/PCI devices, it corresponds to a
+ * "location code" that maps a logical device/function (DRC index)
+ * to a physical (or virtual in the case of VIO) location in the
+ * system by chaining together the "location label" for each
+ * encapsulating component.
+ *
+ * since this is more to do with diagnosing physical hardware
+ * issues than guest compatibility, we choose location codes/DRC
+ * names that adhere to the documented format, but avoid encoding
+ * the entire topology information into the label/code, instead
+ * just using the location codes based on the labels for the
+ * endpoints (VIO/PCI adaptor connectors), which is basically
+ * just "C" followed by an integer ID.
+ *
+ * DRC names as documented by PAPR+ v2.7, 13.5.2.4
+ * location codes as documented by PAPR+ v2.7, 12.3.1.5
+ */
+ switch (drc->type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ drc->name = g_strdup_printf("CPU %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ drc->name = g_strdup_printf("PHB %d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ drc->name = g_strdup_printf("C%d", id);
+ break;
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ drc->name = g_strdup_printf("LMB %d", id);
+ break;
+ default:
+ g_assert(false);
+ }
+
+ /* PCI slots always start in a USABLE state, and stay there */
+ if (drc->type == SPAPR_DR_CONNECTOR_TYPE_PCI) {
+ drc->allocation_state = SPAPR_DR_ALLOCATION_STATE_USABLE;
+ }
+
+ return drc;
+}
+
+static void spapr_dr_connector_instance_init(Object *obj)
+{
+ sPAPRDRConnector *drc = SPAPR_DR_CONNECTOR(obj);
+
+ object_property_add_uint32_ptr(obj, "isolation-state",
+ &drc->isolation_state, NULL);
+ object_property_add_uint32_ptr(obj, "indicator-state",
+ &drc->indicator_state, NULL);
+ object_property_add_uint32_ptr(obj, "allocation-state",
+ &drc->allocation_state, NULL);
+ object_property_add_uint32_ptr(obj, "id", &drc->id, NULL);
+ object_property_add(obj, "index", "uint32", prop_get_index,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "connector_type", "uint32", prop_get_type,
+ NULL, NULL, NULL, NULL);
+ object_property_add_str(obj, "name", prop_get_name, NULL, NULL);
+ object_property_add(obj, "entity-sense", "uint32", prop_get_entity_sense,
+ NULL, NULL, NULL, NULL);
+ object_property_add(obj, "fdt", "struct", prop_get_fdt,
+ NULL, NULL, NULL, NULL);
+}
+
+static void spapr_dr_connector_class_init(ObjectClass *k, void *data)
+{
+ DeviceClass *dk = DEVICE_CLASS(k);
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_CLASS(k);
+
+ dk->reset = reset;
+ dk->realize = realize;
+ dk->unrealize = unrealize;
+ drck->set_isolation_state = set_isolation_state;
+ drck->set_indicator_state = set_indicator_state;
+ drck->set_allocation_state = set_allocation_state;
+ drck->get_index = get_index;
+ drck->get_type = get_type;
+ drck->get_name = get_name;
+ drck->get_fdt = get_fdt;
+ drck->set_configured = set_configured;
+ drck->entity_sense = entity_sense;
+ drck->attach = attach;
+ drck->detach = detach;
+ drck->release_pending = release_pending;
+}
+
+static const TypeInfo spapr_dr_connector_info = {
+ .name = TYPE_SPAPR_DR_CONNECTOR,
+ .parent = TYPE_DEVICE,
+ .instance_size = sizeof(sPAPRDRConnector),
+ .instance_init = spapr_dr_connector_instance_init,
+ .class_size = sizeof(sPAPRDRConnectorClass),
+ .class_init = spapr_dr_connector_class_init,
+};
+
+static void spapr_drc_register_types(void)
+{
+ type_register_static(&spapr_dr_connector_info);
+}
+
+type_init(spapr_drc_register_types)
+
+/* helper functions for external users */
+
+sPAPRDRConnector *spapr_dr_connector_by_index(uint32_t index)
+{
+ Object *obj;
+ char name[256];
+
+ snprintf(name, sizeof(name), "%s/%x", DRC_CONTAINER_PATH, index);
+ obj = object_resolve_path(name, NULL);
+
+ return !obj ? NULL : SPAPR_DR_CONNECTOR(obj);
+}
+
+sPAPRDRConnector *spapr_dr_connector_by_id(sPAPRDRConnectorType type,
+ uint32_t id)
+{
+ return spapr_dr_connector_by_index(
+ (get_type_shift(type) << DRC_INDEX_TYPE_SHIFT) |
+ (id & DRC_INDEX_ID_MASK));
+}
+
+/* generate a string that describes the DRC to encode into the
+ * device tree.
+ *
+ * as documented by PAPR+ v2.7, 13.5.2.6 and C.6.1
+ */
+static const char *spapr_drc_get_type_str(sPAPRDRConnectorType type)
+{
+ switch (type) {
+ case SPAPR_DR_CONNECTOR_TYPE_CPU:
+ return "CPU";
+ case SPAPR_DR_CONNECTOR_TYPE_PHB:
+ return "PHB";
+ case SPAPR_DR_CONNECTOR_TYPE_VIO:
+ return "SLOT";
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ return "28";
+ case SPAPR_DR_CONNECTOR_TYPE_LMB:
+ return "MEM";
+ default:
+ g_assert(false);
+ }
+
+ return NULL;
+}
+
+/**
+ * spapr_drc_populate_dt
+ *
+ * @fdt: libfdt device tree
+ * @path: path in the DT to generate properties
+ * @owner: parent Object/DeviceState for which to generate DRC
+ * descriptions for
+ * @drc_type_mask: mask of sPAPRDRConnectorType values corresponding
+ * to the types of DRCs to generate entries for
+ *
+ * generate OF properties to describe DRC topology/indices to guests
+ *
+ * as documented in PAPR+ v2.1, 13.5.2
+ */
+int spapr_drc_populate_dt(void *fdt, int fdt_offset, Object *owner,
+ uint32_t drc_type_mask)
+{
+ Object *root_container;
+ ObjectProperty *prop;
+ uint32_t drc_count = 0;
+ GArray *drc_indexes, *drc_power_domains;
+ GString *drc_names, *drc_types;
+ int ret;
+
+ /* the first entry of each property is a 32-bit integer encoding
+ * the number of elements in the array. we won't know this until
+ * we complete the iteration through all the matching DRCs, but
+ * reserve the space now and set the offsets accordingly so we
+ * can fill them in later.
+ */
+ drc_indexes = g_array_new(false, true, sizeof(uint32_t));
+ drc_indexes = g_array_set_size(drc_indexes, 1);
+ drc_power_domains = g_array_new(false, true, sizeof(uint32_t));
+ drc_power_domains = g_array_set_size(drc_power_domains, 1);
+ drc_names = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+ drc_types = g_string_set_size(g_string_new(NULL), sizeof(uint32_t));
+
+ /* aliases for all DRConnector objects will be rooted in QOM
+ * composition tree at DRC_CONTAINER_PATH
+ */
+ root_container = container_get(object_get_root(), DRC_CONTAINER_PATH);
+
+ QTAILQ_FOREACH(prop, &root_container->properties, node) {
+ Object *obj;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t drc_index, drc_power_domain;
+
+ if (!strstart(prop->type, "link<", NULL)) {
+ continue;
+ }
+
+ obj = object_property_get_link(root_container, prop->name, NULL);
+ drc = SPAPR_DR_CONNECTOR(obj);
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ if (owner && (drc->owner != owner)) {
+ continue;
+ }
+
+ if ((drc->type & drc_type_mask) == 0) {
+ continue;
+ }
+
+ drc_count++;
+
+ /* ibm,drc-indexes */
+ drc_index = cpu_to_be32(drck->get_index(drc));
+ g_array_append_val(drc_indexes, drc_index);
+
+ /* ibm,drc-power-domains */
+ drc_power_domain = cpu_to_be32(-1);
+ g_array_append_val(drc_power_domains, drc_power_domain);
+
+ /* ibm,drc-names */
+ drc_names = g_string_append(drc_names, drck->get_name(drc));
+ drc_names = g_string_insert_len(drc_names, -1, "\0", 1);
+
+ /* ibm,drc-types */
+ drc_types = g_string_append(drc_types,
+ spapr_drc_get_type_str(drc->type));
+ drc_types = g_string_insert_len(drc_types, -1, "\0", 1);
+ }
+
+ /* now write the drc count into the space we reserved at the
+ * beginning of the arrays previously
+ */
+ *(uint32_t *)drc_indexes->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_power_domains->data = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_names->str = cpu_to_be32(drc_count);
+ *(uint32_t *)drc_types->str = cpu_to_be32(drc_count);
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-indexes",
+ drc_indexes->data,
+ drc_indexes->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't create ibm,drc-indexes property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-power-domains",
+ drc_power_domains->data,
+ drc_power_domains->len * sizeof(uint32_t));
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-power-domains property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-names",
+ drc_names->str, drc_names->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-names property\n");
+ goto out;
+ }
+
+ ret = fdt_setprop(fdt, fdt_offset, "ibm,drc-types",
+ drc_types->str, drc_types->len);
+ if (ret) {
+ fprintf(stderr, "Couldn't finalize ibm,drc-types property\n");
+ goto out;
+ }
+
+out:
+ g_array_free(drc_indexes, true);
+ g_array_free(drc_power_domains, true);
+ g_string_free(drc_names, true);
+ g_string_free(drc_types, true);
+
+ return ret;
+}
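A hedged sketch of how a caller such as the PHB code is expected to use these helpers; the function below is hypothetical, and names like sPAPRPHBState, PCI_SLOT_MAX and phb->index are assumed from elsewhere in the QEMU tree rather than from this hunk:

/* sketch only: create one PCI DRC per slot/function of a PHB and then
 * advertise them all in the PHB's device tree node
 */
static void example_phb_create_drcs(sPAPRPHBState *phb, void *fdt, int bus_off)
{
    int i;

    for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
        /* id encodes the PHB index, bus 0 and devfn; compare with
         * spapr_phb_get_pci_drc() later in this patch */
        spapr_dr_connector_new(OBJECT(phb), SPAPR_DR_CONNECTOR_TYPE_PCI,
                               (phb->index << 16) | i);
    }

    /* emits ibm,drc-indexes/-power-domains/-names/-types under bus_off */
    spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
                          SPAPR_DR_CONNECTOR_TYPE_PCI);
}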
diff --git a/hw/ppc/spapr_events.c b/hw/ppc/spapr_events.c
index 283e96bca1..fda9e3590a 100644
--- a/hw/ppc/spapr_events.c
+++ b/hw/ppc/spapr_events.c
@@ -32,6 +32,9 @@
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/pci/pci.h"
+#include "hw/pci-host/spapr.h"
+#include "hw/ppc/spapr_drc.h"
#include <libfdt.h>
@@ -77,6 +80,7 @@ struct rtas_error_log {
#define RTAS_LOG_TYPE_ECC_UNCORR 0x00000009
#define RTAS_LOG_TYPE_ECC_CORR 0x0000000a
#define RTAS_LOG_TYPE_EPOW 0x00000040
+#define RTAS_LOG_TYPE_HOTPLUG 0x000000e5
uint32_t extended_length;
} QEMU_PACKED;
@@ -166,6 +170,38 @@ struct epow_log_full {
struct rtas_event_log_v6_epow epow;
} QEMU_PACKED;
+struct rtas_event_log_v6_hp {
+#define RTAS_LOG_V6_SECTION_ID_HOTPLUG 0x4850 /* HP */
+ struct rtas_event_log_v6_section_header hdr;
+ uint8_t hotplug_type;
+#define RTAS_LOG_V6_HP_TYPE_CPU 1
+#define RTAS_LOG_V6_HP_TYPE_MEMORY 2
+#define RTAS_LOG_V6_HP_TYPE_SLOT 3
+#define RTAS_LOG_V6_HP_TYPE_PHB 4
+#define RTAS_LOG_V6_HP_TYPE_PCI 5
+ uint8_t hotplug_action;
+#define RTAS_LOG_V6_HP_ACTION_ADD 1
+#define RTAS_LOG_V6_HP_ACTION_REMOVE 2
+ uint8_t hotplug_identifier;
+#define RTAS_LOG_V6_HP_ID_DRC_NAME 1
+#define RTAS_LOG_V6_HP_ID_DRC_INDEX 2
+#define RTAS_LOG_V6_HP_ID_DRC_COUNT 3
+ uint8_t reserved;
+ union {
+ uint32_t index;
+ uint32_t count;
+ char name[1];
+ } drc;
+} QEMU_PACKED;
+
+struct hp_log_full {
+ struct rtas_error_log hdr;
+ struct rtas_event_log_v6 v6hdr;
+ struct rtas_event_log_v6_maina maina;
+ struct rtas_event_log_v6_mainb mainb;
+ struct rtas_event_log_v6_hp hp;
+} QEMU_PACKED;
+
#define EVENT_MASK_INTERNAL_ERRORS 0x80000000
#define EVENT_MASK_EPOW 0x40000000
#define EVENT_MASK_HOTPLUG 0x10000000
@@ -181,67 +217,105 @@ struct epow_log_full {
} \
} while (0)
-void spapr_events_fdt_skel(void *fdt, uint32_t epow_irq)
+void spapr_events_fdt_skel(void *fdt, uint32_t check_exception_irq)
{
- uint32_t epow_irq_ranges[] = {cpu_to_be32(epow_irq), cpu_to_be32(1)};
- uint32_t epow_interrupts[] = {cpu_to_be32(epow_irq), 0};
+ uint32_t irq_ranges[] = {cpu_to_be32(check_exception_irq), cpu_to_be32(1)};
+ uint32_t interrupts[] = {cpu_to_be32(check_exception_irq), 0};
_FDT((fdt_begin_node(fdt, "event-sources")));
_FDT((fdt_property(fdt, "interrupt-controller", NULL, 0)));
_FDT((fdt_property_cell(fdt, "#interrupt-cells", 2)));
_FDT((fdt_property(fdt, "interrupt-ranges",
- epow_irq_ranges, sizeof(epow_irq_ranges))));
+ irq_ranges, sizeof(irq_ranges))));
_FDT((fdt_begin_node(fdt, "epow-events")));
- _FDT((fdt_property(fdt, "interrupts",
- epow_interrupts, sizeof(epow_interrupts))));
+ _FDT((fdt_property(fdt, "interrupts", interrupts, sizeof(interrupts))));
_FDT((fdt_end_node(fdt)));
_FDT((fdt_end_node(fdt)));
}
-static struct epow_log_full *pending_epow;
-static uint32_t next_plid;
+static void rtas_event_log_queue(int log_type, void *data, bool exception)
+{
+ sPAPREventLogEntry *entry = g_new(sPAPREventLogEntry, 1);
-static void spapr_powerdown_req(Notifier *n, void *opaque)
+ g_assert(data);
+ entry->log_type = log_type;
+ entry->exception = exception;
+ entry->data = data;
+ QTAILQ_INSERT_TAIL(&spapr->pending_events, entry, next);
+}
+
+static sPAPREventLogEntry *rtas_event_log_dequeue(uint32_t event_mask,
+ bool exception)
{
- sPAPREnvironment *spapr = container_of(n, sPAPREnvironment, epow_notifier);
- struct rtas_error_log *hdr;
- struct rtas_event_log_v6 *v6hdr;
- struct rtas_event_log_v6_maina *maina;
- struct rtas_event_log_v6_mainb *mainb;
- struct rtas_event_log_v6_epow *epow;
- struct tm tm;
- int year;
+ sPAPREventLogEntry *entry = NULL;
- if (pending_epow) {
- /* For now, we just throw away earlier events if two come
- * along before any are consumed. This is sufficient for our
- * powerdown messages, but we'll need more if we do more
- * general error/event logging */
- g_free(pending_epow);
+ /* we only queue EPOW events atm. */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return NULL;
}
- pending_epow = g_malloc0(sizeof(*pending_epow));
- hdr = &pending_epow->hdr;
- v6hdr = &pending_epow->v6hdr;
- maina = &pending_epow->maina;
- mainb = &pending_epow->mainb;
- epow = &pending_epow->epow;
- hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
- | RTAS_LOG_SEVERITY_EVENT
- | RTAS_LOG_DISPOSITION_NOT_RECOVERED
- | RTAS_LOG_OPTIONAL_PART_PRESENT
- | RTAS_LOG_TYPE_EPOW);
- hdr->extended_length = cpu_to_be32(sizeof(*pending_epow)
- - sizeof(pending_epow->hdr));
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ break;
+ }
+ }
+
+ if (entry) {
+ QTAILQ_REMOVE(&spapr->pending_events, entry, next);
+ }
+ return entry;
+}
+
+static bool rtas_event_log_contains(uint32_t event_mask, bool exception)
+{
+ sPAPREventLogEntry *entry = NULL;
+
+ /* we only queue EPOW events atm. */
+ if ((event_mask & EVENT_MASK_EPOW) == 0) {
+ return false;
+ }
+
+ QTAILQ_FOREACH(entry, &spapr->pending_events, next) {
+ if (entry->exception != exception) {
+ continue;
+ }
+
+ /* EPOW and hotplug events are surfaced in the same manner */
+ if (entry->log_type == RTAS_LOG_TYPE_EPOW ||
+ entry->log_type == RTAS_LOG_TYPE_HOTPLUG) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static uint32_t next_plid;
+
+static void spapr_init_v6hdr(struct rtas_event_log_v6 *v6hdr)
+{
v6hdr->b0 = RTAS_LOG_V6_B0_VALID | RTAS_LOG_V6_B0_NEW_LOG
| RTAS_LOG_V6_B0_BIGENDIAN;
v6hdr->b2 = RTAS_LOG_V6_B2_POWERPC_FORMAT
| RTAS_LOG_V6_B2_LOG_FORMAT_PLATFORM_EVENT;
v6hdr->company = cpu_to_be32(RTAS_LOG_V6_COMPANY_IBM);
+}
+
+static void spapr_init_maina(struct rtas_event_log_v6_maina *maina,
+ int section_count)
+{
+ struct tm tm;
+ int year;
maina->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINA);
maina->hdr.section_length = cpu_to_be16(sizeof(*maina));
@@ -256,8 +330,37 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
| (to_bcd(tm.tm_min) << 16)
| (to_bcd(tm.tm_sec) << 8));
maina->creator_id = 'H'; /* Hypervisor */
- maina->section_count = 3; /* Main-A, Main-B and EPOW */
+ maina->section_count = section_count;
maina->plid = next_plid++;
+}
+
+static void spapr_powerdown_req(Notifier *n, void *opaque)
+{
+ sPAPREnvironment *spapr = container_of(n, sPAPREnvironment, epow_notifier);
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_epow *epow;
+ struct epow_log_full *new_epow;
+
+ new_epow = g_malloc0(sizeof(*new_epow));
+ hdr = &new_epow->hdr;
+ v6hdr = &new_epow->v6hdr;
+ maina = &new_epow->maina;
+ mainb = &new_epow->mainb;
+ epow = &new_epow->epow;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_TYPE_EPOW);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_epow)
+ - sizeof(new_epow->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B and EPOW */);
mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
@@ -274,7 +377,80 @@ static void spapr_powerdown_req(Notifier *n, void *opaque)
epow->event_modifier = RTAS_LOG_V6_EPOW_MODIFIER_NORMAL;
epow->extended_modifier = RTAS_LOG_V6_EPOW_XMODIFIER_PARTITION_SPECIFIC;
- qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->epow_irq));
+ rtas_event_log_queue(RTAS_LOG_TYPE_EPOW, new_epow, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+static void spapr_hotplug_req_event(sPAPRDRConnector *drc, uint8_t hp_action)
+{
+ struct hp_log_full *new_hp;
+ struct rtas_error_log *hdr;
+ struct rtas_event_log_v6 *v6hdr;
+ struct rtas_event_log_v6_maina *maina;
+ struct rtas_event_log_v6_mainb *mainb;
+ struct rtas_event_log_v6_hp *hp;
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ sPAPRDRConnectorType drc_type = drck->get_type(drc);
+
+ new_hp = g_malloc0(sizeof(struct hp_log_full));
+ hdr = &new_hp->hdr;
+ v6hdr = &new_hp->v6hdr;
+ maina = &new_hp->maina;
+ mainb = &new_hp->mainb;
+ hp = &new_hp->hp;
+
+ hdr->summary = cpu_to_be32(RTAS_LOG_VERSION_6
+ | RTAS_LOG_SEVERITY_EVENT
+ | RTAS_LOG_DISPOSITION_NOT_RECOVERED
+ | RTAS_LOG_OPTIONAL_PART_PRESENT
+ | RTAS_LOG_INITIATOR_HOTPLUG
+ | RTAS_LOG_TYPE_HOTPLUG);
+ hdr->extended_length = cpu_to_be32(sizeof(*new_hp)
+ - sizeof(new_hp->hdr));
+
+ spapr_init_v6hdr(v6hdr);
+ spapr_init_maina(maina, 3 /* Main-A, Main-B, HP */);
+
+ mainb->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_MAINB);
+ mainb->hdr.section_length = cpu_to_be16(sizeof(*mainb));
+ mainb->subsystem_id = 0x80; /* External environment */
+ mainb->event_severity = 0x00; /* Informational / non-error */
+ mainb->event_subtype = 0x00; /* Normal shutdown */
+
+ hp->hdr.section_id = cpu_to_be16(RTAS_LOG_V6_SECTION_ID_HOTPLUG);
+ hp->hdr.section_length = cpu_to_be16(sizeof(*hp));
+ hp->hdr.section_version = 1; /* includes extended modifier */
+ hp->hotplug_action = hp_action;
+
+ switch (drc_type) {
+ case SPAPR_DR_CONNECTOR_TYPE_PCI:
+ hp->drc.index = cpu_to_be32(drck->get_index(drc));
+ hp->hotplug_identifier = RTAS_LOG_V6_HP_ID_DRC_INDEX;
+ hp->hotplug_type = RTAS_LOG_V6_HP_TYPE_PCI;
+ break;
+ default:
+ /* we shouldn't be signaling hotplug events for resources
+ * that don't support them
+ */
+ g_assert(false);
+ return;
+ }
+
+ rtas_event_log_queue(RTAS_LOG_TYPE_HOTPLUG, new_hp, true);
+
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+}
+
+void spapr_hotplug_req_add_event(sPAPRDRConnector *drc)
+{
+ spapr_hotplug_req_event(drc, RTAS_LOG_V6_HP_ACTION_ADD);
+}
+
+void spapr_hotplug_req_remove_event(sPAPRDRConnector *drc)
+{
+ spapr_hotplug_req_event(drc, RTAS_LOG_V6_HP_ACTION_REMOVE);
}
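A minimal sketch of how these helpers are meant to be driven from a hotplug handler; the function below is illustrative only and mirrors what spapr_phb_hot_plug_child() does later in this patch:

/* sketch: after a successful attach of a hotplugged PCI device, raise a
 * hotplug "add" event so the guest picks it up via check-exception */
static void example_signal_pci_hotplug(sPAPRDRConnector *drc, DeviceState *dev)
{
    if (dev->hotplugged) {
        spapr_hotplug_req_add_event(drc);
    }
}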
static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
@@ -282,8 +458,10 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
target_ulong args,
uint32_t nret, target_ulong rets)
{
- uint32_t mask, buf, len;
+ uint32_t mask, buf, len, event_len;
uint64_t xinfo;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
if ((nargs < 6) || (nargs > 7) || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -298,25 +476,85 @@ static void check_exception(PowerPCCPU *cpu, sPAPREnvironment *spapr,
xinfo |= (uint64_t)rtas_ld(args, 6) << 32;
}
- if ((mask & EVENT_MASK_EPOW) && pending_epow) {
- if (sizeof(*pending_epow) < len) {
- len = sizeof(*pending_epow);
- }
+ event = rtas_event_log_dequeue(mask, true);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+
+ /* according to PAPR+, the IRQ must be left asserted, or re-asserted, if
+ * there are still pending events to be fetched via check-exception. We
+ * do the latter here, since our code relies on edge-triggered
+ * interrupts.
+ */
+ if (rtas_event_log_contains(mask, true)) {
+ qemu_irq_pulse(xics_get_qirq(spapr->icp, spapr->check_exception_irq));
+ }
- cpu_physical_memory_write(buf, pending_epow, len);
- g_free(pending_epow);
- pending_epow = NULL;
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- } else {
- rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
+}
+
+static void event_scan(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args,
+ uint32_t nret, target_ulong rets)
+{
+ uint32_t mask, buf, len, event_len;
+ sPAPREventLogEntry *event;
+ struct rtas_error_log *hdr;
+
+ if (nargs != 4 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
}
+
+ mask = rtas_ld(args, 0);
+ buf = rtas_ld(args, 2);
+ len = rtas_ld(args, 3);
+
+ event = rtas_event_log_dequeue(mask, false);
+ if (!event) {
+ goto out_no_events;
+ }
+
+ hdr = event->data;
+ event_len = be32_to_cpu(hdr->extended_length) + sizeof(*hdr);
+
+ if (event_len < len) {
+ len = event_len;
+ }
+
+ cpu_physical_memory_write(buf, event->data, len);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ g_free(event->data);
+ g_free(event);
+ return;
+
+out_no_events:
+ rtas_st(rets, 0, RTAS_OUT_NO_ERRORS_FOUND);
}
void spapr_events_init(sPAPREnvironment *spapr)
{
- spapr->epow_irq = xics_alloc(spapr->icp, 0, 0, false);
+ QTAILQ_INIT(&spapr->pending_events);
+ spapr->check_exception_irq = xics_alloc(spapr->icp, 0, 0, false);
spapr->epow_notifier.notify = spapr_powerdown_req;
qemu_register_powerdown_notifier(&spapr->epow_notifier);
spapr_rtas_register(RTAS_CHECK_EXCEPTION, "check-exception",
check_exception);
+ spapr_rtas_register(RTAS_EVENT_SCAN, "event-scan", event_scan);
}
diff --git a/hw/ppc/spapr_iommu.c b/hw/ppc/spapr_iommu.c
index f3990fdc32..8cd9dba9ac 100644
--- a/hw/ppc/spapr_iommu.c
+++ b/hw/ppc/spapr_iommu.c
@@ -41,7 +41,7 @@ enum sPAPRTCEAccess {
static QLIST_HEAD(spapr_tce_tables, sPAPRTCETable) spapr_tce_tables;
-static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
+sPAPRTCETable *spapr_tce_find_by_liobn(target_ulong liobn)
{
sPAPRTCETable *tcet;
@@ -52,7 +52,7 @@ static sPAPRTCETable *spapr_tce_find_by_liobn(uint32_t liobn)
}
QLIST_FOREACH(tcet, &spapr_tce_tables, list) {
- if (tcet->liobn == liobn) {
+ if (tcet->liobn == (uint32_t)liobn) {
return tcet;
}
}
@@ -126,11 +126,11 @@ static MemoryRegionIOMMUOps spapr_iommu_ops = {
static int spapr_tce_table_realize(DeviceState *dev)
{
sPAPRTCETable *tcet = SPAPR_TCE_TABLE(dev);
+ uint64_t window_size = (uint64_t)tcet->nb_table << tcet->page_shift;
- if (kvm_enabled()) {
+ if (kvm_enabled() && !(window_size >> 32)) {
tcet->table = kvmppc_create_spapr_tce(tcet->liobn,
- tcet->nb_table <<
- tcet->page_shift,
+ window_size,
&tcet->fd,
tcet->vfio_accel);
}
@@ -161,6 +161,7 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
bool vfio_accel)
{
sPAPRTCETable *tcet;
+ char tmp[64];
if (spapr_tce_find_by_liobn(liobn)) {
fprintf(stderr, "Attempted to create TCE table with duplicate"
@@ -179,7 +180,8 @@ sPAPRTCETable *spapr_tce_new_table(DeviceState *owner, uint32_t liobn,
tcet->nb_table = nb_table;
tcet->vfio_accel = vfio_accel;
- object_property_add_child(OBJECT(owner), "tce-table", OBJECT(tcet), NULL);
+ snprintf(tmp, sizeof(tmp), "tce-table-%x", liobn);
+ object_property_add_child(OBJECT(owner), tmp, OBJECT(tcet), NULL);
object_property_set_bool(OBJECT(tcet), true, "realized", NULL);
@@ -247,7 +249,7 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
target_ulong ioba1 = ioba;
target_ulong tce_list = args[2];
target_ulong npages = args[3];
- target_ulong ret = H_PARAMETER;
+ target_ulong ret = H_PARAMETER, tce = 0;
sPAPRTCETable *tcet = spapr_tce_find_by_liobn(liobn);
CPUState *cs = CPU(cpu);
hwaddr page_mask, page_size;
@@ -267,7 +269,7 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
for (i = 0; i < npages; ++i, ioba += page_size) {
target_ulong off = (tce_list & ~SPAPR_TCE_RW) +
i * sizeof(target_ulong);
- target_ulong tce = ldq_phys(cs->as, off);
+ tce = ldq_be_phys(cs->as, off);
ret = put_tce_emu(tcet, ioba, tce);
if (ret) {
@@ -277,11 +279,11 @@ static target_ulong h_put_tce_indirect(PowerPCCPU *cpu,
/* Trace last successful or the first problematic entry */
i = i ? (i - 1) : 0;
- trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i,
- ldq_phys(cs->as,
- tce_list + i * sizeof(target_ulong)),
- ret);
-
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ } else {
+ trace_spapr_iommu_indirect(liobn, ioba1, tce_list, i, tce, ret);
+ }
return ret;
}
@@ -315,7 +317,11 @@ static target_ulong h_stuff_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
break;
}
}
- trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_stuff(liobn, ioba, tce_value, npages, ret);
+ } else {
+ trace_spapr_iommu_stuff(liobn, ioba, tce_value, npages, ret);
+ }
return ret;
}
@@ -336,7 +342,11 @@ static target_ulong h_put_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
ret = put_tce_emu(tcet, ioba, tce);
}
- trace_spapr_iommu_put(liobn, ioba, tce, ret);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_put(liobn, ioba, tce, ret);
+ } else {
+ trace_spapr_iommu_put(liobn, ioba, tce, ret);
+ }
return ret;
}
@@ -376,7 +386,11 @@ static target_ulong h_get_tce(PowerPCCPU *cpu, sPAPREnvironment *spapr,
args[0] = tce;
}
}
- trace_spapr_iommu_get(liobn, ioba, ret, tce);
+ if (SPAPR_IS_PCI_LIOBN(liobn)) {
+ trace_spapr_iommu_pci_get(liobn, ioba, ret, tce);
+ } else {
+ trace_spapr_iommu_get(liobn, ioba, ret, tce);
+ }
return ret;
}
diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c
index 05f4faca6e..4df3a33db4 100644
--- a/hw/ppc/spapr_pci.c
+++ b/hw/ppc/spapr_pci.c
@@ -33,8 +33,11 @@
#include <libfdt.h>
#include "trace.h"
#include "qemu/error-report.h"
+#include "qapi/qmp/qerror.h"
#include "hw/pci/pci_bus.h"
+#include "hw/ppc/spapr_drc.h"
+#include "sysemu/device_tree.h"
/* Copied from the kernel arch/powerpc/platforms/pseries/msi.c */
#define RTAS_QUERY_FN 0
@@ -47,7 +50,15 @@
#define RTAS_TYPE_MSI 1
#define RTAS_TYPE_MSIX 2
-static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
+#define _FDT(exp) \
+ do { \
+ int ret = (exp); \
+ if (ret < 0) { \
+ return ret; \
+ } \
+ } while (0)
+
+sPAPRPHBState *spapr_pci_find_phb(sPAPREnvironment *spapr, uint64_t buid)
{
sPAPRPHBState *sphb;
@@ -61,10 +72,10 @@ static sPAPRPHBState *find_phb(sPAPREnvironment *spapr, uint64_t buid)
return NULL;
}
-static PCIDevice *find_dev(sPAPREnvironment *spapr, uint64_t buid,
- uint32_t config_addr)
+PCIDevice *spapr_pci_find_dev(sPAPREnvironment *spapr, uint64_t buid,
+ uint32_t config_addr)
{
- sPAPRPHBState *sphb = find_phb(spapr, buid);
+ sPAPRPHBState *sphb = spapr_pci_find_phb(spapr, buid);
PCIHostState *phb = PCI_HOST_BRIDGE(sphb);
int bus_num = (config_addr >> 16) & 0xFF;
int devfn = (config_addr >> 8) & 0xFF;
@@ -95,7 +106,7 @@ static void finish_read_pci_config(sPAPREnvironment *spapr, uint64_t buid,
return;
}
- pci_dev = find_dev(spapr, buid, addr);
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
addr = rtas_pci_cfgaddr(addr);
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
@@ -162,7 +173,7 @@ static void finish_write_pci_config(sPAPREnvironment *spapr, uint64_t buid,
return;
}
- pci_dev = find_dev(spapr, buid, addr);
+ pci_dev = spapr_pci_find_dev(spapr, buid, addr);
addr = rtas_pci_cfgaddr(addr);
if (!pci_dev || (addr % size) || (addr >= pci_config_size(pci_dev))) {
@@ -280,9 +291,9 @@ static void rtas_ibm_change_msi(PowerPCCPU *cpu, sPAPREnvironment *spapr,
}
/* Fins sPAPRPHBState */
- phb = find_phb(spapr, buid);
+ phb = spapr_pci_find_phb(spapr, buid);
if (phb) {
- pdev = find_dev(spapr, buid, config_addr);
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
}
if (!phb || !pdev) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -381,9 +392,9 @@ static void rtas_ibm_query_interrupt_source_number(PowerPCCPU *cpu,
spapr_pci_msi *msi;
/* Find sPAPRPHBState */
- phb = find_phb(spapr, buid);
+ phb = spapr_pci_find_phb(spapr, buid);
if (phb) {
- pdev = find_dev(spapr, buid, config_addr);
+ pdev = spapr_pci_find_dev(spapr, buid, config_addr);
}
if (!phb || !pdev) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -426,7 +437,7 @@ static void rtas_ibm_set_eeh_option(PowerPCCPU *cpu,
addr = rtas_ld(args, 0);
option = rtas_ld(args, 3);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -461,7 +472,7 @@ static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -479,7 +490,7 @@ static void rtas_ibm_get_config_addr_info2(PowerPCCPU *cpu,
switch (option) {
case RTAS_GET_PE_ADDR:
addr = rtas_ld(args, 0);
- pdev = find_dev(spapr, buid, addr);
+ pdev = spapr_pci_find_dev(spapr, buid, addr);
if (!pdev) {
goto param_error_exit;
}
@@ -516,7 +527,7 @@ static void rtas_ibm_read_slot_reset_state2(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -562,7 +573,7 @@ static void rtas_ibm_set_slot_reset(PowerPCCPU *cpu,
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
option = rtas_ld(args, 3);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -596,7 +607,7 @@ static void rtas_ibm_configure_pe(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -631,7 +642,7 @@ static void rtas_ibm_slot_error_detail(PowerPCCPU *cpu,
}
buid = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 2);
- sphb = find_phb(spapr, buid);
+ sphb = spapr_pci_find_phb(spapr, buid);
if (!sphb) {
goto param_error_exit;
}
@@ -731,6 +742,372 @@ static AddressSpace *spapr_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn)
return &phb->iommu_as;
}
+/* Macros to operate with address in OF binding to PCI */
+#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p))
+#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */
+#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */
+#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */
+#define b_ss(x) b_x((x), 24, 2) /* the space code */
+#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */
+#define b_ddddd(x) b_x((x), 11, 5) /* device number */
+#define b_fff(x) b_x((x), 8, 3) /* function number */
+#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */
+
+/* for 'reg'/'assigned-addresses' OF properties */
+#define RESOURCE_CELLS_SIZE 2
+#define RESOURCE_CELLS_ADDRESS 3
+
+typedef struct ResourceFields {
+ uint32_t phys_hi;
+ uint32_t phys_mid;
+ uint32_t phys_lo;
+ uint32_t size_hi;
+ uint32_t size_lo;
+} QEMU_PACKED ResourceFields;
+
+typedef struct ResourceProps {
+ ResourceFields reg[8];
+ ResourceFields assigned[7];
+ uint32_t reg_len;
+ uint32_t assigned_len;
+} ResourceProps;
+
+/* fill in the 'reg'/'assigned-addresses' OF properties for
+ * a PCI device. 'reg' describes resource requirements for a
+ * device's IO/MEM regions, 'assigned-addresses' describes the
+ * actual resource assignments.
+ *
+ * the properties are arrays of ('phys-addr', 'size') pairs describing
+ * the addressable regions of the PCI device, where 'phys-addr' is a
+ * RESOURCE_CELLS_ADDRESS-tuple of 32-bit integers corresponding to
+ * (phys.hi, phys.mid, phys.lo), and 'size' is a
+ * RESOURCE_CELLS_SIZE-tuple corresponding to (size.hi, size.lo).
+ *
+ * phys.hi = 0xYYXXXXZZ, where:
+ * 0xYY = npt000ss
+ * ||| |
+ * ||| +-- space code: 1 if IO region, 2 if MEM region
+ * ||+------ for non-relocatable IO: 1 if aliased
+ * || for relocatable IO: 1 if below 64KB
+ * || for MEM: 1 if below 1MB
+ * |+------- 1 if region is prefetchable
+ * +-------- 1 if region is non-relocatable
+ * 0xXXXX = bbbbbbbb dddddfff, encoding bus, slot, and function
+ * bits respectively
+ * 0xZZ = rrrrrrrr, the register number of the BAR corresponding
+ * to the region
+ *
+ * phys.mid and phys.lo correspond respectively to the hi/lo portions
+ * of the actual address of the region.
+ *
+ * how the phys-addr/size values are used differs slightly between
+ * 'reg' and 'assigned-addresses' properties. namely, 'reg' has
+ * an additional description for the config space region of the
+ * device, and in the case of QEMU has n=0 and phys.mid=phys.lo=0
+ * to describe the region as relocatable, with an address-mapping
+ * that corresponds directly to the PHB's address space for the
+ * resource. 'assigned-addresses' always has n=1 set with an absolute
+ * address assigned for the resource. in general, 'assigned-addresses'
+ * won't be populated, since addresses for PCI devices are generally
+ * unmapped initially and left to the guest to assign.
+ *
+ * note also that addresses defined in these properties are, at least
+ * for PAPR guests, relative to the PHB's IO/MEM windows, and
+ * correspond directly to the addresses in the BARs.
+ *
+ * in accordance with PCI Bus Binding to Open Firmware,
+ * IEEE Std 1275-1994, section 4.1.1, as implemented by PAPR+ v2.7,
+ * Appendix C.
+ */
+static void populate_resource_props(PCIDevice *d, ResourceProps *rp)
+{
+ int bus_num = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(d))));
+ uint32_t dev_id = (b_bbbbbbbb(bus_num) |
+ b_ddddd(PCI_SLOT(d->devfn)) |
+ b_fff(PCI_FUNC(d->devfn)));
+ ResourceFields *reg, *assigned;
+ int i, reg_idx = 0, assigned_idx = 0;
+
+ /* config space region */
+ reg = &rp->reg[reg_idx++];
+ reg->phys_hi = cpu_to_be32(dev_id);
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = 0;
+ reg->size_lo = 0;
+
+ for (i = 0; i < PCI_NUM_REGIONS; i++) {
+ if (!d->io_regions[i].size) {
+ continue;
+ }
+
+ reg = &rp->reg[reg_idx++];
+
+ reg->phys_hi = cpu_to_be32(dev_id | b_rrrrrrrr(pci_bar(d, i)));
+ if (d->io_regions[i].type & PCI_BASE_ADDRESS_SPACE_IO) {
+ reg->phys_hi |= cpu_to_be32(b_ss(1));
+ } else {
+ reg->phys_hi |= cpu_to_be32(b_ss(2));
+ }
+ reg->phys_mid = 0;
+ reg->phys_lo = 0;
+ reg->size_hi = cpu_to_be32(d->io_regions[i].size >> 32);
+ reg->size_lo = cpu_to_be32(d->io_regions[i].size);
+
+ if (d->io_regions[i].addr == PCI_BAR_UNMAPPED) {
+ continue;
+ }
+
+ assigned = &rp->assigned[assigned_idx++];
+ assigned->phys_hi = cpu_to_be32(reg->phys_hi | b_n(1));
+ assigned->phys_mid = cpu_to_be32(d->io_regions[i].addr >> 32);
+ assigned->phys_lo = cpu_to_be32(d->io_regions[i].addr);
+ assigned->size_hi = reg->size_hi;
+ assigned->size_lo = reg->size_lo;
+ }
+
+ rp->reg_len = reg_idx * sizeof(ResourceFields);
+ rp->assigned_len = assigned_idx * sizeof(ResourceFields);
+}
+
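To make the phys.hi layout described above concrete, a standalone sketch using the same bit helpers; the device coordinates are made up for illustration and are not taken from the patch:

#include <stdio.h>
#include <stdint.h>

#define b_x(x, p, l)   (((x) & ((1 << (l)) - 1)) << (p))
#define b_ss(x)        b_x((x), 24, 2)  /* space code */
#define b_bbbbbbbb(x)  b_x((x), 16, 8)  /* bus number */
#define b_ddddd(x)     b_x((x), 11, 5)  /* device number */
#define b_fff(x)       b_x((x), 8, 3)   /* function number */
#define b_rrrrrrrr(x)  b_x((x), 0, 8)   /* register (BAR config offset) */

int main(void)
{
    /* hypothetical device: bus 0, slot 5, function 0, 32-bit MMIO BAR0
     * at config-space offset 0x10 */
    uint32_t phys_hi = b_ss(2) | b_bbbbbbbb(0) | b_ddddd(5) | b_fff(0) |
                       b_rrrrrrrr(0x10);

    printf("reg phys.hi = 0x%08x\n", phys_hi);  /* prints 0x02002810 */
    return 0;
}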
+static int spapr_populate_pci_child_dt(PCIDevice *dev, void *fdt, int offset,
+ int phb_index, int drc_index,
+ const char *drc_name)
+{
+ ResourceProps rp;
+ bool is_bridge = false;
+ int pci_status;
+
+ if (pci_default_read_config(dev, PCI_HEADER_TYPE, 1) ==
+ PCI_HEADER_TYPE_BRIDGE) {
+ is_bridge = true;
+ }
+
+ /* in accordance with PAPR+ v2.7 13.6.3, Table 181 */
+ _FDT(fdt_setprop_cell(fdt, offset, "vendor-id",
+ pci_default_read_config(dev, PCI_VENDOR_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "device-id",
+ pci_default_read_config(dev, PCI_DEVICE_ID, 2)));
+ _FDT(fdt_setprop_cell(fdt, offset, "revision-id",
+ pci_default_read_config(dev, PCI_REVISION_ID, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "class-code",
+ pci_default_read_config(dev, PCI_CLASS_DEVICE, 2)
+ << 8));
+ if (pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "interrupts",
+ pci_default_read_config(dev, PCI_INTERRUPT_PIN, 1)));
+ }
+
+ if (!is_bridge) {
+ _FDT(fdt_setprop_cell(fdt, offset, "min-grant",
+ pci_default_read_config(dev, PCI_MIN_GNT, 1)));
+ _FDT(fdt_setprop_cell(fdt, offset, "max-latency",
+ pci_default_read_config(dev, PCI_MAX_LAT, 1)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_ID, 2)));
+ }
+
+ if (pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)) {
+ _FDT(fdt_setprop_cell(fdt, offset, "subsystem-vendor-id",
+ pci_default_read_config(dev, PCI_SUBSYSTEM_VENDOR_ID, 2)));
+ }
+
+ _FDT(fdt_setprop_cell(fdt, offset, "cache-line-size",
+ pci_default_read_config(dev, PCI_CACHE_LINE_SIZE, 1)));
+
+ /* the following fdt cells are masked off the pci status register */
+ pci_status = pci_default_read_config(dev, PCI_STATUS, 2);
+ _FDT(fdt_setprop_cell(fdt, offset, "devsel-speed",
+ PCI_STATUS_DEVSEL_MASK & pci_status));
+
+ if (pci_status & PCI_STATUS_FAST_BACK) {
+ _FDT(fdt_setprop(fdt, offset, "fast-back-to-back", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_66MHZ) {
+ _FDT(fdt_setprop(fdt, offset, "66mhz-capable", NULL, 0));
+ }
+ if (pci_status & PCI_STATUS_UDF) {
+ _FDT(fdt_setprop(fdt, offset, "udf-supported", NULL, 0));
+ }
+
+ /* NOTE: this is normally generated by firmware via path/unit name,
+ * but in our case we must set it manually since it does not get
+ * processed by OF beforehand
+ */
+ _FDT(fdt_setprop_string(fdt, offset, "name", "pci"));
+ _FDT(fdt_setprop(fdt, offset, "ibm,loc-code", drc_name, strlen(drc_name)));
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,my-drc-index", drc_index));
+
+ _FDT(fdt_setprop_cell(fdt, offset, "#address-cells",
+ RESOURCE_CELLS_ADDRESS));
+ _FDT(fdt_setprop_cell(fdt, offset, "#size-cells",
+ RESOURCE_CELLS_SIZE));
+ _FDT(fdt_setprop_cell(fdt, offset, "ibm,req#msi-x",
+ RESOURCE_CELLS_SIZE));
+
+ populate_resource_props(dev, &rp);
+ _FDT(fdt_setprop(fdt, offset, "reg", (uint8_t *)rp.reg, rp.reg_len));
+ _FDT(fdt_setprop(fdt, offset, "assigned-addresses",
+ (uint8_t *)rp.assigned, rp.assigned_len));
+
+ return 0;
+}
+
+/* create OF node for pci device and required OF DT properties */
+static void *spapr_create_pci_child_dt(sPAPRPHBState *phb, PCIDevice *dev,
+ int drc_index, const char *drc_name,
+ int *dt_offset)
+{
+ void *fdt;
+ int offset, ret, fdt_size;
+ int slot = PCI_SLOT(dev->devfn);
+ int func = PCI_FUNC(dev->devfn);
+ char nodename[512];
+
+ fdt = create_device_tree(&fdt_size);
+ if (func != 0) {
+ sprintf(nodename, "pci@%d,%d", slot, func);
+ } else {
+ sprintf(nodename, "pci@%d", slot);
+ }
+ offset = fdt_add_subnode(fdt, 0, nodename);
+ ret = spapr_populate_pci_child_dt(dev, fdt, offset, phb->index, drc_index,
+ drc_name);
+ g_assert(!ret);
+
+ *dt_offset = offset;
+ return fdt;
+}
+
+static void spapr_phb_add_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ DeviceState *dev = DEVICE(pdev);
+ int drc_index = drck->get_index(drc);
+ const char *drc_name = drck->get_name(drc);
+ void *fdt = NULL;
+ int fdt_start_offset = 0;
+
+ /* boot-time devices get their device tree node created by SLOF, but for
+ * hotplugged devices we need QEMU to generate it so the guest can fetch
+ * it via RTAS
+ */
+ if (dev->hotplugged) {
+ fdt = spapr_create_pci_child_dt(phb, pdev, drc_index, drc_name,
+ &fdt_start_offset);
+ }
+
+ drck->attach(drc, DEVICE(pdev),
+ fdt, fdt_start_offset, !dev->hotplugged, errp);
+ if (*errp) {
+ g_free(fdt);
+ }
+}
+
+static void spapr_phb_remove_pci_device_cb(DeviceState *dev, void *opaque)
+{
+    /* some guest versions do not wait for completion of a device
+     * cleanup (generally done asynchronously by the kernel) before
+     * signaling to QEMU that the device is safe to remove, but instead
+     * sleep for some 'safe' period of time. unfortunately on a busy host
+ * this sleep isn't guaranteed to be long enough, resulting in
+ * bad things like IRQ lines being left asserted during final
+ * device removal. to deal with this we call reset just prior
+ * to finalizing the device, which will put the device back into
+ * an 'idle' state, as the device cleanup code expects.
+ */
+ pci_device_reset(PCI_DEVICE(dev));
+ object_unparent(OBJECT(dev));
+}
+
+static void spapr_phb_remove_pci_device(sPAPRDRConnector *drc,
+ sPAPRPHBState *phb,
+ PCIDevice *pdev,
+ Error **errp)
+{
+ sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ drck->detach(drc, DEVICE(pdev), spapr_phb_remove_pci_device_cb, phb, errp);
+}
+
+static sPAPRDRConnector *spapr_phb_get_pci_drc(sPAPRPHBState *phb,
+ PCIDevice *pdev)
+{
+ uint32_t busnr = pci_bus_num(PCI_BUS(qdev_get_parent_bus(DEVICE(pdev))));
+ return spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (phb->index << 16) |
+ (busnr << 8) |
+ pdev->devfn);
+}
+
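
A minimal sketch of the DRC id composition used by spapr_phb_get_pci_drc() above and by the connector-creation loop added to spapr_phb_realize() further down; the example values (PHB index 1, slot 5, function 0) are assumptions for illustration only:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_DEVFN(slot, fn)  ((((slot) & 0x1f) << 3) | ((fn) & 0x07))

    /* mirrors (phb->index << 16) | (busnr << 8) | devfn from the patch */
    static uint32_t spapr_pci_drc_id(uint32_t phb_index, uint32_t busnr,
                                     uint32_t devfn)
    {
        return (phb_index << 16) | (busnr << 8) | devfn;
    }

    int main(void)
    {
        /* PHB 1, root bus (busnr 0), slot 5, function 0 -> 0x00010028;
         * the realize() loop pre-creates connectors for busnr 0 only.
         */
        printf("drc id = 0x%08x\n", spapr_pci_drc_id(1, 0, PCI_DEVFN(5, 0)));
        return 0;
    }
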
+static void spapr_phb_hot_plug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ /* if DR is disabled we don't need to do anything in the case of
+ * hotplug or coldplug callbacks
+ */
+ if (!phb->dr_enabled) {
+ /* if this is a hotplug operation initiated by the user
+ * we need to let them know it's not enabled
+ */
+ if (plugged_dev->hotplugged) {
+ error_set(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ }
+ return;
+ }
+
+ g_assert(drc);
+
+ spapr_phb_add_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ if (plugged_dev->hotplugged) {
+ spapr_hotplug_req_add_event(drc);
+ }
+}
+
+static void spapr_phb_hot_unplug_child(HotplugHandler *plug_handler,
+ DeviceState *plugged_dev, Error **errp)
+{
+ sPAPRPHBState *phb = SPAPR_PCI_HOST_BRIDGE(DEVICE(plug_handler));
+ PCIDevice *pdev = PCI_DEVICE(plugged_dev);
+ sPAPRDRConnectorClass *drck;
+ sPAPRDRConnector *drc = spapr_phb_get_pci_drc(phb, pdev);
+ Error *local_err = NULL;
+
+ if (!phb->dr_enabled) {
+ error_set(errp, QERR_BUS_NO_HOTPLUG,
+ object_get_typename(OBJECT(phb)));
+ return;
+ }
+
+ g_assert(drc);
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ if (!drck->release_pending(drc)) {
+ spapr_phb_remove_pci_device(drc, phb, pdev, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
+ }
+ spapr_hotplug_req_remove_event(drc);
+ }
+}
+
static void spapr_phb_realize(DeviceState *dev, Error **errp)
{
SysBusDevice *s = SYS_BUS_DEVICE(dev);
@@ -742,12 +1119,12 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
PCIBus *bus;
uint64_t msi_window_size = 4096;
- if (sphb->index != -1) {
+ if (sphb->index != (uint32_t)-1) {
hwaddr windows_base;
- if ((sphb->buid != -1) || (sphb->dma_liobn != -1)
- || (sphb->mem_win_addr != -1)
- || (sphb->io_win_addr != -1)) {
+ if ((sphb->buid != (uint64_t)-1) || (sphb->dma_liobn != (uint32_t)-1)
+ || (sphb->mem_win_addr != (hwaddr)-1)
+ || (sphb->io_win_addr != (hwaddr)-1)) {
error_setg(errp, "Either \"index\" or other parameters must"
" be specified for PAPR PHB, not both");
return;
@@ -760,7 +1137,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
}
sphb->buid = SPAPR_PCI_BASE_BUID + sphb->index;
- sphb->dma_liobn = SPAPR_PCI_BASE_LIOBN + sphb->index;
+ sphb->dma_liobn = SPAPR_PCI_LIOBN(sphb->index, 0);
windows_base = SPAPR_PCI_WINDOW_BASE
+ sphb->index * SPAPR_PCI_WINDOW_SPACING;
@@ -768,27 +1145,27 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sphb->io_win_addr = windows_base + SPAPR_PCI_IO_WIN_OFF;
}
- if (sphb->buid == -1) {
+ if (sphb->buid == (uint64_t)-1) {
error_setg(errp, "BUID not specified for PHB");
return;
}
- if (sphb->dma_liobn == -1) {
+ if (sphb->dma_liobn == (uint32_t)-1) {
error_setg(errp, "LIOBN not specified for PHB");
return;
}
- if (sphb->mem_win_addr == -1) {
+ if (sphb->mem_win_addr == (hwaddr)-1) {
error_setg(errp, "Memory window address not specified for PHB");
return;
}
- if (sphb->io_win_addr == -1) {
+ if (sphb->io_win_addr == (hwaddr)-1) {
error_setg(errp, "IO window address not specified for PHB");
return;
}
- if (find_phb(spapr, sphb->buid)) {
+ if (spapr_pci_find_phb(spapr, sphb->buid)) {
error_setg(errp, "PCI host bridges must have unique BUIDs");
return;
}
@@ -824,6 +1201,7 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
&sphb->memspace, &sphb->iospace,
PCI_DEVFN(0, 0), PCI_NUM_PINS, TYPE_PCI_BUS);
phb->bus = bus;
+ qbus_set_hotplug_handler(BUS(phb->bus), DEVICE(sphb), NULL);
/*
* Initialize PHB address space.
@@ -880,6 +1258,15 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
sphb->lsi_table[i].irq = irq;
}
+ /* allocate connectors for child PCI devices */
+ if (sphb->dr_enabled) {
+ for (i = 0; i < PCI_SLOT_MAX * 8; i++) {
+ spapr_dr_connector_new(OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI,
+ (sphb->index << 16) | i);
+ }
+ }
+
if (!info->finish_realize) {
error_setg(errp, "finish_realize not defined");
return;
@@ -893,11 +1280,11 @@ static void spapr_phb_realize(DeviceState *dev, Error **errp)
static void spapr_phb_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
sPAPRTCETable *tcet;
+ uint32_t nb_table;
+ nb_table = SPAPR_PCI_DMA32_SIZE >> SPAPR_TCE_PAGE_SHIFT;
tcet = spapr_tce_new_table(DEVICE(sphb), sphb->dma_liobn,
- 0,
- SPAPR_TCE_PAGE_SHIFT,
- 0x40000000 >> SPAPR_TCE_PAGE_SHIFT, false);
+ 0, SPAPR_TCE_PAGE_SHIFT, nb_table, false);
if (!tcet) {
error_setg(errp, "Unable to create TCE table for %s",
sphb->dtbusname);
@@ -936,6 +1323,8 @@ static Property spapr_phb_properties[] = {
DEFINE_PROP_UINT64("io_win_addr", sPAPRPHBState, io_win_addr, -1),
DEFINE_PROP_UINT64("io_win_size", sPAPRPHBState, io_win_size,
SPAPR_PCI_IO_WIN_SIZE),
+ DEFINE_PROP_BOOL("dynamic-reconfiguration", sPAPRPHBState, dr_enabled,
+ true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1049,6 +1438,7 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
PCIHostBridgeClass *hc = PCI_HOST_BRIDGE_CLASS(klass);
DeviceClass *dc = DEVICE_CLASS(klass);
sPAPRPHBClass *spc = SPAPR_PCI_HOST_BRIDGE_CLASS(klass);
+ HotplugHandlerClass *hp = HOTPLUG_HANDLER_CLASS(klass);
hc->root_bus_path = spapr_phb_root_bus_path;
dc->realize = spapr_phb_realize;
@@ -1058,6 +1448,8 @@ static void spapr_phb_class_init(ObjectClass *klass, void *data)
set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
dc->cannot_instantiate_with_device_add_yet = false;
spc->finish_realize = spapr_phb_finish_realize;
+ hp->plug = spapr_phb_hot_plug_child;
+ hp->unplug = spapr_phb_hot_unplug_child;
}
static const TypeInfo spapr_phb_info = {
@@ -1066,6 +1458,10 @@ static const TypeInfo spapr_phb_info = {
.instance_size = sizeof(sPAPRPHBState),
.class_init = spapr_phb_class_init,
.class_size = sizeof(sPAPRPHBClass),
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_HOTPLUG_HANDLER },
+ { }
+ }
};
PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
@@ -1079,45 +1475,11 @@ PCIHostState *spapr_create_phb(sPAPREnvironment *spapr, int index)
return PCI_HOST_BRIDGE(dev);
}
-/* Macros to operate with address in OF binding to PCI */
-#define b_x(x, p, l) (((x) & ((1<<(l))-1)) << (p))
-#define b_n(x) b_x((x), 31, 1) /* 0 if relocatable */
-#define b_p(x) b_x((x), 30, 1) /* 1 if prefetchable */
-#define b_t(x) b_x((x), 29, 1) /* 1 if the address is aliased */
-#define b_ss(x) b_x((x), 24, 2) /* the space code */
-#define b_bbbbbbbb(x) b_x((x), 16, 8) /* bus number */
-#define b_ddddd(x) b_x((x), 11, 5) /* device number */
-#define b_fff(x) b_x((x), 8, 3) /* function number */
-#define b_rrrrrrrr(x) b_x((x), 0, 8) /* register number */
-
-typedef struct sPAPRTCEDT {
- void *fdt;
- int node_off;
-} sPAPRTCEDT;
-
-static int spapr_phb_children_dt(Object *child, void *opaque)
-{
- sPAPRTCEDT *p = opaque;
- sPAPRTCETable *tcet;
-
- tcet = (sPAPRTCETable *) object_dynamic_cast(child, TYPE_SPAPR_TCE_TABLE);
- if (!tcet) {
- return 0;
- }
-
- spapr_dma_dt(p->fdt, p->node_off, "ibm,dma-window",
- tcet->liobn, tcet->bus_offset,
- tcet->nb_table << tcet->page_shift);
- /* Stop after the first window */
-
- return 1;
-}
-
int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t xics_phandle,
void *fdt)
{
- int bus_off, i, j;
+ int bus_off, i, j, ret;
char nodename[256];
uint32_t bus_range[] = { cpu_to_be32(0), cpu_to_be32(0xff) };
const uint64_t mmiosize = memory_region_size(&phb->memwindow);
@@ -1151,6 +1513,7 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
uint32_t interrupt_map_mask[] = {
cpu_to_be32(b_ddddd(-1)|b_fff(0)), 0x0, 0x0, cpu_to_be32(-1)};
uint32_t interrupt_map[PCI_SLOT_MAX * PCI_NUM_PINS][7];
+ sPAPRTCETable *tcet;
/* Start populating the FDT */
sprintf(nodename, "pci@%" PRIx64, phb->buid);
@@ -1159,14 +1522,6 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
return bus_off;
}
-#define _FDT(exp) \
- do { \
- int ret = (exp); \
- if (ret < 0) { \
- return ret; \
- } \
- } while (0)
-
/* Write PHB properties */
_FDT(fdt_setprop_string(fdt, bus_off, "device_type", "pci"));
_FDT(fdt_setprop_string(fdt, bus_off, "compatible", "IBM,Logical_PHB"));
@@ -1203,8 +1558,16 @@ int spapr_populate_pci_dt(sPAPRPHBState *phb,
_FDT(fdt_setprop(fdt, bus_off, "interrupt-map", &interrupt_map,
sizeof(interrupt_map)));
- object_child_foreach(OBJECT(phb), spapr_phb_children_dt,
- &((sPAPRTCEDT){ .fdt = fdt, .node_off = bus_off }));
+ tcet = spapr_tce_find_by_liobn(SPAPR_PCI_LIOBN(phb->index, 0));
+ spapr_dma_dt(fdt, bus_off, "ibm,dma-window",
+ tcet->liobn, tcet->bus_offset,
+ tcet->nb_table << tcet->page_shift);
+
+ ret = spapr_drc_populate_dt(fdt, bus_off, OBJECT(phb),
+ SPAPR_DR_CONNECTOR_TYPE_PCI);
+ if (ret) {
+ return ret;
+ }
return 0;
}
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 0f1ae55828..fa28d43f81 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -35,6 +35,55 @@
#include "qapi-event.h"
#include <libfdt.h>
+#include "hw/ppc/spapr_drc.h"
+
+/* #define DEBUG_SPAPR */
+
+#ifdef DEBUG_SPAPR
+#define DPRINTF(fmt, ...) \
+ do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+ do { } while (0)
+#endif
+
+static sPAPRConfigureConnectorState *spapr_ccs_find(sPAPREnvironment *spapr,
+ uint32_t drc_index)
+{
+ sPAPRConfigureConnectorState *ccs = NULL;
+
+ QTAILQ_FOREACH(ccs, &spapr->ccs_list, next) {
+ if (ccs->drc_index == drc_index) {
+ break;
+ }
+ }
+
+ return ccs;
+}
+
+static void spapr_ccs_add(sPAPREnvironment *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ g_assert(!spapr_ccs_find(spapr, ccs->drc_index));
+ QTAILQ_INSERT_HEAD(&spapr->ccs_list, ccs, next);
+}
+
+static void spapr_ccs_remove(sPAPREnvironment *spapr,
+ sPAPRConfigureConnectorState *ccs)
+{
+ QTAILQ_REMOVE(&spapr->ccs_list, ccs, next);
+ g_free(ccs);
+}
+
+void spapr_ccs_reset_hook(void *opaque)
+{
+ sPAPREnvironment *spapr = opaque;
+ sPAPRConfigureConnectorState *ccs, *ccs_tmp;
+
+ QTAILQ_FOREACH_SAFE(ccs, &spapr->ccs_list, next, ccs_tmp) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+}
static void rtas_display_character(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t token, uint32_t nargs,
@@ -245,6 +294,308 @@ static void rtas_ibm_os_term(PowerPCCPU *cpu,
rtas_st(rets, 0, ret);
}
+static void rtas_set_power_level(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 2 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* we currently only use a single, "live insert" powerdomain for
+ * hotplugged/dlpar'd resources, so the power is always live/full (100)
+ */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static void rtas_get_power_level(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ int32_t power_domain;
+
+ if (nargs != 1 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ /* we currently only use a single, "live insert" powerdomain for
+ * hotplugged/dlpar'd resources, so the power is always live/full (100)
+ */
+ power_domain = rtas_ld(args, 0);
+ if (power_domain != -1) {
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, 100);
+}
+
+static bool sensor_type_is_dr(uint32_t sensor_type)
+{
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ case RTAS_SENSOR_TYPE_DR:
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ return true;
+ }
+
+ return false;
+}
+
+static void rtas_set_indicator(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ uint32_t sensor_state;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+
+ if (nargs != 3 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+ sensor_state = rtas_ld(args, 2);
+
+ if (!sensor_type_is_dr(sensor_type)) {
+ goto out_unimplemented;
+ }
+
+ /* if this is a DR sensor we can assume sensor_index == drc_index */
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_set_indicator: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+
+ switch (sensor_type) {
+ case RTAS_SENSOR_TYPE_ISOLATION_STATE:
+ /* if the guest is configuring a device attached to this
+ * DRC, we should reset the configuration state at this
+ * point since it may no longer be reliable (guest released
+ * device and needs to start over, or unplug occurred so
+ * the FDT is no longer valid)
+ */
+ if (sensor_state == SPAPR_DR_ISOLATION_STATE_ISOLATED) {
+ sPAPRConfigureConnectorState *ccs = spapr_ccs_find(spapr,
+ sensor_index);
+ if (ccs) {
+ spapr_ccs_remove(spapr, ccs);
+ }
+ }
+ drck->set_isolation_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_DR:
+ drck->set_indicator_state(drc, sensor_state);
+ break;
+ case RTAS_SENSOR_TYPE_ALLOCATION_STATE:
+ drck->set_allocation_state(drc, sensor_state);
+ break;
+ default:
+ goto out_unimplemented;
+ }
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ return;
+
+out_unimplemented:
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_set_indicator: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+}
+
+static void rtas_get_sensor_state(PowerPCCPU *cpu, sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint32_t sensor_type;
+ uint32_t sensor_index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ uint32_t entity_sense;
+
+ if (nargs != 2 || nret != 2) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ sensor_type = rtas_ld(args, 0);
+ sensor_index = rtas_ld(args, 1);
+
+ if (sensor_type != RTAS_SENSOR_TYPE_ENTITY_SENSE) {
+ /* currently only DR-related sensors are implemented */
+ DPRINTF("rtas_get_sensor_state: sensor/indicator not implemented: %d\n",
+ sensor_type);
+ rtas_st(rets, 0, RTAS_OUT_NOT_SUPPORTED);
+ return;
+ }
+
+ drc = spapr_dr_connector_by_index(sensor_index);
+ if (!drc) {
+ DPRINTF("rtas_get_sensor_state: invalid sensor/DRC index: %xh\n",
+ sensor_index);
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ entity_sense = drck->entity_sense(drc);
+
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
+ rtas_st(rets, 1, entity_sense);
+}
+
+/* configure-connector work area offsets, int32_t units for field
+ * indexes, bytes for field offset/len values.
+ *
+ * as documented by PAPR+ v2.7, 13.5.3.5
+ */
+#define CC_IDX_NODE_NAME_OFFSET 2
+#define CC_IDX_PROP_NAME_OFFSET 2
+#define CC_IDX_PROP_LEN 3
+#define CC_IDX_PROP_DATA_OFFSET 4
+#define CC_VAL_DATA_OFFSET ((CC_IDX_PROP_DATA_OFFSET + 1) * 4)
+#define CC_WA_LEN 4096
+
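
The handler below accesses the work area through rtas_ld()/rtas_st() with 32-bit field indexes; purely to show how the offsets above line up, the 4 KiB area can be pictured as the following struct (an assumed sketch for illustration, not a type used by the code):

    #include <stdint.h>

    typedef struct ConfigureConnectorWorkArea {
        uint32_t drc_index;         /* [0] in:  DRC index to configure */
        uint32_t reserved;          /* [1] not referenced by the handler */
        uint32_t name_offset;       /* [2] out: byte offset of node/property name */
        uint32_t prop_len;          /* [3] out: property value length in bytes */
        uint32_t prop_data_offset;  /* [4] out: byte offset of property value */
        uint8_t  data[4096 - 5 * sizeof(uint32_t)];  /* names/values, from byte 20 */
    } ConfigureConnectorWorkArea;
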
+static void rtas_ibm_configure_connector(PowerPCCPU *cpu,
+ sPAPREnvironment *spapr,
+ uint32_t token, uint32_t nargs,
+ target_ulong args, uint32_t nret,
+ target_ulong rets)
+{
+ uint64_t wa_addr;
+ uint64_t wa_offset;
+ uint32_t drc_index;
+ sPAPRDRConnector *drc;
+ sPAPRDRConnectorClass *drck;
+ sPAPRConfigureConnectorState *ccs;
+ sPAPRDRCCResponse resp = SPAPR_DR_CC_RESPONSE_CONTINUE;
+ int rc;
+ const void *fdt;
+
+ if (nargs != 2 || nret != 1) {
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
+
+ wa_addr = ((uint64_t)rtas_ld(args, 1) << 32) | rtas_ld(args, 0);
+
+ drc_index = rtas_ld(wa_addr, 0);
+ drc = spapr_dr_connector_by_index(drc_index);
+ if (!drc) {
+ DPRINTF("rtas_ibm_configure_connector: invalid DRC index: %xh\n",
+ drc_index);
+ rc = RTAS_OUT_PARAM_ERROR;
+ goto out;
+ }
+
+ drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+ fdt = drck->get_fdt(drc, NULL);
+
+ ccs = spapr_ccs_find(spapr, drc_index);
+ if (!ccs) {
+ ccs = g_new0(sPAPRConfigureConnectorState, 1);
+ (void)drck->get_fdt(drc, &ccs->fdt_offset);
+ ccs->drc_index = drc_index;
+ spapr_ccs_add(spapr, ccs);
+ }
+
+ do {
+ uint32_t tag;
+ const char *name;
+ const struct fdt_property *prop;
+ int fdt_offset_next, prop_len;
+
+ tag = fdt_next_tag(fdt, ccs->fdt_offset, &fdt_offset_next);
+
+ switch (tag) {
+ case FDT_BEGIN_NODE:
+ ccs->fdt_depth++;
+ name = fdt_get_name(fdt, ccs->fdt_offset, NULL);
+
+ /* provide the name of the next OF node */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_NODE_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_CHILD;
+ break;
+ case FDT_END_NODE:
+ ccs->fdt_depth--;
+ if (ccs->fdt_depth == 0) {
+ /* done sending the device tree, don't need to track
+ * the state anymore
+ */
+ drck->set_configured(drc);
+ spapr_ccs_remove(spapr, ccs);
+ ccs = NULL;
+ resp = SPAPR_DR_CC_RESPONSE_SUCCESS;
+ } else {
+ resp = SPAPR_DR_CC_RESPONSE_PREV_PARENT;
+ }
+ break;
+ case FDT_PROP:
+ prop = fdt_get_property_by_offset(fdt, ccs->fdt_offset,
+ &prop_len);
+ name = fdt_string(fdt, fdt32_to_cpu(prop->nameoff));
+
+ /* provide the name of the next OF property */
+ wa_offset = CC_VAL_DATA_OFFSET;
+ rtas_st(wa_addr, CC_IDX_PROP_NAME_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)name, strlen(name) + 1);
+
+ /* provide the length and value of the OF property. data gets
+ * placed immediately after NULL terminator of the OF property's
+ * name string
+ */
+            wa_offset += strlen(name) + 1;
+ rtas_st(wa_addr, CC_IDX_PROP_LEN, prop_len);
+ rtas_st(wa_addr, CC_IDX_PROP_DATA_OFFSET, wa_offset);
+ rtas_st_buffer_direct(wa_addr + wa_offset, CC_WA_LEN - wa_offset,
+ (uint8_t *)((struct fdt_property *)prop)->data,
+ prop_len);
+ resp = SPAPR_DR_CC_RESPONSE_NEXT_PROPERTY;
+ break;
+ case FDT_END:
+ resp = SPAPR_DR_CC_RESPONSE_ERROR;
+ default:
+ /* keep seeking for an actionable tag */
+ break;
+ }
+ if (ccs) {
+ ccs->fdt_offset = fdt_offset_next;
+ }
+ } while (resp == SPAPR_DR_CC_RESPONSE_CONTINUE);
+
+ rc = resp;
+out:
+ rtas_st(rets, 0, rc);
+}
+
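
Since the handler returns at most one node or property fragment per invocation, a guest has to keep re-issuing the call until it reports completion. A hypothetical guest-side sketch follows (not QEMU code; do_rtas_call() is an assumed stand-in for the guest's RTAS entry glue, and the work-area handling follows the CC_* layout above):

    #include <stdint.h>

    /* assumed stand-in: issue an RTAS call with 2 inputs, 1 output, return rets[0] */
    extern int do_rtas_call(int token, uint32_t arg0, uint32_t arg1);

    int fetch_dt_for_drc(int cc_token, uint32_t drc_index, uint32_t *wa,
                         uint64_t wa_phys)
    {
        int rc;

        wa[0] = drc_index;   /* field [0]: connector to configure */
        wa[1] = 0;

        do {
            /* args[0] carries the low 32 bits of the work-area address,
             * args[1] the high 32 bits, matching the handler above
             */
            rc = do_rtas_call(cc_token, (uint32_t)wa_phys,
                              (uint32_t)(wa_phys >> 32));

            /* rc > 0: a node/property fragment is in the work area and must
             * be parsed before the next call; rc == 0: tree complete;
             * rc < 0: error
             */
        } while (rc > 0);

        return rc;
    }
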
static struct rtas_call {
const char *name;
spapr_rtas_fn fn;
@@ -370,6 +721,16 @@ static void core_rtas_register_types(void)
rtas_ibm_set_system_parameter);
spapr_rtas_register(RTAS_IBM_OS_TERM, "ibm,os-term",
rtas_ibm_os_term);
+ spapr_rtas_register(RTAS_SET_POWER_LEVEL, "set-power-level",
+ rtas_set_power_level);
+ spapr_rtas_register(RTAS_GET_POWER_LEVEL, "get-power-level",
+ rtas_get_power_level);
+ spapr_rtas_register(RTAS_SET_INDICATOR, "set-indicator",
+ rtas_set_indicator);
+ spapr_rtas_register(RTAS_GET_SENSOR_STATE, "get-sensor-state",
+ rtas_get_sensor_state);
+ spapr_rtas_register(RTAS_IBM_CONFIGURE_CONNECTOR, "ibm,configure-connector",
+ rtas_ibm_configure_connector);
}
type_init(core_rtas_register_types)
diff --git a/hw/ppc/spapr_vio.c b/hw/ppc/spapr_vio.c
index 1360b97ab0..174033dd41 100644
--- a/hw/ppc/spapr_vio.c
+++ b/hw/ppc/spapr_vio.c
@@ -469,7 +469,7 @@ static void spapr_vio_busdev_realize(DeviceState *qdev, Error **errp)
}
if (pc->rtce_window_size) {
- uint32_t liobn = SPAPR_VIO_BASE_LIOBN | dev->reg;
+ uint32_t liobn = SPAPR_VIO_LIOBN(dev->reg);
memory_region_init(&dev->mrroot, OBJECT(dev), "iommu-spapr-root",
ram_size);