Diffstat (limited to 'hw/ppc/spapr.c')
-rw-r--r--  hw/ppc/spapr.c | 507
1 file changed, 436 insertions(+), 71 deletions(-)
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index e465d7ac98..c3bb991605 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -63,6 +63,7 @@
 #include "qemu/error-report.h"
 #include "trace.h"
 #include "hw/nmi.h"
+#include "hw/intc/intc.h"
 
 #include "hw/compat.h"
 #include "qemu/cutils.h"
@@ -95,37 +96,66 @@
 
 #define HTAB_SIZE(spapr) (1ULL << ((spapr)->htab_shift))
 
-static XICSState *try_create_xics(const char *type, int nr_servers,
-                                  int nr_irqs, Error **errp)
+static int try_create_xics(sPAPRMachineState *spapr, const char *type_ics,
+                           const char *type_icp, int nr_servers,
+                           int nr_irqs, Error **errp)
 {
-    Error *err = NULL;
-    DeviceState *dev;
+    XICSFabric *xi = XICS_FABRIC(spapr);
+    Error *err = NULL, *local_err = NULL;
+    ICSState *ics = NULL;
+    int i;
 
-    dev = qdev_create(NULL, type);
-    qdev_prop_set_uint32(dev, "nr_servers", nr_servers);
-    qdev_prop_set_uint32(dev, "nr_irqs", nr_irqs);
-    object_property_set_bool(OBJECT(dev), true, "realized", &err);
+    ics = ICS_SIMPLE(object_new(type_ics));
+    object_property_add_child(OBJECT(spapr), "ics", OBJECT(ics), NULL);
+    object_property_set_int(OBJECT(ics), nr_irqs, "nr-irqs", &err);
+    object_property_add_const_link(OBJECT(ics), "xics", OBJECT(xi), NULL);
+    object_property_set_bool(OBJECT(ics), true, "realized", &local_err);
+    error_propagate(&err, local_err);
     if (err) {
-        error_propagate(errp, err);
-        object_unparent(OBJECT(dev));
-        return NULL;
+        goto error;
+    }
+
+    spapr->icps = g_malloc0(nr_servers * sizeof(ICPState));
+    spapr->nr_servers = nr_servers;
+
+    for (i = 0; i < nr_servers; i++) {
+        ICPState *icp = &spapr->icps[i];
+
+        object_initialize(icp, sizeof(*icp), type_icp);
+        object_property_add_child(OBJECT(spapr), "icp[*]", OBJECT(icp), NULL);
+        object_property_add_const_link(OBJECT(icp), "xics", OBJECT(xi), NULL);
+        object_property_set_bool(OBJECT(icp), true, "realized", &err);
+        if (err) {
+            goto error;
+        }
+        object_unref(OBJECT(icp));
     }
-    return XICS_COMMON(dev);
+
+    spapr->ics = ics;
+    return 0;
+
+error:
+    error_propagate(errp, err);
+    if (ics) {
+        object_unparent(OBJECT(ics));
+    }
+    return -1;
 }
 
-static XICSState *xics_system_init(MachineState *machine,
-                                   int nr_servers, int nr_irqs, Error **errp)
+static int xics_system_init(MachineState *machine,
+                            int nr_servers, int nr_irqs, Error **errp)
 {
-    XICSState *xics = NULL;
+    int rc = -1;
 
     if (kvm_enabled()) {
         Error *err = NULL;
 
-        if (machine_kernel_irqchip_allowed(machine)) {
-            xics = try_create_xics(TYPE_XICS_SPAPR_KVM, nr_servers, nr_irqs,
-                                   &err);
+        if (machine_kernel_irqchip_allowed(machine) &&
+            !xics_kvm_init(SPAPR_MACHINE(machine), errp)) {
+            rc = try_create_xics(SPAPR_MACHINE(machine), TYPE_ICS_KVM,
+                                 TYPE_KVM_ICP, nr_servers, nr_irqs, &err);
         }
-        if (machine_kernel_irqchip_required(machine) && !xics) {
+        if (machine_kernel_irqchip_required(machine) && rc < 0) {
            error_reportf_err(err,
                              "kernel_irqchip requested but unavailable: ");
         } else {
@@ -133,11 +163,13 @@ static XICSState *xics_system_init(MachineState *machine,
         }
     }
 
-    if (!xics) {
-        xics = try_create_xics(TYPE_XICS_SPAPR, nr_servers, nr_irqs, errp);
+    if (rc < 0) {
+        xics_spapr_init(SPAPR_MACHINE(machine), errp);
+        rc = try_create_xics(SPAPR_MACHINE(machine), TYPE_ICS_SIMPLE,
+                             TYPE_ICP, nr_servers, nr_irqs, errp);
     }
 
-    return xics;
+    return rc;
 }
 
 static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
@@ -356,20 +388,36 @@ static void spapr_populate_pa_features(CPUPPCState *env, void *fdt, int offset)
         0x80, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x80, 0x00,
         0x80, 0x00, 0x80, 0x00, 0x00, 0x00 };
+    /* Currently we don't advertise any of the "new" ISAv3.00 functionality */
+    uint8_t pa_features_300[] = { 64, 0,
+        0xf6, 0x1f, 0xc7, 0xc0, 0x80, 0xf0, /* 0 - 5 */
+        0x80, 0x00, 0x00, 0x00, 0x00, 0x00, /* 6 - 11 */
+        0x00, 0x00, 0x00, 0x00, 0x80, 0x00, /* 12 - 17 */
+        0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 18 - 23 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 24 - 29 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 30 - 35 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 36 - 41 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 - 47 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 - 53 */
+        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 54 - 59 */
+        0x00, 0x00, 0x00, 0x00 };           /* 60 - 63 */
+
     uint8_t *pa_features;
     size_t pa_size;
 
-    switch (env->mmu_model) {
-    case POWERPC_MMU_2_06:
-    case POWERPC_MMU_2_06a:
+    switch (POWERPC_MMU_VER(env->mmu_model)) {
+    case POWERPC_MMU_VER_2_06:
         pa_features = pa_features_206;
         pa_size = sizeof(pa_features_206);
         break;
-    case POWERPC_MMU_2_07:
-    case POWERPC_MMU_2_07a:
+    case POWERPC_MMU_VER_2_07:
         pa_features = pa_features_207;
         pa_size = sizeof(pa_features_207);
         break;
+    case POWERPC_MMU_VER_3_00:
+        pa_features = pa_features_300;
+        pa_size = sizeof(pa_features_300);
+        break;
     default:
         return;
     }
@@ -924,7 +972,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
     _FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
 
     /* /interrupt controller */
-    spapr_dt_xics(spapr->xics, fdt, PHANDLE_XICP);
+    spapr_dt_xics(spapr->nr_servers, fdt, PHANDLE_XICP);
 
     ret = spapr_populate_memory(spapr, fdt);
     if (ret < 0) {
@@ -958,7 +1006,7 @@ static void *spapr_build_fdt(sPAPRMachineState *spapr,
         _FDT(spapr_drc_populate_dt(fdt, 0, NULL, SPAPR_DR_CONNECTOR_TYPE_LMB));
     }
 
-    if (mc->query_hotpluggable_cpus) {
+    if (mc->has_hotpluggable_cpus) {
         int offset = fdt_path_offset(fdt, "/cpus");
         ret = spapr_drc_populate_dt(fdt, offset, NULL,
                                     SPAPR_DR_CONNECTOR_TYPE_CPU);
@@ -1010,6 +1058,9 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
 {
     CPUPPCState *env = &cpu->env;
 
+    /* The TCG path should also be holding the BQL at this point */
+    g_assert(qemu_mutex_iothread_locked());
+
     if (msr_pr) {
         hcall_dprintf("Hypercall made with MSR[PR]=1\n");
         env->gpr[3] = H_PRIVILEGE;
@@ -1018,6 +1069,13 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp,
     }
 }
 
+static uint64_t spapr_get_patbe(PPCVirtualHypervisor *vhyp)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    return spapr->patb_entry;
+}
+
 #define HPTE(_table, _i)   (void *)(((uint64_t *)(_table)) + ((_i) * 2))
 #define HPTE_VALID(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_VALID)
 #define HPTE_DIRTY(_hpte)  (tswap64(*((uint64_t *)(_hpte))) & HPTE64_V_HPTE_DIRTY)
@@ -1050,6 +1108,62 @@ static void close_htab_fd(sPAPRMachineState *spapr)
     spapr->htab_fd = -1;
 }
 
+static hwaddr spapr_hpt_mask(PPCVirtualHypervisor *vhyp)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    return HTAB_SIZE(spapr) / HASH_PTEG_SIZE_64 - 1;
+}
+
+static const ppc_hash_pte64_t *spapr_map_hptes(PPCVirtualHypervisor *vhyp,
+                                               hwaddr ptex, int n)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
+
+    if (!spapr->htab) {
+        /*
+         * HTAB is controlled by KVM. Fetch into temporary buffer
+         */
+        ppc_hash_pte64_t *hptes = g_malloc(n * HASH_PTE_SIZE_64);
+        kvmppc_read_hptes(hptes, ptex, n);
+        return hptes;
+    }
+
+    /*
+     * HTAB is controlled by QEMU. Just point to the internally
+     * accessible PTEG.
+     */
+    return (const ppc_hash_pte64_t *)(spapr->htab + pte_offset);
+}
+
+static void spapr_unmap_hptes(PPCVirtualHypervisor *vhyp,
+                              const ppc_hash_pte64_t *hptes,
+                              hwaddr ptex, int n)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+
+    if (!spapr->htab) {
+        g_free((void *)hptes);
+    }
+
+    /* Nothing to do for qemu managed HPT */
+}
+
+static void spapr_store_hpte(PPCVirtualHypervisor *vhyp, hwaddr ptex,
+                             uint64_t pte0, uint64_t pte1)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(vhyp);
+    hwaddr offset = ptex * HASH_PTE_SIZE_64;
+
+    if (!spapr->htab) {
+        kvmppc_write_hpte(ptex, pte0, pte1);
+    } else {
+        stq_p(spapr->htab + offset, pte0);
+        stq_p(spapr->htab + offset + HASH_PTE_SIZE_64 / 2, pte1);
+    }
+}
+
 static int spapr_hpt_shift_for_ramsize(uint64_t ramsize)
 {
     int shift;
@@ -1141,6 +1255,8 @@ static void ppc_spapr_reset(void)
     /* Check for unknown sysbus devices */
     foreach_dynamic_sysbus_device(find_unknown_sysbus_device, NULL);
 
+    spapr->patb_entry = 0;
+
     /* Allocate and/or reset the hash page table */
     spapr_reallocate_hpt(spapr,
                          spapr_hpt_shift_for_ramsize(machine->maxram_size),
@@ -1249,6 +1365,13 @@ static int spapr_post_load(void *opaque, int version_id)
     sPAPRMachineState *spapr = (sPAPRMachineState *)opaque;
     int err = 0;
 
+    if (!object_dynamic_cast(OBJECT(spapr->ics), TYPE_ICS_KVM)) {
+        int i;
+        for (i = 0; i < spapr->nr_servers; i++) {
+            icp_resend(&spapr->icps[i]);
+        }
+    }
+
     /* In earlier versions, there was no separate qdev for the PAPR
      * RTC, so the RTC offset was stored directly in sPAPREnvironment.
      * So when migrating from those versions, poke the incoming offset
@@ -1327,6 +1450,24 @@ static const VMStateDescription vmstate_spapr_ov5_cas = {
     },
 };
 
+static bool spapr_patb_entry_needed(void *opaque)
+{
+    sPAPRMachineState *spapr = opaque;
+
+    return !!spapr->patb_entry;
+}
+
+static const VMStateDescription vmstate_spapr_patb_entry = {
+    .name = "spapr_patb_entry",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .needed = spapr_patb_entry_needed,
+    .fields = (VMStateField[]) {
+        VMSTATE_UINT64(patb_entry, sPAPRMachineState),
+        VMSTATE_END_OF_LIST()
+    },
+};
+
 static const VMStateDescription vmstate_spapr = {
     .name = "spapr",
     .version_id = 3,
@@ -1344,6 +1485,7 @@ static const VMStateDescription vmstate_spapr = {
     },
     .subsections = (const VMStateDescription*[]) {
         &vmstate_spapr_ov5_cas,
+        &vmstate_spapr_patb_entry,
         NULL
     }
 };
@@ -1751,13 +1893,28 @@ static void spapr_validate_node_memory(MachineState *machine, Error **errp)
     }
 }
 
+/* find cpu slot in machine->possible_cpus by core_id */
+static CPUArchId *spapr_find_cpu_slot(MachineState *ms, uint32_t id, int *idx)
+{
+    int index = id / smp_threads;
+
+    if (index >= ms->possible_cpus->len) {
+        return NULL;
+    }
+    if (idx) {
+        *idx = index;
+    }
+    return &ms->possible_cpus->cpus[index];
+}
+
 static void spapr_init_cpus(sPAPRMachineState *spapr)
 {
     MachineState *machine = MACHINE(spapr);
     MachineClass *mc = MACHINE_GET_CLASS(machine);
     char *type = spapr_get_cpu_core_type(machine->cpu_model);
     int smt = kvmppc_smt_threads();
-    int spapr_max_cores, spapr_cores;
+    const CPUArchIdList *possible_cpus;
+    int boot_cores_nr = smp_cpus / smp_threads;
     int i;
 
     if (!type) {
@@ -1765,7 +1922,8 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
         exit(1);
     }
 
-    if (mc->query_hotpluggable_cpus) {
+    possible_cpus = mc->possible_cpu_arch_ids(machine);
+    if (mc->has_hotpluggable_cpus) {
         if (smp_cpus % smp_threads) {
             error_report("smp_cpus (%u) must be multiple of threads (%u)",
                          smp_cpus, smp_threads);
@@ -1776,24 +1934,18 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
                          max_cpus, smp_threads);
             exit(1);
         }
-
-        spapr_max_cores = max_cpus / smp_threads;
-        spapr_cores = smp_cpus / smp_threads;
     } else {
         if (max_cpus != smp_cpus) {
             error_report("This machine version does not support CPU hotplug");
             exit(1);
         }
-
-        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
-        spapr_cores = spapr_max_cores;
+        boot_cores_nr = possible_cpus->len;
     }
 
-    spapr->cores = g_new0(Object *, spapr_max_cores);
-    for (i = 0; i < spapr_max_cores; i++) {
+    for (i = 0; i < possible_cpus->len; i++) {
         int core_id = i * smp_threads;
 
-        if (mc->query_hotpluggable_cpus) {
+        if (mc->has_hotpluggable_cpus) {
             sPAPRDRConnector *drc =
                 spapr_dr_connector_new(OBJECT(spapr),
                                        SPAPR_DR_CONNECTOR_TYPE_CPU,
@@ -1802,7 +1954,7 @@ static void spapr_init_cpus(sPAPRMachineState *spapr)
             qemu_register_reset(spapr_drc_reset, drc);
         }
 
-        if (i < spapr_cores) {
+        if (i < boot_cores_nr) {
             Object *core = object_new(type);
             int nr_threads = smp_threads;
@@ -1889,9 +2041,8 @@ static void ppc_spapr_init(MachineState *machine)
     load_limit = MIN(spapr->rma_size, RTAS_MAX_ADDR) - FW_OVERHEAD;
 
     /* Set up Interrupt Controller before we create the VCPUs */
-    spapr->xics = xics_system_init(machine,
-                                   DIV_ROUND_UP(max_cpus * smt, smp_threads),
-                                   XICS_IRQS_SPAPR, &error_fatal);
+    xics_system_init(machine, DIV_ROUND_UP(max_cpus * smt, smp_threads),
+                     XICS_IRQS_SPAPR, &error_fatal);
 
     /* Set up containers for ibm,client-set-architecture negotiated options */
     spapr->ov5 = spapr_ovec_new();
@@ -2357,6 +2508,7 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
     uint64_t align = memory_region_get_alignment(mr);
     uint64_t size = memory_region_size(mr);
     uint64_t addr;
+    char *mem_dev;
 
     if (size % SPAPR_MEMORY_BLOCK_SIZE) {
         error_setg(&local_err, "Hotplugged memory size must be a multiple of "
@@ -2364,6 +2516,13 @@ static void spapr_memory_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
         goto out;
     }
 
+    mem_dev = object_property_get_str(OBJECT(dimm), PC_DIMM_MEMDEV_PROP, NULL);
+    if (mem_dev && !kvmppc_is_mem_backend_page_size_ok(mem_dev)) {
+        error_setg(&local_err, "Memory backend has bad page size. "
+                   "Use 'memory-backend-file' with correct mem-path.");
+        goto out;
+    }
+
     pc_dimm_memory_plug(dev, &ms->hotplug_memory, mr, align, &local_err);
     if (local_err) {
         goto out;
     }
@@ -2488,6 +2647,165 @@ void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
     return fdt;
 }
 
+static void spapr_core_unplug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                              Error **errp)
+{
+    MachineState *ms = MACHINE(qdev_get_machine());
+    CPUCore *cc = CPU_CORE(dev);
+    CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
+
+    core_slot->cpu = NULL;
+    object_unparent(OBJECT(dev));
+}
+
+static void spapr_core_release(DeviceState *dev, void *opaque)
+{
+    HotplugHandler *hotplug_ctrl;
+
+    hotplug_ctrl = qdev_get_hotplug_handler(dev);
+    hotplug_handler_unplug(hotplug_ctrl, dev, &error_abort);
+}
+
+static
+void spapr_core_unplug_request(HotplugHandler *hotplug_dev, DeviceState *dev,
+                               Error **errp)
+{
+    int index;
+    sPAPRDRConnector *drc;
+    sPAPRDRConnectorClass *drck;
+    Error *local_err = NULL;
+    CPUCore *cc = CPU_CORE(dev);
+    int smt = kvmppc_smt_threads();
+
+    if (!spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index)) {
+        error_setg(errp, "Unable to find CPU core with core-id: %d",
+                   cc->core_id);
+        return;
+    }
+    if (index == 0) {
+        error_setg(errp, "Boot CPU core may not be unplugged");
+        return;
+    }
+
+    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+    g_assert(drc);
+
+    drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+    drck->detach(drc, dev, spapr_core_release, NULL, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    spapr_hotplug_req_remove_by_index(drc);
+}
+
+static void spapr_core_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                            Error **errp)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
+    MachineClass *mc = MACHINE_GET_CLASS(spapr);
+    sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
+    CPUCore *cc = CPU_CORE(dev);
+    CPUState *cs = CPU(core->threads);
+    sPAPRDRConnector *drc;
+    Error *local_err = NULL;
+    void *fdt = NULL;
+    int fdt_offset = 0;
+    int smt = kvmppc_smt_threads();
+    CPUArchId *core_slot;
+    int index;
+
+    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+    if (!core_slot) {
+        error_setg(errp, "Unable to find CPU core with core-id: %d",
+                   cc->core_id);
+        return;
+    }
+    drc = spapr_dr_connector_by_id(SPAPR_DR_CONNECTOR_TYPE_CPU, index * smt);
+
+    g_assert(drc || !mc->has_hotpluggable_cpus);
+
+    /*
+     * Setup CPU DT entries only for hotplugged CPUs. For boot time or
+     * coldplugged CPUs DT entries are setup in spapr_build_fdt().
+     */
+    if (dev->hotplugged) {
+        fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
+    }
+
+    if (drc) {
+        sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+        drck->attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, &local_err);
+        if (local_err) {
+            g_free(fdt);
+            error_propagate(errp, local_err);
+            return;
+        }
+    }
+
+    if (dev->hotplugged) {
+        /*
+         * Send hotplug notification interrupt to the guest only in case
+         * of hotplugged CPUs.
+         */
+        spapr_hotplug_req_add_by_index(drc);
+    } else {
+        /*
+         * Set the right DRC states for cold plugged CPU.
+         */
+        if (drc) {
+            sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
+            drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
+            drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
+        }
+    }
+    core_slot->cpu = OBJECT(dev);
+}
+
+static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                                Error **errp)
+{
+    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
+    Error *local_err = NULL;
+    CPUCore *cc = CPU_CORE(dev);
+    char *base_core_type = spapr_get_cpu_core_type(machine->cpu_model);
+    const char *type = object_get_typename(OBJECT(dev));
+    CPUArchId *core_slot;
+    int index;
+
+    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
+        error_setg(&local_err, "CPU hotplug not supported for this machine");
+        goto out;
+    }
+
+    if (strcmp(base_core_type, type)) {
+        error_setg(&local_err, "CPU core type should be %s", base_core_type);
+        goto out;
+    }
+
+    if (cc->core_id % smp_threads) {
+        error_setg(&local_err, "invalid core id %d", cc->core_id);
+        goto out;
+    }
+
+    core_slot = spapr_find_cpu_slot(MACHINE(hotplug_dev), cc->core_id, &index);
+    if (!core_slot) {
+        error_setg(&local_err, "core id %d out of range", cc->core_id);
+        goto out;
+    }
+
+    if (core_slot->cpu) {
+        error_setg(&local_err, "core %d already populated", cc->core_id);
+        goto out;
+    }
+
+out:
+    g_free(base_core_type);
+    error_propagate(errp, local_err);
+}
+
 static void spapr_machine_device_plug(HotplugHandler *hotplug_dev,
                                       DeviceState *dev, Error **errp)
 {
@@ -2550,7 +2868,7 @@ static void spapr_machine_device_unplug(HotplugHandler *hotplug_dev,
             error_setg(errp, "Memory hot unplug not supported for this guest");
         }
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
-        if (!mc->query_hotpluggable_cpus) {
+        if (!mc->has_hotpluggable_cpus) {
             error_setg(errp, "CPU hot unplug not supported on this machine");
             return;
         }
@@ -2577,11 +2895,11 @@ static void spapr_machine_device_unplug_request(HotplugHandler *hotplug_dev,
             error_setg(errp, "Memory hot unplug not supported for this guest");
         }
     } else if (object_dynamic_cast(OBJECT(dev), TYPE_SPAPR_CPU_CORE)) {
-        if (!mc->query_hotpluggable_cpus) {
+        if (!mc->has_hotpluggable_cpus) {
             error_setg(errp, "CPU hot unplug not supported on this machine");
             return;
         }
-        spapr_core_unplug(hotplug_dev, dev, errp);
+        spapr_core_unplug_request(hotplug_dev, dev, errp);
     }
 }
 
@@ -2610,35 +2928,34 @@ static unsigned spapr_cpu_index_to_socket_id(unsigned cpu_index)
     return cpu_index / smp_threads / smp_cores;
 }
 
-static HotpluggableCPUList *spapr_query_hotpluggable_cpus(MachineState *machine)
+static const CPUArchIdList *spapr_possible_cpu_arch_ids(MachineState *machine)
 {
     int i;
-    HotpluggableCPUList *head = NULL;
-    sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
     int spapr_max_cores = max_cpus / smp_threads;
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+
+    if (!mc->has_hotpluggable_cpus) {
+        spapr_max_cores = QEMU_ALIGN_UP(smp_cpus, smp_threads) / smp_threads;
+    }
+    if (machine->possible_cpus) {
+        assert(machine->possible_cpus->len == spapr_max_cores);
+        return machine->possible_cpus;
+    }
 
-    for (i = 0; i < spapr_max_cores; i++) {
-        HotpluggableCPUList *list_item = g_new0(typeof(*list_item), 1);
-        HotpluggableCPU *cpu_item = g_new0(typeof(*cpu_item), 1);
-        CpuInstanceProperties *cpu_props = g_new0(typeof(*cpu_props), 1);
+    machine->possible_cpus = g_malloc0(sizeof(CPUArchIdList) +
+                             sizeof(CPUArchId) * spapr_max_cores);
+    machine->possible_cpus->len = spapr_max_cores;
+    for (i = 0; i < machine->possible_cpus->len; i++) {
+        int core_id = i * smp_threads;
 
-        cpu_item->type = spapr_get_cpu_core_type(machine->cpu_model);
-        cpu_item->vcpus_count = smp_threads;
-        cpu_props->has_core_id = true;
-        cpu_props->core_id = i * smp_threads;
+        machine->possible_cpus->cpus[i].vcpus_count = smp_threads;
+        machine->possible_cpus->cpus[i].arch_id = core_id;
+        machine->possible_cpus->cpus[i].props.has_core_id = true;
+        machine->possible_cpus->cpus[i].props.core_id = core_id;
         /* TODO: add 'has_node/node' here to describe
            to which node core belongs */
-
-        cpu_item->props = cpu_props;
-        if (spapr->cores[i]) {
-            cpu_item->has_qom_path = true;
-            cpu_item->qom_path = object_get_canonical_path(spapr->cores[i]);
-        }
-        list_item->value = cpu_item;
-        list_item->next = head;
-        head = list_item;
     }
-    return head;
+    return machine->possible_cpus;
 }
 
 static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
@@ -2693,6 +3010,40 @@ static void spapr_phb_placement(sPAPRMachineState *spapr, uint32_t index,
     *mmio64 = SPAPR_PCI_BASE + (index + 1) * SPAPR_PCI_MEM64_WIN_SIZE;
 }
 
+static ICSState *spapr_ics_get(XICSFabric *dev, int irq)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
+
+    return ics_valid_irq(spapr->ics, irq) ? spapr->ics : NULL;
+}
+
+static void spapr_ics_resend(XICSFabric *dev)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(dev);
+
+    ics_resend(spapr->ics);
+}
+
+static ICPState *spapr_icp_get(XICSFabric *xi, int server)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(xi);
+
+    return (server < spapr->nr_servers) ? &spapr->icps[server] : NULL;
+}
+
+static void spapr_pic_print_info(InterruptStatsProvider *obj,
+                                 Monitor *mon)
+{
+    sPAPRMachineState *spapr = SPAPR_MACHINE(obj);
+    int i;
+
+    for (i = 0; i < spapr->nr_servers; i++) {
+        icp_pic_print_info(&spapr->icps[i], mon);
+    }
+
+    ics_pic_print_info(spapr->ics, mon);
+}
+
 static void spapr_machine_class_init(ObjectClass *oc, void *data)
 {
     MachineClass *mc = MACHINE_CLASS(oc);
@@ -2701,6 +3052,8 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     NMIClass *nc = NMI_CLASS(oc);
     HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
     PPCVirtualHypervisorClass *vhc = PPC_VIRTUAL_HYPERVISOR_CLASS(oc);
+    XICSFabricClass *xic = XICS_FABRIC_CLASS(oc);
+    InterruptStatsProviderClass *ispc = INTERRUPT_STATS_PROVIDER_CLASS(oc);
 
     mc->desc = "pSeries Logical Partition (PAPR compliant)";
 
@@ -2712,7 +3065,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     mc->init = ppc_spapr_init;
     mc->reset = ppc_spapr_reset;
     mc->block_default_type = IF_SCSI;
-    mc->max_cpus = 255;
+    mc->max_cpus = 1024;
     mc->no_parallel = 1;
     mc->default_boot_order = "";
     mc->default_ram_size = 512 * M_BYTE;
@@ -2724,15 +3077,25 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data)
     hc->plug = spapr_machine_device_plug;
     hc->unplug = spapr_machine_device_unplug;
     mc->cpu_index_to_socket_id = spapr_cpu_index_to_socket_id;
+    mc->possible_cpu_arch_ids = spapr_possible_cpu_arch_ids;
     hc->unplug_request = spapr_machine_device_unplug_request;
 
     smc->dr_lmb_enabled = true;
     smc->tcg_default_cpu = "POWER8";
-    mc->query_hotpluggable_cpus = spapr_query_hotpluggable_cpus;
+    mc->has_hotpluggable_cpus = true;
     fwc->get_dev_path = spapr_get_fw_dev_path;
     nc->nmi_monitor_handler = spapr_nmi;
     smc->phb_placement = spapr_phb_placement;
     vhc->hypercall = emulate_spapr_hypercall;
+    vhc->hpt_mask = spapr_hpt_mask;
+    vhc->map_hptes = spapr_map_hptes;
+    vhc->unmap_hptes = spapr_unmap_hptes;
+    vhc->store_hpte = spapr_store_hpte;
+    vhc->get_patbe = spapr_get_patbe;
+    xic->ics_get = spapr_ics_get;
+    xic->ics_resend = spapr_ics_resend;
+    xic->icp_get = spapr_icp_get;
+    ispc->print_info = spapr_pic_print_info;
 }
 
 static const TypeInfo spapr_machine_info = {
@@ -2749,6 +3112,8 @@ static const TypeInfo spapr_machine_info = {
         { TYPE_NMI },
         { TYPE_HOTPLUG_HANDLER },
         { TYPE_PPC_VIRTUAL_HYPERVISOR },
+        { TYPE_XICS_FABRIC },
+        { TYPE_INTERRUPT_STATS_PROVIDER },
         { }
     },
 };
@@ -2928,7 +3293,7 @@ static void spapr_machine_2_6_instance_options(MachineState *machine)
 
 static void spapr_machine_2_6_class_options(MachineClass *mc)
 {
     spapr_machine_2_7_class_options(mc);
-    mc->query_hotpluggable_cpus = NULL;
+    mc->has_hotpluggable_cpus = false;
     SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_6);
 }
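
Illustrative note (not part of the patch): the XICSFabric QOM interface used above replaces the old spapr->xics pointer, so common XICS code asks the machine which ICS owns an IRQ and which ICP presents to a given server. The sketch below only shows that indirection; the function name and its body are hypothetical, while ics_get()/icp_get() are the interface members implemented here by spapr_ics_get() and spapr_icp_get().

/* Hypothetical caller, for illustration only: resolve an IRQ number and a
 * server number through the machine's XICSFabric callbacks.  The class
 * pointer is passed in explicitly to keep the sketch self-contained. */
static bool example_resolve(XICSFabric *xi, XICSFabricClass *xic,
                            int irq, int server)
{
    ICSState *ics = xic->ics_get(xi, irq);     /* NULL if irq is out of range */
    ICPState *icp = xic->icp_get(xi, server);  /* NULL if server >= nr_servers */

    return ics != NULL && icp != NULL;
}

A NULL return from either callback is a normal answer ("nobody owns this"), which is why spapr_ics_get() and spapr_icp_get() above bounds-check rather than assert.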
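Similarly, the new PPCVirtualHypervisor hooks (hpt_mask, map_hptes, unmap_hptes, store_hpte, get_patbe) let MMU code walk the hash page table without knowing whether it lives in QEMU memory (spapr->htab) or inside KVM. The following is a minimal sketch of the intended calling pattern, under stated assumptions: it presumes the QEMU-internal mmu-hash64.h definitions (HPTES_PER_GROUP, HPTE64_V_VALID, and a ppc_hash_pte64_t with pte0/pte1 fields), and the function name and the way the class pointer is obtained are illustrative, not code from this series.

/* Illustrative sketch only: count valid HPTEs in the PTEG selected by
 * 'hash', going through the hooks wired up in spapr_machine_class_init(). */
static int example_count_valid_hptes(PPCVirtualHypervisor *vhyp,
                                     PPCVirtualHypervisorClass *vhc,
                                     uint64_t hash)
{
    /* hpt_mask() returns (number of PTEGs - 1); masking the hash picks a
     * PTEG, and multiplying converts it to an HPTE index. */
    hwaddr ptex = (hash & vhc->hpt_mask(vhyp)) * HPTES_PER_GROUP;
    const ppc_hash_pte64_t *hptes;
    int i, valid = 0;

    /* Either a direct pointer into spapr->htab, or a temporary buffer
     * filled via kvmppc_read_hptes() when KVM owns the hash table. */
    hptes = vhc->map_hptes(vhyp, ptex, HPTES_PER_GROUP);
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        if (ldq_p(&hptes[i].pte0) & HPTE64_V_VALID) {
            valid++;
        }
    }
    /* Always pair the map with an unmap so the KVM-side buffer is freed;
     * spapr_unmap_hptes() is a no-op when QEMU manages the table itself. */
    vhc->unmap_hptes(vhyp, hptes, ptex, HPTES_PER_GROUP);

    return valid;
}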