Diffstat (limited to 'hw')
-rw-r--r--  hw/misc/macio/macio.c        |  39
-rw-r--r--  hw/pci-host/trace-events     |   2
-rw-r--r--  hw/pci-host/uninorth.c       |  58
-rw-r--r--  hw/ppc/mac.h                 |   9
-rw-r--r--  hw/ppc/mac_newworld.c        |  56
-rw-r--r--  hw/ppc/spapr.c               |  84
-rw-r--r--  hw/ppc/spapr_cpu_core.c      |  47
-rw-r--r--  hw/ppc/spapr_hcall.c         |  50
-rw-r--r--  hw/ppc/spapr_rtas.c          | 108
-rw-r--r--  hw/ppc/trace-events          |   4
-rw-r--r--  hw/rdma/rdma_backend.c       |   2
-rw-r--r--  hw/rdma/rdma_rm.c            |   2
-rw-r--r--  hw/rdma/rdma_rm_defs.h       |   9
-rw-r--r--  hw/rdma/vmw/pvrdma.h         |   6
-rw-r--r--  hw/rdma/vmw/pvrdma_cmd.c     |  10
-rw-r--r--  hw/rdma/vmw/pvrdma_main.c    |  19
-rw-r--r--  hw/rdma/vmw/pvrdma_qp_ops.c  |   1
-rw-r--r--  hw/s390x/event-facility.c    |  64
-rw-r--r--  hw/s390x/ipl.c               |   4
-rw-r--r--  hw/s390x/s390-pci-inst.c     |   8
-rw-r--r--  hw/s390x/s390-virtio-ccw.c   |  36
-rw-r--r--  hw/vfio/ccw.c                |  56
22 files changed, 390 insertions(+), 284 deletions(-)
diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c
index dac7bcd15e..79621eb879 100644
--- a/hw/misc/macio/macio.c
+++ b/hw/misc/macio/macio.c
@@ -279,11 +279,10 @@ static void macio_newworld_realize(PCIDevice *d, Error **errp)
{
MacIOState *s = MACIO(d);
NewWorldMacIOState *ns = NEWWORLD_MACIO(d);
+ DeviceState *pic_dev = DEVICE(ns->pic);
Error *err = NULL;
SysBusDevice *sysbus_dev;
MemoryRegion *timer_memory = NULL;
- int i;
- int cur_irq = 0;
macio_common_realize(d, &err);
if (err) {
@@ -292,11 +291,14 @@ static void macio_newworld_realize(PCIDevice *d, Error **errp)
}
sysbus_dev = SYS_BUS_DEVICE(&s->cuda);
- sysbus_connect_irq(sysbus_dev, 0, ns->irqs[cur_irq++]);
+ sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev,
+ NEWWORLD_CUDA_IRQ));
sysbus_dev = SYS_BUS_DEVICE(&s->escc);
- sysbus_connect_irq(sysbus_dev, 0, ns->irqs[cur_irq++]);
- sysbus_connect_irq(sysbus_dev, 1, ns->irqs[cur_irq++]);
+ sysbus_connect_irq(sysbus_dev, 0, qdev_get_gpio_in(pic_dev,
+ NEWWORLD_ESCCB_IRQ));
+ sysbus_connect_irq(sysbus_dev, 1, qdev_get_gpio_in(pic_dev,
+ NEWWORLD_ESCCA_IRQ));
/* OpenPIC */
sysbus_dev = SYS_BUS_DEVICE(ns->pic);
@@ -304,15 +306,22 @@ static void macio_newworld_realize(PCIDevice *d, Error **errp)
sysbus_mmio_get_region(sysbus_dev, 0));
/* IDE buses */
- for (i = 0; i < ARRAY_SIZE(ns->ide); i++) {
- qemu_irq irq0 = ns->irqs[cur_irq++];
- qemu_irq irq1 = ns->irqs[cur_irq++];
-
- macio_realize_ide(s, &ns->ide[i], irq0, irq1, 0x16 + (i * 4), &err);
- if (err) {
- error_propagate(errp, err);
- return;
- }
+ macio_realize_ide(s, &ns->ide[0],
+ qdev_get_gpio_in(pic_dev, NEWWORLD_IDE0_IRQ),
+ qdev_get_gpio_in(pic_dev, NEWWORLD_IDE0_DMA_IRQ),
+ 0x16, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
+ }
+
+ macio_realize_ide(s, &ns->ide[1],
+ qdev_get_gpio_in(pic_dev, NEWWORLD_IDE1_IRQ),
+ qdev_get_gpio_in(pic_dev, NEWWORLD_IDE1_DMA_IRQ),
+ 0x1a, &err);
+ if (err) {
+ error_propagate(errp, err);
+ return;
}
/* Timer */
@@ -328,8 +337,6 @@ static void macio_newworld_init(Object *obj)
NewWorldMacIOState *ns = NEWWORLD_MACIO(obj);
int i;
- qdev_init_gpio_out(DEVICE(obj), ns->irqs, ARRAY_SIZE(ns->irqs));
-
object_property_add_link(obj, "pic", TYPE_OPENPIC,
(Object **) &ns->pic,
qdev_prop_allow_set_link_before_realize,
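
The macio change above stops forwarding IRQs through board-level GPIO outputs and instead resolves each line directly on the OpenPIC referenced by the "pic" link property. A minimal sketch of that wiring pattern, with a helper name invented for illustration (it is not part of the patch):

    /* Connect output IRQ line <out_line> of a sysbus device straight to a
     * numbered input of the interrupt controller. */
    static void wire_irq(SysBusDevice *src, int out_line,
                         DeviceState *pic, int pic_input)
    {
        sysbus_connect_irq(src, out_line, qdev_get_gpio_in(pic, pic_input));
    }

    /* e.g. wire_irq(SYS_BUS_DEVICE(&s->cuda), 0, pic_dev, NEWWORLD_CUDA_IRQ); */
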
diff --git a/hw/pci-host/trace-events b/hw/pci-host/trace-events
index 341a87a702..dd7a398e96 100644
--- a/hw/pci-host/trace-events
+++ b/hw/pci-host/trace-events
@@ -18,3 +18,5 @@ unin_set_irq(int irq_num, int level) "setting INT %d = %d"
unin_get_config_reg(uint32_t reg, uint32_t addr, uint32_t retval) "converted config space accessor 0x%"PRIx32 "/0x%"PRIx32 " -> 0x%"PRIx32
unin_data_write(uint64_t addr, unsigned len, uint64_t val) "write addr 0x%"PRIx64 " len %d val 0x%"PRIx64
unin_data_read(uint64_t addr, unsigned len, uint64_t val) "read addr 0x%"PRIx64 " len %d val 0x%"PRIx64
+unin_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
+unin_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
diff --git a/hw/pci-host/uninorth.c b/hw/pci-host/uninorth.c
index fada0ffd5f..ba76b84dbc 100644
--- a/hw/pci-host/uninorth.c
+++ b/hw/pci-host/uninorth.c
@@ -519,6 +519,62 @@ static const TypeInfo pci_unin_internal_info = {
.class_init = pci_unin_internal_class_init,
};
+/* UniN device */
+static void unin_write(void *opaque, hwaddr addr, uint64_t value,
+ unsigned size)
+{
+ trace_unin_write(addr, value);
+ if (addr == 0x0) {
+ *(int *)opaque = value;
+ }
+}
+
+static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size)
+{
+ uint32_t value;
+
+ value = 0;
+ switch (addr) {
+ case 0:
+ value = *(int *)opaque;
+ }
+
+ trace_unin_read(addr, value);
+
+ return value;
+}
+
+static const MemoryRegionOps unin_ops = {
+ .read = unin_read,
+ .write = unin_write,
+ .endianness = DEVICE_BIG_ENDIAN,
+};
+
+static void unin_init(Object *obj)
+{
+ UNINState *s = UNI_NORTH(obj);
+ SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+
+ memory_region_init_io(&s->mem, obj, &unin_ops, &s->token, "unin", 0x1000);
+
+ sysbus_init_mmio(sbd, &s->mem);
+}
+
+static void unin_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories);
+}
+
+static const TypeInfo unin_info = {
+ .name = TYPE_UNI_NORTH,
+ .parent = TYPE_SYS_BUS_DEVICE,
+ .instance_size = sizeof(UNINState),
+ .instance_init = unin_init,
+ .class_init = unin_class_init,
+};
+
static void unin_register_types(void)
{
type_register_static(&unin_main_pci_host_info);
@@ -530,6 +586,8 @@ static void unin_register_types(void)
type_register_static(&pci_u3_agp_info);
type_register_static(&pci_unin_agp_info);
type_register_static(&pci_unin_internal_info);
+
+ type_register_static(&unin_info);
}
type_init(unin_register_types)
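
With the UniNorth token register modelled as a real sysbus device, a board only needs to create it and map MMIO region 0; the mac_newworld.c hunk further down does exactly this. A minimal board-side sketch (base address 0xf8000000 taken from that hunk, error handling elided):

    DeviceState *dev = qdev_create(NULL, TYPE_UNI_NORTH);

    qdev_init_nofail(dev);
    memory_region_add_subregion(get_system_memory(), 0xf8000000,
                                sysbus_mmio_get_region(SYS_BUS_DEVICE(dev), 0));
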
diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h
index 892dd03789..22a7efbed6 100644
--- a/hw/ppc/mac.h
+++ b/hw/ppc/mac.h
@@ -56,6 +56,15 @@
#define OLDWORLD_IDE1_IRQ 0xe
#define OLDWORLD_IDE1_DMA_IRQ 0x3
+/* New World IRQs */
+#define NEWWORLD_CUDA_IRQ 0x19
+#define NEWWORLD_ESCCB_IRQ 0x24
+#define NEWWORLD_ESCCA_IRQ 0x25
+#define NEWWORLD_IDE0_IRQ 0xd
+#define NEWWORLD_IDE0_DMA_IRQ 0x2
+#define NEWWORLD_IDE1_IRQ 0xe
+#define NEWWORLD_IDE1_DMA_IRQ 0x3
+
/* MacIO */
#define TYPE_MACIO_IDE "macio-ide"
#define MACIO_IDE(obj) OBJECT_CHECK(MACIOIDEState, (obj), TYPE_MACIO_IDE)
diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c
index 29bd3838bf..744acdfd2e 100644
--- a/hw/ppc/mac_newworld.c
+++ b/hw/ppc/mac_newworld.c
@@ -82,36 +82,6 @@
#define NDRV_VGA_FILENAME "qemu_vga.ndrv"
-/* UniN device */
-static void unin_write(void *opaque, hwaddr addr, uint64_t value,
- unsigned size)
-{
- trace_mac99_uninorth_write(addr, value);
- if (addr == 0x0) {
- *(int*)opaque = value;
- }
-}
-
-static uint64_t unin_read(void *opaque, hwaddr addr, unsigned size)
-{
- uint32_t value;
-
- value = 0;
- switch (addr) {
- case 0:
- value = *(int*)opaque;
- }
-
- trace_mac99_uninorth_read(addr, value);
-
- return value;
-}
-
-static const MemoryRegionOps unin_ops = {
- .read = unin_read,
- .write = unin_write,
- .endianness = DEVICE_NATIVE_ENDIAN,
-};
static void fw_cfg_boot_set(void *opaque, const char *boot_device,
Error **errp)
@@ -144,8 +114,7 @@ static void ppc_core99_init(MachineState *machine)
PowerPCCPU *cpu = NULL;
CPUPPCState *env = NULL;
char *filename;
- qemu_irq *pic, **openpic_irqs;
- MemoryRegion *unin_memory = g_new(MemoryRegion, 1);
+ qemu_irq **openpic_irqs;
int linux_boot, i, j, k;
MemoryRegion *ram = g_new(MemoryRegion, 1), *bios = g_new(MemoryRegion, 1);
hwaddr kernel_base, initrd_base, cmdline_base = 0;
@@ -164,7 +133,6 @@ static void ppc_core99_init(MachineState *machine)
int machine_arch;
SysBusDevice *s;
DeviceState *dev, *pic_dev;
- int *token = g_new(int, 1);
hwaddr nvram_addr = 0xFFF04000;
uint64_t tbfreq;
@@ -272,9 +240,12 @@ static void ppc_core99_init(MachineState *machine)
}
}
- /* UniN init: XXX should be a real device */
- memory_region_init_io(unin_memory, NULL, &unin_ops, token, "unin", 0x1000);
- memory_region_add_subregion(get_system_memory(), 0xf8000000, unin_memory);
+ /* UniN init */
+ dev = qdev_create(NULL, TYPE_UNI_NORTH);
+ qdev_init_nofail(dev);
+ s = SYS_BUS_DEVICE(dev);
+ memory_region_add_subregion(get_system_memory(), 0xf8000000,
+ sysbus_mmio_get_region(s, 0));
openpic_irqs = g_malloc0(smp_cpus * sizeof(qemu_irq *));
openpic_irqs[0] =
@@ -320,8 +291,6 @@ static void ppc_core99_init(MachineState *machine)
}
}
- pic = g_new0(qemu_irq, 64);
-
pic_dev = qdev_create(NULL, TYPE_OPENPIC);
qdev_prop_set_uint32(pic_dev, "model", OPENPIC_MODEL_KEYLARGO);
qdev_init_nofail(pic_dev);
@@ -333,10 +302,6 @@ static void ppc_core99_init(MachineState *machine)
}
}
- for (i = 0; i < 64; i++) {
- pic[i] = qdev_get_gpio_in(pic_dev, i);
- }
-
if (PPC_INPUT(env) == PPC_FLAGS_INPUT_970) {
/* 970 gets a U3 bus */
/* Uninorth AGP bus */
@@ -410,13 +375,6 @@ static void ppc_core99_init(MachineState *machine)
/* MacIO */
macio = NEWWORLD_MACIO(pci_create(pci_bus, -1, TYPE_NEWWORLD_MACIO));
dev = DEVICE(macio);
- qdev_connect_gpio_out(dev, 0, pic[0x19]); /* CUDA */
- qdev_connect_gpio_out(dev, 1, pic[0x24]); /* ESCC-B */
- qdev_connect_gpio_out(dev, 2, pic[0x25]); /* ESCC-A */
- qdev_connect_gpio_out(dev, 3, pic[0x0d]); /* IDE */
- qdev_connect_gpio_out(dev, 4, pic[0x02]); /* IDE DMA */
- qdev_connect_gpio_out(dev, 5, pic[0x0e]); /* IDE */
- qdev_connect_gpio_out(dev, 6, pic[0x03]); /* IDE DMA */
qdev_prop_set_uint64(dev, "frequency", tbfreq);
object_property_set_link(OBJECT(macio), OBJECT(pic_dev), "pic",
&error_abort);
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
index b35aff5d81..32ab3c43b6 100644
--- a/hw/ppc/spapr.c
+++ b/hw/ppc/spapr.c
@@ -1668,10 +1668,8 @@ static void spapr_machine_reset(void)
g_free(fdt);
/* Set up the entry state */
- first_ppc_cpu->env.gpr[3] = fdt_addr;
+ spapr_cpu_set_entry_state(first_ppc_cpu, SPAPR_ENTRY_POINT, fdt_addr);
first_ppc_cpu->env.gpr[5] = 0;
- first_cpu->halted = 0;
- first_ppc_cpu->env.nip = SPAPR_ENTRY_POINT;
spapr->cas_reboot = false;
}
@@ -1851,10 +1849,12 @@ static bool spapr_ov5_cas_needed(void *opaque)
*
* Thus, for any cases where the set of available CAS-negotiatable
* options extends beyond OV5_FORM1_AFFINITY and OV5_DRCONF_MEMORY, we
- * include the CAS-negotiated options in the migration stream.
+ * include the CAS-negotiated options in the migration stream, unless
+ * they affect boot time behaviour only.
*/
spapr_ovec_set(ov5_mask, OV5_FORM1_AFFINITY);
spapr_ovec_set(ov5_mask, OV5_DRCONF_MEMORY);
+ spapr_ovec_set(ov5_mask, OV5_DRMEM_V2);
/* spapr_ovec_diff returns true if bits were removed. we avoid using
* the mask itself since in the future it's possible "legacy" bits may be
@@ -2508,13 +2508,11 @@ static void spapr_machine_init(MachineState *machine)
int i;
MemoryRegion *sysmem = get_system_memory();
MemoryRegion *ram = g_new(MemoryRegion, 1);
- MemoryRegion *rma_region;
- void *rma = NULL;
- hwaddr rma_alloc_size;
hwaddr node0_size = spapr_node0_size(machine);
long load_limit, fw_size;
char *filename;
Error *resize_hpt_err = NULL;
+ PowerPCCPU *first_ppc_cpu;
msi_nonbroken = true;
@@ -2549,40 +2547,28 @@ static void spapr_machine_init(MachineState *machine)
exit(1);
}
- /* Allocate RMA if necessary */
- rma_alloc_size = kvmppc_alloc_rma(&rma);
+ spapr->rma_size = node0_size;
- if (rma_alloc_size == -1) {
- error_report("Unable to create RMA");
- exit(1);
+ /* With KVM, we don't actually know whether KVM supports an
+ * unbounded RMA (PR KVM) or is limited by the hash table size
+ * (HV KVM using VRMA), so we always assume the latter
+ *
+ * In that case, we also limit the initial allocations for RTAS
+ * etc... to 256M since we have no way to know what the VRMA size
+ * is going to be as it depends on the size of the hash table
+ * which isn't determined yet.
+ */
+ if (kvm_enabled()) {
+ spapr->vrma_adjust = 1;
+ spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
}
- if (rma_alloc_size && (rma_alloc_size < node0_size)) {
- spapr->rma_size = rma_alloc_size;
- } else {
- spapr->rma_size = node0_size;
-
- /* With KVM, we don't actually know whether KVM supports an
- * unbounded RMA (PR KVM) or is limited by the hash table size
- * (HV KVM using VRMA), so we always assume the latter
- *
- * In that case, we also limit the initial allocations for RTAS
- * etc... to 256M since we have no way to know what the VRMA size
- * is going to be as it depends on the size of the hash table
- * isn't determined yet.
- */
- if (kvm_enabled()) {
- spapr->vrma_adjust = 1;
- spapr->rma_size = MIN(spapr->rma_size, 0x10000000);
- }
-
- /* Actually we don't support unbounded RMA anymore since we
- * added proper emulation of HV mode. The max we can get is
- * 16G which also happens to be what we configure for PAPR
- * mode so make sure we don't do anything bigger than that
- */
- spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
- }
+ /* Actually we don't support unbounded RMA anymore since we added
+ * proper emulation of HV mode. The max we can get is 16G which
+ * also happens to be what we configure for PAPR mode so make sure
+ * we don't do anything bigger than that
+ */
+ spapr->rma_size = MIN(spapr->rma_size, 0x400000000ull);
if (spapr->rma_size > node0_size) {
error_report("Numa node 0 has to span the RMA (%#08"HWADDR_PRIx")",
@@ -2607,11 +2593,6 @@ static void spapr_machine_init(MachineState *machine)
}
spapr_ovec_set(spapr->ov5, OV5_FORM1_AFFINITY);
- if (!kvm_enabled() || kvmppc_has_cap_mmu_radix()) {
- /* KVM and TCG always allow GTSE with radix... */
- spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
- }
- /* ... but not with hash (currently). */
/* advertise support for dedicated HP event source to guests */
if (spapr->use_hotplug_event_source) {
@@ -2629,6 +2610,15 @@ static void spapr_machine_init(MachineState *machine)
/* init CPUs */
spapr_init_cpus(spapr);
+ first_ppc_cpu = POWERPC_CPU(first_cpu);
+ if ((!kvm_enabled() || kvmppc_has_cap_mmu_radix()) &&
+ ppc_check_compat(first_ppc_cpu, CPU_POWERPC_LOGICAL_3_00, 0,
+ spapr->max_compat_pvr)) {
+ /* KVM and TCG always allow GTSE with radix... */
+ spapr_ovec_set(spapr->ov5, OV5_MMU_RADIX_GTSE);
+ }
+ /* ... but not with hash (currently). */
+
if (kvm_enabled()) {
/* Enable H_LOGICAL_CI_* so SLOF can talk to in-kernel devices */
kvmppc_enable_logical_ci_hcalls();
@@ -2643,14 +2633,6 @@ static void spapr_machine_init(MachineState *machine)
machine->ram_size);
memory_region_add_subregion(sysmem, 0, ram);
- if (rma_alloc_size && rma) {
- rma_region = g_new(MemoryRegion, 1);
- memory_region_init_ram_ptr(rma_region, NULL, "ppc_spapr.rma",
- rma_alloc_size, rma);
- vmstate_register_ram_global(rma_region);
- memory_region_add_subregion(sysmem, 0, rma_region);
- }
-
/* initialize hotplug memory address space */
if (machine->ram_size < machine->maxram_size) {
ram_addr_t hotplug_mem_size = machine->maxram_size - machine->ram_size;
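
The spapr.c hunks above drop the kvmppc_alloc_rma() path entirely and derive the RMA size from node 0 memory alone. Condensed into a hypothetical helper for readability (function name invented, constants as in the patch):

    /* Start from the node 0 size, limit to 256M while the hash table size
     * (and hence a possible VRMA) is still unknown under KVM, and never
     * exceed the 16G PAPR maximum. */
    static hwaddr spapr_pick_rma_size(hwaddr node0_size, bool kvm)
    {
        hwaddr rma = node0_size;

        if (kvm) {
            rma = MIN(rma, 0x10000000);      /* 256M */
        }
        return MIN(rma, 0x400000000ull);     /* 16G cap */
    }
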
diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c
index 01dbc69424..f3e9b879b2 100644
--- a/hw/ppc/spapr_cpu_core.c
+++ b/hw/ppc/spapr_cpu_core.c
@@ -28,6 +28,7 @@ static void spapr_cpu_reset(void *opaque)
CPUState *cs = CPU(cpu);
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ target_ulong lpcr;
cpu_reset(cs);
@@ -43,13 +44,43 @@ static void spapr_cpu_reset(void *opaque)
env->spr[SPR_HIOR] = 0;
- /* Disable Power-saving mode Exit Cause exceptions for the CPU.
- * This can cause issues when rebooting the guest if a secondary
- * is awaken */
- if (cs != first_cpu) {
- env->spr[SPR_LPCR] &= ~pcc->lpcr_pm;
- }
+ lpcr = env->spr[SPR_LPCR];
+
+ /* Set emulated LPCR to not send interrupts to hypervisor. Note that
+ * under KVM, the actual HW LPCR will be set differently by KVM itself,
+ * the settings below ensure proper operations with TCG in absence of
+ * a real hypervisor.
+ *
+ * Clearing VPM0 will also cause us to use RMOR in mmu-hash64.c for
+ * real mode accesses, which thankfully defaults to 0 and isn't
+ * accessible in guest mode.
+ *
+ * Disable Power-saving mode Exit Cause exceptions for the CPU, so
+ * we don't get spurious wakeups before an RTAS start-cpu call.
+ */
+ lpcr &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | pcc->lpcr_pm);
+ lpcr |= LPCR_LPES0 | LPCR_LPES1;
+
+ /* Set RMLS to the max (ie, 16G) */
+ lpcr &= ~LPCR_RMLS;
+ lpcr |= 1ull << LPCR_RMLS_SHIFT;
+
+ ppc_store_lpcr(cpu, lpcr);
+
+ /* Set a full AMOR so guest can use the AMR as it sees fit */
+ env->spr[SPR_AMOR] = 0xffffffffffffffffull;
+}
+
+void spapr_cpu_set_entry_state(PowerPCCPU *cpu, target_ulong nip, target_ulong r3)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ CPUPPCState *env = &cpu->env;
+ env->nip = nip;
+ env->gpr[3] = r3;
+ CPU(cpu)->halted = 0;
+ /* Enable Power-saving mode Exit Cause exceptions */
+ ppc_store_lpcr(cpu, env->spr[SPR_LPCR] | pcc->lpcr_pm);
}
static void spapr_cpu_destroy(PowerPCCPU *cpu)
@@ -65,8 +96,8 @@ static void spapr_cpu_init(sPAPRMachineState *spapr, PowerPCCPU *cpu,
/* Set time-base frequency to 512 MHz */
cpu_ppc_tb_init(env, SPAPR_TIMEBASE_FREQ);
- /* Enable PAPR mode in TCG or KVM */
- cpu_ppc_set_papr(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
+ cpu_ppc_set_vhyp(cpu, PPC_VIRTUAL_HYPERVISOR(spapr));
+ kvmppc_set_papr(cpu);
qemu_register_reset(spapr_cpu_reset, cpu);
spapr_cpu_reset(cpu);
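
Every LPCR update in this series now goes through ppc_store_lpcr() instead of writing env->spr[SPR_LPCR] directly, so the new value also reaches KVM. A sketch of the read-modify-write idiom used by spapr_cpu_reset() and spapr_cpu_set_entry_state(); mask_to_clear and bits_to_set are placeholders:

    target_ulong lpcr = env->spr[SPR_LPCR];

    lpcr &= ~mask_to_clear;              /* e.g. LPCR_VPM0 | pcc->lpcr_pm  */
    lpcr |= bits_to_set;                 /* e.g. LPCR_LPES0 | LPCR_LPES1   */
    ppc_store_lpcr(cpu, lpcr);           /* applies the change to TCG/KVM  */
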
diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c
index 16bccdd5c0..ca9702e667 100644
--- a/hw/ppc/spapr_hcall.c
+++ b/hw/ppc/spapr_hcall.c
@@ -15,32 +15,35 @@
#include "hw/ppc/spapr_ovec.h"
#include "mmu-book3s-v3.h"
-struct SPRSyncState {
- int spr;
+struct LPCRSyncState {
target_ulong value;
target_ulong mask;
};
-static void do_spr_sync(CPUState *cs, run_on_cpu_data arg)
+static void do_lpcr_sync(CPUState *cs, run_on_cpu_data arg)
{
- struct SPRSyncState *s = arg.host_ptr;
+ struct LPCRSyncState *s = arg.host_ptr;
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
+ target_ulong lpcr;
cpu_synchronize_state(cs);
- env->spr[s->spr] &= ~s->mask;
- env->spr[s->spr] |= s->value;
+ lpcr = env->spr[SPR_LPCR];
+ lpcr &= ~s->mask;
+ lpcr |= s->value;
+ ppc_store_lpcr(cpu, lpcr);
}
-static void set_spr(CPUState *cs, int spr, target_ulong value,
- target_ulong mask)
+static void set_all_lpcrs(target_ulong value, target_ulong mask)
{
- struct SPRSyncState s = {
- .spr = spr,
+ CPUState *cs;
+ struct LPCRSyncState s = {
.value = value,
.mask = mask
};
- run_on_cpu(cs, do_spr_sync, RUN_ON_CPU_HOST_PTR(&s));
+ CPU_FOREACH(cs) {
+ run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
+ }
}
static bool has_spr(PowerPCCPU *cpu, int spr)
@@ -1235,8 +1238,6 @@ static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
target_ulong value1,
target_ulong value2)
{
- CPUState *cs;
-
if (value1) {
return H_P3;
}
@@ -1246,16 +1247,12 @@ static target_ulong h_set_mode_resource_le(PowerPCCPU *cpu,
switch (mflags) {
case H_SET_MODE_ENDIAN_BIG:
- CPU_FOREACH(cs) {
- set_spr(cs, SPR_LPCR, 0, LPCR_ILE);
- }
+ set_all_lpcrs(0, LPCR_ILE);
spapr_pci_switch_vga(true);
return H_SUCCESS;
case H_SET_MODE_ENDIAN_LITTLE:
- CPU_FOREACH(cs) {
- set_spr(cs, SPR_LPCR, LPCR_ILE, LPCR_ILE);
- }
+ set_all_lpcrs(LPCR_ILE, LPCR_ILE);
spapr_pci_switch_vga(false);
return H_SUCCESS;
}
@@ -1268,7 +1265,6 @@ static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
target_ulong value1,
target_ulong value2)
{
- CPUState *cs;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
if (!(pcc->insns_flags2 & PPC2_ISA207S)) {
@@ -1285,9 +1281,7 @@ static target_ulong h_set_mode_resource_addr_trans_mode(PowerPCCPU *cpu,
return H_UNSUPPORTED_FLAG;
}
- CPU_FOREACH(cs) {
- set_spr(cs, SPR_LPCR, mflags << LPCR_AIL_SHIFT, LPCR_AIL);
- }
+ set_all_lpcrs(mflags << LPCR_AIL_SHIFT, LPCR_AIL);
return H_SUCCESS;
}
@@ -1364,7 +1358,6 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu,
target_ulong opcode,
target_ulong *args)
{
- CPUState *cs;
target_ulong flags = args[0];
target_ulong proc_tbl = args[1];
target_ulong page_size = args[2];
@@ -1422,12 +1415,9 @@ static target_ulong h_register_process_table(PowerPCCPU *cpu,
spapr->patb_entry = cproc; /* Save new process table */
/* Update the UPRT and GTSE bits in the LPCR for all cpus */
- CPU_FOREACH(cs) {
- set_spr(cs, SPR_LPCR,
- ((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) |
- ((flags & FLAG_GTSE) ? LPCR_GTSE : 0),
- LPCR_UPRT | LPCR_GTSE);
- }
+ set_all_lpcrs(((flags & (FLAG_RADIX | FLAG_HASH_PROC_TBL)) ? LPCR_UPRT : 0) |
+ ((flags & FLAG_GTSE) ? LPCR_GTSE : 0),
+ LPCR_UPRT | LPCR_GTSE);
if (kvm_enabled()) {
return kvmppc_configure_v3_mmu(cpu, flags & FLAG_RADIX,
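
set_all_lpcrs() folds the repeated per-call-site loops into one helper. The pattern works because run_on_cpu() is synchronous, so a pointer to the caller's stack stays valid for the whole broadcast; a sketch reusing the names introduced above, with the H_SET_MODE little-endian values as the example:

    struct LPCRSyncState s = { .value = LPCR_ILE, .mask = LPCR_ILE };
    CPUState *cs;

    CPU_FOREACH(cs) {
        /* returns only after the target vCPU has executed do_lpcr_sync() */
        run_on_cpu(cs, do_lpcr_sync, RUN_ON_CPU_HOST_PTR(&s));
    }
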
diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c
index 0ec5fa4cfe..7f9738daed 100644
--- a/hw/ppc/spapr_rtas.c
+++ b/hw/ppc/spapr_rtas.c
@@ -32,11 +32,12 @@
#include "hw/qdev.h"
#include "sysemu/device_tree.h"
#include "sysemu/cpus.h"
-#include "sysemu/kvm.h"
+#include "sysemu/hw_accel.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/ppc/spapr_rtas.h"
+#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "hw/boards.h"
@@ -45,6 +46,8 @@
#include "qemu/cutils.h"
#include "trace.h"
#include "hw/ppc/fdt.h"
+#include "target/ppc/mmu-hash64.h"
+#include "target/ppc/mmu-book3s-v3.h"
static void rtas_display_character(PowerPCCPU *cpu, sPAPRMachineState *spapr,
uint32_t token, uint32_t nargs,
@@ -119,34 +122,16 @@ static void rtas_query_cpu_stopped_state(PowerPCCPU *cpu_,
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
}
-/*
- * Set the timebase offset of the CPU to that of first CPU.
- * This helps hotplugged CPU to have the correct timebase offset.
- */
-static void spapr_cpu_update_tb_offset(PowerPCCPU *cpu)
-{
- PowerPCCPU *fcpu = POWERPC_CPU(first_cpu);
-
- cpu->env.tb_env->tb_offset = fcpu->env.tb_env->tb_offset;
-}
-
-static void spapr_cpu_set_endianness(PowerPCCPU *cpu)
-{
- PowerPCCPU *fcpu = POWERPC_CPU(first_cpu);
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(fcpu);
-
- if (!pcc->interrupts_big_endian(fcpu)) {
- cpu->env.spr[SPR_LPCR] |= LPCR_ILE;
- }
-}
-
-static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
+static void rtas_start_cpu(PowerPCCPU *callcpu, sPAPRMachineState *spapr,
uint32_t token, uint32_t nargs,
target_ulong args,
uint32_t nret, target_ulong rets)
{
target_ulong id, start, r3;
- PowerPCCPU *cpu;
+ PowerPCCPU *newcpu;
+ CPUPPCState *env;
+ PowerPCCPUClass *pcc;
+ target_ulong lpcr;
if (nargs != 3 || nret != 1) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
@@ -157,41 +142,55 @@ static void rtas_start_cpu(PowerPCCPU *cpu_, sPAPRMachineState *spapr,
start = rtas_ld(args, 1);
r3 = rtas_ld(args, 2);
- cpu = spapr_find_cpu(id);
- if (cpu != NULL) {
- CPUState *cs = CPU(cpu);
- CPUPPCState *env = &cpu->env;
- PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
+ newcpu = spapr_find_cpu(id);
+ if (!newcpu) {
+ /* Didn't find a matching cpu */
+ rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ return;
+ }
- if (!cs->halted) {
- rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
- return;
- }
+ env = &newcpu->env;
+ pcc = POWERPC_CPU_GET_CLASS(newcpu);
+
+ if (!CPU(newcpu)->halted) {
+ rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
+ return;
+ }
- /* This will make sure qemu state is up to date with kvm, and
- * mark it dirty so our changes get flushed back before the
- * new cpu enters */
- kvm_cpu_synchronize_state(cs);
+ cpu_synchronize_state(CPU(newcpu));
- env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
+ env->msr = (1ULL << MSR_SF) | (1ULL << MSR_ME);
- /* Enable Power-saving mode Exit Cause exceptions for the new CPU */
- env->spr[SPR_LPCR] |= pcc->lpcr_pm;
+ /* Enable Power-saving mode Exit Cause exceptions for the new CPU */
+ lpcr = env->spr[SPR_LPCR];
+ if (!pcc->interrupts_big_endian(callcpu)) {
+ lpcr |= LPCR_ILE;
+ }
+ if (env->mmu_model == POWERPC_MMU_3_00) {
+ /*
+ * New cpus are expected to start in the same radix/hash mode
+ * as the existing CPUs
+ */
+ if (ppc64_radix_guest(callcpu)) {
+ lpcr |= LPCR_UPRT | LPCR_GTSE;
+ } else {
+ lpcr &= ~(LPCR_UPRT | LPCR_GTSE);
+ }
+ }
+ ppc_store_lpcr(newcpu, lpcr);
- env->nip = start;
- env->gpr[3] = r3;
- cs->halted = 0;
- spapr_cpu_set_endianness(cpu);
- spapr_cpu_update_tb_offset(cpu);
+ /*
+ * Set the timebase offset of the new CPU to that of the invoking
+ * CPU. This helps hotplugged CPU to have the correct timebase
+ * offset.
+ */
+ newcpu->env.tb_env->tb_offset = callcpu->env.tb_env->tb_offset;
- qemu_cpu_kick(cs);
+ spapr_cpu_set_entry_state(newcpu, start, r3);
- rtas_st(rets, 0, RTAS_OUT_SUCCESS);
- return;
- }
+ qemu_cpu_kick(CPU(newcpu));
- /* Didn't find a matching cpu */
- rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
+ rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}
static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
@@ -203,13 +202,12 @@ static void rtas_stop_self(PowerPCCPU *cpu, sPAPRMachineState *spapr,
CPUPPCState *env = &cpu->env;
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
- cs->halted = 1;
- qemu_cpu_kick(cs);
-
/* Disable Power-saving mode Exit Cause exceptions for the CPU.
* This could deliver an interrupt on a dying CPU and crash the
* guest */
- env->spr[SPR_LPCR] &= ~pcc->lpcr_pm;
+ ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm);
+ cs->halted = 1;
+ qemu_cpu_kick(cs);
}
static inline int sysparm_st(target_ulong addr, target_ulong len,
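
The rewritten rtas_start_cpu() derives the secondary's LPCR from the calling CPU: interrupt endianness always, and the radix/hash translation mode only on a v3.00 MMU. Condensed into a hypothetical helper (name invented, logic as in the patch):

    static target_ulong inherit_lpcr(PowerPCCPU *callcpu, PowerPCCPU *newcpu)
    {
        PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(newcpu);
        target_ulong lpcr = newcpu->env.spr[SPR_LPCR];

        if (!pcc->interrupts_big_endian(callcpu)) {
            lpcr |= LPCR_ILE;                      /* little-endian interrupts */
        }
        if (newcpu->env.mmu_model == POWERPC_MMU_3_00) {
            if (ppc64_radix_guest(callcpu)) {
                lpcr |= LPCR_UPRT | LPCR_GTSE;     /* radix guest */
            } else {
                lpcr &= ~(LPCR_UPRT | LPCR_GTSE);  /* hash guest */
            }
        }
        return lpcr;
    }
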
diff --git a/hw/ppc/trace-events b/hw/ppc/trace-events
index 66ec7eda6e..dc5e65aee9 100644
--- a/hw/ppc/trace-events
+++ b/hw/ppc/trace-events
@@ -92,10 +92,6 @@ rs6000mc_size_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x"
rs6000mc_size_write(uint32_t addr, uint32_t val) "write addr=0x%x val=0x%x"
rs6000mc_parity_read(uint32_t addr, uint32_t val) "read addr=0x%x val=0x%x"
-# hw/ppc/mac_newworld.c
-mac99_uninorth_write(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
-mac99_uninorth_read(uint64_t addr, uint64_t value) "addr=0x%" PRIx64 " val=0x%"PRIx64
-
# hw/ppc/ppc4xx_pci.c
ppc4xx_pci_map_irq(int32_t devfn, int irq_num, int slot) "devfn 0x%x irq %d -> %d"
ppc4xx_pci_set_irq(int irq_num) "PCI irq %d"
diff --git a/hw/rdma/rdma_backend.c b/hw/rdma/rdma_backend.c
index 5c7b3d8949..e9ced6f9ef 100644
--- a/hw/rdma/rdma_backend.c
+++ b/hw/rdma/rdma_backend.c
@@ -774,7 +774,7 @@ int rdma_backend_init(RdmaBackendDev *backend_dev,
goto out_destroy_comm_channel;
}
- if (backend_dev->backend_gid_idx > port_attr.gid_tbl_len) {
+ if (backend_dev->backend_gid_idx >= port_attr.gid_tbl_len) {
error_setg(errp, "Invalid backend_gid_idx, should be less than %d",
port_attr.gid_tbl_len);
goto out_destroy_comm_channel;
diff --git a/hw/rdma/rdma_rm.c b/hw/rdma/rdma_rm.c
index 51a47d7292..415da15efe 100644
--- a/hw/rdma/rdma_rm.c
+++ b/hw/rdma/rdma_rm.c
@@ -21,8 +21,6 @@
#include "rdma_backend.h"
#include "rdma_rm.h"
-#define MAX_RM_TBL_NAME 16
-
/* Page directory and page tables */
#define PG_DIR_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
#define PG_TBL_SZ { TARGET_PAGE_SIZE / sizeof(__u64) }
diff --git a/hw/rdma/rdma_rm_defs.h b/hw/rdma/rdma_rm_defs.h
index fc646da61f..226011176d 100644
--- a/hw/rdma/rdma_rm_defs.h
+++ b/hw/rdma/rdma_rm_defs.h
@@ -20,9 +20,9 @@
#define MAX_PORTS 1
#define MAX_PORT_GIDS 1
+#define MAX_GIDS MAX_PORT_GIDS
#define MAX_PORT_PKEYS 1
-#define MAX_PKEYS 1
-#define MAX_GIDS 2048
+#define MAX_PKEYS MAX_PORT_PKEYS
#define MAX_UCS 512
#define MAX_MR_SIZE (1UL << 27)
#define MAX_QP 1024
@@ -34,9 +34,9 @@
#define MAX_QP_INIT_RD_ATOM 16
#define MAX_AH 64
-#define MAX_RMRESTBL_NAME_SZ 16
+#define MAX_RM_TBL_NAME 16
typedef struct RdmaRmResTbl {
- char name[MAX_RMRESTBL_NAME_SZ];
+ char name[MAX_RM_TBL_NAME];
QemuMutex lock;
unsigned long *bitmap;
size_t tbl_sz;
@@ -87,7 +87,6 @@ typedef struct RdmaRmQP {
typedef struct RdmaRmPort {
union ibv_gid gid_tbl[MAX_PORT_GIDS];
enum ibv_port_state state;
- int *pkey_tbl; /* TODO: Not yet supported */
} RdmaRmPort;
typedef struct RdmaDeviceResources {
diff --git a/hw/rdma/vmw/pvrdma.h b/hw/rdma/vmw/pvrdma.h
index 8c173cb824..0b46dc5a9b 100644
--- a/hw/rdma/vmw/pvrdma.h
+++ b/hw/rdma/vmw/pvrdma.h
@@ -31,7 +31,7 @@
#define RDMA_REG_BAR_IDX 1
#define RDMA_UAR_BAR_IDX 2
#define RDMA_BAR0_MSIX_SIZE (16 * 1024)
-#define RDMA_BAR1_REGS_SIZE 256
+#define RDMA_BAR1_REGS_SIZE 64
#define RDMA_BAR2_UAR_SIZE (0x1000 * MAX_UCS) /* each uc gets page */
/* MSIX */
@@ -86,7 +86,7 @@ static inline int get_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t *val)
{
int idx = addr >> 2;
- if (idx > RDMA_BAR1_REGS_SIZE) {
+ if (idx >= RDMA_BAR1_REGS_SIZE) {
return -EINVAL;
}
@@ -99,7 +99,7 @@ static inline int set_reg_val(PVRDMADev *dev, hwaddr addr, uint32_t val)
{
int idx = addr >> 2;
- if (idx > RDMA_BAR1_REGS_SIZE) {
+ if (idx >= RDMA_BAR1_REGS_SIZE) {
return -EINVAL;
}
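
The rdma and pvrdma hunks all tighten bounds checks from ">" to ">=": for a table dimensioned with N entries the valid indices are 0 .. N-1, so index N must already be rejected. A standalone illustration (plain C, not QEMU code):

    #include <errno.h>
    #include <stdint.h>

    #define RDMA_BAR1_REGS_SIZE 64

    static uint32_t regs[RDMA_BAR1_REGS_SIZE];

    /* ">=" rejects idx == RDMA_BAR1_REGS_SIZE, which is one past the end. */
    static int get_reg(unsigned idx, uint32_t *val)
    {
        if (idx >= RDMA_BAR1_REGS_SIZE) {
            return -EINVAL;
        }
        *val = regs[idx];
        return 0;
    }
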
diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c
index 99019d8741..14255d609f 100644
--- a/hw/rdma/vmw/pvrdma_cmd.c
+++ b/hw/rdma/vmw/pvrdma_cmd.c
@@ -232,7 +232,7 @@ static int create_mr(PVRDMADev *dev, union pvrdma_cmd_req *req,
cmd->start, cmd->length, host_virt,
cmd->access_flags, &resp->mr_handle,
&resp->lkey, &resp->rkey);
- if (!resp->hdr.err) {
+ if (host_virt && !resp->hdr.err) {
munmap(host_virt, cmd->length);
}
@@ -576,7 +576,7 @@ static int create_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
pr_dbg("index=%d\n", cmd->index);
- if (cmd->index > MAX_PORT_GIDS) {
+ if (cmd->index >= MAX_PORT_GIDS) {
return -EINVAL;
}
@@ -603,7 +603,11 @@ static int destroy_bind(PVRDMADev *dev, union pvrdma_cmd_req *req,
{
struct pvrdma_cmd_destroy_bind *cmd = &req->destroy_bind;
- pr_dbg("clear index %d\n", cmd->index);
+ pr_dbg("index=%d\n", cmd->index);
+
+ if (cmd->index >= MAX_PORT_GIDS) {
+ return -EINVAL;
+ }
memset(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw, 0,
sizeof(dev->rdma_dev_res.ports[0].gid_tbl[cmd->index].raw));
diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c
index c552248c90..3ed7409763 100644
--- a/hw/rdma/vmw/pvrdma_main.c
+++ b/hw/rdma/vmw/pvrdma_main.c
@@ -275,15 +275,6 @@ static void init_dsr_dev_caps(PVRDMADev *dev)
pr_dbg("Initialized\n");
}
-static void free_ports(PVRDMADev *dev)
-{
- int i;
-
- for (i = 0; i < MAX_PORTS; i++) {
- g_free(dev->rdma_dev_res.ports[i].gid_tbl);
- }
-}
-
static void init_ports(PVRDMADev *dev, Error **errp)
{
int i;
@@ -292,10 +283,6 @@ static void init_ports(PVRDMADev *dev, Error **errp)
for (i = 0; i < MAX_PORTS; i++) {
dev->rdma_dev_res.ports[i].state = IBV_PORT_DOWN;
-
- dev->rdma_dev_res.ports[i].pkey_tbl =
- g_malloc0(sizeof(*dev->rdma_dev_res.ports[i].pkey_tbl) *
- MAX_PORT_PKEYS);
}
}
@@ -462,14 +449,14 @@ static void init_bars(PCIDevice *pdev)
/* BAR 1 - Registers */
memset(&dev->regs_data, 0, sizeof(dev->regs_data));
memory_region_init_io(&dev->regs, OBJECT(dev), &regs_ops, dev,
- "pvrdma-regs", RDMA_BAR1_REGS_SIZE);
+ "pvrdma-regs", sizeof(dev->regs_data));
pci_register_bar(pdev, RDMA_REG_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
&dev->regs);
/* BAR 2 - UAR */
memset(&dev->uar_data, 0, sizeof(dev->uar_data));
memory_region_init_io(&dev->uar, OBJECT(dev), &uar_ops, dev, "rdma-uar",
- RDMA_BAR2_UAR_SIZE);
+ sizeof(dev->uar_data));
pci_register_bar(pdev, RDMA_UAR_BAR_IDX, PCI_BASE_ADDRESS_SPACE_MEMORY,
&dev->uar);
}
@@ -622,8 +609,6 @@ static void pvrdma_exit(PCIDevice *pdev)
pvrdma_qp_ops_fini();
- free_ports(dev);
-
rdma_rm_fini(&dev->rdma_dev_res);
rdma_backend_fini(&dev->backend_dev);
diff --git a/hw/rdma/vmw/pvrdma_qp_ops.c b/hw/rdma/vmw/pvrdma_qp_ops.c
index 750ade6c31..99bb51111e 100644
--- a/hw/rdma/vmw/pvrdma_qp_ops.c
+++ b/hw/rdma/vmw/pvrdma_qp_ops.c
@@ -216,6 +216,7 @@ void pvrdma_cq_poll(RdmaDeviceResources *dev_res, uint32_t cq_handle)
cq = rdma_rm_get_cq(dev_res, cq_handle);
if (!cq) {
pr_dbg("Invalid CQ# %d\n", cq_handle);
+ return;
}
rdma_backend_poll_cq(dev_res, &cq->backend_cq);
diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c
index 9c24bc6f7c..ee5b83448b 100644
--- a/hw/s390x/event-facility.c
+++ b/hw/s390x/event-facility.c
@@ -26,11 +26,23 @@ typedef struct SCLPEventsBus {
BusState qbus;
} SCLPEventsBus;
+/* we need to save 32 bit chunks for compatibility */
+#ifdef HOST_WORDS_BIGENDIAN
+#define RECV_MASK_LOWER 1
+#define RECV_MASK_UPPER 0
+#else /* little endian host */
+#define RECV_MASK_LOWER 0
+#define RECV_MASK_UPPER 1
+#endif
+
struct SCLPEventFacility {
SysBusDevice parent_obj;
SCLPEventsBus sbus;
/* guest's receive mask */
- sccb_mask_t receive_mask;
+ union {
+ uint32_t receive_mask_pieces[2];
+ sccb_mask_t receive_mask;
+ };
/*
* when false, we keep the same broken, backwards compatible behaviour as
* before, allowing only masks of size exactly 4; when true, we implement
@@ -262,7 +274,7 @@ static void read_event_data(SCLPEventFacility *ef, SCCB *sccb)
case SCLP_SELECTIVE_READ:
copy_mask((uint8_t *)&sclp_active_selection_mask, (uint8_t *)&red->mask,
sizeof(sclp_active_selection_mask), ef->mask_length);
- sclp_active_selection_mask = be32_to_cpu(sclp_active_selection_mask);
+ sclp_active_selection_mask = be64_to_cpu(sclp_active_selection_mask);
if (!sclp_cp_receive_mask ||
(sclp_active_selection_mask & ~sclp_cp_receive_mask)) {
sccb->h.response_code =
@@ -294,21 +306,22 @@ static void write_event_mask(SCLPEventFacility *ef, SCCB *sccb)
}
/*
- * Note: We currently only support masks up to 4 byte length;
- * the remainder is filled up with zeroes. Linux uses
- * a 4 byte mask length.
+ * Note: We currently only support masks up to 8 byte length;
+ * the remainder is filled up with zeroes. Older Linux
+ * kernels use a 4 byte mask length, newer ones can use both
+ * 8 or 4 depending on what is available on the host.
*/
/* keep track of the guest's capability masks */
copy_mask((uint8_t *)&tmp_mask, WEM_CP_RECEIVE_MASK(we_mask, mask_length),
sizeof(tmp_mask), mask_length);
- ef->receive_mask = be32_to_cpu(tmp_mask);
+ ef->receive_mask = be64_to_cpu(tmp_mask);
/* return the SCLP's capability masks to the guest */
- tmp_mask = cpu_to_be32(get_host_receive_mask(ef));
+ tmp_mask = cpu_to_be64(get_host_receive_mask(ef));
copy_mask(WEM_RECEIVE_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
mask_length, sizeof(tmp_mask));
- tmp_mask = cpu_to_be32(get_host_send_mask(ef));
+ tmp_mask = cpu_to_be64(get_host_send_mask(ef));
copy_mask(WEM_SEND_MASK(we_mask, mask_length), (uint8_t *)&tmp_mask,
mask_length, sizeof(tmp_mask));
@@ -369,6 +382,13 @@ static void command_handler(SCLPEventFacility *ef, SCCB *sccb, uint64_t code)
}
}
+static bool vmstate_event_facility_mask64_needed(void *opaque)
+{
+ SCLPEventFacility *ef = opaque;
+
+ return (ef->receive_mask & 0xFFFFFFFF) != 0;
+}
+
static bool vmstate_event_facility_mask_length_needed(void *opaque)
{
SCLPEventFacility *ef = opaque;
@@ -376,6 +396,17 @@ static bool vmstate_event_facility_mask_length_needed(void *opaque)
return ef->allow_all_mask_sizes;
}
+static const VMStateDescription vmstate_event_facility_mask64 = {
+ .name = "vmstate-event-facility/mask64",
+ .version_id = 0,
+ .minimum_version_id = 0,
+ .needed = vmstate_event_facility_mask64_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_LOWER], SCLPEventFacility),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
static const VMStateDescription vmstate_event_facility_mask_length = {
.name = "vmstate-event-facility/mask_length",
.version_id = 0,
@@ -392,10 +423,11 @@ static const VMStateDescription vmstate_event_facility = {
.version_id = 0,
.minimum_version_id = 0,
.fields = (VMStateField[]) {
- VMSTATE_UINT32(receive_mask, SCLPEventFacility),
+ VMSTATE_UINT32(receive_mask_pieces[RECV_MASK_UPPER], SCLPEventFacility),
VMSTATE_END_OF_LIST()
},
.subsections = (const VMStateDescription * []) {
+ &vmstate_event_facility_mask64,
&vmstate_event_facility_mask_length,
NULL
}
@@ -511,3 +543,17 @@ static void register_types(void)
}
type_init(register_types)
+
+BusState *sclp_get_event_facility_bus(void)
+{
+ Object *busobj;
+ SCLPEventsBus *sbus;
+
+ busobj = object_resolve_path_type("", TYPE_SCLP_EVENTS_BUS, NULL);
+ sbus = OBJECT_CHECK(SCLPEventsBus, busobj, TYPE_SCLP_EVENTS_BUS);
+ if (!sbus) {
+ return NULL;
+ }
+
+ return &sbus->qbus;
+}
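
The receive mask grows to a 64-bit sccb_mask_t, but the pre-existing vmstate field must keep streaming the same 32 bits it always did; the new low half travels in the "mask64" subsection and only when it is non-zero. The union plus RECV_MASK_UPPER/LOWER picks, on either host endianness, the array element that aliases the most significant word of the mask. A standalone check of that mapping (compiler byte-order macros stand in for QEMU's HOST_WORDS_BIGENDIAN):

    #include <stdint.h>
    #include <stdio.h>

    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
    #define RECV_MASK_LOWER 1
    #define RECV_MASK_UPPER 0
    #else
    #define RECV_MASK_LOWER 0
    #define RECV_MASK_UPPER 1
    #endif

    int main(void)
    {
        union {
            uint32_t pieces[2];
            uint64_t mask;
        } u = { .mask = 0x1122334455667788ULL };

        /* prints upper=0x11223344 lower=0x55667788 on any host */
        printf("upper=0x%08x lower=0x%08x\n",
               u.pieces[RECV_MASK_UPPER], u.pieces[RECV_MASK_LOWER]);
        return 0;
    }
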
diff --git a/hw/s390x/ipl.c b/hw/s390x/ipl.c
index fb554ab156..150f6c0582 100644
--- a/hw/s390x/ipl.c
+++ b/hw/s390x/ipl.c
@@ -373,6 +373,10 @@ int s390_ipl_set_loadparm(uint8_t *loadparm)
loadparm[i] = ascii2ebcdic[(uint8_t) lp[i]];
}
+ if (i < 8) {
+ memset(loadparm + i, 0x40, 8 - i); /* fill with EBCDIC spaces */
+ }
+
g_free(lp);
return 0;
}
diff --git a/hw/s390x/s390-pci-inst.c b/hw/s390x/s390-pci-inst.c
index 3fcc330fe3..02a815fd31 100644
--- a/hw/s390x/s390-pci-inst.c
+++ b/hw/s390x/s390-pci-inst.c
@@ -155,8 +155,6 @@ int clp_service_call(S390CPU *cpu, uint8_t r2, uintptr_t ra)
S390pciState *s = s390_get_phb();
int i;
- cpu_synchronize_state(CPU(cpu));
-
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
@@ -389,8 +387,6 @@ int pcilg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
uint32_t fh;
uint8_t pcias;
- cpu_synchronize_state(CPU(cpu));
-
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
@@ -487,8 +483,6 @@ int pcistg_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
uint32_t fh;
uint8_t pcias;
- cpu_synchronize_state(CPU(cpu));
-
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
@@ -620,8 +614,6 @@ int rpcit_service_call(S390CPU *cpu, uint8_t r1, uint8_t r2, uintptr_t ra)
S390IOTLBEntry entry;
hwaddr start, end;
- cpu_synchronize_state(CPU(cpu));
-
if (env->psw.mask & PSW_MASK_PSTATE) {
s390_program_interrupt(env, PGM_PRIVILEGED, 4, ra);
return 0;
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index 435f7c99e7..100dfdc96d 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -288,6 +288,15 @@ static void s390_create_virtio_net(BusState *bus, const char *name)
}
}
+static void s390_create_sclpconsole(const char *type, Chardev *chardev)
+{
+ DeviceState *dev;
+
+ dev = qdev_create(sclp_get_event_facility_bus(), type);
+ qdev_prop_set_chr(dev, "chardev", chardev);
+ qdev_init_nofail(dev);
+}
+
static void ccw_init(MachineState *machine)
{
int ret;
@@ -346,6 +355,14 @@ static void ccw_init(MachineState *machine)
/* Create VirtIO network adapters */
s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
+ /* init consoles */
+ if (serial_hd(0)) {
+ s390_create_sclpconsole("sclpconsole", serial_hd(0));
+ }
+ if (serial_hd(1)) {
+ s390_create_sclpconsole("sclplmconsole", serial_hd(1));
+ }
+
/* Register savevm handler for guest TOD clock */
register_savevm_live(NULL, "todclock", 0, 1, &savevm_gtod, NULL);
}
@@ -470,10 +487,8 @@ static void ccw_machine_class_init(ObjectClass *oc, void *data)
mc->block_default_type = IF_VIRTIO;
mc->no_cdrom = 1;
mc->no_floppy = 1;
- mc->no_serial = 1;
mc->no_parallel = 1;
mc->no_sdcard = 1;
- mc->use_sclp = 1;
mc->max_cpus = S390_MAX_CPUS;
mc->has_hotpluggable_cpus = true;
mc->get_hotplug_handler = s390_get_hotplug_handler;
@@ -671,6 +686,9 @@ bool css_migration_enabled(void)
} \
type_init(ccw_machine_register_##suffix)
+#define CCW_COMPAT_2_12 \
+ HW_COMPAT_2_12
+
#define CCW_COMPAT_2_11 \
HW_COMPAT_2_11 \
{\
@@ -756,14 +774,26 @@ bool css_migration_enabled(void)
.value = "0",\
},
+static void ccw_machine_2_13_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_2_13_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(2_13, "2.13", true);
+
static void ccw_machine_2_12_instance_options(MachineState *machine)
{
+ ccw_machine_2_13_instance_options(machine);
}
static void ccw_machine_2_12_class_options(MachineClass *mc)
{
+ ccw_machine_2_13_class_options(mc);
+ SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_12);
}
-DEFINE_CCW_MACHINE(2_12, "2.12", true);
+DEFINE_CCW_MACHINE(2_12, "2.12", false);
static void ccw_machine_2_11_instance_options(MachineState *machine)
{
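
The new 2.13 machine type follows the usual versioned-machine chaining: each version's class_options first calls its successor's, then layers its own compat properties, and only the newest DEFINE_CCW_MACHINE passes true for "is the default". The 2.12 case from the patch, annotated:

    static void ccw_machine_2_12_class_options(MachineClass *mc)
    {
        ccw_machine_2_13_class_options(mc);       /* inherit newer defaults  */
        SET_MACHINE_COMPAT(mc, CCW_COMPAT_2_12);  /* then pin 2.12 behaviour */
    }
    DEFINE_CCW_MACHINE(2_12, "2.12", false);      /* "2.13" is now default   */
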
diff --git a/hw/vfio/ccw.c b/hw/vfio/ccw.c
index fe34b50769..e67392c5f9 100644
--- a/hw/vfio/ccw.c
+++ b/hw/vfio/ccw.c
@@ -292,12 +292,43 @@ static void vfio_ccw_put_region(VFIOCCWDevice *vcdev)
g_free(vcdev->io_region);
}
-static void vfio_put_device(VFIOCCWDevice *vcdev)
+static void vfio_ccw_put_device(VFIOCCWDevice *vcdev)
{
g_free(vcdev->vdev.name);
vfio_put_base_device(&vcdev->vdev);
}
+static void vfio_ccw_get_device(VFIOGroup *group, VFIOCCWDevice *vcdev,
+ Error **errp)
+{
+ char *name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
+ vcdev->cdev.hostid.ssid,
+ vcdev->cdev.hostid.devid);
+ VFIODevice *vbasedev;
+
+ QLIST_FOREACH(vbasedev, &group->device_list, next) {
+ if (strcmp(vbasedev->name, name) == 0) {
+ error_setg(errp, "vfio: subchannel %s has already been attached",
+ name);
+ goto out_err;
+ }
+ }
+
+ if (vfio_get_device(group, vcdev->cdev.mdevid, &vcdev->vdev, errp)) {
+ goto out_err;
+ }
+
+ vcdev->vdev.ops = &vfio_ccw_ops;
+ vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
+ vcdev->vdev.name = name;
+ vcdev->vdev.dev = &vcdev->cdev.parent_obj.parent_obj;
+
+ return;
+
+out_err:
+ g_free(name);
+}
+
static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
{
char *tmp, group_path[PATH_MAX];
@@ -327,7 +358,6 @@ static VFIOGroup *vfio_ccw_get_group(S390CCWDevice *cdev, Error **errp)
static void vfio_ccw_realize(DeviceState *dev, Error **errp)
{
- VFIODevice *vbasedev;
VFIOGroup *group;
CcwDevice *ccw_dev = DO_UPCAST(CcwDevice, parent_obj, dev);
S390CCWDevice *cdev = DO_UPCAST(S390CCWDevice, parent_obj, ccw_dev);
@@ -348,22 +378,8 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
goto out_group_err;
}
- vcdev->vdev.ops = &vfio_ccw_ops;
- vcdev->vdev.type = VFIO_DEVICE_TYPE_CCW;
- vcdev->vdev.name = g_strdup_printf("%x.%x.%04x", cdev->hostid.cssid,
- cdev->hostid.ssid, cdev->hostid.devid);
- vcdev->vdev.dev = dev;
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (strcmp(vbasedev->name, vcdev->vdev.name) == 0) {
- error_setg(&err, "vfio: subchannel %s has already been attached",
- vcdev->vdev.name);
- g_free(vcdev->vdev.name);
- goto out_device_err;
- }
- }
-
- if (vfio_get_device(group, cdev->mdevid, &vcdev->vdev, &err)) {
- g_free(vcdev->vdev.name);
+ vfio_ccw_get_device(group, vcdev, &err);
+ if (err) {
goto out_device_err;
}
@@ -382,7 +398,7 @@ static void vfio_ccw_realize(DeviceState *dev, Error **errp)
out_notifier_err:
vfio_ccw_put_region(vcdev);
out_region_err:
- vfio_put_device(vcdev);
+ vfio_ccw_put_device(vcdev);
out_device_err:
vfio_put_group(group);
out_group_err:
@@ -403,7 +419,7 @@ static void vfio_ccw_unrealize(DeviceState *dev, Error **errp)
vfio_ccw_unregister_io_notifier(vcdev);
vfio_ccw_put_region(vcdev);
- vfio_put_device(vcdev);
+ vfio_ccw_put_device(vcdev);
vfio_put_group(group);
if (cdc->unrealize) {