author     Peter Maydell <peter.maydell@linaro.org>   2018-12-13 13:06:09 +0000
committer  Peter Maydell <peter.maydell@linaro.org>   2018-12-13 13:06:09 +0000
commit     6145a6d84b3bf0f25935b88543febe076c61b0f4 (patch)
tree       144409e1b987f54a8abe9ea02dc0e9c2fb2ff879 /hw
parent     b39c027d824847dc606e49f79304cb572ed6a9b3 (diff)
parent     c9aacaadeb42076c01b07e59dd94dcf73f751e05 (diff)
Merge remote-tracking branch 'remotes/cohuck/tags/s390x-20181212' into staging
s390x patches for 4.0:
- add 4.0 machine type
- various fixes and small changes
# gpg: Signature made Wed 12 Dec 2018 09:52:04 GMT
# gpg: using RSA key DECF6B93C6F02FAF
# gpg: Good signature from "Cornelia Huck <conny@cornelia-huck.de>"
# gpg: aka "Cornelia Huck <huckc@linux.vnet.ibm.com>"
# gpg: aka "Cornelia Huck <cornelia.huck@de.ibm.com>"
# gpg: aka "Cornelia Huck <cohuck@kernel.org>"
# gpg: aka "Cornelia Huck <cohuck@redhat.com>"
# Primary key fingerprint: C3D0 D66D C362 4FF6 A8C0 18CE DECF 6B93 C6F0 2FAF
* remotes/cohuck/tags/s390x-20181212:
hw/s390x/virtio-ccw.c: Don't take address of fields in packed structs
vfio-ap: flag as compatible with balloon
s390x/tod: Properly stop the KVM TOD while the guest is not running
s390/MAINTAINERS: Add Halil as kvm and machine maintainer
s390x: introduce 4.0 compat machine
s390x/zpci: drop msix.available
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
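The first patch in the list above ("hw/s390x/virtio-ccw.c: Don't take address of fields in packed structs") replaces QEMU's in-place byte-swap helpers such as be64_to_cpus(&x) with by-value assignments such as x = be64_to_cpu(x), so that no pointer to a member of a packed struct is ever formed. The standalone sketch below is illustration only, not code from this merge: it uses GCC's __builtin_bswap64 in place of QEMU's helper, and the struct and field names are invented for the example.

/*
 * Sketch: swapping a packed-struct member by value instead of passing
 * its (possibly misaligned) address to an in-place helper.
 */
#include <stdint.h>
#include <stdio.h>

struct __attribute__((packed)) vq_info_sketch {   /* hypothetical layout */
    uint8_t  pad;      /* pushes the next member onto an unaligned offset */
    uint64_t queue;    /* big-endian value as read from the guest */
};

int main(void)
{
    struct vq_info_sketch info = { .pad = 0, .queue = 0x0100000000000000ULL };

    /*
     * Old pattern: be64_to_cpus(&info.queue) takes the address of a packed
     * member, which may be unaligned and triggers -Waddress-of-packed-member
     * on newer compilers.
     *
     * New pattern: swap by value, never forming a pointer to the member.
     */
    info.queue = __builtin_bswap64(info.queue);

    printf("queue = %llu\n", (unsigned long long)info.queue);
    return 0;
}

Compiled with gcc -Wall, this prints "queue = 1"; the point is only the assignment form, which is what the virtio-ccw.c hunks below switch to.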
Diffstat (limited to 'hw')
-rw-r--r--  hw/s390x/s390-pci-bus.c    |   2
-rw-r--r--  hw/s390x/s390-pci-bus.h    |   1
-rw-r--r--  hw/s390x/s390-virtio-ccw.c |  17
-rw-r--r--  hw/s390x/tod-kvm.c         | 102
-rw-r--r--  hw/s390x/virtio-ccw.c      |  42
-rw-r--r--  hw/vfio/ap.c               |   8
6 files changed, 146 insertions, 26 deletions
diff --git a/hw/s390x/s390-pci-bus.c b/hw/s390x/s390-pci-bus.c
index 060ff062bc..99d0368868 100644
--- a/hw/s390x/s390-pci-bus.c
+++ b/hw/s390x/s390-pci-bus.c
@@ -745,7 +745,6 @@ static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
 
     pos = pci_find_capability(pbdev->pdev, PCI_CAP_ID_MSIX);
     if (!pos) {
-        pbdev->msix.available = false;
         return -1;
     }
 
@@ -761,7 +760,6 @@ static int s390_pci_msix_init(S390PCIBusDevice *pbdev)
     pbdev->msix.pba_bar = pba & PCI_MSIX_FLAGS_BIRMASK;
     pbdev->msix.pba_offset = pba & ~PCI_MSIX_FLAGS_BIRMASK;
     pbdev->msix.entries = (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
-    pbdev->msix.available = true;
 
     name = g_strdup_printf("msix-s390-%04x", pbdev->uid);
     memory_region_init_io(&pbdev->msix_notify_mr, OBJECT(pbdev),
diff --git a/hw/s390x/s390-pci-bus.h b/hw/s390x/s390-pci-bus.h
index 1f7f9b5814..f47a0f2da5 100644
--- a/hw/s390x/s390-pci-bus.h
+++ b/hw/s390x/s390-pci-bus.h
@@ -252,7 +252,6 @@ typedef struct ChscSeiNt2Res {
 } QEMU_PACKED ChscSeiNt2Res;
 
 typedef struct S390MsixInfo {
-    bool available;
     uint8_t table_bar;
     uint8_t pba_bar;
     uint16_t entries;
diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c
index a0615a8b35..fd9d0b0542 100644
--- a/hw/s390x/s390-virtio-ccw.c
+++ b/hw/s390x/s390-virtio-ccw.c
@@ -651,6 +651,9 @@ bool css_migration_enabled(void)
     }                                                                         \
     type_init(ccw_machine_register_##suffix)
 
+#define CCW_COMPAT_3_1 \
+        HW_COMPAT_3_1
+
 #define CCW_COMPAT_3_0 \
         HW_COMPAT_3_0
 
@@ -742,14 +745,26 @@ bool css_migration_enabled(void)
             .value    = "0",\
         },
 
+static void ccw_machine_4_0_instance_options(MachineState *machine)
+{
+}
+
+static void ccw_machine_4_0_class_options(MachineClass *mc)
+{
+}
+DEFINE_CCW_MACHINE(4_0, "4.0", true);
+
 static void ccw_machine_3_1_instance_options(MachineState *machine)
 {
+    ccw_machine_4_0_instance_options(machine);
 }
 
 static void ccw_machine_3_1_class_options(MachineClass *mc)
 {
+    ccw_machine_4_0_class_options(mc);
+    SET_MACHINE_COMPAT(mc, CCW_COMPAT_3_1);
 }
-DEFINE_CCW_MACHINE(3_1, "3.1", true);
+DEFINE_CCW_MACHINE(3_1, "3.1", false);
 
 static void ccw_machine_3_0_instance_options(MachineState *machine)
 {
diff --git a/hw/s390x/tod-kvm.c b/hw/s390x/tod-kvm.c
index df564ab89c..2456bf7b24 100644
--- a/hw/s390x/tod-kvm.c
+++ b/hw/s390x/tod-kvm.c
@@ -10,10 +10,11 @@
 
 #include "qemu/osdep.h"
 #include "qapi/error.h"
+#include "sysemu/sysemu.h"
 #include "hw/s390x/tod.h"
 #include "kvm_s390x.h"
 
-static void kvm_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp)
+static void kvm_s390_get_tod_raw(S390TOD *tod, Error **errp)
 {
     int r;
 
@@ -27,7 +28,17 @@ static void kvm_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp)
     }
 }
 
-static void kvm_s390_tod_set(S390TODState *td, const S390TOD *tod, Error **errp)
+static void kvm_s390_tod_get(const S390TODState *td, S390TOD *tod, Error **errp)
+{
+    if (td->stopped) {
+        *tod = td->base;
+        return;
+    }
+
+    kvm_s390_get_tod_raw(tod, errp);
+}
+
+static void kvm_s390_set_tod_raw(const S390TOD *tod, Error **errp)
 {
     int r;
 
@@ -41,18 +52,105 @@ static void kvm_s390_tod_set(S390TODState *td, const S390TOD *tod, Error **errp)
     }
 }
 
+static void kvm_s390_tod_set(S390TODState *td, const S390TOD *tod, Error **errp)
+{
+    Error *local_err = NULL;
+
+    /*
+     * Somebody (e.g. migration) set the TOD. We'll store it into KVM to
+     * properly detect errors now but take a look at the runstate to decide
+     * whether really to keep the tod running. E.g. during migration, this
+     * is the point where we want to stop the initially running TOD to fire
+     * it back up when actually starting the migrated guest.
+     */
+    kvm_s390_set_tod_raw(tod, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (runstate_is_running()) {
+        td->stopped = false;
+    } else {
+        td->stopped = true;
+        td->base = *tod;
+    }
+}
+
+static void kvm_s390_tod_vm_state_change(void *opaque, int running,
+                                         RunState state)
+{
+    S390TODState *td = opaque;
+    Error *local_err = NULL;
+
+    if (running && td->stopped) {
+        /* Set the old TOD when running the VM - start the TOD clock. */
+        kvm_s390_set_tod_raw(&td->base, &local_err);
+        if (local_err) {
+            warn_report_err(local_err);
+        }
+        /* Treat errors like the TOD was running all the time. */
+        td->stopped = false;
+    } else if (!running && !td->stopped) {
+        /* Store the TOD when stopping the VM - stop the TOD clock. */
+        kvm_s390_get_tod_raw(&td->base, &local_err);
+        if (local_err) {
+            /* Keep the TOD running in case we could not back it up. */
+            warn_report_err(local_err);
+        } else {
+            td->stopped = true;
+        }
+    }
+}
+
+static void kvm_s390_tod_realize(DeviceState *dev, Error **errp)
+{
+    S390TODState *td = S390_TOD(dev);
+    S390TODClass *tdc = S390_TOD_GET_CLASS(td);
+    Error *local_err = NULL;
+
+    tdc->parent_realize(dev, &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    /*
+     * We need to know when the VM gets started/stopped to start/stop the TOD.
+     * As we can never have more than one TOD instance (and that will never be
+     * removed), registering here and never unregistering is good enough.
+     */
+    qemu_add_vm_change_state_handler(kvm_s390_tod_vm_state_change, td);
+}
+
 static void kvm_s390_tod_class_init(ObjectClass *oc, void *data)
 {
     S390TODClass *tdc = S390_TOD_CLASS(oc);
 
+    device_class_set_parent_realize(DEVICE_CLASS(oc), kvm_s390_tod_realize,
+                                    &tdc->parent_realize);
     tdc->get = kvm_s390_tod_get;
     tdc->set = kvm_s390_tod_set;
 }
 
+static void kvm_s390_tod_init(Object *obj)
+{
+    S390TODState *td = S390_TOD(obj);
+
+    /*
+     * The TOD is initially running (value stored in KVM). Avoid needless
+     * loading/storing of the TOD when starting a simple VM, so let it
+     * run although the (never started) VM is stopped. For migration, we
+     * will properly set the TOD later.
+     */
+    td->stopped = false;
+}
+
 static TypeInfo kvm_s390_tod_info = {
     .name = TYPE_KVM_S390_TOD,
     .parent = TYPE_S390_TOD,
     .instance_size = sizeof(S390TODState),
+    .instance_init = kvm_s390_tod_init,
     .class_init = kvm_s390_tod_class_init,
     .class_size = sizeof(S390TODClass),
 };
diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c
index 212b3d3dea..c2b78c8e9b 100644
--- a/hw/s390x/virtio-ccw.c
+++ b/hw/s390x/virtio-ccw.c
@@ -287,18 +287,18 @@ static int virtio_ccw_handle_set_vq(SubchDev *sch, CCW1 ccw, bool check_len,
     }
     if (is_legacy) {
         ccw_dstream_read(&sch->cds, linfo);
-        be64_to_cpus(&linfo.queue);
-        be32_to_cpus(&linfo.align);
-        be16_to_cpus(&linfo.index);
-        be16_to_cpus(&linfo.num);
+        linfo.queue = be64_to_cpu(linfo.queue);
+        linfo.align = be32_to_cpu(linfo.align);
+        linfo.index = be16_to_cpu(linfo.index);
+        linfo.num = be16_to_cpu(linfo.num);
         ret = virtio_ccw_set_vqs(sch, NULL, &linfo);
     } else {
         ccw_dstream_read(&sch->cds, info);
-        be64_to_cpus(&info.desc);
-        be16_to_cpus(&info.index);
-        be16_to_cpus(&info.num);
-        be64_to_cpus(&info.avail);
-        be64_to_cpus(&info.used);
+        info.desc = be64_to_cpu(info.desc);
+        info.index = be16_to_cpu(info.index);
+        info.num = be16_to_cpu(info.num);
+        info.avail = be64_to_cpu(info.avail);
+        info.used = be64_to_cpu(info.used);
         ret = virtio_ccw_set_vqs(sch, &info, NULL);
     }
     sch->curr_status.scsw.count = 0;
@@ -382,7 +382,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             features.features = 0;
         }
         ccw_dstream_rewind(&sch->cds);
-        cpu_to_le32s(&features.features);
+        features.features = cpu_to_le32(features.features);
         ccw_dstream_write(&sch->cds, features.features);
         sch->curr_status.scsw.count = ccw.count - sizeof(features);
         ret = 0;
@@ -403,7 +403,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, features);
-            le32_to_cpus(&features.features);
+            features.features = le32_to_cpu(features.features);
             if (features.index == 0) {
                 virtio_set_features(vdev,
                                     (vdev->guest_features & 0xffffffff00000000ULL) |
@@ -546,7 +546,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, indicators);
-            be64_to_cpus(&indicators);
+            indicators = be64_to_cpu(indicators);
             dev->indicators = get_indicator(indicators, sizeof(uint64_t));
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -567,7 +567,7 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, indicators);
-            be64_to_cpus(&indicators);
+            indicators = be64_to_cpu(indicators);
             dev->indicators2 = get_indicator(indicators, sizeof(uint64_t));
             sch->curr_status.scsw.count = ccw.count - sizeof(indicators);
             ret = 0;
@@ -588,14 +588,14 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             ret = -EFAULT;
         } else {
             ccw_dstream_read(&sch->cds, vq_config.index);
-            be16_to_cpus(&vq_config.index);
+            vq_config.index = be16_to_cpu(vq_config.index);
             if (vq_config.index >= VIRTIO_QUEUE_MAX) {
                 ret = -EINVAL;
                 break;
             }
             vq_config.num_max = virtio_queue_get_num(vdev,
                                                      vq_config.index);
-            cpu_to_be16s(&vq_config.num_max);
+            vq_config.num_max = cpu_to_be16(vq_config.num_max);
             ccw_dstream_write(&sch->cds, vq_config.num_max);
             sch->curr_status.scsw.count = ccw.count - sizeof(vq_config);
             ret = 0;
@@ -621,9 +621,11 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
         if (ccw_dstream_read(&sch->cds, thinint)) {
             ret = -EFAULT;
         } else {
-            be64_to_cpus(&thinint.ind_bit);
-            be64_to_cpus(&thinint.summary_indicator);
-            be64_to_cpus(&thinint.device_indicator);
+            thinint.ind_bit = be64_to_cpu(thinint.ind_bit);
+            thinint.summary_indicator =
+                be64_to_cpu(thinint.summary_indicator);
+            thinint.device_indicator =
+                be64_to_cpu(thinint.device_indicator);
 
             dev->summary_indicator =
                 get_indicator(thinint.summary_indicator, sizeof(uint8_t));
@@ -654,8 +656,8 @@ static int virtio_ccw_cb(SubchDev *sch, CCW1 ccw)
             break;
         }
         ccw_dstream_read_buf(&sch->cds, &revinfo, 4);
-        be16_to_cpus(&revinfo.revision);
-        be16_to_cpus(&revinfo.length);
+        revinfo.revision = be16_to_cpu(revinfo.revision);
+        revinfo.length = be16_to_cpu(revinfo.length);
         if (ccw.count < len + revinfo.length ||
             (check_len && ccw.count > len + revinfo.length)) {
             ret = -EINVAL;
diff --git a/hw/vfio/ap.c b/hw/vfio/ap.c
index 65de952f44..0a25f5e096 100644
--- a/hw/vfio/ap.c
+++ b/hw/vfio/ap.c
@@ -104,6 +104,14 @@ static void vfio_ap_realize(DeviceState *dev, Error **errp)
     vapdev->vdev.name = g_strdup_printf("%s", mdevid);
     vapdev->vdev.dev = dev;
 
+    /*
+     * vfio-ap devices operate in a way compatible with
+     * memory ballooning, as no pages are pinned in the host.
+     * This needs to be set before vfio_get_device() for vfio common to
+     * handle the balloon inhibitor.
+     */
+    vapdev->vdev.balloon_allowed = true;
+
     ret = vfio_get_device(vfio_group, mdevid, &vapdev->vdev, &local_err);
     if (ret) {
         goto out_get_dev_err;
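A note on the tod-kvm.c change above: the TOD (time-of-day) clock value is now saved into td->base when the VM stops and written back to KVM when the VM resumes, driven by a VM run-state change handler, so the guest's TOD does not race ahead while the VM is stopped. The sketch below is a self-contained model of just that bookkeeping, not QEMU code: the KVM get/set calls are replaced by a plain variable and every name is invented for illustration.

/*
 * Sketch: freeze a clock value while "the VM" is stopped and restore it
 * on resume, mirroring the stopped/base logic added to tod-kvm.c.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_kvm_tod = 1000;   /* stands in for the TOD kept in KVM */

struct tod_state_sketch {
    bool stopped;       /* true while the VM (and thus the TOD) is stopped */
    uint64_t base;      /* TOD value saved at the moment the VM stopped */
};

/* Mirrors the vm-state-change handler: stop the clock when the VM stops,
 * restart it from the saved base when the VM resumes. */
static void tod_vm_state_change(struct tod_state_sketch *td, bool running)
{
    if (running && td->stopped) {
        fake_kvm_tod = td->base;       /* like kvm_s390_set_tod_raw(&td->base) */
        td->stopped = false;
    } else if (!running && !td->stopped) {
        td->base = fake_kvm_tod;       /* like kvm_s390_get_tod_raw(&td->base) */
        td->stopped = true;
    }
}

int main(void)
{
    struct tod_state_sketch td = { .stopped = false, .base = 0 };

    tod_vm_state_change(&td, false);   /* VM stops: TOD frozen at 1000 */
    fake_kvm_tod += 500;               /* host time keeps moving meanwhile */
    tod_vm_state_change(&td, true);    /* VM resumes: saved value restored */
    printf("TOD seen by the resumed guest: %llu\n",
           (unsigned long long)fake_kvm_tod);
    return 0;
}

Run as written, this prints 1000 rather than 1500: the interval during which the VM was stopped is hidden from the guest's clock, which is the behaviour the real patch achieves by registering its handler with qemu_add_vm_change_state_handler().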