Diffstat (limited to 'hw')
50 files changed, 3052 insertions, 691 deletions
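Two block-layer signatures recur throughout the hunks below: blk_new() now takes the AioContext in which the new BlockBackend initially lives, and blk_set_aio_context() reports failure through a return code and an Error ** instead of returning void. A minimal sketch of the updated calling convention, distilled from the virtio-blk, floppy and xen-block hunks (the wrapper function and the iothread_ctx variable are illustrative only, not part of the patch):

    #include "qemu/osdep.h"
    #include "sysemu/block-backend.h"
    #include "qapi/error.h"

    static void example_switch_context(AioContext *iothread_ctx)
    {
        Error *local_err = NULL;
        int r;

        /* An anonymous, empty BlockBackend now names its home context. */
        BlockBackend *blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL);

        /* Switching contexts can fail, e.g. when another user keeps the
         * node in a different AioContext, so check the return value. */
        r = blk_set_aio_context(blk, iothread_ctx, &local_err);
        if (r < 0) {
            error_report_err(local_err);
        }
    }

Call sites that cannot fail pass &error_abort (as the Xen dataplane does below), and callers that tolerate the node staying in the iothread pass NULL (as virtio-blk does when stopping its dataplane).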
diff --git a/hw/arm/imx25_pdk.c b/hw/arm/imx25_pdk.c index eef1b184b0..a0423ffb67 100644 --- a/hw/arm/imx25_pdk.c +++ b/hw/arm/imx25_pdk.c @@ -131,15 +131,6 @@ static void imx25_pdk_init(MachineState *machine) */ if (!qtest_enabled()) { arm_load_kernel(&s->soc.cpu, &imx25_pdk_binfo); - } else { - /* - * This I2C device doesn't exist on the real board. - * We add it here (only on qtest usage) to be able to do a bit - * of simple qtest. See "make check" for details. - */ - i2c_create_slave((I2CBus *)qdev_get_child_bus(DEVICE(&s->soc.i2c[0]), - "i2c-bus.0"), - "ds1338", 0x68); } } diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index 8c37bd314a..158c78f852 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -173,6 +173,7 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); unsigned i; unsigned nvqs = s->conf->num_queues; + Error *local_err = NULL; int r; if (vblk->dataplane_started || s->starting) { @@ -212,7 +213,11 @@ int virtio_blk_data_plane_start(VirtIODevice *vdev) vblk->dataplane_started = true; trace_virtio_blk_data_plane_start(s); - blk_set_aio_context(s->conf->conf.blk, s->ctx); + r = blk_set_aio_context(s->conf->conf.blk, s->ctx, &local_err); + if (r < 0) { + error_report_err(local_err); + goto fail_guest_notifiers; + } /* Kick right away to begin processing requests already in vring */ for (i = 0; i < nvqs; i++) { @@ -281,8 +286,9 @@ void virtio_blk_data_plane_stop(VirtIODevice *vdev) aio_context_acquire(s->ctx); aio_wait_bh_oneshot(s->ctx, virtio_blk_data_plane_stop_bh, s); - /* Drain and switch bs back to the QEMU main loop */ - blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context()); + /* Drain and try to switch bs back to the QEMU main loop. 
If other users + * keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(s->conf->conf.blk, qemu_get_aio_context(), NULL); aio_context_release(s->ctx); diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index bb8f1186e4..f7ad452bbd 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -682,7 +682,8 @@ void xen_block_dataplane_stop(XenBlockDataPlane *dataplane) } aio_context_acquire(dataplane->ctx); - blk_set_aio_context(dataplane->blk, qemu_get_aio_context()); + /* Xen doesn't have multiple users for nodes, so this can't fail */ + blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); aio_context_release(dataplane->ctx); xendev = dataplane->xendev; @@ -811,7 +812,8 @@ void xen_block_dataplane_start(XenBlockDataPlane *dataplane, } aio_context_acquire(dataplane->ctx); - blk_set_aio_context(dataplane->blk, dataplane->ctx); + /* If other users keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); aio_context_release(dataplane->ctx); return; diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 6f19f127a5..37ccedc9f7 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -538,7 +538,7 @@ static void floppy_drive_realize(DeviceState *qdev, Error **errp) if (!dev->conf.blk) { /* Anonymous BlockBackend for an empty drive */ - dev->conf.blk = blk_new(0, BLK_PERM_ALL); + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); ret = blk_attach_dev(dev->conf.blk, qdev); assert(ret == 0); } diff --git a/hw/block/nvme.c b/hw/block/nvme.c index 63a5b58849..30e50f7a38 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -219,6 +219,30 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg, QEMUIOVector *iov, uint64_t prp1, return NVME_INVALID_FIELD | NVME_DNR; } +static uint16_t nvme_dma_write_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len, + uint64_t prp1, uint64_t prp2) +{ + QEMUSGList qsg; + QEMUIOVector iov; + uint16_t status = NVME_SUCCESS; + + if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) { + return NVME_INVALID_FIELD | NVME_DNR; + } + if (qsg.nsg > 0) { + if (dma_buf_write(ptr, len, &qsg)) { + status = NVME_INVALID_FIELD | NVME_DNR; + } + qemu_sglist_destroy(&qsg); + } else { + if (qemu_iovec_to_buf(&iov, 0, ptr, len) != len) { + status = NVME_INVALID_FIELD | NVME_DNR; + } + qemu_iovec_destroy(&iov); + } + return status; +} + static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t *ptr, uint32_t len, uint64_t prp1, uint64_t prp2) { @@ -678,7 +702,6 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeIdentify *c) return ret; } - static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd) { NvmeIdentify *c = (NvmeIdentify *)cmd; @@ -696,6 +719,57 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeCmd *cmd) } } +static inline void nvme_set_timestamp(NvmeCtrl *n, uint64_t ts) +{ + trace_nvme_setfeat_timestamp(ts); + + n->host_timestamp = le64_to_cpu(ts); + n->timestamp_set_qemu_clock_ms = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); +} + +static inline uint64_t nvme_get_timestamp(const NvmeCtrl *n) +{ + uint64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL); + uint64_t elapsed_time = current_time - n->timestamp_set_qemu_clock_ms; + + union nvme_timestamp { + struct { + uint64_t timestamp:48; + uint64_t sync:1; + uint64_t origin:3; + uint64_t rsvd1:12; + }; + uint64_t all; + }; + + union nvme_timestamp ts; + ts.all = 0; + + /* + * If the sum of the Timestamp value set by the host and the elapsed + * time exceeds 2^48, the value returned should be 
reduced modulo 2^48. */ + ts.timestamp = (n->host_timestamp + elapsed_time) & 0xffffffffffff; + + /* If the host timestamp is non-zero, set the timestamp origin */ + ts.origin = n->host_timestamp ? 0x01 : 0x00; + + trace_nvme_getfeat_timestamp(ts.all); + + return cpu_to_le64(ts.all); +} + +static uint16_t nvme_get_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd) +{ + uint64_t prp1 = le64_to_cpu(cmd->prp1); + uint64_t prp2 = le64_to_cpu(cmd->prp2); + + uint64_t timestamp = nvme_get_timestamp(n); + + return nvme_dma_read_prp(n, (uint8_t *)&timestamp, + sizeof(timestamp), prp1, prp2); +} + static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) { uint32_t dw10 = le32_to_cpu(cmd->cdw10); @@ -710,6 +784,9 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); trace_nvme_getfeat_numq(result); break; + case NVME_TIMESTAMP: + return nvme_get_feature_timestamp(n, cmd); + break; default: trace_nvme_err_invalid_getfeat(dw10); return NVME_INVALID_FIELD | NVME_DNR; @@ -719,6 +796,24 @@ static uint16_t nvme_get_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) return NVME_SUCCESS; } +static uint16_t nvme_set_feature_timestamp(NvmeCtrl *n, NvmeCmd *cmd) +{ + uint16_t ret; + uint64_t timestamp; + uint64_t prp1 = le64_to_cpu(cmd->prp1); + uint64_t prp2 = le64_to_cpu(cmd->prp2); + + ret = nvme_dma_write_prp(n, (uint8_t *)&timestamp, + sizeof(timestamp), prp1, prp2); + if (ret != NVME_SUCCESS) { + return ret; + } + + nvme_set_timestamp(n, timestamp); + + return NVME_SUCCESS; +} + static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) { uint32_t dw10 = le32_to_cpu(cmd->cdw10); @@ -735,6 +830,11 @@ static uint16_t nvme_set_feature(NvmeCtrl *n, NvmeCmd *cmd, NvmeRequest *req) req->cqe.result = cpu_to_le32((n->num_queues - 2) | ((n->num_queues - 2) << 16)); break; + + case NVME_TIMESTAMP: + return nvme_set_feature_timestamp(n, cmd); + break; + default: trace_nvme_err_invalid_setfeat(dw10); return NVME_INVALID_FIELD | NVME_DNR; @@ -907,6 +1007,8 @@ static int nvme_start_ctrl(NvmeCtrl *n) nvme_init_sq(&n->admin_sq, n, n->bar.asq, 0, 0, NVME_AQA_ASQS(n->bar.aqa) + 1); + nvme_set_timestamp(n, 0ULL); + return 0; } @@ -1270,7 +1372,7 @@ static void nvme_realize(PCIDevice *pci_dev, Error **errp) id->sqes = (0x6 << 4) | 0x6; id->cqes = (0x4 << 4) | 0x4; id->nn = cpu_to_le32(n->num_namespaces); - id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS); + id->oncs = cpu_to_le16(NVME_ONCS_WRITE_ZEROS | NVME_ONCS_TIMESTAMP); id->psd[0].mp = cpu_to_le16(0x9c4); id->psd[0].enlat = cpu_to_le32(0x10); id->psd[0].exlat = cpu_to_le32(0x4); diff --git a/hw/block/nvme.h b/hw/block/nvme.h index 56c9d4b4b1..557194ee19 100644 --- a/hw/block/nvme.h +++ b/hw/block/nvme.h @@ -79,6 +79,8 @@ typedef struct NvmeCtrl { uint32_t cmbloc; uint8_t *cmbuf; uint64_t irq_status; + uint64_t host_timestamp; /* Timestamp sent by the host */ + uint64_t timestamp_set_qemu_clock_ms; /* QEMU clock time */ char *serial; NvmeNamespace *namespaces; diff --git a/hw/block/trace-events b/hw/block/trace-events index b92039a573..97a17838ed 100644 --- a/hw/block/trace-events +++ b/hw/block/trace-events @@ -46,6 +46,8 @@ nvme_identify_nslist(uint16_t ns) "identify namespace list, nsid=%"PRIu16"" nvme_getfeat_vwcache(const char* result) "get feature volatile write cache, result=%s" nvme_getfeat_numq(int result) "get feature number of queues, result=%d" nvme_setfeat_numq(int reqcq, int reqsq, int gotcq, int gotsq) "requested cq_count=%d
sq_count=%d, responding with cq_count=%d sq_count=%d" +nvme_setfeat_timestamp(uint64_t ts) "set feature timestamp = 0x%"PRIx64"" +nvme_getfeat_timestamp(uint64_t ts) "get feature timestamp = 0x%"PRIx64"" nvme_mmio_intm_set(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask set, data=0x%"PRIx64", new_mask=0x%"PRIx64"" nvme_mmio_intm_clr(uint64_t data, uint64_t new_mask) "wrote MMIO, interrupt mask clr, data=0x%"PRIx64", new_mask=0x%"PRIx64"" nvme_mmio_cfg(uint64_t data) "wrote MMIO, config controller config=0x%"PRIx64"" diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index ef635be4c2..31b0f5ccc8 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -609,7 +609,7 @@ static void xen_cdrom_realize(XenBlockDevice *blockdev, Error **errp) int rc; /* Set up an empty drive */ - conf->blk = blk_new(0, BLK_PERM_ALL); + conf->blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); rc = blk_attach_dev(conf->blk, DEVICE(blockdev)); if (!rc) { diff --git a/hw/core/machine.c b/hw/core/machine.c index 934c1bcceb..f1a0f45f9c 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -24,6 +24,9 @@ #include "hw/pci/pci.h" #include "hw/mem/nvdimm.h" +GlobalProperty hw_compat_4_0_1[] = {}; +const size_t hw_compat_4_0_1_len = G_N_ELEMENTS(hw_compat_4_0_1); + GlobalProperty hw_compat_4_0[] = {}; const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0); @@ -36,7 +39,7 @@ GlobalProperty hw_compat_3_1[] = { { "tpm-tis", "ppi", "false" }, { "usb-kbd", "serial", "42" }, { "usb-mouse", "serial", "42" }, - { "usb-kbd", "serial", "42" }, + { "usb-tablet", "serial", "42" }, { "virtio-blk-device", "discard", "false" }, { "virtio-blk-device", "write-zeroes", "false" }, }; diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index b45a7ef54b..ba412dd2ca 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -69,8 +69,8 @@ static void set_pointer(Object *obj, Visitor *v, Property *prop, /* --- drive --- */ -static void parse_drive(DeviceState *dev, const char *str, void **ptr, - const char *propname, Error **errp) +static void do_parse_drive(DeviceState *dev, const char *str, void **ptr, + const char *propname, bool iothread, Error **errp) { BlockBackend *blk; bool blk_created = false; @@ -80,7 +80,16 @@ static void parse_drive(DeviceState *dev, const char *str, void **ptr, if (!blk) { BlockDriverState *bs = bdrv_lookup_bs(NULL, str, NULL); if (bs) { - blk = blk_new(0, BLK_PERM_ALL); + /* + * If the device supports iothreads, it will make sure to move the + * block node to the right AioContext if necessary (or fail if this + * isn't possible because of other users). Devices that are not + * aware of iothreads require their BlockBackends to be in the main + * AioContext. + */ + AioContext *ctx = iothread ? 
bdrv_get_aio_context(bs) : + qemu_get_aio_context(); + blk = blk_new(ctx, 0, BLK_PERM_ALL); blk_created = true; ret = blk_insert_bs(blk, bs, errp); @@ -118,6 +127,18 @@ fail: } } +static void parse_drive(DeviceState *dev, const char *str, void **ptr, + const char *propname, Error **errp) +{ + do_parse_drive(dev, str, ptr, propname, false, errp); +} + +static void parse_drive_iothread(DeviceState *dev, const char *str, void **ptr, + const char *propname, Error **errp) +{ + do_parse_drive(dev, str, ptr, propname, true, errp); +} + static void release_drive(Object *obj, const char *name, void *opaque) { DeviceState *dev = DEVICE(obj); @@ -160,6 +181,12 @@ static void set_drive(Object *obj, Visitor *v, const char *name, void *opaque, set_pointer(obj, v, opaque, parse_drive, name, errp); } +static void set_drive_iothread(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + set_pointer(obj, v, opaque, parse_drive_iothread, name, errp); +} + const PropertyInfo qdev_prop_drive = { .name = "str", .description = "Node name or ID of a block device to use as a backend", @@ -168,6 +195,14 @@ const PropertyInfo qdev_prop_drive = { .release = release_drive, }; +const PropertyInfo qdev_prop_drive_iothread = { + .name = "str", + .description = "Node name or ID of a block device to use as a backend", + .get = get_drive, + .set = set_drive_iothread, + .release = release_drive, +}; + /* --- character device --- */ static void get_chr(Object *obj, Visitor *v, const char *name, void *opaque, diff --git a/hw/core/sysbus.c b/hw/core/sysbus.c index 307cf90a51..689a867a22 100644 --- a/hw/core/sysbus.c +++ b/hw/core/sysbus.c @@ -153,6 +153,16 @@ static void sysbus_mmio_map_common(SysBusDevice *dev, int n, hwaddr addr, } } +void sysbus_mmio_unmap(SysBusDevice *dev, int n) +{ + assert(n >= 0 && n < dev->num_mmio); + + if (dev->mmio[n].addr != (hwaddr)-1) { + memory_region_del_subregion(get_system_memory(), dev->mmio[n].memory); + dev->mmio[n].addr = (hwaddr)-1; + } +} + void sysbus_mmio_map(SysBusDevice *dev, int n, hwaddr addr) { sysbus_mmio_map_common(dev, n, addr, false, 0); diff --git a/hw/display/Kconfig b/hw/display/Kconfig index dc1f113df2..910dccb2f7 100644 --- a/hw/display/Kconfig +++ b/hw/display/Kconfig @@ -111,6 +111,16 @@ config VIRTIO_VGA depends on VIRTIO_PCI select VGA +config VHOST_USER_GPU + bool + default y + depends on VIRTIO_GPU && VHOST_USER + +config VHOST_USER_VGA + bool + default y + depends on VIRTIO_VGA && VHOST_USER_GPU + config DPCD bool select AUX diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs index 650031f725..a64998fc7b 100644 --- a/hw/display/Makefile.objs +++ b/hw/display/Makefile.objs @@ -43,9 +43,12 @@ obj-$(CONFIG_VGA) += vga.o common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o -obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu.o virtio-gpu-3d.o +obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o +obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o +obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o +obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o virtio-gpu.o-cflags := $(VIRGL_CFLAGS) virtio-gpu.o-libs += $(VIRGL_LIBS) virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS) diff --git a/hw/display/vhost-user-gpu-pci.c b/hw/display/vhost-user-gpu-pci.c new file mode 100644 index 0000000000..7d9b1f5a8c --- /dev/null +++ b/hw/display/vhost-user-gpu-pci.c @@ -0,0 +1,51 @@ +/* + * 
vhost-user GPU PCI device + * + * Copyright Red Hat, Inc. 2018 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/virtio/virtio-gpu-pci.h" + +#define TYPE_VHOST_USER_GPU_PCI "vhost-user-gpu-pci" +#define VHOST_USER_GPU_PCI(obj) \ + OBJECT_CHECK(VhostUserGPUPCI, (obj), TYPE_VHOST_USER_GPU_PCI) + +typedef struct VhostUserGPUPCI { + VirtIOGPUPCIBase parent_obj; + + VhostUserGPU vdev; +} VhostUserGPUPCI; + +static void vhost_user_gpu_pci_initfn(Object *obj) +{ + VhostUserGPUPCI *dev = VHOST_USER_GPU_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPU); + + VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); + + object_property_add_alias(obj, "chardev", + OBJECT(&dev->vdev), "chardev", + &error_abort); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = { + .generic_name = TYPE_VHOST_USER_GPU_PCI, + .parent = TYPE_VIRTIO_GPU_PCI_BASE, + .instance_size = sizeof(VhostUserGPUPCI), + .instance_init = vhost_user_gpu_pci_initfn, +}; + +static void vhost_user_gpu_pci_register_types(void) +{ + virtio_pci_types_register(&vhost_user_gpu_pci_info); +} + +type_init(vhost_user_gpu_pci_register_types) diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c new file mode 100644 index 0000000000..7181d9cdba --- /dev/null +++ b/hw/display/vhost-user-gpu.c @@ -0,0 +1,607 @@ +/* + * vhost-user GPU Device + * + * Copyright Red Hat, Inc. 2018 + * + * Authors: + * Marc-André Lureau <marcandre.lureau@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory.
+ */ + +#include "qemu/osdep.h" +#include "hw/virtio/virtio-gpu.h" +#include "chardev/char-fe.h" +#include "qapi/error.h" +#include "migration/blocker.h" + +#define VHOST_USER_GPU(obj) \ + OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU) + +typedef enum VhostUserGpuRequest { + VHOST_USER_GPU_NONE = 0, + VHOST_USER_GPU_GET_PROTOCOL_FEATURES, + VHOST_USER_GPU_SET_PROTOCOL_FEATURES, + VHOST_USER_GPU_GET_DISPLAY_INFO, + VHOST_USER_GPU_CURSOR_POS, + VHOST_USER_GPU_CURSOR_POS_HIDE, + VHOST_USER_GPU_CURSOR_UPDATE, + VHOST_USER_GPU_SCANOUT, + VHOST_USER_GPU_UPDATE, + VHOST_USER_GPU_DMABUF_SCANOUT, + VHOST_USER_GPU_DMABUF_UPDATE, +} VhostUserGpuRequest; + +typedef struct VhostUserGpuDisplayInfoReply { + struct virtio_gpu_resp_display_info info; +} VhostUserGpuDisplayInfoReply; + +typedef struct VhostUserGpuCursorPos { + uint32_t scanout_id; + uint32_t x; + uint32_t y; +} QEMU_PACKED VhostUserGpuCursorPos; + +typedef struct VhostUserGpuCursorUpdate { + VhostUserGpuCursorPos pos; + uint32_t hot_x; + uint32_t hot_y; + uint32_t data[64 * 64]; +} QEMU_PACKED VhostUserGpuCursorUpdate; + +typedef struct VhostUserGpuScanout { + uint32_t scanout_id; + uint32_t width; + uint32_t height; +} QEMU_PACKED VhostUserGpuScanout; + +typedef struct VhostUserGpuUpdate { + uint32_t scanout_id; + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; + uint8_t data[]; +} QEMU_PACKED VhostUserGpuUpdate; + +typedef struct VhostUserGpuDMABUFScanout { + uint32_t scanout_id; + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; + uint32_t fd_width; + uint32_t fd_height; + uint32_t fd_stride; + uint32_t fd_flags; + int fd_drm_fourcc; +} QEMU_PACKED VhostUserGpuDMABUFScanout; + +typedef struct VhostUserGpuMsg { + uint32_t request; /* VhostUserGpuRequest */ + uint32_t flags; + uint32_t size; /* the following payload size */ + union { + VhostUserGpuCursorPos cursor_pos; + VhostUserGpuCursorUpdate cursor_update; + VhostUserGpuScanout scanout; + VhostUserGpuUpdate update; + VhostUserGpuDMABUFScanout dmabuf_scanout; + struct virtio_gpu_resp_display_info display_info; + uint64_t u64; + } payload; +} QEMU_PACKED VhostUserGpuMsg; + +static VhostUserGpuMsg m __attribute__ ((unused)); +#define VHOST_USER_GPU_HDR_SIZE \ + (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags)) + +#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4 + +static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked); + +static void +vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg) +{ + VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos; + struct virtio_gpu_scanout *s; + + if (pos->scanout_id >= g->parent_obj.conf.max_outputs) { + return; + } + s = &g->parent_obj.scanout[pos->scanout_id]; + + if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) { + VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update; + if (!s->current_cursor) { + s->current_cursor = cursor_alloc(64, 64); + } + + s->current_cursor->hot_x = up->hot_x; + s->current_cursor->hot_y = up->hot_y; + + memcpy(s->current_cursor->data, up->data, + 64 * 64 * sizeof(uint32_t)); + + dpy_cursor_define(s->con, s->current_cursor); + } + + dpy_mouse_set(s->con, pos->x, pos->y, + msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE); +} + +static void +vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg) +{ + qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg, + VHOST_USER_GPU_HDR_SIZE + msg->size); +} + +static void +vhost_user_gpu_unblock(VhostUserGPU *g) +{ + VhostUserGpuMsg msg = { + .request = VHOST_USER_GPU_DMABUF_UPDATE, + .flags = 
VHOST_USER_GPU_MSG_FLAG_REPLY, + }; + + vhost_user_gpu_send_msg(g, &msg); +} + +static void +vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg) +{ + QemuConsole *con = NULL; + struct virtio_gpu_scanout *s; + + switch (msg->request) { + case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: { + VhostUserGpuMsg reply = { + .request = msg->request, + .flags = VHOST_USER_GPU_MSG_FLAG_REPLY, + .size = sizeof(uint64_t), + }; + + vhost_user_gpu_send_msg(g, &reply); + break; + } + case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: { + break; + } + case VHOST_USER_GPU_GET_DISPLAY_INFO: { + struct virtio_gpu_resp_display_info display_info = { {} }; + VhostUserGpuMsg reply = { + .request = msg->request, + .flags = VHOST_USER_GPU_MSG_FLAG_REPLY, + .size = sizeof(struct virtio_gpu_resp_display_info), + }; + + display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; + virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); + memcpy(&reply.payload.display_info, &display_info, + sizeof(display_info)); + vhost_user_gpu_send_msg(g, &reply); + break; + } + case VHOST_USER_GPU_SCANOUT: { + VhostUserGpuScanout *m = &msg->payload.scanout; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + return; + } + + g->parent_obj.enable = 1; + s = &g->parent_obj.scanout[m->scanout_id]; + con = s->con; + + if (m->scanout_id == 0 && m->width == 0) { + s->ds = qemu_create_message_surface(640, 480, + "Guest disabled display."); + dpy_gfx_replace_surface(con, s->ds); + } else { + s->ds = qemu_create_displaysurface(m->width, m->height); + /* replace surface on next update */ + } + + break; + } + case VHOST_USER_GPU_DMABUF_SCANOUT: { + VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout; + int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr); + QemuDmaBuf *dmabuf; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + error_report("invalid scanout: %d", m->scanout_id); + if (fd >= 0) { + close(fd); + } + break; + } + + g->parent_obj.enable = 1; + con = g->parent_obj.scanout[m->scanout_id].con; + dmabuf = &g->dmabuf[m->scanout_id]; + if (dmabuf->fd >= 0) { + close(dmabuf->fd); + dmabuf->fd = -1; + } + if (!console_has_gl_dmabuf(con)) { + /* it would be nice to report that error earlier */ + error_report("console doesn't support dmabuf!"); + break; + } + dpy_gl_release_dmabuf(con, dmabuf); + if (fd == -1) { + dpy_gl_scanout_disable(con); + break; + } + *dmabuf = (QemuDmaBuf) { + .fd = fd, + .width = m->fd_width, + .height = m->fd_height, + .stride = m->fd_stride, + .fourcc = m->fd_drm_fourcc, + .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP, + }; + dpy_gl_scanout_dmabuf(con, dmabuf); + break; + } + case VHOST_USER_GPU_DMABUF_UPDATE: { + VhostUserGpuUpdate *m = &msg->payload.update; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs || + !g->parent_obj.scanout[m->scanout_id].con) { + error_report("invalid scanout update: %d", m->scanout_id); + vhost_user_gpu_unblock(g); + break; + } + + con = g->parent_obj.scanout[m->scanout_id].con; + if (!console_has_gl(con)) { + error_report("console doesn't support GL!"); + vhost_user_gpu_unblock(g); + break; + } + dpy_gl_update(con, m->x, m->y, m->width, m->height); + g->backend_blocked = true; + break; + } + case VHOST_USER_GPU_UPDATE: { + VhostUserGpuUpdate *m = &msg->payload.update; + + if (m->scanout_id >= g->parent_obj.conf.max_outputs) { + break; + } + s = &g->parent_obj.scanout[m->scanout_id]; + con = s->con; + pixman_image_t *image = + pixman_image_create_bits(PIXMAN_x8r8g8b8, + m->width, + m->height, + (uint32_t *)m->data, + m->width * 4); + 
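/* Annotation (not part of the original patch): the VHOST_USER_GPU_UPDATE
 * payload carries raw pixels; the temporary pixman image above wraps them
 * so the composite below can blit the dirty rectangle into the scanout's
 * DisplaySurface at (m->x, m->y). */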
+ pixman_image_composite(PIXMAN_OP_SRC, + image, NULL, s->ds->image, + 0, 0, 0, 0, m->x, m->y, m->width, m->height); + + pixman_image_unref(image); + if (qemu_console_surface(con) != s->ds) { + dpy_gfx_replace_surface(con, s->ds); + } else { + dpy_gfx_update(con, m->x, m->y, m->width, m->height); + } + break; + } + default: + g_warning("unhandled message %d %d", msg->request, msg->size); + } + + if (con && qemu_console_is_gl_blocked(con)) { + vhost_user_gpu_update_blocked(g, true); + } +} + +static void +vhost_user_gpu_chr_read(void *opaque) +{ + VhostUserGPU *g = opaque; + VhostUserGpuMsg *msg = NULL; + VhostUserGpuRequest request; + uint32_t size, flags; + int r; + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&request, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg header: %d, %d", r, errno); + goto end; + } + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&flags, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg flags"); + goto end; + } + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&size, sizeof(uint32_t)); + if (r != sizeof(uint32_t)) { + error_report("failed to read msg size"); + goto end; + } + + msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size); + g_return_if_fail(msg != NULL); + + r = qemu_chr_fe_read_all(&g->vhost_chr, + (uint8_t *)&msg->payload, size); + if (r != size) { + error_report("failed to read msg payload %d != %d", r, size); + goto end; + } + + msg->request = request; + msg->flags = size; + msg->size = size; + + if (request == VHOST_USER_GPU_CURSOR_UPDATE || + request == VHOST_USER_GPU_CURSOR_POS || + request == VHOST_USER_GPU_CURSOR_POS_HIDE) { + vhost_user_gpu_handle_cursor(g, msg); + } else { + vhost_user_gpu_handle_display(g, msg); + } + +end: + g_free(msg); +} + +static void +vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked) +{ + qemu_set_fd_handler(g->vhost_gpu_fd, + blocked ? 
NULL : vhost_user_gpu_chr_read, NULL, g); +} + +static void +vhost_user_gpu_gl_unblock(VirtIOGPUBase *b) +{ + VhostUserGPU *g = VHOST_USER_GPU(b); + + if (g->backend_blocked) { + vhost_user_gpu_unblock(VHOST_USER_GPU(g)); + g->backend_blocked = false; + } + + vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false); +} + +static bool +vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp) +{ + Chardev *chr; + int sv[2]; + + if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + error_setg_errno(errp, errno, "socketpair() failed"); + return false; + } + + chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET)); + if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) { + error_setg(errp, "Failed to make socket chardev"); + goto err; + } + if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) { + goto err; + } + if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) { + error_setg(errp, "Failed to set vhost-user-gpu socket"); + qemu_chr_fe_deinit(&g->vhost_chr, false); + goto err; + } + + g->vhost_gpu_fd = sv[0]; + vhost_user_gpu_update_blocked(g, false); + close(sv[1]); + return true; + +err: + close(sv[0]); + close(sv[1]); + if (chr) { + object_unref(OBJECT(chr)); + } + return false; +} + +static void +vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev); + struct virtio_gpu_config *vgconfig = + (struct virtio_gpu_config *)config_data; + int ret; + + memset(config_data, 0, sizeof(struct virtio_gpu_config)); + + ret = vhost_dev_get_config(&g->vhost->dev, + config_data, sizeof(struct virtio_gpu_config)); + if (ret) { + error_report("vhost-user-gpu: get device config space failed"); + return; + } + + /* those fields are managed by qemu */ + vgconfig->num_scanouts = b->virtio_config.num_scanouts; + vgconfig->events_read = b->virtio_config.events_read; + vgconfig->events_clear = b->virtio_config.events_clear; +} + +static void +vhost_user_gpu_set_config(VirtIODevice *vdev, + const uint8_t *config_data) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev); + const struct virtio_gpu_config *vgconfig = + (const struct virtio_gpu_config *)config_data; + int ret; + + if (vgconfig->events_clear) { + b->virtio_config.events_read &= ~vgconfig->events_clear; + } + + ret = vhost_dev_set_config(&g->vhost->dev, config_data, + 0, sizeof(struct virtio_gpu_config), + VHOST_SET_CONFIG_TYPE_MASTER); + if (ret) { + error_report("vhost-user-gpu: set device config space failed"); + return; + } +} + +static void +vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + Error *err = NULL; + + if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) { + if (!vhost_user_gpu_do_set_socket(g, &err)) { + error_report_err(err); + return; + } + vhost_user_backend_start(g->vhost); + } else { + /* unblock any wait and stop processing */ + if (g->vhost_gpu_fd != -1) { + vhost_user_gpu_update_blocked(g, true); + qemu_chr_fe_deinit(&g->vhost_chr, true); + g->vhost_gpu_fd = -1; + } + vhost_user_backend_stop(g->vhost); + } +} + +static bool +vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + return vhost_virtqueue_pending(&g->vhost->dev, idx); +} + +static void +vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask); +} + +static void +vhost_user_gpu_instance_init(Object 
*obj) +{ + VhostUserGPU *g = VHOST_USER_GPU(obj); + + g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND)); + object_property_add_alias(obj, "chardev", + OBJECT(g->vhost), "chardev", &error_abort); +} + +static void +vhost_user_gpu_instance_finalize(Object *obj) +{ + VhostUserGPU *g = VHOST_USER_GPU(obj); + + object_unref(OBJECT(g->vhost)); +} + +static void +vhost_user_gpu_reset(VirtIODevice *vdev) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + + virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); + + vhost_user_backend_stop(g->vhost); +} + +static int +vhost_user_gpu_config_change(struct vhost_dev *dev) +{ + error_report("vhost-user-gpu: unhandled backend config change"); + return -1; +} + +static const VhostDevConfigOps config_ops = { + .vhost_dev_config_notifier = vhost_user_gpu_config_change, +}; + +static void +vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) +{ + VhostUserGPU *g = VHOST_USER_GPU(qdev); + VirtIODevice *vdev = VIRTIO_DEVICE(g); + + vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops); + if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) { + return; + } + + if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) { + g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED; + } + + if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) { + return; + } + + g->vhost_gpu_fd = -1; +} + +static Property vhost_user_gpu_properties[] = { + VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void +vhost_user_gpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass); + + vgc->gl_unblock = vhost_user_gpu_gl_unblock; + + vdc->realize = vhost_user_gpu_device_realize; + vdc->reset = vhost_user_gpu_reset; + vdc->set_status = vhost_user_gpu_set_status; + vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask; + vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending; + vdc->get_config = vhost_user_gpu_get_config; + vdc->set_config = vhost_user_gpu_set_config; + + dc->props = vhost_user_gpu_properties; +} + +static const TypeInfo vhost_user_gpu_info = { + .name = TYPE_VHOST_USER_GPU, + .parent = TYPE_VIRTIO_GPU_BASE, + .instance_size = sizeof(VhostUserGPU), + .instance_init = vhost_user_gpu_instance_init, + .instance_finalize = vhost_user_gpu_instance_finalize, + .class_init = vhost_user_gpu_class_init, +}; + +static void vhost_user_gpu_register_types(void) +{ + type_register_static(&vhost_user_gpu_info); +} + +type_init(vhost_user_gpu_register_types) diff --git a/hw/display/vhost-user-vga.c b/hw/display/vhost-user-vga.c new file mode 100644 index 0000000000..a7195276d9 --- /dev/null +++ b/hw/display/vhost-user-vga.c @@ -0,0 +1,52 @@ +/* + * vhost-user VGA device + * + * Copyright Red Hat, Inc. 2018 + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "virtio-vga.h" + +#define TYPE_VHOST_USER_VGA "vhost-user-vga" + +#define VHOST_USER_VGA(obj) \ + OBJECT_CHECK(VhostUserVGA, (obj), TYPE_VHOST_USER_VGA) + +typedef struct VhostUserVGA { + VirtIOVGABase parent_obj; + + VhostUserGPU vdev; +} VhostUserVGA; + +static void vhost_user_vga_inst_initfn(Object *obj) +{ + VhostUserVGA *dev = VHOST_USER_VGA(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_USER_GPU); + + VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); + + object_property_add_alias(obj, "chardev", + OBJECT(&dev->vdev), "chardev", + &error_abort); +} + +static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = { + .generic_name = TYPE_VHOST_USER_VGA, + .parent = TYPE_VIRTIO_VGA_BASE, + .instance_size = sizeof(struct VhostUserVGA), + .instance_init = vhost_user_vga_inst_initfn, +}; + +static void vhost_user_vga_register_types(void) +{ + virtio_pci_types_register(&vhost_user_vga_info); +} + +type_init(vhost_user_vga_register_types) diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c index 2d302526ab..b918167aec 100644 --- a/hw/display/virtio-gpu-3d.c +++ b/hw/display/virtio-gpu-3d.c @@ -118,11 +118,11 @@ static void virgl_cmd_context_destroy(VirtIOGPU *g, static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y, int width, int height) { - if (!g->scanout[idx].con) { + if (!g->parent_obj.scanout[idx].con) { return; } - dpy_gl_update(g->scanout[idx].con, x, y, width, height); + dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height); } static void virgl_cmd_resource_flush(VirtIOGPU *g, @@ -135,8 +135,8 @@ static void virgl_cmd_resource_flush(VirtIOGPU *g, trace_virtio_gpu_cmd_res_flush(rf.resource_id, rf.r.width, rf.r.height, rf.r.x, rf.r.y); - for (i = 0; i < g->conf.max_outputs; i++) { - if (g->scanout[i].resource_id != rf.resource_id) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + if (g->parent_obj.scanout[i].resource_id != rf.resource_id) { continue; } virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height); @@ -154,13 +154,13 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g, trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, ss.r.width, ss.r.height, ss.r.x, ss.r.y); - if (ss.scanout_id >= g->conf.max_outputs) { + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", __func__, ss.scanout_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; return; } - g->enable = 1; + g->parent_obj.enable = 1; memset(&info, 0, sizeof(info)); @@ -173,20 +173,22 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g, cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; return; } - qemu_console_resize(g->scanout[ss.scanout_id].con, + qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con, ss.r.width, ss.r.height); virgl_renderer_force_ctx_0(); - dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id, - info.flags & 1 /* FIXME: Y_0_TOP */, - info.width, info.height, - ss.r.x, ss.r.y, ss.r.width, ss.r.height); + dpy_gl_scanout_texture( + g->parent_obj.scanout[ss.scanout_id].con, info.tex_id, + info.flags & 1 /* FIXME: Y_0_TOP */, + info.width, info.height, + ss.r.x, ss.r.y, ss.r.width, ss.r.height); } else { if (ss.scanout_id != 0) { - dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL); + dpy_gfx_replace_surface( + g->parent_obj.scanout[ss.scanout_id].con, NULL); } - 
dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con); + dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con); } - g->scanout[ss.scanout_id].resource_id = ss.resource_id; + g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id; } static void virgl_cmd_submit_3d(VirtIOGPU *g, @@ -209,7 +211,7 @@ static void virgl_cmd_submit_3d(VirtIOGPU *g, goto out; } - if (virtio_gpu_stats_enabled(g->conf)) { + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { g->stats.req_3d++; g->stats.bytes_3d += cs.size; } @@ -507,7 +509,7 @@ static void virgl_write_fence(void *opaque, uint32_t fence) QTAILQ_REMOVE(&g->fenceq, cmd, next); g_free(cmd); g->inflight--; - if (virtio_gpu_stats_enabled(g->conf)) { + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { fprintf(stderr, "inflight: %3d (-)\r", g->inflight); } } @@ -524,7 +526,7 @@ virgl_create_context(void *opaque, int scanout_idx, qparams.major_ver = params->major_ver; qparams.minor_ver = params->minor_ver; - ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams); + ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams); return (virgl_renderer_gl_context)ctx; } @@ -533,7 +535,7 @@ static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx) VirtIOGPU *g = opaque; QEMUGLContext qctx = (QEMUGLContext)ctx; - dpy_gl_ctx_destroy(g->scanout[0].con, qctx); + dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx); } static int virgl_make_context_current(void *opaque, int scanout_idx, @@ -542,7 +544,8 @@ static int virgl_make_context_current(void *opaque, int scanout_idx, VirtIOGPU *g = opaque; QEMUGLContext qctx = (QEMUGLContext)ctx; - return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx); + return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con, + qctx); } static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = { @@ -594,11 +597,11 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g) int i; /* virgl_renderer_reset() ??? */ - for (i = 0; i < g->conf.max_outputs; i++) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { if (i != 0) { - dpy_gfx_replace_surface(g->scanout[i].con, NULL); + dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL); } - dpy_gl_scanout_disable(g->scanout[i].con); + dpy_gl_scanout_disable(g->parent_obj.scanout[i].con); } } @@ -614,7 +617,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g) g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, virtio_gpu_fence_poll, g); - if (virtio_gpu_stats_enabled(g->conf)) { + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, virtio_gpu_print_stats, g); timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c new file mode 100644 index 0000000000..55e07995fe --- /dev/null +++ b/hw/display/virtio-gpu-base.c @@ -0,0 +1,268 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" + +#include "hw/virtio/virtio-gpu.h" +#include "migration/blocker.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "trace.h" + +void +virtio_gpu_base_reset(VirtIOGPUBase *g) +{ + int i; + + g->enable = 0; + g->use_virgl_renderer = false; + + for (i = 0; i < g->conf.max_outputs; i++) { + g->scanout[i].resource_id = 0; + g->scanout[i].width = 0; + g->scanout[i].height = 0; + g->scanout[i].x = 0; + g->scanout[i].y = 0; + g->scanout[i].ds = NULL; + } +} + +void +virtio_gpu_base_fill_display_info(VirtIOGPUBase *g, + struct virtio_gpu_resp_display_info *dpy_info) +{ + int i; + + for (i = 0; i < g->conf.max_outputs; i++) { + if (g->enabled_output_bitmask & (1 << i)) { + dpy_info->pmodes[i].enabled = 1; + dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width); + dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height); + } + } +} + +static void virtio_gpu_invalidate_display(void *opaque) +{ +} + +static void virtio_gpu_update_display(void *opaque) +{ +} + +static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) +{ +} + +static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type) +{ + g->virtio_config.events_read |= event_type; + virtio_notify_config(&g->parent_obj); +} + +static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +{ + VirtIOGPUBase *g = opaque; + + if (idx >= g->conf.max_outputs) { + return -1; + } + + g->req_state[idx].x = info->xoff; + g->req_state[idx].y = info->yoff; + g->req_state[idx].width = info->width; + g->req_state[idx].height = info->height; + + if (info->width && info->height) { + g->enabled_output_bitmask |= (1 << idx); + } else { + g->enabled_output_bitmask &= ~(1 << idx); + } + + /* send event to guest */ + virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); + return 0; +} + +static void +virtio_gpu_gl_block(void *opaque, bool block) +{ + VirtIOGPUBase *g = opaque; + VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g); + + if (block) { + g->renderer_blocked++; + } else { + g->renderer_blocked--; + } + assert(g->renderer_blocked >= 0); + + if (g->renderer_blocked == 0) { + vgc->gl_unblock(g); + } +} + +const GraphicHwOps virtio_gpu_ops = { + .invalidate = virtio_gpu_invalidate_display, + .gfx_update = virtio_gpu_update_display, + .text_update = virtio_gpu_text_update, + .ui_info = virtio_gpu_ui_info, + .gl_block = virtio_gpu_gl_block, +}; + +bool +virtio_gpu_base_device_realize(DeviceState *qdev, + VirtIOHandleOutput ctrl_cb, + VirtIOHandleOutput cursor_cb, + Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + Error *local_err = NULL; + int i; + + if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) { + error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS); + return false; + } + + g->use_virgl_renderer = false; + if (virtio_gpu_virgl_enabled(g->conf)) { + error_setg(&g->migration_blocker, "virgl is not yet migratable"); + migrate_add_blocker(g->migration_blocker, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_free(g->migration_blocker); + return false; + } + } + + g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs); + virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, + sizeof(struct virtio_gpu_config)); + + if (virtio_gpu_virgl_enabled(g->conf)) { + /* use larger control queue in 3d mode */ + virtio_add_queue(vdev, 256, ctrl_cb); + virtio_add_queue(vdev, 16, cursor_cb); + } else { + virtio_add_queue(vdev, 64, ctrl_cb); + 
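/* Annotation (not part of the original patch): in 2d mode the control
 * queue keeps the default 64 entries; the cursor queue is 16 entries in
 * both modes. */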
virtio_add_queue(vdev, 16, cursor_cb); + } + + g->enabled_output_bitmask = 1; + + g->req_state[0].width = g->conf.xres; + g->req_state[0].height = g->conf.yres; + + for (i = 0; i < g->conf.max_outputs; i++) { + g->scanout[i].con = + graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g); + if (i > 0) { + dpy_gfx_replace_surface(g->scanout[i].con, NULL); + } + } + + return true; +} + +static uint64_t +virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features, + Error **errp) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + + if (virtio_gpu_virgl_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_VIRGL); + } + if (virtio_gpu_edid_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_F_EDID); + } + + return features; +} + +static void +virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features) +{ + static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL); + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + + g->use_virgl_renderer = ((features & virgl) == virgl); + trace_virtio_gpu_features(g->use_virgl_renderer); +} + +static void +virtio_gpu_base_device_unrealize(DeviceState *qdev, Error **errp) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev); + + if (g->migration_blocker) { + migrate_del_blocker(g->migration_blocker); + error_free(g->migration_blocker); + } +} + +static void +virtio_gpu_base_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + vdc->unrealize = virtio_gpu_base_device_unrealize; + vdc->get_features = virtio_gpu_base_get_features; + vdc->set_features = virtio_gpu_base_set_features; + + set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); + dc->hotpluggable = false; +} + +static const TypeInfo virtio_gpu_base_info = { + .name = TYPE_VIRTIO_GPU_BASE, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOGPUBase), + .class_size = sizeof(VirtIOGPUBaseClass), + .class_init = virtio_gpu_base_class_init, + .abstract = true +}; + +static void +virtio_register_types(void) +{ + type_register_static(&virtio_gpu_base_info); +} + +type_init(virtio_register_types) + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24); diff --git 
a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c index 0bc4d9d424..206870cd4c 100644 --- a/hw/display/virtio-gpu-pci.c +++ b/hw/display/virtio-gpu-pci.c @@ -16,33 +16,18 @@ #include "hw/pci/pci.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-bus.h" -#include "hw/virtio/virtio-pci.h" -#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-pci.h" -typedef struct VirtIOGPUPCI VirtIOGPUPCI; - -/* - * virtio-gpu-pci: This extends VirtioPCIProxy. - */ -#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci" -#define VIRTIO_GPU_PCI(obj) \ - OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI) - -struct VirtIOGPUPCI { - VirtIOPCIProxy parent_obj; - VirtIOGPU vdev; -}; - -static Property virtio_gpu_pci_properties[] = { +static Property virtio_gpu_pci_base_properties[] = { DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), DEFINE_PROP_END_OF_LIST(), }; -static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { - VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev); - VirtIOGPU *g = &vgpu->vdev; - DeviceState *vdev = DEVICE(&vgpu->vdev); + VirtIOGPUPCIBase *vgpu = VIRTIO_GPU_PCI_BASE(vpci_dev); + VirtIOGPUBase *g = vgpu->vgpu; + DeviceState *vdev = DEVICE(g); int i; Error *local_error = NULL; @@ -64,36 +49,56 @@ static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data) +static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); - dc->props = virtio_gpu_pci_properties; + dc->props = virtio_gpu_pci_base_properties; dc->hotpluggable = false; - k->realize = virtio_gpu_pci_realize; + k->realize = virtio_gpu_pci_base_realize; pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER; } +static const TypeInfo virtio_gpu_pci_base_info = { + .name = TYPE_VIRTIO_GPU_PCI_BASE, + .parent = TYPE_VIRTIO_PCI, + .instance_size = sizeof(VirtIOGPUPCIBase), + .class_init = virtio_gpu_pci_base_class_init, + .abstract = true +}; + +#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci" +#define VIRTIO_GPU_PCI(obj) \ + OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI) + +typedef struct VirtIOGPUPCI { + VirtIOGPUPCIBase parent_obj; + VirtIOGPU vdev; +} VirtIOGPUPCI; + static void virtio_gpu_initfn(Object *obj) { VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj); virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_GPU); + VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); } static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = { .generic_name = TYPE_VIRTIO_GPU_PCI, + .parent = TYPE_VIRTIO_GPU_PCI_BASE, .instance_size = sizeof(VirtIOGPUPCI), .instance_init = virtio_gpu_initfn, - .class_init = virtio_gpu_pci_class_init, }; static void virtio_gpu_pci_register_types(void) { + type_register_static(&virtio_gpu_pci_base_info); virtio_pci_types_register(&virtio_gpu_pci_info); } + type_init(virtio_gpu_pci_register_types) diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index 9e37e0ac96..4a49da5ecb 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -20,11 +20,13 @@ #include "sysemu/dma.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-gpu-bswap.h" +#include "hw/virtio/virtio-gpu-pixman.h" #include "hw/virtio/virtio-bus.h" #include 
"hw/display/edid.h" -#include "migration/blocker.h" #include "qemu/log.h" #include "qapi/error.h" +#include "qemu/error-report.h" #define VIRTIO_GPU_VM_VERSION 1 @@ -34,53 +36,11 @@ virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); static void virtio_gpu_cleanup_mapping(VirtIOGPU *g, struct virtio_gpu_simple_resource *res); -static void -virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr) -{ - le32_to_cpus(&hdr->type); - le32_to_cpus(&hdr->flags); - le64_to_cpus(&hdr->fence_id); - le32_to_cpus(&hdr->ctx_id); - le32_to_cpus(&hdr->padding); -} - -static void virtio_gpu_bswap_32(void *ptr, - size_t size) -{ -#ifdef HOST_WORDS_BIGENDIAN - - size_t i; - struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr; - - virtio_gpu_ctrl_hdr_bswap(hdr); - - i = sizeof(struct virtio_gpu_ctrl_hdr); - while (i < size) { - le32_to_cpus((uint32_t *)(ptr + i)); - i = i + sizeof(uint32_t); - } - -#endif -} - -static void -virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d) -{ - virtio_gpu_ctrl_hdr_bswap(&t2d->hdr); - le32_to_cpus(&t2d->r.x); - le32_to_cpus(&t2d->r.y); - le32_to_cpus(&t2d->r.width); - le32_to_cpus(&t2d->r.height); - le64_to_cpus(&t2d->offset); - le32_to_cpus(&t2d->resource_id); - le32_to_cpus(&t2d->padding); -} - #ifdef CONFIG_VIRGL #include <virglrenderer.h> #define VIRGL(_g, _virgl, _simple, ...) \ do { \ - if (_g->use_virgl_renderer) { \ + if (_g->parent_obj.use_virgl_renderer) { \ _virgl(__VA_ARGS__); \ } else { \ _simple(__VA_ARGS__); \ @@ -148,10 +108,10 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) struct virtio_gpu_scanout *s; bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR; - if (cursor->pos.scanout_id >= g->conf.max_outputs) { + if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) { return; } - s = &g->scanout[cursor->pos.scanout_id]; + s = &g->parent_obj.scanout[cursor->pos.scanout_id]; trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, cursor->pos.x, @@ -182,53 +142,6 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) cursor->resource_id ? 
1 : 0); } -static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) -{ - VirtIOGPU *g = VIRTIO_GPU(vdev); - memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); -} - -static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) -{ - VirtIOGPU *g = VIRTIO_GPU(vdev); - struct virtio_gpu_config vgconfig; - - memcpy(&vgconfig, config, sizeof(g->virtio_config)); - - if (vgconfig.events_clear) { - g->virtio_config.events_read &= ~vgconfig.events_clear; - } -} - -static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features, - Error **errp) -{ - VirtIOGPU *g = VIRTIO_GPU(vdev); - - if (virtio_gpu_virgl_enabled(g->conf)) { - features |= (1 << VIRTIO_GPU_F_VIRGL); - } - if (virtio_gpu_edid_enabled(g->conf)) { - features |= (1 << VIRTIO_GPU_F_EDID); - } - return features; -} - -static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features) -{ - static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL); - VirtIOGPU *g = VIRTIO_GPU(vdev); - - g->use_virgl_renderer = ((features & virgl) == virgl); - trace_virtio_gpu_features(g->use_virgl_renderer); -} - -static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type) -{ - g->virtio_config.events_read |= event_type; - virtio_notify_config(&g->parent_obj); -} - static struct virtio_gpu_simple_resource * virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) { @@ -277,21 +190,6 @@ void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp)); } -static void -virtio_gpu_fill_display_info(VirtIOGPU *g, - struct virtio_gpu_resp_display_info *dpy_info) -{ - int i; - - for (i = 0; i < g->conf.max_outputs; i++) { - if (g->enabled_output_bitmask & (1 << i)) { - dpy_info->pmodes[i].enabled = 1; - dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width); - dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height); - } - } -} - void virtio_gpu_get_display_info(VirtIOGPU *g, struct virtio_gpu_ctrl_command *cmd) { @@ -300,7 +198,7 @@ void virtio_gpu_get_display_info(VirtIOGPU *g, trace_virtio_gpu_cmd_get_display_info(); memset(&display_info, 0, sizeof(display_info)); display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; - virtio_gpu_fill_display_info(g, &display_info); + virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info); virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, sizeof(display_info)); } @@ -309,9 +207,10 @@ static void virtio_gpu_generate_edid(VirtIOGPU *g, int scanout, struct virtio_gpu_resp_edid *edid) { + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); qemu_edid_info info = { - .prefx = g->req_state[scanout].width, - .prefy = g->req_state[scanout].height, + .prefx = b->req_state[scanout].width, + .prefy = b->req_state[scanout].height, }; edid->size = cpu_to_le32(sizeof(edid->edid)); @@ -323,11 +222,12 @@ void virtio_gpu_get_edid(VirtIOGPU *g, { struct virtio_gpu_resp_edid edid; struct virtio_gpu_cmd_get_edid get_edid; + VirtIOGPUBase *b = VIRTIO_GPU_BASE(g); VIRTIO_GPU_FILL_CMD(get_edid); virtio_gpu_bswap_32(&get_edid, sizeof(get_edid)); - if (get_edid.scanout >= g->conf.max_outputs) { + if (get_edid.scanout >= b->conf.max_outputs) { cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; return; } @@ -339,30 +239,6 @@ void virtio_gpu_get_edid(VirtIOGPU *g, virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid)); } -static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) -{ - switch (virtio_gpu_format) { - case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: - return PIXMAN_BE_b8g8r8x8; - case 
VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: - return PIXMAN_BE_b8g8r8a8; - case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: - return PIXMAN_BE_x8r8g8b8; - case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: - return PIXMAN_BE_a8r8g8b8; - case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: - return PIXMAN_BE_r8g8b8x8; - case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: - return PIXMAN_BE_r8g8b8a8; - case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: - return PIXMAN_BE_x8b8g8r8; - case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: - return PIXMAN_BE_a8b8g8r8; - default: - return 0; - } -} - static uint32_t calc_image_hostmem(pixman_format_code_t pformat, uint32_t width, uint32_t height) { @@ -409,7 +285,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, res->format = c2d.format; res->resource_id = c2d.resource_id; - pformat = get_pixman_format(c2d.format); + pformat = virtio_gpu_get_pixman_format(c2d.format); if (!pformat) { qemu_log_mask(LOG_GUEST_ERROR, "%s: host couldn't handle guest format %d\n", @@ -420,7 +296,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, } res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height); - if (res->hostmem + g->hostmem < g->conf.max_hostmem) { + if (res->hostmem + g->hostmem < g->conf_max_hostmem) { res->image = pixman_image_create_bits(pformat, c2d.width, c2d.height, @@ -442,7 +318,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g, static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id) { - struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id]; + struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id]; struct virtio_gpu_simple_resource *res; DisplaySurface *ds = NULL; @@ -474,7 +350,7 @@ static void virtio_gpu_resource_destroy(VirtIOGPU *g, int i; if (res->scanout_bitmask) { - for (i = 0; i < g->conf.max_outputs; i++) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { if (res->scanout_bitmask & (1 << i)) { virtio_gpu_disable_scanout(g, i); } @@ -604,7 +480,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, pixman_region_init_rect(&flush_region, rf.r.x, rf.r.y, rf.r.width, rf.r.height); - for (i = 0; i < g->conf.max_outputs; i++) { + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { struct virtio_gpu_scanout *scanout; pixman_region16_t region, finalregion; pixman_box16_t *extents; @@ -612,7 +488,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, if (!(res->scanout_bitmask & (1 << i))) { continue; } - scanout = &g->scanout[i]; + scanout = &g->parent_obj.scanout[i]; pixman_region_init(&finalregion); pixman_region_init_rect(&region, scanout->x, scanout->y, @@ -622,7 +498,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g, pixman_region_translate(&finalregion, -scanout->x, -scanout->y); extents = pixman_region_extents(&finalregion); /* work out the area we need to update for each console */ - dpy_gfx_update(g->scanout[i].con, + dpy_gfx_update(g->parent_obj.scanout[i].con, extents->x1, extents->y1, extents->x2 - extents->x1, extents->y2 - extents->y1); @@ -653,14 +529,14 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g, trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, ss.r.width, ss.r.height, ss.r.x, ss.r.y); - if (ss.scanout_id >= g->conf.max_outputs) { + if (ss.scanout_id >= g->parent_obj.conf.max_outputs) { qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", __func__, ss.scanout_id); cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; return; } - g->enable = 1; + g->parent_obj.enable = 1; if (ss.resource_id == 0) { virtio_gpu_disable_scanout(g, ss.scanout_id); return; @@ -677,6 +553,8 @@ static void
virtio_gpu_set_scanout(VirtIOGPU *g, if (ss.r.x > res->width || ss.r.y > res->height || + ss.r.width < 16 || + ss.r.height < 16 || ss.r.width > res->width || ss.r.height > res->height || ss.r.x + ss.r.width > res->width || @@ -689,7 +567,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g, return; } - scanout = &g->scanout[ss.scanout_id]; + scanout = &g->parent_obj.scanout[ss.scanout_id]; format = pixman_image_get_format(res->image); bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8); @@ -712,7 +590,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g, return; } pixman_image_unref(rect); - dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds); + dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con, + scanout->ds); } ores = virtio_gpu_find_resource(g, scanout->resource_id); @@ -930,7 +809,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) while (!QTAILQ_EMPTY(&g->cmdq)) { cmd = QTAILQ_FIRST(&g->cmdq); - if (g->renderer_blocked) { + if (g->parent_obj.renderer_blocked) { break; } @@ -939,14 +818,14 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) g, cmd); QTAILQ_REMOVE(&g->cmdq, cmd, next); - if (virtio_gpu_stats_enabled(g->conf)) { + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { g->stats.requests++; } if (!cmd->finished) { QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next); g->inflight++; - if (virtio_gpu_stats_enabled(g->conf)) { + if (virtio_gpu_stats_enabled(g->parent_obj.conf)) { if (g->stats.max_inflight < g->inflight) { g->stats.max_inflight = g->inflight; } @@ -958,6 +837,19 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g) } } +static void virtio_gpu_gl_unblock(VirtIOGPUBase *b) +{ + VirtIOGPU *g = VIRTIO_GPU(b); + +#ifdef CONFIG_VIRGL + if (g->renderer_reset) { + g->renderer_reset = false; + virtio_gpu_virgl_reset(g); + } +#endif + virtio_gpu_process_cmdq(g); +} + static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) { VirtIOGPU *g = VIRTIO_GPU(vdev); @@ -968,7 +860,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) } #ifdef CONFIG_VIRGL - if (!g->renderer_inited && g->use_virgl_renderer) { + if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) { virtio_gpu_virgl_init(g); g->renderer_inited = true; } @@ -986,7 +878,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) virtio_gpu_process_cmdq(g); #ifdef CONFIG_VIRGL - if (g->use_virgl_renderer) { + if (g->parent_obj.use_virgl_renderer) { virtio_gpu_virgl_fence_poll(g); } #endif @@ -995,7 +887,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) static void virtio_gpu_ctrl_bh(void *opaque) { VirtIOGPU *g = opaque; - virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq); + virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq); } static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) @@ -1033,75 +925,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) static void virtio_gpu_cursor_bh(void *opaque) { VirtIOGPU *g = opaque; - virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq); + virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq); } -static void virtio_gpu_invalidate_display(void *opaque) -{ -} - -static void virtio_gpu_update_display(void *opaque) -{ -} - -static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) -{ -} - -static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) -{ - VirtIOGPU *g = opaque; - - if (idx >= g->conf.max_outputs) { - return -1; - } - - g->req_state[idx].x = info->xoff; - g->req_state[idx].y = info->yoff; - 
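The new virtio_gpu_gl_unblock() above is the device half of the old virtio_gpu_gl_block() (whose removal appears further down): the block reference counting stays in the base class, which presumably invokes the class hook once the count drops back to zero. A sketch of the assumed base-class side, with a hypothetical function name:

static void virtio_gpu_base_gl_block(void *opaque, bool block)
{
    VirtIOGPUBase *g = opaque;
    VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);

    if (block) {
        g->renderer_blocked++;
    } else {
        g->renderer_blocked--;
    }
    assert(g->renderer_blocked >= 0);

    if (g->renderer_blocked == 0) {
        /* the hook registered by virtio_gpu_class_init() below */
        vgc->gl_unblock(g);
    }
}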
g->req_state[idx].width = info->width; - g->req_state[idx].height = info->height; - - if (info->width && info->height) { - g->enabled_output_bitmask |= (1 << idx); - } else { - g->enabled_output_bitmask &= ~(1 << idx); - } - - /* send event to guest */ - virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); - return 0; -} - -static void virtio_gpu_gl_block(void *opaque, bool block) -{ - VirtIOGPU *g = opaque; - - if (block) { - g->renderer_blocked++; - } else { - g->renderer_blocked--; - } - assert(g->renderer_blocked >= 0); - - if (g->renderer_blocked == 0) { -#ifdef CONFIG_VIRGL - if (g->renderer_reset) { - g->renderer_reset = false; - virtio_gpu_virgl_reset(g); - } -#endif - virtio_gpu_process_cmdq(g); - } -} - -const GraphicHwOps virtio_gpu_ops = { - .invalidate = virtio_gpu_invalidate_display, - .gfx_update = virtio_gpu_update_display, - .text_update = virtio_gpu_text_update, - .ui_info = virtio_gpu_ui_info, - .gl_block = virtio_gpu_gl_block, -}; - static const VMStateDescription vmstate_virtio_gpu_scanout = { .name = "virtio-gpu-one-scanout", .version_id = 1, @@ -1124,10 +950,11 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = { .name = "virtio-gpu-scanouts", .version_id = 1, .fields = (VMStateField[]) { - VMSTATE_INT32(enable, struct VirtIOGPU), - VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL), - VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU, - conf.max_outputs, 1, + VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU), + VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs, + struct VirtIOGPU, NULL), + VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU, + parent_obj.conf.max_outputs, 1, vmstate_virtio_gpu_scanout, struct virtio_gpu_scanout), VMSTATE_END_OF_LIST() @@ -1183,7 +1010,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, res->iov_cnt = qemu_get_be32(f); /* allocate */ - pformat = get_pixman_format(res->format); + pformat = virtio_gpu_get_pixman_format(res->format); if (!pformat) { g_free(res); return -EINVAL; @@ -1242,8 +1069,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, /* load & apply scanout state */ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); - for (i = 0; i < g->conf.max_outputs; i++) { - scanout = &g->scanout[i]; + for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + scanout = &g->parent_obj.scanout[i]; if (!scanout->resource_id) { continue; } @@ -1272,84 +1099,35 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) VirtIODevice *vdev = VIRTIO_DEVICE(qdev); VirtIOGPU *g = VIRTIO_GPU(qdev); bool have_virgl; - Error *local_err = NULL; - int i; - - if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) { - error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS); - return; - } - g->use_virgl_renderer = false; #if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN) have_virgl = false; #else have_virgl = display_opengl; #endif if (!have_virgl) { - g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); - } - - if (virtio_gpu_virgl_enabled(g->conf)) { - error_setg(&g->migration_blocker, "virgl is not yet migratable"); - migrate_add_blocker(g->migration_blocker, &local_err); - if (local_err) { - error_propagate(errp, local_err); - error_free(g->migration_blocker); - return; - } - } - - g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs); - virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, - sizeof(struct virtio_gpu_config)); - - g->req_state[0].width = g->conf.xres; - g->req_state[0].height = 
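The vmstate fields above switch to dotted parent_obj paths. This is purely a rename at the C level: the VMSTATE macros compute byte offsets with offsetof(), which in practice accepts nested member designators, so the migration stream layout is unchanged. A self-contained illustration:

#include <stddef.h>

struct Base { int x; int enable; };
struct Dev  { struct Base parent_obj; };

/* offsetof() resolves through embedded structs, which is what lets
 * VMSTATE_INT32(parent_obj.enable, ...) match the old flat field */
_Static_assert(offsetof(struct Dev, parent_obj.enable) ==
               offsetof(struct Base, enable),
               "dotted designators resolve to the same byte offset");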
g->conf.yres; - - if (virtio_gpu_virgl_enabled(g->conf)) { - /* use larger control queue in 3d mode */ - g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb); - g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); - + g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); + } else { #if defined(CONFIG_VIRGL) - g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g); -#else - g->virtio_config.num_capsets = 0; + VIRTIO_GPU_BASE(g)->virtio_config.num_capsets = + virtio_gpu_virgl_get_num_capsets(g); #endif - } else { - g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); - g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); } + if (!virtio_gpu_base_device_realize(qdev, + virtio_gpu_handle_ctrl_cb, + virtio_gpu_handle_cursor_cb, + errp)) { + return; + } + + g->ctrl_vq = virtio_get_queue(vdev, 0); + g->cursor_vq = virtio_get_queue(vdev, 1); g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); QTAILQ_INIT(&g->reslist); QTAILQ_INIT(&g->cmdq); QTAILQ_INIT(&g->fenceq); - - g->enabled_output_bitmask = 1; - - for (i = 0; i < g->conf.max_outputs; i++) { - g->scanout[i].con = - graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g); - if (i > 0) { - dpy_gfx_replace_surface(g->scanout[i].con, NULL); - } - } -} - -static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp) -{ - VirtIOGPU *g = VIRTIO_GPU(qdev); - if (g->migration_blocker) { - migrate_del_blocker(g->migration_blocker); - error_free(g->migration_blocker); - } -} - -static void virtio_gpu_instance_init(Object *obj) -{ } static void virtio_gpu_reset(VirtIODevice *vdev) @@ -1357,21 +1135,16 @@ static void virtio_gpu_reset(VirtIODevice *vdev) VirtIOGPU *g = VIRTIO_GPU(vdev); struct virtio_gpu_simple_resource *res, *tmp; struct virtio_gpu_ctrl_command *cmd; - int i; - g->enable = 0; +#ifdef CONFIG_VIRGL + if (g->parent_obj.use_virgl_renderer) { + virtio_gpu_virgl_reset(g); + } +#endif QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { virtio_gpu_resource_destroy(g, res); } - for (i = 0; i < g->conf.max_outputs; i++) { - g->scanout[i].resource_id = 0; - g->scanout[i].width = 0; - g->scanout[i].height = 0; - g->scanout[i].x = 0; - g->scanout[i].y = 0; - g->scanout[i].ds = NULL; - } while (!QTAILQ_EMPTY(&g->cmdq)) { cmd = QTAILQ_FIRST(&g->cmdq); @@ -1387,15 +1160,37 @@ static void virtio_gpu_reset(VirtIODevice *vdev) } #ifdef CONFIG_VIRGL - if (g->use_virgl_renderer) { - if (g->renderer_blocked) { + if (g->parent_obj.use_virgl_renderer) { + if (g->parent_obj.renderer_blocked) { g->renderer_reset = true; } else { virtio_gpu_virgl_reset(g); } - g->use_virgl_renderer = 0; + g->parent_obj.use_virgl_renderer = false; } #endif + + virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev)); +} + +static void +virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + + memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); +} + +static void +virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev); + const struct virtio_gpu_config *vgconfig = + (const struct virtio_gpu_config *)config; + + if (vgconfig->events_clear) { + g->virtio_config.events_read &= ~vgconfig->events_clear; + } } /* @@ -1426,18 +1221,15 @@ static const VMStateDescription vmstate_virtio_gpu = { }; static Property virtio_gpu_properties[] = { - DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1), - DEFINE_PROP_SIZE("max_hostmem", 
VirtIOGPU, conf.max_hostmem, 256 * MiB), + VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf), + DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, + 256 * MiB), #ifdef CONFIG_VIRGL - DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags, + DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags, VIRTIO_GPU_FLAG_VIRGL_ENABLED, true), - DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags, + DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags, VIRTIO_GPU_FLAG_STATS_ENABLED, false), #endif - DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags, - VIRTIO_GPU_FLAG_EDID_ENABLED, false), - DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024), - DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768), DEFINE_PROP_END_OF_LIST(), }; @@ -1445,27 +1237,22 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass); + vgc->gl_unblock = virtio_gpu_gl_unblock; vdc->realize = virtio_gpu_device_realize; - vdc->unrealize = virtio_gpu_device_unrealize; + vdc->reset = virtio_gpu_reset; vdc->get_config = virtio_gpu_get_config; vdc->set_config = virtio_gpu_set_config; - vdc->get_features = virtio_gpu_get_features; - vdc->set_features = virtio_gpu_set_features; - vdc->reset = virtio_gpu_reset; - - set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); - dc->props = virtio_gpu_properties; dc->vmsd = &vmstate_virtio_gpu; - dc->hotpluggable = false; + dc->props = virtio_gpu_properties; } static const TypeInfo virtio_gpu_info = { .name = TYPE_VIRTIO_GPU, - .parent = TYPE_VIRTIO_DEVICE, + .parent = TYPE_VIRTIO_GPU_BASE, .instance_size = sizeof(VirtIOGPU), - .instance_init = virtio_gpu_instance_init, .class_init = virtio_gpu_class_init, }; @@ -1475,26 +1262,3 @@ static void virtio_register_types(void) } type_init(virtio_register_types) - -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); - -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32); -QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24); diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c index 5d57bf5b0c..67e34935c2 100644 --- a/hw/display/virtio-vga.c +++ b/hw/display/virtio-vga.c @@ -1,63 +1,42 @@ #include "qemu/osdep.h" #include "hw/hw.h" 
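The per-device properties deleted above (max_outputs, edid, xres, yres) are folded into the VIRTIO_GPU_BASE_PROPERTIES macro. Reconstructed from the removed entries, the macro plausibly expands to:

#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf)                       \
    DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1),    \
    DEFINE_PROP_BIT("edid", _state, _conf.flags,                        \
                    VIRTIO_GPU_FLAG_EDID_ENABLED, false),               \
    DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024),               \
    DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768)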
#include "hw/pci/pci.h" -#include "vga_int.h" -#include "hw/virtio/virtio-pci.h" #include "hw/virtio/virtio-gpu.h" #include "qapi/error.h" +#include "virtio-vga.h" -/* - * virtio-vga: This extends VirtioPCIProxy. - */ -#define TYPE_VIRTIO_VGA "virtio-vga" -#define VIRTIO_VGA(obj) \ - OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA) -#define VIRTIO_VGA_GET_CLASS(obj) \ - OBJECT_GET_CLASS(VirtIOVGAClass, obj, TYPE_VIRTIO_VGA) -#define VIRTIO_VGA_CLASS(klass) \ - OBJECT_CLASS_CHECK(VirtIOVGAClass, klass, TYPE_VIRTIO_VGA) - -typedef struct VirtIOVGA { - VirtIOPCIProxy parent_obj; - VirtIOGPU vdev; - VGACommonState vga; - MemoryRegion vga_mrs[3]; -} VirtIOVGA; - -typedef struct VirtIOVGAClass { - VirtioPCIClass parent_class; - DeviceReset parent_reset; -} VirtIOVGAClass; - -static void virtio_vga_invalidate_display(void *opaque) +static void virtio_vga_base_invalidate_display(void *opaque) { - VirtIOVGA *vvga = opaque; + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; - if (vvga->vdev.enable) { - virtio_gpu_ops.invalidate(&vvga->vdev); + if (g->enable) { + virtio_gpu_ops.invalidate(g); } else { vvga->vga.hw_ops->invalidate(&vvga->vga); } } -static void virtio_vga_update_display(void *opaque) +static void virtio_vga_base_update_display(void *opaque) { - VirtIOVGA *vvga = opaque; + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; - if (vvga->vdev.enable) { - virtio_gpu_ops.gfx_update(&vvga->vdev); + if (g->enable) { + virtio_gpu_ops.gfx_update(g); } else { vvga->vga.hw_ops->gfx_update(&vvga->vga); } } -static void virtio_vga_text_update(void *opaque, console_ch_t *chardata) +static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata) { - VirtIOVGA *vvga = opaque; + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; - if (vvga->vdev.enable) { + if (g->enable) { if (virtio_gpu_ops.text_update) { - virtio_gpu_ops.text_update(&vvga->vdev, chardata); + virtio_gpu_ops.text_update(g, chardata); } } else { if (vvga->vga.hw_ops->text_update) { @@ -66,49 +45,52 @@ static void virtio_vga_text_update(void *opaque, console_ch_t *chardata) } } -static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) { - VirtIOVGA *vvga = opaque; + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; if (virtio_gpu_ops.ui_info) { - return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info); + return virtio_gpu_ops.ui_info(g, idx, info); } return -1; } -static void virtio_vga_gl_block(void *opaque, bool block) +static void virtio_vga_base_gl_block(void *opaque, bool block) { - VirtIOVGA *vvga = opaque; + VirtIOVGABase *vvga = opaque; + VirtIOGPUBase *g = vvga->vgpu; if (virtio_gpu_ops.gl_block) { - virtio_gpu_ops.gl_block(&vvga->vdev, block); + virtio_gpu_ops.gl_block(g, block); } } -static const GraphicHwOps virtio_vga_ops = { - .invalidate = virtio_vga_invalidate_display, - .gfx_update = virtio_vga_update_display, - .text_update = virtio_vga_text_update, - .ui_info = virtio_vga_ui_info, - .gl_block = virtio_vga_gl_block, +static const GraphicHwOps virtio_vga_base_ops = { + .invalidate = virtio_vga_base_invalidate_display, + .gfx_update = virtio_vga_base_update_display, + .text_update = virtio_vga_base_text_update, + .ui_info = virtio_vga_base_ui_info, + .gl_block = virtio_vga_base_gl_block, }; -static const VMStateDescription vmstate_virtio_vga = { +static const VMStateDescription vmstate_virtio_vga_base = { .name = "virtio-vga", .version_id = 2, 
.minimum_version_id = 2, .fields = (VMStateField[]) { /* no pci stuff here, saving the virtio device will handle that */ - VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState), + VMSTATE_STRUCT(vga, VirtIOVGABase, 0, + vmstate_vga_common, VGACommonState), VMSTATE_END_OF_LIST() } }; /* VGA device wrapper around PCI device around virtio GPU */ -static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { - VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev); - VirtIOGPU *g = &vvga->vdev; + VirtIOVGABase *vvga = VIRTIO_VGA_BASE(vpci_dev); + VirtIOGPUBase *g = vvga->vgpu; VGACommonState *vga = &vvga->vga; Error *err = NULL; uint32_t offset; @@ -168,7 +150,7 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp) vvga->vga_mrs, true, false); vga->con = g->scanout[0].con; - graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga); + graphic_console_set_hwops(vga->con, &virtio_vga_base_ops, vvga); for (i = 0; i < g->conf.max_outputs; i++) { object_property_set_link(OBJECT(g->scanout[i].con), @@ -177,10 +159,10 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp) } } -static void virtio_vga_reset(DeviceState *dev) +static void virtio_vga_base_reset(DeviceState *dev) { - VirtIOVGAClass *klass = VIRTIO_VGA_GET_CLASS(dev); - VirtIOVGA *vvga = VIRTIO_VGA(dev); + VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev); + VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev); /* reset virtio-gpu */ klass->parent_reset(dev); @@ -190,48 +172,70 @@ static void virtio_vga_reset(DeviceState *dev) vga_dirty_log_start(&vvga->vga); } -static Property virtio_vga_properties[] = { +static Property virtio_vga_base_properties[] = { DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy), DEFINE_PROP_END_OF_LIST(), }; -static void virtio_vga_class_init(ObjectClass *klass, void *data) +static void virtio_vga_base_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); - VirtIOVGAClass *v = VIRTIO_VGA_CLASS(klass); + VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); - dc->props = virtio_vga_properties; - dc->vmsd = &vmstate_virtio_vga; + dc->props = virtio_vga_base_properties; + dc->vmsd = &vmstate_virtio_vga_base; dc->hotpluggable = false; - device_class_set_parent_reset(dc, virtio_vga_reset, + device_class_set_parent_reset(dc, virtio_vga_base_reset, &v->parent_reset); - k->realize = virtio_vga_realize; + k->realize = virtio_vga_base_realize; pcidev_k->romfile = "vgabios-virtio.bin"; pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA; } +static TypeInfo virtio_vga_base_info = { + .name = TYPE_VIRTIO_VGA_BASE, + .parent = TYPE_VIRTIO_PCI, + .instance_size = sizeof(struct VirtIOVGABase), + .class_size = sizeof(struct VirtIOVGABaseClass), + .class_init = virtio_vga_base_class_init, + .abstract = true, +}; + +#define TYPE_VIRTIO_VGA "virtio-vga" + +#define VIRTIO_VGA(obj) \ + OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA) + +typedef struct VirtIOVGA { + VirtIOVGABase parent_obj; + + VirtIOGPU vdev; +} VirtIOVGA; + static void virtio_vga_inst_initfn(Object *obj) { VirtIOVGA *dev = VIRTIO_VGA(obj); virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_GPU); + VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev); } + static VirtioPCIDeviceTypeInfo virtio_vga_info = { .generic_name = TYPE_VIRTIO_VGA, + .parent = 
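With virtio-vga-base abstract and the GPU reached only through the vgpu pointer, further VGA-compatible variants just need to embed a different VirtIOGPUBase subclass. A hypothetical example following the exact pattern of virtio_vga_inst_initfn() (all type names here are invented for illustration):

typedef struct VirtIOVGAGL {
    VirtIOVGABase parent_obj;

    VirtIOGPUGL vdev;            /* hypothetical GL-only GPU device */
} VirtIOVGAGL;

static void virtio_vga_gl_inst_initfn(Object *obj)
{
    VirtIOVGAGL *dev = VIRTIO_VGA_GL(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU_GL);
    VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}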
TYPE_VIRTIO_VGA_BASE, .instance_size = sizeof(struct VirtIOVGA), .instance_init = virtio_vga_inst_initfn, - .class_size = sizeof(struct VirtIOVGAClass), - .class_init = virtio_vga_class_init, }; static void virtio_vga_register_types(void) { + type_register_static(&virtio_vga_base_info); virtio_pci_types_register(&virtio_vga_info); } diff --git a/hw/display/virtio-vga.h b/hw/display/virtio-vga.h new file mode 100644 index 0000000000..c10bf390aa --- /dev/null +++ b/hw/display/virtio-vga.h @@ -0,0 +1,32 @@ +#ifndef VIRTIO_VGA_H_ +#define VIRTIO_VGA_H_ + +#include "hw/virtio/virtio-gpu-pci.h" +#include "vga_int.h" + +/* + * virtio-vga-base: This extends VirtioPCIProxy. + */ +#define TYPE_VIRTIO_VGA_BASE "virtio-vga-base" +#define VIRTIO_VGA_BASE(obj) \ + OBJECT_CHECK(VirtIOVGABase, (obj), TYPE_VIRTIO_VGA_BASE) +#define VIRTIO_VGA_BASE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(VirtIOVGABaseClass, obj, TYPE_VIRTIO_VGA_BASE) +#define VIRTIO_VGA_BASE_CLASS(klass) \ + OBJECT_CLASS_CHECK(VirtIOVGABaseClass, klass, TYPE_VIRTIO_VGA_BASE) + +typedef struct VirtIOVGABase { + VirtIOPCIProxy parent_obj; + + VirtIOGPUBase *vgpu; + VGACommonState vga; + MemoryRegion vga_mrs[3]; +} VirtIOVGABase; + +typedef struct VirtIOVGABaseClass { + VirtioPCIClass parent_class; + + DeviceReset parent_reset; +} VirtIOVGABaseClass; + +#endif /* VIRTIO_VGA_H_ */ diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 2632b73f80..edc240bcbf 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -110,6 +110,9 @@ struct hpet_fw_config hpet_cfg = {.count = UINT8_MAX}; /* Physical Address of PVH entry point read from kernel ELF NOTE */ static size_t pvh_start_addr; +GlobalProperty pc_compat_4_0_1[] = {}; +const size_t pc_compat_4_0_1_len = G_N_ELEMENTS(pc_compat_4_0_1); + GlobalProperty pc_compat_4_0[] = {}; const size_t pc_compat_4_0_len = G_N_ELEMENTS(pc_compat_4_0); diff --git a/hw/i386/pc_q35.c b/hw/i386/pc_q35.c index 37dd350511..dcddc64662 100644 --- a/hw/i386/pc_q35.c +++ b/hw/i386/pc_q35.c @@ -357,7 +357,7 @@ static void pc_q35_machine_options(MachineClass *m) m->units_per_default_bus = 1; m->default_machine_opts = "firmware=bios-256k.bin"; m->default_display = "std"; - m->default_kernel_irqchip_split = true; + m->default_kernel_irqchip_split = false; m->no_floppy = 1; machine_class_allow_dynamic_sysbus_dev(m, TYPE_AMD_IOMMU_DEVICE); machine_class_allow_dynamic_sysbus_dev(m, TYPE_INTEL_IOMMU_DEVICE); @@ -374,10 +374,22 @@ static void pc_q35_4_1_machine_options(MachineClass *m) DEFINE_Q35_MACHINE(v4_1, "pc-q35-4.1", NULL, pc_q35_4_1_machine_options); -static void pc_q35_4_0_machine_options(MachineClass *m) +static void pc_q35_4_0_1_machine_options(MachineClass *m) { pc_q35_4_1_machine_options(m); m->alias = NULL; + compat_props_add(m->compat_props, hw_compat_4_0_1, hw_compat_4_0_1_len); + compat_props_add(m->compat_props, pc_compat_4_0_1, pc_compat_4_0_1_len); +} + +DEFINE_Q35_MACHINE(v4_0_1, "pc-q35-4.0.1", NULL, + pc_q35_4_0_1_machine_options); + +static void pc_q35_4_0_machine_options(MachineClass *m) +{ + pc_q35_4_0_1_machine_options(m); + m->default_kernel_irqchip_split = true; + m->alias = NULL; compat_props_add(m->compat_props, hw_compat_4_0, hw_compat_4_0_len); compat_props_add(m->compat_props, pc_compat_4_0, pc_compat_4_0_len); } diff --git a/hw/ide/qdev.c b/hw/ide/qdev.c index 573b022e1e..360cd20bd8 100644 --- a/hw/ide/qdev.c +++ b/hw/ide/qdev.c @@ -168,7 +168,7 @@ static void ide_dev_initfn(IDEDevice *dev, IDEDriveKind kind, Error **errp) return; } else { /* Anonymous BlockBackend for an empty drive */ - dev->conf.blk = blk_new(0, 
BLK_PERM_ALL); + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); ret = blk_attach_dev(dev->conf.blk, &dev->qdev); assert(ret == 0); } diff --git a/hw/intc/Makefile.objs b/hw/intc/Makefile.objs index df712c3e6c..03019b9a03 100644 --- a/hw/intc/Makefile.objs +++ b/hw/intc/Makefile.objs @@ -39,6 +39,7 @@ obj-$(CONFIG_XICS_SPAPR) += xics_spapr.o obj-$(CONFIG_XICS_KVM) += xics_kvm.o obj-$(CONFIG_XIVE) += xive.o obj-$(CONFIG_XIVE_SPAPR) += spapr_xive.o +obj-$(CONFIG_XIVE_KVM) += spapr_xive_kvm.o obj-$(CONFIG_POWERNV) += xics_pnv.o pnv_xive.o obj-$(CONFIG_ALLWINNER_A10_PIC) += allwinner-a10-pic.o obj-$(CONFIG_S390_FLIC) += s390_flic.o diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 097f88d460..62e0ef8fa5 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -41,13 +41,6 @@ #define SPAPR_XIVE_NVT_BASE 0x400 /* - * The sPAPR machine has a unique XIVE IC device. Assign a fixed value - * to the controller block id value. It can nevertheless be changed - * for testing purpose. - */ -#define SPAPR_XIVE_BLOCK_ID 0x0 - -/* * sPAPR NVT and END indexing helpers */ static uint32_t spapr_xive_nvt_to_target(uint8_t nvt_blk, uint32_t nvt_idx) @@ -86,6 +79,22 @@ static int spapr_xive_target_to_nvt(uint32_t target, * sPAPR END indexing uses a simple mapping of the CPU vcpu_id, 8 * priorities per CPU */ +int spapr_xive_end_to_target(uint8_t end_blk, uint32_t end_idx, + uint32_t *out_server, uint8_t *out_prio) +{ + + assert(end_blk == SPAPR_XIVE_BLOCK_ID); + + if (out_server) { + *out_server = end_idx >> 3; + } + + if (out_prio) { + *out_prio = end_idx & 0x7; + } + return 0; +} + static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio, uint8_t *out_end_blk, uint32_t *out_end_idx) { @@ -120,6 +129,7 @@ static int spapr_xive_target_to_end(uint32_t target, uint8_t prio, static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end, Monitor *mon) { + uint64_t qaddr_base = xive_end_qaddr(end); uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); @@ -127,9 +137,9 @@ static void spapr_xive_end_pic_print_info(SpaprXive *xive, XiveEND *end, uint32_t nvt = xive_get_field32(END_W6_NVT_INDEX, end->w6); uint8_t priority = xive_get_field32(END_W7_F0_PRIORITY, end->w7); - monitor_printf(mon, "%3d/%d % 6d/%5d ^%d", + monitor_printf(mon, "%3d/%d % 6d/%5d @%"PRIx64" ^%d", spapr_xive_nvt_to_target(0, nvt), - priority, qindex, qentries, qgen); + priority, qindex, qentries, qaddr_base, qgen); xive_end_queue_pic_print_info(end, 6, mon); monitor_printf(mon, "]"); @@ -140,7 +150,17 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) XiveSource *xsrc = &xive->source; int i; - monitor_printf(mon, " LSIN PQ EISN CPU/PRIO EQ\n"); + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_synchronize_state(xive, &local_err); + if (local_err) { + error_report_err(local_err); + return; + } + } + + monitor_printf(mon, " LISN PQ EISN CPU/PRIO EQ\n"); for (i = 0; i < xive->nr_irqs; i++) { uint8_t pq = xive_source_esb_get(xsrc, i); @@ -173,7 +193,7 @@ void spapr_xive_pic_print_info(SpaprXive *xive, Monitor *mon) } } -static void spapr_xive_map_mmio(SpaprXive *xive) +void spapr_xive_map_mmio(SpaprXive *xive) { sysbus_mmio_map(SYS_BUS_DEVICE(xive), 0, xive->vc_base); sysbus_mmio_map(SYS_BUS_DEVICE(xive), 1, xive->end_base); @@ -250,6 +270,9 @@ static void spapr_xive_instance_init(Object *obj) object_initialize_child(obj, "end_source", 
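spapr_xive_end_to_target() added above is the inverse of the existing spapr_xive_cpu_to_end() mapping (8 END queues per vCPU, one per priority). For reference, the forward direction is presumably just:

static void spapr_xive_cpu_to_end(PowerPCCPU *cpu, uint8_t prio,
                                  uint8_t *out_end_blk, uint32_t *out_end_idx)
{
    /* end_idx = vcpu_id * 8 + prio, all within the single block */
    if (out_end_blk) {
        *out_end_blk = SPAPR_XIVE_BLOCK_ID;
    }
    if (out_end_idx) {
        *out_end_idx = (cpu->vcpu_id << 3) + prio;
    }
}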
&xive->end_source, sizeof(xive->end_source), TYPE_XIVE_END_SOURCE, &error_abort, NULL); + + /* Not connected to the KVM XIVE device */ + xive->fd = -1; } static void spapr_xive_realize(DeviceState *dev, Error **errp) @@ -304,22 +327,36 @@ static void spapr_xive_realize(DeviceState *dev, Error **errp) xive->eat = g_new0(XiveEAS, xive->nr_irqs); xive->endt = g_new0(XiveEND, xive->nr_ends); - /* TIMA initialization */ - memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive, - "xive.tima", 4ull << TM_SHIFT); + xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64, + xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT)); + + qemu_register_reset(spapr_xive_reset, dev); /* Define all XIVE MMIO regions on SysBus */ sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xsrc->esb_mmio); sysbus_init_mmio(SYS_BUS_DEVICE(xive), &end_xsrc->esb_mmio); sysbus_init_mmio(SYS_BUS_DEVICE(xive), &xive->tm_mmio); +} - /* Map all regions */ - spapr_xive_map_mmio(xive); +void spapr_xive_init(SpaprXive *xive, Error **errp) +{ + XiveSource *xsrc = &xive->source; - xive->nodename = g_strdup_printf("interrupt-controller@%" PRIx64, - xive->tm_base + XIVE_TM_USER_PAGE * (1 << TM_SHIFT)); + /* + * The emulated XIVE device can only be initialized once. If the + * ESB memory region has been already mapped, it means we have been + * through there. + */ + if (memory_region_is_mapped(&xsrc->esb_mmio)) { + return; + } - qemu_register_reset(spapr_xive_reset, dev); + /* TIMA initialization */ + memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive, + "xive.tima", 4ull << TM_SHIFT); + + /* Map all regions */ + spapr_xive_map_mmio(xive); } static int spapr_xive_get_eas(XiveRouter *xrtr, uint8_t eas_blk, @@ -427,10 +464,34 @@ static const VMStateDescription vmstate_spapr_xive_eas = { }, }; +static int vmstate_spapr_xive_pre_save(void *opaque) +{ + if (kvm_irqchip_in_kernel()) { + return kvmppc_xive_pre_save(SPAPR_XIVE(opaque)); + } + + return 0; +} + +/* + * Called by the sPAPR IRQ backend 'post_load' method at the machine + * level. 
+ */ +int spapr_xive_post_load(SpaprXive *xive, int version_id) +{ + if (kvm_irqchip_in_kernel()) { + return kvmppc_xive_post_load(xive, version_id); + } + + return 0; +} + static const VMStateDescription vmstate_spapr_xive = { .name = TYPE_SPAPR_XIVE, .version_id = 1, .minimum_version_id = 1, + .pre_save = vmstate_spapr_xive_pre_save, + .post_load = NULL, /* handled at the machine level */ .fields = (VMStateField[]) { VMSTATE_UINT32_EQUAL(nr_irqs, SpaprXive, NULL), VMSTATE_STRUCT_VARRAY_POINTER_UINT32(eat, SpaprXive, nr_irqs, @@ -494,6 +555,17 @@ bool spapr_xive_irq_claim(SpaprXive *xive, uint32_t lisn, bool lsi) if (lsi) { xive_source_irq_set_lsi(xsrc, lisn); } + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_source_reset_one(xsrc, lisn, &local_err); + if (local_err) { + error_report_err(local_err); + return false; + } + } + return true; } @@ -755,6 +827,16 @@ static target_ulong h_int_set_source_config(PowerPCCPU *cpu, new_eas.w = xive_set_field64(EAS_END_DATA, new_eas.w, eisn); } + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_set_source_config(xive, lisn, &new_eas, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + out: xive->eat[lisn] = new_eas; return H_SUCCESS; @@ -993,6 +1075,12 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, case 16: case 21: case 24: + if (!QEMU_IS_ALIGNED(qpage, 1ul << qsize)) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: EQ @0x%" HWADDR_PRIx + " is not naturally aligned with %" HWADDR_PRIx "\n", + qpage, (hwaddr)1 << qsize); + return H_P4; + } end.w2 = cpu_to_be32((qpage >> 32) & 0x0fffffff); end.w3 = cpu_to_be32(qpage & 0xffffffff); end.w0 |= cpu_to_be32(END_W0_ENQUEUE); @@ -1060,6 +1148,16 @@ static target_ulong h_int_set_queue_config(PowerPCCPU *cpu, */ out: + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_set_queue_config(xive, end_blk, end_idx, &end, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + /* Update END */ memcpy(&xive->endt[end_idx], &end, sizeof(XiveEND)); return H_SUCCESS; @@ -1144,14 +1242,23 @@ static target_ulong h_int_get_queue_config(PowerPCCPU *cpu, } if (xive_end_is_enqueue(end)) { - args[1] = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 - | be32_to_cpu(end->w3); + args[1] = xive_end_qaddr(end); args[2] = xive_get_field32(END_W0_QSIZE, end->w0) + 12; } else { args[1] = 0; args[2] = 0; } + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_get_queue_config(xive, end_blk, end_idx, end, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } + /* TODO: do we need any locking on the END ? 
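xive_end_qaddr(), used in the monitor output and hcalls above, centralizes the w2/w3 reassembly that the hw/intc/xive.c hunks below stop open-coding. It is presumably an inline helper in the XIVE headers, along these lines (note it masks after the byte swap, unlike the old open-coded expression):

static inline uint64_t xive_end_qaddr(XiveEND *end)
{
    /* the top 28 bits of the EQ page address live in w2 */
    return ((uint64_t) be32_to_cpu(end->w2) & 0x0fffffff) << 32 |
        be32_to_cpu(end->w3);
}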
*/ if (flags & SPAPR_XIVE_END_DEBUG) { /* Load the event queue generation number into the return flags */ @@ -1304,15 +1411,20 @@ static target_ulong h_int_esb(PowerPCCPU *cpu, return H_P3; } - mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; + if (kvm_irqchip_in_kernel()) { + args[0] = kvmppc_xive_esb_rw(xsrc, lisn, offset, data, + flags & SPAPR_XIVE_ESB_STORE); + } else { + mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; - if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, - (flags & SPAPR_XIVE_ESB_STORE))) { - qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" - HWADDR_PRIx "\n", mmio_addr); - return H_HARDWARE; + if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, + (flags & SPAPR_XIVE_ESB_STORE))) { + qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" + HWADDR_PRIx "\n", mmio_addr); + return H_HARDWARE; + } + args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data; } - args[0] = (flags & SPAPR_XIVE_ESB_STORE) ? -1 : data; return H_SUCCESS; } @@ -1369,7 +1481,20 @@ static target_ulong h_int_sync(PowerPCCPU *cpu, * This is not needed when running the emulation under QEMU */ - /* This is not real hardware. Nothing to be done */ + /* + * This is not real hardware. Nothing to be done unless when + * under KVM + */ + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_sync_source(xive, lisn, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } return H_SUCCESS; } @@ -1404,6 +1529,16 @@ static target_ulong h_int_reset(PowerPCCPU *cpu, } device_reset(DEVICE(xive)); + + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_reset(xive, &local_err); + if (local_err) { + error_report_err(local_err); + return H_HARDWARE; + } + } return H_SUCCESS; } diff --git a/hw/intc/spapr_xive_kvm.c b/hw/intc/spapr_xive_kvm.c new file mode 100644 index 0000000000..b48f135838 --- /dev/null +++ b/hw/intc/spapr_xive_kvm.c @@ -0,0 +1,823 @@ +/* + * QEMU PowerPC sPAPR XIVE interrupt controller model + * + * Copyright (c) 2017-2019, IBM Corporation. + * + * This code is licensed under the GPL version 2 or later. See the + * COPYING file in the top-level directory. 
+ */ + +#include "qemu/osdep.h" +#include "qemu/log.h" +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "target/ppc/cpu.h" +#include "sysemu/cpus.h" +#include "sysemu/kvm.h" +#include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_cpu_core.h" +#include "hw/ppc/spapr_xive.h" +#include "hw/ppc/xive.h" +#include "kvm_ppc.h" + +#include <sys/ioctl.h> + +/* + * Helpers for CPU hotplug + * + * TODO: make a common KVMEnabledCPU layer for XICS and XIVE + */ +typedef struct KVMEnabledCPU { + unsigned long vcpu_id; + QLIST_ENTRY(KVMEnabledCPU) node; +} KVMEnabledCPU; + +static QLIST_HEAD(, KVMEnabledCPU) + kvm_enabled_cpus = QLIST_HEAD_INITIALIZER(&kvm_enabled_cpus); + +static bool kvm_cpu_is_enabled(CPUState *cs) +{ + KVMEnabledCPU *enabled_cpu; + unsigned long vcpu_id = kvm_arch_vcpu_id(cs); + + QLIST_FOREACH(enabled_cpu, &kvm_enabled_cpus, node) { + if (enabled_cpu->vcpu_id == vcpu_id) { + return true; + } + } + return false; +} + +static void kvm_cpu_enable(CPUState *cs) +{ + KVMEnabledCPU *enabled_cpu; + unsigned long vcpu_id = kvm_arch_vcpu_id(cs); + + enabled_cpu = g_malloc(sizeof(*enabled_cpu)); + enabled_cpu->vcpu_id = vcpu_id; + QLIST_INSERT_HEAD(&kvm_enabled_cpus, enabled_cpu, node); +} + +static void kvm_cpu_disable_all(void) +{ + KVMEnabledCPU *enabled_cpu, *next; + + QLIST_FOREACH_SAFE(enabled_cpu, &kvm_enabled_cpus, node, next) { + QLIST_REMOVE(enabled_cpu, node); + g_free(enabled_cpu); + } +} + +/* + * XIVE Thread Interrupt Management context (KVM) + */ + +static void kvmppc_xive_cpu_set_state(XiveTCTX *tctx, Error **errp) +{ + uint64_t state[2]; + int ret; + + /* word0 and word1 of the OS ring. */ + state[0] = *((uint64_t *) &tctx->regs[TM_QW1_OS]); + + ret = kvm_set_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); + if (ret != 0) { + error_setg_errno(errp, errno, + "XIVE: could not restore KVM state of CPU %ld", + kvm_arch_vcpu_id(tctx->cs)); + } +} + +void kvmppc_xive_cpu_get_state(XiveTCTX *tctx, Error **errp) +{ + SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive; + uint64_t state[2] = { 0 }; + int ret; + + /* The KVM XIVE device is not in use */ + if (xive->fd == -1) { + return; + } + + ret = kvm_get_one_reg(tctx->cs, KVM_REG_PPC_VP_STATE, state); + if (ret != 0) { + error_setg_errno(errp, errno, + "XIVE: could not capture KVM state of CPU %ld", + kvm_arch_vcpu_id(tctx->cs)); + return; + } + + /* word0 and word1 of the OS ring. */ + *((uint64_t *) &tctx->regs[TM_QW1_OS]) = state[0]; +} + +typedef struct { + XiveTCTX *tctx; + Error *err; +} XiveCpuGetState; + +static void kvmppc_xive_cpu_do_synchronize_state(CPUState *cpu, + run_on_cpu_data arg) +{ + XiveCpuGetState *s = arg.host_ptr; + + kvmppc_xive_cpu_get_state(s->tctx, &s->err); +} + +void kvmppc_xive_cpu_synchronize_state(XiveTCTX *tctx, Error **errp) +{ + XiveCpuGetState s = { + .tctx = tctx, + .err = NULL, + }; + + /* + * Kick the vCPU to make sure they are available for the KVM ioctl. + */ + run_on_cpu(tctx->cs, kvmppc_xive_cpu_do_synchronize_state, + RUN_ON_CPU_HOST_PTR(&s)); + + if (s.err) { + error_propagate(errp, s.err); + return; + } +} + +void kvmppc_xive_cpu_connect(XiveTCTX *tctx, Error **errp) +{ + SpaprXive *xive = SPAPR_MACHINE(qdev_get_machine())->xive; + unsigned long vcpu_id; + int ret; + + /* The KVM XIVE device is not in use */ + if (xive->fd == -1) { + return; + } + + /* Check if CPU was hot unplugged and replugged. 
*/ + if (kvm_cpu_is_enabled(tctx->cs)) { + return; + } + + vcpu_id = kvm_arch_vcpu_id(tctx->cs); + + ret = kvm_vcpu_enable_cap(tctx->cs, KVM_CAP_PPC_IRQ_XIVE, 0, xive->fd, + vcpu_id, 0); + if (ret < 0) { + error_setg(errp, "XIVE: unable to connect CPU%ld to KVM device: %s", + vcpu_id, strerror(errno)); + return; + } + + kvm_cpu_enable(tctx->cs); +} + +/* + * XIVE Interrupt Source (KVM) + */ + +void kvmppc_xive_set_source_config(SpaprXive *xive, uint32_t lisn, XiveEAS *eas, + Error **errp) +{ + uint32_t end_idx; + uint32_t end_blk; + uint8_t priority; + uint32_t server; + bool masked; + uint32_t eisn; + uint64_t kvm_src; + Error *local_err = NULL; + + assert(xive_eas_is_valid(eas)); + + end_idx = xive_get_field64(EAS_END_INDEX, eas->w); + end_blk = xive_get_field64(EAS_END_BLOCK, eas->w); + eisn = xive_get_field64(EAS_END_DATA, eas->w); + masked = xive_eas_is_masked(eas); + + spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); + + kvm_src = priority << KVM_XIVE_SOURCE_PRIORITY_SHIFT & + KVM_XIVE_SOURCE_PRIORITY_MASK; + kvm_src |= server << KVM_XIVE_SOURCE_SERVER_SHIFT & + KVM_XIVE_SOURCE_SERVER_MASK; + kvm_src |= ((uint64_t) masked << KVM_XIVE_SOURCE_MASKED_SHIFT) & + KVM_XIVE_SOURCE_MASKED_MASK; + kvm_src |= ((uint64_t)eisn << KVM_XIVE_SOURCE_EISN_SHIFT) & + KVM_XIVE_SOURCE_EISN_MASK; + + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_CONFIG, lisn, + &kvm_src, true, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +void kvmppc_xive_sync_source(SpaprXive *xive, uint32_t lisn, Error **errp) +{ + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE_SYNC, lisn, + NULL, true, errp); +} + +/* + * At reset, the interrupt sources are simply created and MASKED. We + * only need to inform the KVM XIVE device about their type: LSI or + * MSI. + */ +void kvmppc_xive_source_reset_one(XiveSource *xsrc, int srcno, Error **errp) +{ + SpaprXive *xive = SPAPR_XIVE(xsrc->xive); + uint64_t state = 0; + + /* The KVM XIVE device is not in use */ + if (xive->fd == -1) { + return; + } + + if (xive_source_irq_is_lsi(xsrc, srcno)) { + state |= KVM_XIVE_LEVEL_SENSITIVE; + if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + state |= KVM_XIVE_LEVEL_ASSERTED; + } + } + + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_SOURCE, srcno, &state, + true, errp); +} + +static void kvmppc_xive_source_reset(XiveSource *xsrc, Error **errp) +{ + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + Error *local_err = NULL; + + kvmppc_xive_source_reset_one(xsrc, i, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } +} + +/* + * This is used to perform the magic loads on the ESB pages, described + * in xive.h. + * + * Memory barriers should not be needed for loads (no store for now). 
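Nearly every configuration call in this file funnels through kvm_device_access(). That helper lives in accel/kvm, not in this patch; roughly, it wraps the KVM device-attribute ioctls (a sketch under that assumption):

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr = {
        .group = group,
        .attr = attr,
        .addr = (uintptr_t)val,
    };
    int err = ioctl(fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                    &kvmattr);

    if (err < 0) {
        error_setg_errno(errp, errno,
                         "KVM device attr %d/0x%" PRIx64 " failed",
                         group, attr);
    }
    return err;
}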
+ */ +static uint64_t xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, + uint64_t data, bool write) +{ + uint64_t *addr = xsrc->esb_mmap + xive_source_esb_mgmt(xsrc, srcno) + + offset; + + if (write) { + *addr = cpu_to_be64(data); + return -1; + } else { + /* Prevent the compiler from optimizing away the load */ + volatile uint64_t value = be64_to_cpu(*addr); + return value; + } +} + +static uint8_t xive_esb_read(XiveSource *xsrc, int srcno, uint32_t offset) +{ + return xive_esb_rw(xsrc, srcno, offset, 0, 0) & 0x3; +} + +static void xive_esb_trigger(XiveSource *xsrc, int srcno) +{ + uint64_t *addr = xsrc->esb_mmap + xive_source_esb_page(xsrc, srcno); + + *addr = 0x0; +} + +uint64_t kvmppc_xive_esb_rw(XiveSource *xsrc, int srcno, uint32_t offset, + uint64_t data, bool write) +{ + if (write) { + return xive_esb_rw(xsrc, srcno, offset, data, 1); + } + + /* + * Special Load EOI handling for LSI sources. Q bit is never set + * and the interrupt should be re-triggered if the level is still + * asserted. + */ + if (xive_source_irq_is_lsi(xsrc, srcno) && + offset == XIVE_ESB_LOAD_EOI) { + xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_00); + if (xsrc->status[srcno] & XIVE_STATUS_ASSERTED) { + xive_esb_trigger(xsrc, srcno); + } + return 0; + } else { + return xive_esb_rw(xsrc, srcno, offset, 0, 0); + } +} + +static void kvmppc_xive_source_get_state(XiveSource *xsrc) +{ + int i; + + for (i = 0; i < xsrc->nr_irqs; i++) { + /* Perform a load without side effect to retrieve the PQ bits */ + uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET); + + /* and save PQ locally */ + xive_source_esb_set(xsrc, i, pq); + } +} + +void kvmppc_xive_source_set_irq(void *opaque, int srcno, int val) +{ + XiveSource *xsrc = opaque; + SpaprXive *xive = SPAPR_XIVE(xsrc->xive); + struct kvm_irq_level args; + int rc; + + /* The KVM XIVE device should be in use */ + assert(xive->fd != -1); + + args.irq = srcno; + if (!xive_source_irq_is_lsi(xsrc, srcno)) { + if (!val) { + return; + } + args.level = KVM_INTERRUPT_SET; + } else { + if (val) { + xsrc->status[srcno] |= XIVE_STATUS_ASSERTED; + args.level = KVM_INTERRUPT_SET_LEVEL; + } else { + xsrc->status[srcno] &= ~XIVE_STATUS_ASSERTED; + args.level = KVM_INTERRUPT_UNSET; + } + } + rc = kvm_vm_ioctl(kvm_state, KVM_IRQ_LINE, &args); + if (rc < 0) { + error_report("XIVE: kvm_irq_line() failed : %s", strerror(errno)); + } +} + +/* + * sPAPR XIVE interrupt controller (KVM) + */ +void kvmppc_xive_get_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) +{ + struct kvm_ppc_xive_eq kvm_eq = { 0 }; + uint64_t kvm_eq_idx; + uint8_t priority; + uint32_t server; + Error *local_err = NULL; + + assert(xive_end_is_valid(end)); + + /* Encode the tuple (server, prio) as a KVM EQ index */ + spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); + + kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT & + KVM_XIVE_EQ_PRIORITY_MASK; + kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & + KVM_XIVE_EQ_SERVER_MASK; + + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, + &kvm_eq, false, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* + * The EQ index and toggle bit are updated by HW. These are the + * only fields from KVM we want to update QEMU with. The other END + * fields should already be in the QEMU END table. 
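The "magic load" convention above encodes the operation in the page offset: a load at XIVE_ESB_SET_PQ_xx atomically installs the new PQ state and returns the previous one in the two low bits. Assuming the standard encodings from the XIVE headers (P is the pending bit, Q the queued bit):

/*
 * XIVE_ESB_RESET   0x0   P=0,Q=0   enabled, idle
 * XIVE_ESB_OFF     0x1   P=0,Q=1   masked
 * XIVE_ESB_PENDING 0x2   P=1,Q=0   event forwarded, none queued
 * XIVE_ESB_QUEUED  0x3   P=1,Q=1   event forwarded, another queued
 */

/* e.g. mask a source while remembering whether it had fired: */
uint8_t old_pq = xive_esb_read(xsrc, srcno, XIVE_ESB_SET_PQ_10);

/* ... and on unmask, replay anything that arrived meanwhile: */
if (old_pq == XIVE_ESB_QUEUED) {
    xive_esb_trigger(xsrc, srcno);
}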
+ */ + end->w1 = xive_set_field32(END_W1_GENERATION, 0ul, kvm_eq.qtoggle) | + xive_set_field32(END_W1_PAGE_OFF, 0ul, kvm_eq.qindex); +} + +void kvmppc_xive_set_queue_config(SpaprXive *xive, uint8_t end_blk, + uint32_t end_idx, XiveEND *end, + Error **errp) +{ + struct kvm_ppc_xive_eq kvm_eq = { 0 }; + uint64_t kvm_eq_idx; + uint8_t priority; + uint32_t server; + Error *local_err = NULL; + + /* + * Build the KVM state from the local END structure. + */ + + kvm_eq.flags = 0; + if (xive_get_field32(END_W0_UCOND_NOTIFY, end->w0)) { + kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY; + } + + /* + * If the hcall is disabling the EQ, set the size and page address + * to zero. When migrating, only valid ENDs are taken into + * account. + */ + if (xive_end_is_valid(end)) { + kvm_eq.qshift = xive_get_field32(END_W0_QSIZE, end->w0) + 12; + kvm_eq.qaddr = xive_end_qaddr(end); + /* + * The EQ toggle bit and index should only be relevant when + * restoring the EQ state + */ + kvm_eq.qtoggle = xive_get_field32(END_W1_GENERATION, end->w1); + kvm_eq.qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); + } else { + kvm_eq.qshift = 0; + kvm_eq.qaddr = 0; + } + + /* Encode the tuple (server, prio) as a KVM EQ index */ + spapr_xive_end_to_target(end_blk, end_idx, &server, &priority); + + kvm_eq_idx = priority << KVM_XIVE_EQ_PRIORITY_SHIFT & + KVM_XIVE_EQ_PRIORITY_MASK; + kvm_eq_idx |= server << KVM_XIVE_EQ_SERVER_SHIFT & + KVM_XIVE_EQ_SERVER_MASK; + + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_EQ_CONFIG, kvm_eq_idx, + &kvm_eq, true, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } +} + +void kvmppc_xive_reset(SpaprXive *xive, Error **errp) +{ + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, KVM_DEV_XIVE_RESET, + NULL, true, errp); +} + +static void kvmppc_xive_get_queues(SpaprXive *xive, Error **errp) +{ + Error *local_err = NULL; + int i; + + for (i = 0; i < xive->nr_ends; i++) { + if (!xive_end_is_valid(&xive->endt[i])) { + continue; + } + + kvmppc_xive_get_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, + &xive->endt[i], &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } +} + +/* + * The primary goal of the XIVE VM change handler is to mark the EQ + * pages dirty when all XIVE event notifications have stopped. + * + * Whenever the VM is stopped, the VM change handler sets the source + * PQs to PENDING to stop the flow of events and to possibly catch a + * triggered interrupt occurring while the VM is stopped. The previous + * state is saved in anticipation of a migration. The XIVE controller + * is then synced through KVM to flush any in-flight event + * notification and stabilize the EQs. + * + * At this stage, we can mark the EQ page dirty and let a migration + * sequence transfer the EQ pages to the destination, which is done + * just after the stop state. + * + * The previous configuration of the sources is restored when the VM + * runs again. If an interrupt was queued while the VM was stopped, + * simply generate a trigger. + */ +static void kvmppc_xive_change_state_handler(void *opaque, int running, + RunState state) +{ + SpaprXive *xive = opaque; + XiveSource *xsrc = &xive->source; + Error *local_err = NULL; + int i; + + /* + * Restore the sources to their initial state. This is called when + * the VM resumes after a stop or a migration.
+ */ + if (running) { + for (i = 0; i < xsrc->nr_irqs; i++) { + uint8_t pq = xive_source_esb_get(xsrc, i); + uint8_t old_pq; + + old_pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_00 + (pq << 8)); + + /* + * An interrupt was queued while the VM was stopped, + * generate a trigger. + */ + if (pq == XIVE_ESB_RESET && old_pq == XIVE_ESB_QUEUED) { + xive_esb_trigger(xsrc, i); + } + } + + return; + } + + /* + * Mask the sources, to stop the flow of event notifications, and + * save the PQs locally in the XiveSource object. The XiveSource + * state will be collected later on by its vmstate handler if a + * migration is in progress. + */ + for (i = 0; i < xsrc->nr_irqs; i++) { + uint8_t pq = xive_esb_read(xsrc, i, XIVE_ESB_GET); + + /* + * PQ is set to PENDING to possibly catch a triggered + * interrupt occurring while the VM is stopped (a hotplug event, + * for instance). + */ + if (pq != XIVE_ESB_OFF) { + pq = xive_esb_read(xsrc, i, XIVE_ESB_SET_PQ_10); + } + xive_source_esb_set(xsrc, i, pq); + } + + /* + * Sync the XIVE controller in KVM, to flush in-flight event + * notifications that should be enqueued in the EQs and mark the + * XIVE EQ pages dirty to collect all updates. + */ + kvm_device_access(xive->fd, KVM_DEV_XIVE_GRP_CTRL, + KVM_DEV_XIVE_EQ_SYNC, NULL, true, &local_err); + if (local_err) { + error_report_err(local_err); + return; + } +} + +void kvmppc_xive_synchronize_state(SpaprXive *xive, Error **errp) +{ + /* The KVM XIVE device is not in use */ + if (xive->fd == -1) { + return; + } + + /* + * When the VM is stopped, the sources are masked and the previous + * state is saved in anticipation of a migration. We should not + * synchronize the source state in that case, or else we will override + * the saved state. + */ + if (runstate_is_running()) { + kvmppc_xive_source_get_state(&xive->source); + } + + /* EAT: there is no extra state to query from KVM */ + + /* ENDT */ + kvmppc_xive_get_queues(xive, errp); +} + +/* + * The SpaprXive 'pre_save' method is called by the vmstate handler of + * the SpaprXive model, after the XIVE controller is synced in the VM + * change handler. + */ +int kvmppc_xive_pre_save(SpaprXive *xive) +{ + Error *local_err = NULL; + + /* The KVM XIVE device is not in use */ + if (xive->fd == -1) { + return 0; + } + + /* EAT: there is no extra state to query from KVM */ + + /* ENDT */ + kvmppc_xive_get_queues(xive, &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + + return 0; +} + +/* + * The SpaprXive 'post_load' method is not called by a vmstate + * handler. It is called at the sPAPR machine level at the end of the + * migration sequence by the sPAPR IRQ backend 'post_load' method, + * when all XIVE states have been transferred and loaded. + */ +int kvmppc_xive_post_load(SpaprXive *xive, int version_id) +{ + Error *local_err = NULL; + CPUState *cs; + int i; + + /* The KVM XIVE device should be in use */ + assert(xive->fd != -1); + + /* Restore the ENDT first. The targeting depends on it.
*/ + for (i = 0; i < xive->nr_ends; i++) { + if (!xive_end_is_valid(&xive->endt[i])) { + continue; + } + + kvmppc_xive_set_queue_config(xive, SPAPR_XIVE_BLOCK_ID, i, + &xive->endt[i], &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + } + + /* Restore the EAT */ + for (i = 0; i < xive->nr_irqs; i++) { + if (!xive_eas_is_valid(&xive->eat[i])) { + continue; + } + + kvmppc_xive_set_source_config(xive, i, &xive->eat[i], &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + } + + /* Restore the thread interrupt contexts */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + kvmppc_xive_cpu_set_state(spapr_cpu_state(cpu)->tctx, &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + } + + /* The source states will be restored when the machine starts running */ + return 0; +} + +static void *kvmppc_xive_mmap(SpaprXive *xive, int pgoff, size_t len, + Error **errp) +{ + void *addr; + uint32_t page_shift = 16; /* TODO: fix page_shift */ + + addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, xive->fd, + pgoff << page_shift); + if (addr == MAP_FAILED) { + error_setg_errno(errp, errno, "XIVE: unable to set memory mapping"); + return NULL; + } + + return addr; +} + +/* + * All the XIVE memory regions are now backed by mappings from the KVM + * XIVE device. + */ +void kvmppc_xive_connect(SpaprXive *xive, Error **errp) +{ + XiveSource *xsrc = &xive->source; + Error *local_err = NULL; + size_t esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + size_t tima_len = 4ull << TM_SHIFT; + CPUState *cs; + + /* + * The KVM XIVE device already in use. This is the case when + * rebooting under the XIVE-only interrupt mode. + */ + if (xive->fd != -1) { + return; + } + + if (!kvmppc_has_cap_xive()) { + error_setg(errp, "IRQ_XIVE capability must be present for KVM"); + return; + } + + /* First, create the KVM XIVE device */ + xive->fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_XIVE, false); + if (xive->fd < 0) { + error_setg_errno(errp, -xive->fd, "XIVE: error creating KVM device"); + return; + } + + /* + * 1. Source ESB pages - KVM mapping + */ + xsrc->esb_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_ESB_PAGE_OFFSET, esb_len, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + memory_region_init_ram_device_ptr(&xsrc->esb_mmio, OBJECT(xsrc), + "xive.esb", esb_len, xsrc->esb_mmap); + + /* + * 2. END ESB pages (No KVM support yet) + */ + + /* + * 3. 
TIMA pages - KVM mapping + */ + xive->tm_mmap = kvmppc_xive_mmap(xive, KVM_XIVE_TIMA_PAGE_OFFSET, tima_len, + &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + memory_region_init_ram_device_ptr(&xive->tm_mmio, OBJECT(xive), + "xive.tima", tima_len, xive->tm_mmap); + + xive->change = qemu_add_vm_change_state_handler( + kvmppc_xive_change_state_handler, xive); + + /* Connect the presenters to the initial VCPUs of the machine */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + kvmppc_xive_cpu_connect(spapr_cpu_state(cpu)->tctx, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + + /* Update the KVM sources */ + kvmppc_xive_source_reset(xsrc, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + kvm_kernel_irqchip = true; + kvm_msi_via_irqfd_allowed = true; + kvm_gsi_direct_mapping = true; + + /* Map all regions */ + spapr_xive_map_mmio(xive); +} + +void kvmppc_xive_disconnect(SpaprXive *xive, Error **errp) +{ + XiveSource *xsrc; + size_t esb_len; + + /* The KVM XIVE device is not in use */ + if (!xive || xive->fd == -1) { + return; + } + + if (!kvmppc_has_cap_xive()) { + error_setg(errp, "IRQ_XIVE capability must be present for KVM"); + return; + } + + /* Clear the KVM mapping */ + xsrc = &xive->source; + esb_len = (1ull << xsrc->esb_shift) * xsrc->nr_irqs; + + sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 0); + munmap(xsrc->esb_mmap, esb_len); + + sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 1); + + sysbus_mmio_unmap(SYS_BUS_DEVICE(xive), 2); + munmap(xive->tm_mmap, 4ull << TM_SHIFT); + + /* + * When the KVM device fd is closed, the KVM device is destroyed + * and removed from the list of devices of the VM. The VCPU + * presenters are also detached from the device. + */ + close(xive->fd); + xive->fd = -1; + + kvm_kernel_irqchip = false; + kvm_msi_via_irqfd_allowed = false; + kvm_gsi_direct_mapping = false; + + /* Clear the local list of presenter (hotplug) */ + kvm_cpu_disable_all(); + + /* VM Change state handler is not needed anymore */ + qemu_del_vm_change_state_handler(xive->change); +} diff --git a/hw/intc/xics.c b/hw/intc/xics.c index af7dc709ab..79f5a8a916 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -610,6 +610,12 @@ static const TypeInfo ics_simple_info = { .class_size = sizeof(ICSStateClass), }; +static void ics_reset_irq(ICSIRQState *irq) +{ + irq->priority = 0xff; + irq->saved_priority = 0xff; +} + static void ics_base_reset(DeviceState *dev) { ICSState *ics = ICS_BASE(dev); @@ -623,8 +629,7 @@ static void ics_base_reset(DeviceState *dev) memset(ics->irqs, 0, sizeof(ICSIRQState) * ics->nr_irqs); for (i = 0; i < ics->nr_irqs; i++) { - ics->irqs[i].priority = 0xff; - ics->irqs[i].saved_priority = 0xff; + ics_reset_irq(ics->irqs + i); ics->irqs[i].flags = flags[i]; } } @@ -760,6 +765,7 @@ void ics_set_irq_type(ICSState *ics, int srcno, bool lsi) lsi ? 
XICS_FLAGS_IRQ_LSI : XICS_FLAGS_IRQ_MSI; if (kvm_irqchip_in_kernel()) { + ics_reset_irq(ics->irqs + srcno); ics_set_kvm_state_one(ics, srcno); } } diff --git a/hw/intc/xics_kvm.c b/hw/intc/xics_kvm.c index 78a252e6df..5ba5b77561 100644 --- a/hw/intc/xics_kvm.c +++ b/hw/intc/xics_kvm.c @@ -33,6 +33,7 @@ #include "trace.h" #include "sysemu/kvm.h" #include "hw/ppc/spapr.h" +#include "hw/ppc/spapr_cpu_core.h" #include "hw/ppc/xics.h" #include "hw/ppc/xics_spapr.h" #include "kvm_ppc.h" @@ -51,6 +52,16 @@ typedef struct KVMEnabledICP { static QLIST_HEAD(, KVMEnabledICP) kvm_enabled_icps = QLIST_HEAD_INITIALIZER(&kvm_enabled_icps); +static void kvm_disable_icps(void) +{ + KVMEnabledICP *enabled_icp, *next; + + QLIST_FOREACH_SAFE(enabled_icp, &kvm_enabled_icps, node, next) { + QLIST_REMOVE(enabled_icp, node); + g_free(enabled_icp); + } +} + /* * ICP-KVM */ @@ -59,6 +70,11 @@ void icp_get_kvm_state(ICPState *icp) uint64_t state; int ret; + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return; + } + /* ICP for this CPU thread is not in use, exiting */ if (!icp->cs) { return; @@ -95,6 +111,11 @@ int icp_set_kvm_state(ICPState *icp) uint64_t state; int ret; + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return 0; + } + /* ICP for this CPU thread is not in use, exiting */ if (!icp->cs) { return 0; @@ -123,8 +144,9 @@ void icp_kvm_realize(DeviceState *dev, Error **errp) unsigned long vcpu_id; int ret; + /* The KVM XICS device is not in use */ if (kernel_xics_fd == -1) { - abort(); + return; } cs = icp->cs; @@ -160,6 +182,11 @@ void ics_get_kvm_state(ICSState *ics) uint64_t state; int i; + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return; + } + for (i = 0; i < ics->nr_irqs; i++) { ICSIRQState *irq = &ics->irqs[i]; @@ -220,6 +247,11 @@ int ics_set_kvm_state_one(ICSState *ics, int srcno) ICSIRQState *irq = &ics->irqs[srcno]; int ret; + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return 0; + } + state = irq->server; state |= (uint64_t)(irq->saved_priority & KVM_XICS_PRIORITY_MASK) << KVM_XICS_PRIORITY_SHIFT; @@ -259,6 +291,11 @@ int ics_set_kvm_state(ICSState *ics) { int i; + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return 0; + } + for (i = 0; i < ics->nr_irqs; i++) { int ret; @@ -276,6 +313,9 @@ void ics_kvm_set_irq(ICSState *ics, int srcno, int val) struct kvm_irq_level args; int rc; + /* The KVM XICS device should be in use */ + assert(kernel_xics_fd != -1); + args.irq = srcno + ics->offset; if (ics->irqs[srcno].flags & XICS_FLAGS_IRQ_MSI) { if (!val) { @@ -303,6 +343,16 @@ static void rtas_dummy(PowerPCCPU *cpu, SpaprMachineState *spapr, int xics_kvm_init(SpaprMachineState *spapr, Error **errp) { int rc; + CPUState *cs; + Error *local_err = NULL; + + /* + * The KVM XICS device already in use. This is the case when + * rebooting under the XICS-only interrupt mode. 
+ */ + if (kernel_xics_fd != -1) { + return 0; + } if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { error_setg(errp, @@ -351,6 +401,26 @@ int xics_kvm_init(SpaprMachineState *spapr, Error **errp) kvm_msi_via_irqfd_allowed = true; kvm_gsi_direct_mapping = true; + /* Create the presenters */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + + icp_kvm_realize(DEVICE(spapr_cpu_state(cpu)->icp), &local_err); + if (local_err) { + error_propagate(errp, local_err); + goto fail; + } + } + + /* Update the KVM sources */ + ics_set_kvm_state(spapr->ics); + + /* Connect the presenters to the initial VCPUs of the machine */ + CPU_FOREACH(cs) { + PowerPCCPU *cpu = POWERPC_CPU(cs); + icp_set_kvm_state(spapr_cpu_state(cpu)->icp); + } + return 0; fail: @@ -360,3 +430,44 @@ fail: kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); return -1; } + +void xics_kvm_disconnect(SpaprMachineState *spapr, Error **errp) +{ + /* The KVM XICS device is not in use */ + if (kernel_xics_fd == -1) { + return; + } + + if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_IRQ_XICS)) { + error_setg(errp, + "KVM and IRQ_XICS capability must be present for KVM XICS device"); + return; + } + + /* + * Only on P9 using the XICS-on-XIVE KVM device: + * + * When the KVM device fd is closed, the device is destroyed and + * removed from the list of devices of the VM. The VCPU presenters + * are also detached from the device. + */ + close(kernel_xics_fd); + kernel_xics_fd = -1; + + spapr_rtas_unregister(RTAS_IBM_SET_XIVE); + spapr_rtas_unregister(RTAS_IBM_GET_XIVE); + spapr_rtas_unregister(RTAS_IBM_INT_OFF); + spapr_rtas_unregister(RTAS_IBM_INT_ON); + + kvmppc_define_rtas_kernel_token(0, "ibm,set-xive"); + kvmppc_define_rtas_kernel_token(0, "ibm,get-xive"); + kvmppc_define_rtas_kernel_token(0, "ibm,int-on"); + kvmppc_define_rtas_kernel_token(0, "ibm,int-off"); + + kvm_kernel_irqchip = false; + kvm_msi_via_irqfd_allowed = false; + kvm_gsi_direct_mapping = false; + + /* Clear the presenters from the VCPUs */ + kvm_disable_icps(); +} diff --git a/hw/intc/xics_spapr.c b/hw/intc/xics_spapr.c index 9d2b8adef7..5a1835e8b1 100644 --- a/hw/intc/xics_spapr.c +++ b/hw/intc/xics_spapr.c @@ -239,6 +239,13 @@ static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr, void xics_spapr_init(SpaprMachineState *spapr) { + /* Emulated mode can only be initialized once. */ + if (spapr->ics->init) { + return; + } + + spapr->ics->init = true; + /* Registration of global state belongs in realize */ spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive); spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive); diff --git a/hw/intc/xive.c b/hw/intc/xive.c index a0b87001da..0c74e47aa4 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -493,6 +493,16 @@ void xive_tctx_pic_print_info(XiveTCTX *tctx, Monitor *mon) int cpu_index = tctx->cs ?
tctx->cs->cpu_index : -1; int i; + if (kvm_irqchip_in_kernel()) { + Error *local_err = NULL; + + kvmppc_xive_cpu_synchronize_state(tctx, &local_err); + if (local_err) { + error_report_err(local_err); + return; + } + } + monitor_printf(mon, "CPU[%04x]: QW NSR CPPR IPB LSMFB ACK# INC AGE PIPR" " W2\n", cpu_index); @@ -555,6 +565,15 @@ static void xive_tctx_realize(DeviceState *dev, Error **errp) return; } + /* Connect the presenter to the VCPU (required for CPU hotplug) */ + if (kvm_irqchip_in_kernel()) { + kvmppc_xive_cpu_connect(tctx, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + qemu_register_reset(xive_tctx_reset, dev); } @@ -563,10 +582,27 @@ static void xive_tctx_unrealize(DeviceState *dev, Error **errp) qemu_unregister_reset(xive_tctx_reset, dev); } +static int vmstate_xive_tctx_pre_save(void *opaque) +{ + Error *local_err = NULL; + + if (kvm_irqchip_in_kernel()) { + kvmppc_xive_cpu_get_state(XIVE_TCTX(opaque), &local_err); + if (local_err) { + error_report_err(local_err); + return -1; + } + } + + return 0; +} + static const VMStateDescription vmstate_xive_tctx = { .name = TYPE_XIVE_TCTX, .version_id = 1, .minimum_version_id = 1, + .pre_save = vmstate_xive_tctx_pre_save, + .post_load = NULL, /* handled by the sPAPRxive model */ .fields = (VMStateField[]) { VMSTATE_BUFFER(regs, XiveTCTX), VMSTATE_END_OF_LIST() @@ -990,9 +1026,11 @@ static void xive_source_realize(DeviceState *dev, Error **errp) xsrc->status = g_malloc0(xsrc->nr_irqs); xsrc->lsi_map = bitmap_new(xsrc->nr_irqs); - memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), - &xive_source_esb_ops, xsrc, "xive.esb", - (1ull << xsrc->esb_shift) * xsrc->nr_irqs); + if (!kvm_irqchip_in_kernel()) { + memory_region_init_io(&xsrc->esb_mmio, OBJECT(xsrc), + &xive_source_esb_ops, xsrc, "xive.esb", + (1ull << xsrc->esb_shift) * xsrc->nr_irqs); + } qemu_register_reset(xive_source_reset, dev); } @@ -1042,8 +1080,7 @@ static const TypeInfo xive_source_info = { void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) { - uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 - | be32_to_cpu(end->w3); + uint64_t qaddr_base = xive_end_qaddr(end); uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); uint32_t qentries = 1 << (qsize + 10); @@ -1072,8 +1109,7 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) { - uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 - | be32_to_cpu(end->w3); + uint64_t qaddr_base = xive_end_qaddr(end); uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); @@ -1101,8 +1137,7 @@ void xive_end_pic_print_info(XiveEND *end, uint32_t end_idx, Monitor *mon) static void xive_end_enqueue(XiveEND *end, uint32_t data) { - uint64_t qaddr_base = (uint64_t) be32_to_cpu(end->w2 & 0x0fffffff) << 32 - | be32_to_cpu(end->w3); + uint64_t qaddr_base = xive_end_qaddr(end); uint32_t qsize = xive_get_field32(END_W0_QSIZE, end->w0); uint32_t qindex = xive_get_field32(END_W1_PAGE_OFF, end->w1); uint32_t qgen = xive_get_field32(END_W1_GENERATION, end->w1); diff --git a/hw/isa/i82378.c b/hw/isa/i82378.c index a5d67bc6d7..c08970b24a 100644 --- a/hw/isa/i82378.c +++ b/hw/isa/i82378.c @@ -21,7 +21,6 @@ #include "hw/pci/pci.h" #include "hw/i386/pc.h" 
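[Editor's aside: the .pre_save hook added to vmstate_xive_tctx above follows a general QEMU migration pattern; when an in-kernel (KVM) device holds the authoritative state, it is synced back into the QEMU model just before the device state is serialized. A minimal sketch of that pattern for a hypothetical device; MyDevState, mydev_get_kvm_state() and the regs field are invented for illustration, and the usual migration/vmstate.h and sysemu/kvm.h includes are assumed:

typedef struct MyDevState {
    uint32_t regs;
} MyDevState;

static int mydev_pre_save(void *opaque)
{
    MyDevState *s = opaque;

    /* Refresh the QEMU model from the in-kernel device before saving */
    if (kvm_irqchip_in_kernel()) {
        return mydev_get_kvm_state(s); /* hypothetical; non-zero fails the save */
    }
    return 0;
}

static const VMStateDescription vmstate_mydev = {
    .name = "mydev",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_save = mydev_pre_save,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(regs, MyDevState),
        VMSTATE_END_OF_LIST()
    }
};

End of aside.]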
#include "hw/timer/i8254.h" -#include "hw/timer/mc146818rtc.h" #include "hw/audio/pcspk.h" #define TYPE_I82378 "i82378" @@ -105,9 +104,6 @@ static void i82378_realize(PCIDevice *pci, Error **errp) /* 2 82C37 (dma) */ isa = isa_create_simple(isabus, "i82374"); - - /* timer */ - isa_create_simple(isabus, TYPE_MC146818_RTC); } static void i82378_init(Object *obj) diff --git a/hw/misc/edu.c b/hw/misc/edu.c index 91af452c9e..19e5545e2c 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -98,23 +98,24 @@ static void edu_lower_irq(EduState *edu, uint32_t val) } } -static bool within(uint32_t addr, uint32_t start, uint32_t end) +static bool within(uint64_t addr, uint64_t start, uint64_t end) { return start <= addr && addr < end; } -static void edu_check_range(uint32_t addr, uint32_t size1, uint32_t start, - uint32_t size2) +static void edu_check_range(uint64_t addr, uint64_t size1, uint64_t start, + uint64_t size2) { - uint32_t end1 = addr + size1; - uint32_t end2 = start + size2; + uint64_t end1 = addr + size1; + uint64_t end2 = start + size2; if (within(addr, start, end2) && end1 > addr && within(end1, start, end2)) { return; } - hw_error("EDU: DMA range 0x%.8x-0x%.8x out of bounds (0x%.8x-0x%.8x)!", + hw_error("EDU: DMA range 0x%016"PRIx64"-0x%016"PRIx64 + " out of bounds (0x%016"PRIx64"-0x%016"PRIx64")!", addr, end1 - 1, start, end2 - 1); } @@ -139,13 +140,13 @@ static void edu_dma_timer(void *opaque) } if (EDU_DMA_DIR(edu->dma.cmd) == EDU_DMA_FROM_PCI) { - uint32_t dst = edu->dma.dst; + uint64_t dst = edu->dma.dst; edu_check_range(dst, edu->dma.cnt, DMA_START, DMA_SIZE); dst -= DMA_START; pci_dma_read(&edu->pdev, edu_clamp_addr(edu, edu->dma.src), edu->dma_buf + dst, edu->dma.cnt); } else { - uint32_t src = edu->dma.src; + uint64_t src = edu->dma.src; edu_check_range(src, edu->dma.cnt, DMA_START, DMA_SIZE); src -= DMA_START; pci_dma_write(&edu->pdev, edu_clamp_addr(edu, edu->dma.dst), @@ -185,7 +186,11 @@ static uint64_t edu_mmio_read(void *opaque, hwaddr addr, unsigned size) EduState *edu = opaque; uint64_t val = ~0ULL; - if (size != 4) { + if (addr < 0x80 && size != 4) { + return val; + } + + if (addr >= 0x80 && size != 4 && size != 8) { return val; } @@ -289,6 +294,15 @@ static const MemoryRegionOps edu_mmio_ops = { .read = edu_mmio_read, .write = edu_mmio_write, .endianness = DEVICE_NATIVE_ENDIAN, + .valid = { + .min_access_size = 4, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 4, + .max_access_size = 8, + }, + }; /* diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index a3465155f0..f927ec9c74 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -122,3 +122,8 @@ config XIVE_SPAPR default y depends on PSERIES select XIVE + +config XIVE_KVM + bool + default y + depends on XIVE_SPAPR && KVM diff --git a/hw/ppc/pnv.c b/hw/ppc/pnv.c index 31aa20ee25..046f0a83c8 100644 --- a/hw/ppc/pnv.c +++ b/hw/ppc/pnv.c @@ -450,7 +450,8 @@ static void pnv_dt_power_mgt(void *fdt) static void *pnv_dt_create(MachineState *machine) { - const char plat_compat[] = "qemu,powernv\0ibm,powernv"; + const char plat_compat8[] = "qemu,powernv8\0qemu,powernv\0ibm,powernv"; + const char plat_compat9[] = "qemu,powernv9\0ibm,powernv"; PnvMachineState *pnv = PNV_MACHINE(machine); void *fdt; char *buf; @@ -465,8 +466,14 @@ static void *pnv_dt_create(MachineState *machine) _FDT((fdt_setprop_cell(fdt, 0, "#size-cells", 0x2))); _FDT((fdt_setprop_string(fdt, 0, "model", "IBM PowerNV (emulated by qemu)"))); - _FDT((fdt_setprop(fdt, 0, "compatible", plat_compat, - sizeof(plat_compat)))); + if (pnv_is_power9(pnv)) { + 
_FDT((fdt_setprop(fdt, 0, "compatible", plat_compat9, + sizeof(plat_compat9)))); + } else { + _FDT((fdt_setprop(fdt, 0, "compatible", plat_compat8, + sizeof(plat_compat8)))); + } + buf = qemu_uuid_unparse_strdup(&qemu_uuid); _FDT((fdt_setprop_string(fdt, 0, "vm,uuid", buf))); diff --git a/hw/ppc/pnv_xscom.c b/hw/ppc/pnv_xscom.c index c285ef514e..f53a6d7a94 100644 --- a/hw/ppc/pnv_xscom.c +++ b/hw/ppc/pnv_xscom.c @@ -29,6 +29,12 @@ #include <libfdt.h> +/* PRD registers */ +#define PRD_P8_IPOLL_REG_MASK 0x01020013 +#define PRD_P8_IPOLL_REG_STATUS 0x01020014 +#define PRD_P9_IPOLL_REG_MASK 0x000F0033 +#define PRD_P9_IPOLL_REG_STATUS 0x000F0034 + static void xscom_complete(CPUState *cs, uint64_t hmer_bits) { /* @@ -70,6 +76,12 @@ static uint64_t xscom_read_default(PnvChip *chip, uint32_t pcba) case 0x1010c00: /* PIBAM FIR */ case 0x1010c03: /* PIBAM FIR MASK */ + /* PRD registers */ + case PRD_P8_IPOLL_REG_MASK: + case PRD_P8_IPOLL_REG_STATUS: + case PRD_P9_IPOLL_REG_MASK: + case PRD_P9_IPOLL_REG_STATUS: + /* P9 xscom reset */ case 0x0090018: /* Receive status reg */ case 0x0090012: /* log register */ @@ -124,6 +136,12 @@ static bool xscom_write_default(PnvChip *chip, uint32_t pcba, uint64_t val) case 0x201302a: /* CAPP stuff */ case 0x2013801: /* CAPP stuff */ case 0x2013802: /* CAPP stuff */ + + /* P8 PRD registers */ + case PRD_P8_IPOLL_REG_MASK: + case PRD_P8_IPOLL_REG_STATUS: + case PRD_P9_IPOLL_REG_MASK: + case PRD_P9_IPOLL_REG_STATUS: return true; default: return false; diff --git a/hw/ppc/prep.c b/hw/ppc/prep.c index b7f459d475..2a8009e20b 100644 --- a/hw/ppc/prep.c +++ b/hw/ppc/prep.c @@ -601,7 +601,7 @@ static int prep_set_cmos_checksum(DeviceState *dev, void *opaque) uint16_t checksum = *(uint16_t *)opaque; ISADevice *rtc; - if (object_dynamic_cast(OBJECT(dev), "mc146818rtc")) { + if (object_dynamic_cast(OBJECT(dev), TYPE_MC146818_RTC)) { rtc = ISA_DEVICE(dev); rtc_set_memory(rtc, 0x2e, checksum & 0xff); rtc_set_memory(rtc, 0x3e, checksum & 0xff); @@ -675,6 +675,11 @@ static void ibm_40p_init(MachineState *machine) qdev_prop_set_uint32(dev, "ram-size", machine->ram_size); qdev_init_nofail(dev); + /* RTC */ + dev = DEVICE(isa_create(isa_bus, TYPE_MC146818_RTC)); + qdev_prop_set_int32(dev, "base_year", 1900); + qdev_init_nofail(dev); + /* initialize CMOS checksums */ cmos_checksum = 0x6aa9; qbus_walk_children(BUS(isa_bus), prep_set_cmos_checksum, NULL, NULL, NULL, diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 2ef3ce4362..e2b33e5890 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -500,7 +500,10 @@ static void spapr_populate_cpu_dt(CPUState *cs, void *fdt, int offset, _FDT((fdt_setprop(fdt, offset, "64-bit", NULL, 0))); if (env->spr_cb[SPR_PURR].oea_read) { - _FDT((fdt_setprop(fdt, offset, "ibm,purr", NULL, 0))); + _FDT((fdt_setprop_cell(fdt, offset, "ibm,purr", 1))); + } + if (env->spr_cb[SPR_SPURR].oea_read) { + _FDT((fdt_setprop_cell(fdt, offset, "ibm,spurr", 1))); } if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) { @@ -2122,6 +2125,7 @@ static const VMStateDescription vmstate_spapr = { &vmstate_spapr_cap_cfpc, &vmstate_spapr_cap_sbbc, &vmstate_spapr_cap_ibs, + &vmstate_spapr_cap_hpt_maxpagesize, &vmstate_spapr_irq_map, &vmstate_spapr_cap_nested_kvm_hv, &vmstate_spapr_dtb, @@ -4348,7 +4352,7 @@ static void spapr_machine_class_init(ObjectClass *oc, void *data) smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_ON; smc->default_caps.caps[SPAPR_CAP_CCF_ASSIST] = SPAPR_CAP_OFF; spapr_caps_add_properties(smc, &error_abort); - smc->irq = &spapr_irq_xics; + smc->irq = 
&spapr_irq_dual; smc->dr_phb_enabled = true; } @@ -4407,18 +4411,7 @@ DEFINE_SPAPR_MACHINE(4_1, "4.1", true); /* * pseries-4.0 */ -static void spapr_machine_4_0_class_options(MachineClass *mc) -{ - spapr_machine_4_1_class_options(mc); - compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); -} - -DEFINE_SPAPR_MACHINE(4_0, "4.0", false); - -/* - * pseries-3.1 - */ -static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index, +static void phb_placement_4_0(SpaprMachineState *spapr, uint32_t index, uint64_t *buid, hwaddr *pio, hwaddr *mmio32, hwaddr *mmio64, unsigned n_dma, uint32_t *liobns, @@ -4430,6 +4423,22 @@ static void phb_placement_3_1(SpaprMachineState *spapr, uint32_t index, *nv2atsd = 0; } +static void spapr_machine_4_0_class_options(MachineClass *mc) +{ + SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); + + spapr_machine_4_1_class_options(mc); + compat_props_add(mc->compat_props, hw_compat_4_0, hw_compat_4_0_len); + smc->phb_placement = phb_placement_4_0; + smc->irq = &spapr_irq_xics; + smc->pre_4_1_migration = true; +} + +DEFINE_SPAPR_MACHINE(4_0, "4.0", false); + +/* + * pseries-3.1 + */ static void spapr_machine_3_1_class_options(MachineClass *mc) { SpaprMachineClass *smc = SPAPR_MACHINE_CLASS(mc); @@ -4445,7 +4454,6 @@ static void spapr_machine_3_1_class_options(MachineClass *mc) smc->default_caps.caps[SPAPR_CAP_SBBC] = SPAPR_CAP_BROKEN; smc->default_caps.caps[SPAPR_CAP_IBS] = SPAPR_CAP_BROKEN; smc->default_caps.caps[SPAPR_CAP_LARGE_DECREMENTER] = SPAPR_CAP_OFF; - smc->phb_placement = phb_placement_3_1; } DEFINE_SPAPR_MACHINE(3_1, "3.1", false); diff --git a/hw/ppc/spapr_caps.c b/hw/ppc/spapr_caps.c index 9b1c10baa6..31b4661399 100644 --- a/hw/ppc/spapr_caps.c +++ b/hw/ppc/spapr_caps.c @@ -64,6 +64,7 @@ typedef struct SpaprCapabilityInfo { void (*apply)(SpaprMachineState *spapr, uint8_t val, Error **errp); void (*cpu_apply)(SpaprMachineState *spapr, PowerPCCPU *cpu, uint8_t val, Error **errp); + bool (*migrate_needed)(void *opaque); } SpaprCapabilityInfo; static void spapr_cap_get_bool(Object *obj, Visitor *v, const char *name, @@ -350,6 +351,11 @@ static void cap_hpt_maxpagesize_apply(SpaprMachineState *spapr, spapr_check_pagesize(spapr, qemu_minrampagesize(), errp); } +static bool cap_hpt_maxpagesize_migrate_needed(void *opaque) +{ + return !SPAPR_MACHINE_GET_CLASS(opaque)->pre_4_1_migration; +} + static bool spapr_pagesize_cb(void *opaque, uint32_t seg_pshift, uint32_t pshift) { @@ -542,6 +548,7 @@ SpaprCapabilityInfo capability_table[SPAPR_CAP_NUM] = { .type = "int", .apply = cap_hpt_maxpagesize_apply, .cpu_apply = cap_hpt_maxpagesize_cpu_apply, + .migrate_needed = cap_hpt_maxpagesize_migrate_needed, }, [SPAPR_CAP_NESTED_KVM_HV] = { .name = "nested-hv", @@ -679,8 +686,14 @@ int spapr_caps_post_migration(SpaprMachineState *spapr) static bool spapr_cap_##sname##_needed(void *opaque) \ { \ SpaprMachineState *spapr = opaque; \ + bool (*needed)(void *opaque) = \ + capability_table[cap].migrate_needed; \ \ + if (needed && !needed(opaque)) { \ + return false; \ + } \ + \ return spapr->cmd_line_caps[cap] && \ (spapr->eff.caps[cap] != \ spapr->def.caps[cap]); \ } \ @@ -703,6 +713,7 @@ SPAPR_CAP_MIG_STATE(dfp, SPAPR_CAP_DFP); SPAPR_CAP_MIG_STATE(cfpc, SPAPR_CAP_CFPC); SPAPR_CAP_MIG_STATE(sbbc, SPAPR_CAP_SBBC); SPAPR_CAP_MIG_STATE(ibs, SPAPR_CAP_IBS); +SPAPR_CAP_MIG_STATE(hpt_maxpagesize, SPAPR_CAP_HPT_MAXPAGESIZE); SPAPR_CAP_MIG_STATE(nested_kvm_hv, SPAPR_CAP_NESTED_KVM_HV); SPAPR_CAP_MIG_STATE(large_decr, SPAPR_CAP_LARGE_DECREMENTER); SPAPR_CAP_MIG_STATE(ccf_assist, SPAPR_CAP_CCF_ASSIST); diff --git a/hw/ppc/spapr_cpu_core.c b/hw/ppc/spapr_cpu_core.c index f04e06cdf6..5621fb9a3d 100644 --- a/hw/ppc/spapr_cpu_core.c +++ b/hw/ppc/spapr_cpu_core.c @@ -58,9 +58,11 @@ static void spapr_cpu_reset(void *opaque) * * Disable Power-saving mode Exit Cause exceptions for the CPU, so * we don't get spurious wakeups before an RTAS start-cpu call. + * For the same reason, set PSSCR_EC. */ lpcr &= ~(LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV | pcc->lpcr_pm); lpcr |= LPCR_LPES0 | LPCR_LPES1; + env->spr[SPR_PSSCR] |= PSSCR_EC; /* Set RMLS to the max (ie, 16G) */ lpcr &= ~LPCR_RMLS; diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 6c16d2b120..0a050ad3d8 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1513,6 +1513,7 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu, bool guest_radix; Error *local_err = NULL; bool raw_mode_supported = false; + bool guest_xive; cas_pvr = cas_check_pvr(spapr, cpu, &addr, &raw_mode_supported, &local_err); if (local_err) { @@ -1545,10 +1546,17 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu, error_report("guest requested hash and radix MMU, which is invalid."); exit(EXIT_FAILURE); } + if (spapr_ovec_test(ov5_guest, OV5_XIVE_BOTH)) { + error_report("guest requested an invalid interrupt mode"); + exit(EXIT_FAILURE); + } + /* The radix/hash bit in byte 24 requires special handling: */ guest_radix = spapr_ovec_test(ov5_guest, OV5_MMU_RADIX_300); spapr_ovec_clear(ov5_guest, OV5_MMU_RADIX_300); + guest_xive = spapr_ovec_test(ov5_guest, OV5_XIVE_EXPLOIT); + /* * HPT resizing is a bit of a special case, because when enabled * we assume an HPT guest will support it until it says it @@ -1633,6 +1641,24 @@ static target_ulong h_client_architecture_support(PowerPCCPU *cpu, } /* + * Ensure the guest asks for an interrupt mode we support; otherwise + * terminate the boot. + */ + if (guest_xive) { + if (spapr->irq->ov5 == SPAPR_OV5_XIVE_LEGACY) { + error_report( +"Guest requested unavailable interrupt mode (XIVE), try the ic-mode=xive or ic-mode=dual machine property"); + exit(EXIT_FAILURE); + } + } else { + if (spapr->irq->ov5 == SPAPR_OV5_XIVE_EXPLOIT) { + error_report( +"Guest requested unavailable interrupt mode (XICS), either don't set the ic-mode machine property or try ic-mode=xics or ic-mode=dual"); + exit(EXIT_FAILURE); + } + } + + /* * Generate a machine reset when we have an update of the * interrupt mode. Only required when the machine supports both * modes. diff --git a/hw/ppc/spapr_irq.c b/hw/ppc/spapr_irq.c index b1f79ea9de..3156daf093 100644 --- a/hw/ppc/spapr_irq.c +++ b/hw/ppc/spapr_irq.c @@ -62,38 +62,46 @@ void spapr_irq_msi_reset(SpaprMachineState *spapr) bitmap_clear(spapr->irq_map, 0, spapr->irq_map_nr); } - -/* - * XICS IRQ backend.
- */ - -static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, - Error **errp) +static void spapr_irq_init_device(SpaprMachineState *spapr, + SpaprIrq *irq, Error **errp) { MachineState *machine = MACHINE(spapr); - Object *obj; Error *local_err = NULL; - bool xics_kvm = false; - if (kvm_enabled()) { - if (machine_kernel_irqchip_allowed(machine) && - !xics_kvm_init(spapr, &local_err)) { - xics_kvm = true; - } - if (machine_kernel_irqchip_required(machine) && !xics_kvm) { + if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) { + irq->init_kvm(spapr, &local_err); + if (local_err && machine_kernel_irqchip_required(machine)) { error_prepend(&local_err, "kernel_irqchip requested but unavailable: "); error_propagate(errp, local_err); return; } - error_free(local_err); - local_err = NULL; - } - if (!xics_kvm) { - xics_spapr_init(spapr); + if (!local_err) { + return; + } + + /* + * We failed to initialize the KVM device, fall back to + * emulated mode + */ + error_prepend(&local_err, "kernel_irqchip allowed but unavailable: "); + warn_report_err(local_err); } + irq->init_emu(spapr, errp); +} + +/* + * XICS IRQ backend. + */ + +static void spapr_irq_init_xics(SpaprMachineState *spapr, int nr_irqs, + Error **errp) +{ + Object *obj; + Error *local_err = NULL; + obj = object_new(TYPE_ICS_SIMPLE); object_property_add_child(OBJECT(spapr), "ics", obj, &error_abort); object_property_add_const_link(obj, ICS_PROP_XICS, OBJECT(spapr), @@ -212,7 +220,13 @@ static void spapr_irq_set_irq_xics(void *opaque, int srcno, int val) static void spapr_irq_reset_xics(SpaprMachineState *spapr, Error **errp) { - /* TODO: create the KVM XICS device */ + Error *local_err = NULL; + + spapr_irq_init_device(spapr, &spapr_irq_xics, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } } static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr) @@ -220,6 +234,18 @@ static const char *spapr_irq_get_nodename_xics(SpaprMachineState *spapr) return XICS_NODENAME; } +static void spapr_irq_init_emu_xics(SpaprMachineState *spapr, Error **errp) +{ + xics_spapr_init(spapr); +} + +static void spapr_irq_init_kvm_xics(SpaprMachineState *spapr, Error **errp) +{ + if (kvm_enabled()) { + xics_kvm_init(spapr, errp); + } +} + #define SPAPR_IRQ_XICS_NR_IRQS 0x1000 #define SPAPR_IRQ_XICS_NR_MSIS \ (XICS_IRQ_BASE + SPAPR_IRQ_XICS_NR_IRQS - SPAPR_IRQ_MSI) @@ -240,6 +266,8 @@ SpaprIrq spapr_irq_xics = { .reset = spapr_irq_reset_xics, .set_irq = spapr_irq_set_irq_xics, .get_nodename = spapr_irq_get_nodename_xics, + .init_emu = spapr_irq_init_emu_xics, + .init_kvm = spapr_irq_init_kvm_xics, }; /* @@ -248,19 +276,10 @@ static void spapr_irq_init_xive(SpaprMachineState *spapr, int nr_irqs, Error **errp) { - MachineState *machine = MACHINE(spapr); uint32_t nr_servers = spapr_max_server_number(spapr); DeviceState *dev; int i; - /* KVM XIVE device not yet available */ - if (kvm_enabled()) { - if (machine_kernel_irqchip_required(machine)) { - error_setg(errp, "kernel_irqchip requested. 
no KVM XIVE support"); - return; - } - } - dev = qdev_create(NULL, TYPE_SPAPR_XIVE); qdev_prop_set_uint32(dev, "nr-irqs", nr_irqs); /* @@ -350,12 +369,13 @@ static void spapr_irq_cpu_intc_create_xive(SpaprMachineState *spapr, static int spapr_irq_post_load_xive(SpaprMachineState *spapr, int version_id) { - return 0; + return spapr_xive_post_load(spapr->xive, version_id); } static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp) { CPUState *cs; + Error *local_err = NULL; CPU_FOREACH(cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); @@ -364,6 +384,12 @@ static void spapr_irq_reset_xive(SpaprMachineState *spapr, Error **errp) spapr_xive_set_tctx_os_cam(spapr_cpu_state(cpu)->tctx); } + spapr_irq_init_device(spapr, &spapr_irq_xive, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + /* Activate the XIVE MMIOs */ spapr_xive_mmio_set_enabled(spapr->xive, true); } @@ -372,7 +398,11 @@ static void spapr_irq_set_irq_xive(void *opaque, int srcno, int val) { SpaprMachineState *spapr = opaque; - xive_source_set_irq(&spapr->xive->source, srcno, val); + if (kvm_irqchip_in_kernel()) { + kvmppc_xive_source_set_irq(&spapr->xive->source, srcno, val); + } else { + xive_source_set_irq(&spapr->xive->source, srcno, val); + } } static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr) @@ -380,6 +410,18 @@ static const char *spapr_irq_get_nodename_xive(SpaprMachineState *spapr) return spapr->xive->nodename; } +static void spapr_irq_init_emu_xive(SpaprMachineState *spapr, Error **errp) +{ + spapr_xive_init(spapr->xive, errp); +} + +static void spapr_irq_init_kvm_xive(SpaprMachineState *spapr, Error **errp) +{ + if (kvm_enabled()) { + kvmppc_xive_connect(spapr->xive, errp); + } +} + /* * XIVE uses the full IRQ number space. Set it to 8K to be compatible * with XICS. @@ -404,6 +446,8 @@ SpaprIrq spapr_irq_xive = { .reset = spapr_irq_reset_xive, .set_irq = spapr_irq_set_irq_xive, .get_nodename = spapr_irq_get_nodename_xive, + .init_emu = spapr_irq_init_emu_xive, + .init_kvm = spapr_irq_init_kvm_xive, }; /* @@ -428,14 +472,8 @@ static SpaprIrq *spapr_irq_current(SpaprMachineState *spapr) static void spapr_irq_init_dual(SpaprMachineState *spapr, int nr_irqs, Error **errp) { - MachineState *machine = MACHINE(spapr); Error *local_err = NULL; - if (kvm_enabled() && machine_kernel_irqchip_allowed(machine)) { - error_setg(errp, "No KVM support for the 'dual' machine"); - return; - } - spapr_irq_xics.init(spapr, spapr_irq_xics.nr_irqs, &local_err); if (local_err) { error_propagate(errp, local_err); @@ -514,6 +552,9 @@ static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id) * defaults to XICS at startup. */ if (spapr_ovec_test(spapr->ov5_cas, OV5_XIVE_EXPLOIT)) { + if (kvm_irqchip_in_kernel()) { + xics_kvm_disconnect(spapr, &error_fatal); + } spapr_irq_xive.reset(spapr, &error_fatal); } @@ -522,12 +563,30 @@ static int spapr_irq_post_load_dual(SpaprMachineState *spapr, int version_id) static void spapr_irq_reset_dual(SpaprMachineState *spapr, Error **errp) { + Error *local_err = NULL; + /* * Deactivate the XIVE MMIOs. The XIVE backend will reenable them * if selected. 
*/ spapr_xive_mmio_set_enabled(spapr->xive, false); + /* Destroy all KVM devices */ + if (kvm_irqchip_in_kernel()) { + xics_kvm_disconnect(spapr, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_prepend(errp, "KVM XICS disconnect failed: "); + return; + } + kvmppc_xive_disconnect(spapr->xive, &local_err); + if (local_err) { + error_propagate(errp, local_err); + error_prepend(errp, "KVM XIVE disconnect failed: "); + return; + } + } + spapr_irq_current(spapr)->reset(spapr, errp); } @@ -565,6 +624,8 @@ SpaprIrq spapr_irq_dual = { .reset = spapr_irq_reset_dual, .set_irq = spapr_irq_set_irq_dual, .get_nodename = spapr_irq_get_nodename_dual, + .init_emu = NULL, /* should not be used */ + .init_kvm = NULL, /* should not be used */ }; @@ -763,6 +824,9 @@ SpaprIrq spapr_irq_xics_legacy = { .dt_populate = spapr_dt_xics, .cpu_intc_create = spapr_irq_cpu_intc_create_xics, .post_load = spapr_irq_post_load_xics, + .reset = spapr_irq_reset_xics, .set_irq = spapr_irq_set_irq_xics, .get_nodename = spapr_irq_get_nodename_xics, + .init_emu = spapr_irq_init_emu_xics, + .init_kvm = spapr_irq_init_kvm_xics, }; diff --git a/hw/ppc/spapr_rtas.c b/hw/ppc/spapr_rtas.c index ee24212765..5bc1a93271 100644 --- a/hw/ppc/spapr_rtas.c +++ b/hw/ppc/spapr_rtas.c @@ -177,6 +177,7 @@ static void rtas_start_cpu(PowerPCCPU *callcpu, SpaprMachineState *spapr, } else { lpcr &= ~(LPCR_UPRT | LPCR_GTSE | LPCR_HR); } + env->spr[SPR_PSSCR] &= ~PSSCR_EC; } ppc_store_lpcr(newcpu, lpcr); @@ -205,8 +206,11 @@ static void rtas_stop_self(PowerPCCPU *cpu, SpaprMachineState *spapr, /* Disable Power-saving mode Exit Cause exceptions for the CPU. * This could deliver an interrupt on a dying CPU and crash the - * guest */ + * guest. + * For the same reason, set PSSCR_EC. + */ ppc_store_lpcr(cpu, env->spr[SPR_LPCR] & ~pcc->lpcr_pm); + env->spr[SPR_PSSCR] |= PSSCR_EC; cs->halted = 1; kvmppc_set_reg_ppc_online(cpu, 0); qemu_cpu_kick(cs); diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index e7e865ab3b..7b89ac798b 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -2336,6 +2336,13 @@ static void scsi_realize(SCSIDevice *dev, Error **errp) return; } + if (blk_get_aio_context(s->qdev.conf.blk) != qemu_get_aio_context() && + !s->qdev.hba_supports_iothread) + { + error_setg(errp, "HBA does not support iothreads"); + return; + } + if (dev->type == TYPE_DISK) { if (!blkconf_geometry(&dev->conf, NULL, 65535, 255, 255, errp)) { return; @@ -2417,7 +2424,7 @@ static void scsi_cd_realize(SCSIDevice *dev, Error **errp) if (!dev->conf.blk) { /* Anonymous BlockBackend for an empty drive. As we put it into * dev->conf, qdev takes care of detaching on unplug. 
*/ - dev->conf.blk = blk_new(0, BLK_PERM_ALL); + dev->conf.blk = blk_new(qemu_get_aio_context(), 0, BLK_PERM_ALL); ret = blk_attach_dev(dev->conf.blk, &dev->qdev); assert(ret == 0); } @@ -2929,13 +2936,14 @@ static const TypeInfo scsi_disk_base_info = { .abstract = true, }; -#define DEFINE_SCSI_DISK_PROPERTIES() \ - DEFINE_BLOCK_PROPERTIES(SCSIDiskState, qdev.conf), \ - DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ - DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ - DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ - DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ - DEFINE_PROP_STRING("product", SCSIDiskState, product), \ +#define DEFINE_SCSI_DISK_PROPERTIES() \ + DEFINE_PROP_DRIVE_IOTHREAD("drive", SCSIDiskState, qdev.conf.blk), \ + DEFINE_BLOCK_PROPERTIES_BASE(SCSIDiskState, qdev.conf), \ + DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), \ + DEFINE_PROP_STRING("ver", SCSIDiskState, version), \ + DEFINE_PROP_STRING("serial", SCSIDiskState, serial), \ + DEFINE_PROP_STRING("vendor", SCSIDiskState, vendor), \ + DEFINE_PROP_STRING("product", SCSIDiskState, product), \ DEFINE_PROP_STRING("device_id", SCSIDiskState, device_id) diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 839f120256..2994f0738f 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -789,28 +789,31 @@ static void virtio_scsi_change(SCSIBus *bus, SCSIDevice *dev, SCSISense sense) } } +static void virtio_scsi_pre_hotplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + SCSIDevice *sd = SCSI_DEVICE(dev); + sd->hba_supports_iothread = true; +} + static void virtio_scsi_hotplug(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(hotplug_dev); VirtIOSCSI *s = VIRTIO_SCSI(vdev); SCSIDevice *sd = SCSI_DEVICE(dev); + int ret; if (s->ctx && !s->dataplane_fenced) { - AioContext *ctx; if (blk_op_is_blocked(sd->conf.blk, BLOCK_OP_TYPE_DATAPLANE, errp)) { return; } - ctx = blk_get_aio_context(sd->conf.blk); - if (ctx != s->ctx && ctx != qemu_get_aio_context()) { - error_setg(errp, "Cannot attach a blockdev that is using " - "a different iothread"); - return; - } virtio_scsi_acquire(s); - blk_set_aio_context(sd->conf.blk, s->ctx); + ret = blk_set_aio_context(sd->conf.blk, s->ctx, errp); virtio_scsi_release(s); - + if (ret < 0) { + return; + } } if (virtio_vdev_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG)) { @@ -839,7 +842,8 @@ static void virtio_scsi_hotunplug(HotplugHandler *hotplug_dev, DeviceState *dev, if (s->ctx) { virtio_scsi_acquire(s); - blk_set_aio_context(sd->conf.blk, qemu_get_aio_context()); + /* If other users keep the BlockBackend in the iothread, that's ok */ + blk_set_aio_context(sd->conf.blk, qemu_get_aio_context(), NULL); virtio_scsi_release(s); } @@ -986,6 +990,7 @@ static void virtio_scsi_class_init(ObjectClass *klass, void *data) vdc->reset = virtio_scsi_reset; vdc->start_ioeventfd = virtio_scsi_dataplane_start; vdc->stop_ioeventfd = virtio_scsi_dataplane_stop; + hc->pre_plug = virtio_scsi_pre_hotplug; hc->plug = virtio_scsi_hotplug; hc->unplug = virtio_scsi_hotunplug; } diff --git a/hw/usb/core.c b/hw/usb/core.c index 8fbd9c7d57..3ab48a1607 100644 --- a/hw/usb/core.c +++ b/hw/usb/core.c @@ -87,10 +87,10 @@ void usb_device_reset(USBDevice *dev) if (dev == NULL || !dev->attached) { return; } + usb_device_handle_reset(dev); dev->remote_wakeup = 0; dev->addr = 0; dev->state = USB_STATE_DEFAULT; - usb_device_handle_reset(dev); } void usb_wakeup(USBEndpoint *ep, unsigned int stream) diff --git 
a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c index 7e9339b8a8..2b64d6ef03 100644 --- a/hw/usb/dev-hub.c +++ b/hw/usb/dev-hub.c @@ -24,12 +24,13 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu-common.h" +#include "qemu/timer.h" #include "trace.h" #include "hw/usb.h" #include "desc.h" #include "qemu/error-report.h" -#define NUM_PORTS 8 +#define MAX_PORTS 8 typedef struct USBHubPort { USBPort port; @@ -40,7 +41,10 @@ typedef struct USBHubPort { typedef struct USBHubState { USBDevice dev; USBEndpoint *intr; - USBHubPort ports[NUM_PORTS]; + uint32_t num_ports; + bool port_power; + QEMUTimer *port_timer; + USBHubPort ports[MAX_PORTS]; } USBHubState; #define TYPE_USB_HUB "usb-hub" @@ -109,7 +113,7 @@ static const USBDescIface desc_iface_hub = { { .bEndpointAddress = USB_DIR_IN | 0x01, .bmAttributes = USB_ENDPOINT_XFER_INT, - .wMaxPacketSize = 1 + DIV_ROUND_UP(NUM_PORTS, 8), + .wMaxPacketSize = 1 + DIV_ROUND_UP(MAX_PORTS, 8), .bInterval = 0xff, }, } @@ -158,19 +162,71 @@ static const uint8_t qemu_hub_hub_descriptor[] = /* DeviceRemovable and PortPwrCtrlMask patched in later */ }; +static bool usb_hub_port_change(USBHubPort *port, uint16_t status) +{ + bool notify = false; + + if (status & 0x1f) { + port->wPortChange |= status; + notify = true; + } + return notify; +} + +static bool usb_hub_port_set(USBHubPort *port, uint16_t status) +{ + if (port->wPortStatus & status) { + return false; + } + port->wPortStatus |= status; + return usb_hub_port_change(port, status); +} + +static bool usb_hub_port_clear(USBHubPort *port, uint16_t status) +{ + if (!(port->wPortStatus & status)) { + return false; + } + port->wPortStatus &= ~status; + return usb_hub_port_change(port, status); +} + +static bool usb_hub_port_update(USBHubPort *port) +{ + bool notify = false; + + if (port->port.dev && port->port.dev->attached) { + notify = usb_hub_port_set(port, PORT_STAT_CONNECTION); + if (port->port.dev->speed == USB_SPEED_LOW) { + usb_hub_port_set(port, PORT_STAT_LOW_SPEED); + } else { + usb_hub_port_clear(port, PORT_STAT_LOW_SPEED); + } + } + return notify; +} + +static void usb_hub_port_update_timer(void *opaque) +{ + USBHubState *s = opaque; + bool notify = false; + int i; + + for (i = 0; i < s->num_ports; i++) { + notify |= usb_hub_port_update(&s->ports[i]); + } + if (notify) { + usb_wakeup(s->intr, 0); + } +} + static void usb_hub_attach(USBPort *port1) { USBHubState *s = port1->opaque; USBHubPort *port = &s->ports[port1->index]; trace_usb_hub_attach(s->dev.addr, port1->index + 1); - port->wPortStatus |= PORT_STAT_CONNECTION; - port->wPortChange |= PORT_STAT_C_CONNECTION; - if (port->port.dev->speed == USB_SPEED_LOW) { - port->wPortStatus |= PORT_STAT_LOW_SPEED; - } else { - port->wPortStatus &= ~PORT_STAT_LOW_SPEED; - } + usb_hub_port_update(port); usb_wakeup(s->intr, 0); } @@ -185,16 +241,9 @@ static void usb_hub_detach(USBPort *port1) /* Let upstream know the device on this port is gone */ s->dev.port->ops->child_detach(s->dev.port, port1->dev); - port->wPortStatus &= ~PORT_STAT_CONNECTION; - port->wPortChange |= PORT_STAT_C_CONNECTION; - if (port->wPortStatus & PORT_STAT_ENABLE) { - port->wPortStatus &= ~PORT_STAT_ENABLE; - port->wPortChange |= PORT_STAT_C_ENABLE; - } - if (port->wPortStatus & PORT_STAT_SUSPEND) { - port->wPortStatus &= ~PORT_STAT_SUSPEND; - port->wPortChange |= PORT_STAT_C_SUSPEND; - } + usb_hub_port_clear(port, PORT_STAT_CONNECTION); + usb_hub_port_clear(port, PORT_STAT_ENABLE); + usb_hub_port_clear(port, PORT_STAT_SUSPEND); usb_wakeup(s->intr, 0); } @@ -211,9 +260,7 @@ static void 
usb_hub_wakeup(USBPort *port1) USBHubState *s = port1->opaque; USBHubPort *port = &s->ports[port1->index]; - if (port->wPortStatus & PORT_STAT_SUSPEND) { - port->wPortStatus &= ~PORT_STAT_SUSPEND; - port->wPortChange |= PORT_STAT_C_SUSPEND; + if (usb_hub_port_clear(port, PORT_STAT_SUSPEND)) { usb_wakeup(s->intr, 0); } } @@ -242,7 +289,7 @@ static USBDevice *usb_hub_find_device(USBDevice *dev, uint8_t addr) USBDevice *downstream; int i; - for (i = 0; i < NUM_PORTS; i++) { + for (i = 0; i < s->num_ports; i++) { port = &s->ports[i]; if (!(port->wPortStatus & PORT_STAT_ENABLE)) { continue; @@ -262,17 +309,12 @@ static void usb_hub_handle_reset(USBDevice *dev) int i; trace_usb_hub_reset(s->dev.addr); - for (i = 0; i < NUM_PORTS; i++) { + for (i = 0; i < s->num_ports; i++) { port = s->ports + i; - port->wPortStatus = PORT_STAT_POWER; + port->wPortStatus = 0; port->wPortChange = 0; - if (port->port.dev && port->port.dev->attached) { - port->wPortStatus |= PORT_STAT_CONNECTION; - port->wPortChange |= PORT_STAT_C_CONNECTION; - if (port->port.dev->speed == USB_SPEED_LOW) { - port->wPortStatus |= PORT_STAT_LOW_SPEED; - } - } + usb_hub_port_set(port, PORT_STAT_POWER); + usb_hub_port_update(port); } } @@ -287,11 +329,11 @@ static const char *feature_name(int feature) [PORT_POWER] = "power", [PORT_LOWSPEED] = "lowspeed", [PORT_HIGHSPEED] = "highspeed", - [PORT_C_CONNECTION] = "change connection", - [PORT_C_ENABLE] = "change enable", - [PORT_C_SUSPEND] = "change suspend", - [PORT_C_OVERCURRENT] = "change overcurrent", - [PORT_C_RESET] = "change reset", + [PORT_C_CONNECTION] = "change-connection", + [PORT_C_ENABLE] = "change-enable", + [PORT_C_SUSPEND] = "change-suspend", + [PORT_C_OVERCURRENT] = "change-overcurrent", + [PORT_C_RESET] = "change-reset", [PORT_TEST] = "test", [PORT_INDICATOR] = "indicator", }; @@ -332,7 +374,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, { unsigned int n = index - 1; USBHubPort *port; - if (n >= NUM_PORTS) { + if (n >= s->num_ports) { goto fail; } port = &s->ports[n]; @@ -361,7 +403,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, trace_usb_hub_set_port_feature(s->dev.addr, index, feature_name(value)); - if (n >= NUM_PORTS) { + if (n >= s->num_ports) { goto fail; } port = &s->ports[n]; @@ -371,15 +413,20 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, port->wPortStatus |= PORT_STAT_SUSPEND; break; case PORT_RESET: + usb_hub_port_set(port, PORT_STAT_RESET); + usb_hub_port_clear(port, PORT_STAT_RESET); if (dev && dev->attached) { usb_device_reset(dev); - port->wPortChange |= PORT_STAT_C_RESET; - /* set enable bit */ - port->wPortStatus |= PORT_STAT_ENABLE; - usb_wakeup(s->intr, 0); + usb_hub_port_set(port, PORT_STAT_ENABLE); } + usb_wakeup(s->intr, 0); break; case PORT_POWER: + if (s->port_power) { + int64_t now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + usb_hub_port_set(port, PORT_STAT_POWER); + timer_mod(s->port_timer, now + 5000000); /* 5 ms */ + } break; default: goto fail; @@ -394,7 +441,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, trace_usb_hub_clear_port_feature(s->dev.addr, index, feature_name(value)); - if (n >= NUM_PORTS) { + if (n >= s->num_ports) { goto fail; } port = &s->ports[n]; @@ -406,20 +453,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, port->wPortChange &= ~PORT_STAT_C_ENABLE; break; case PORT_SUSPEND: - if (port->wPortStatus & PORT_STAT_SUSPEND) { - port->wPortStatus &= ~PORT_STAT_SUSPEND; - - /* - * USB Spec rev2.0 11.24.2.7.2.3 C_PORT_SUSPEND 
- * "This bit is set on the following transitions: - * - On transition from the Resuming state to the - * SendEOP [sic] state" - * - * Note that this includes both remote wake-up and - * explicit ClearPortFeature(PORT_SUSPEND). - */ - port->wPortChange |= PORT_STAT_C_SUSPEND; - } + usb_hub_port_clear(port, PORT_STAT_SUSPEND); break; case PORT_C_SUSPEND: port->wPortChange &= ~PORT_STAT_C_SUSPEND; @@ -433,6 +467,14 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, case PORT_C_RESET: port->wPortChange &= ~PORT_STAT_C_RESET; break; + case PORT_POWER: + if (s->port_power) { + usb_hub_port_clear(port, PORT_STAT_POWER); + usb_hub_port_clear(port, PORT_STAT_CONNECTION); + usb_hub_port_clear(port, PORT_STAT_ENABLE); + usb_hub_port_clear(port, PORT_STAT_SUSPEND); + port->wPortChange = 0; + } default: goto fail; } @@ -443,17 +485,22 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p, unsigned int n, limit, var_hub_size = 0; memcpy(data, qemu_hub_hub_descriptor, sizeof(qemu_hub_hub_descriptor)); - data[2] = NUM_PORTS; + data[2] = s->num_ports; + + if (s->port_power) { + data[3] &= ~0x03; + data[3] |= 0x01; + } /* fill DeviceRemovable bits */ - limit = DIV_ROUND_UP(NUM_PORTS + 1, 8) + 7; + limit = DIV_ROUND_UP(s->num_ports + 1, 8) + 7; for (n = 7; n < limit; n++) { data[n] = 0x00; var_hub_size++; } /* fill PortPwrCtrlMask bits */ - limit = limit + DIV_ROUND_UP(NUM_PORTS, 8); + limit = limit + DIV_ROUND_UP(s->num_ports, 8); for (;n < limit; n++) { data[n] = 0xff; var_hub_size++; @@ -481,7 +528,7 @@ static void usb_hub_handle_data(USBDevice *dev, USBPacket *p) unsigned int status; uint8_t buf[4]; int i, n; - n = DIV_ROUND_UP(NUM_PORTS + 1, 8); + n = DIV_ROUND_UP(s->num_ports + 1, 8); if (p->iov.size == 1) { /* FreeBSD workaround */ n = 1; } else if (n > p->iov.size) { @@ -489,7 +536,7 @@ static void usb_hub_handle_data(USBDevice *dev, USBPacket *p) return; } status = 0; - for(i = 0; i < NUM_PORTS; i++) { + for (i = 0; i < s->num_ports; i++) { port = &s->ports[i]; if (port->wPortChange) status |= (1 << (i + 1)); @@ -520,10 +567,13 @@ static void usb_hub_unrealize(USBDevice *dev, Error **errp) USBHubState *s = (USBHubState *)dev; int i; - for (i = 0; i < NUM_PORTS; i++) { + for (i = 0; i < s->num_ports; i++) { usb_unregister_port(usb_bus_from_device(dev), &s->ports[i].port); } + + timer_del(s->port_timer); + timer_free(s->port_timer); } static USBPortOps usb_hub_port_ops = { @@ -540,6 +590,12 @@ static void usb_hub_realize(USBDevice *dev, Error **errp) USBHubPort *port; int i; + if (s->num_ports < 1 || s->num_ports > MAX_PORTS) { + error_setg(errp, "num_ports (%d) out of range (1..%d)", + s->num_ports, MAX_PORTS); + return; + } + if (dev->port->hubcount == 5) { error_setg(errp, "usb hub chain too deep"); return; @@ -547,8 +603,10 @@ static void usb_hub_realize(USBDevice *dev, Error **errp) usb_desc_create_serial(dev); usb_desc_init(dev); + s->port_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, + usb_hub_port_update_timer, s); s->intr = usb_ep_get(dev, USB_TOKEN_IN, 1); - for (i = 0; i < NUM_PORTS; i++) { + for (i = 0; i < s->num_ports; i++) { port = &s->ports[i]; usb_register_port(usb_bus_from_device(dev), &port->port, s, i, &usb_hub_port_ops, @@ -569,18 +627,46 @@ static const VMStateDescription vmstate_usb_hub_port = { } }; +static bool usb_hub_port_timer_needed(void *opaque) +{ + USBHubState *s = opaque; + + return s->port_power; +} + +static const VMStateDescription vmstate_usb_hub_port_timer = { + .name = "usb-hub/port-timer", + .version_id = 1, + .minimum_version_id = 1, + 
.needed = usb_hub_port_timer_needed, + .fields = (VMStateField[]) { + VMSTATE_TIMER_PTR(port_timer, USBHubState), + VMSTATE_END_OF_LIST() + }, +}; + static const VMStateDescription vmstate_usb_hub = { .name = "usb-hub", .version_id = 1, .minimum_version_id = 1, .fields = (VMStateField[]) { VMSTATE_USB_DEVICE(dev, USBHubState), - VMSTATE_STRUCT_ARRAY(ports, USBHubState, NUM_PORTS, 0, + VMSTATE_STRUCT_ARRAY(ports, USBHubState, MAX_PORTS, 0, vmstate_usb_hub_port, USBHubPort), VMSTATE_END_OF_LIST() + }, + .subsections = (const VMStateDescription * []) { + &vmstate_usb_hub_port_timer, + NULL } }; +static Property usb_hub_properties[] = { + DEFINE_PROP_UINT32("ports", USBHubState, num_ports, 8), + DEFINE_PROP_BOOL("port-power", USBHubState, port_power, false), + DEFINE_PROP_END_OF_LIST(), +}; + static void usb_hub_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -597,6 +683,7 @@ static void usb_hub_class_initfn(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_BRIDGE, dc->categories); dc->fw_name = "hub"; dc->vmsd = &vmstate_usb_hub; + dc->props = usb_hub_properties; } static const TypeInfo hub_info = { diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index 67b7465915..4f765d7f9a 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -1225,19 +1225,21 @@ static void usb_host_set_address(USBHostDevice *s, int addr) static void usb_host_set_config(USBHostDevice *s, int config, USBPacket *p) { - int rc; + int rc = 0; trace_usb_host_set_config(s->bus_num, s->addr, config); usb_host_release_interfaces(s); - rc = libusb_set_configuration(s->dh, config); - if (rc != 0) { - usb_host_libusb_error("libusb_set_configuration", rc); - p->status = USB_RET_STALL; - if (rc == LIBUSB_ERROR_NO_DEVICE) { - usb_host_nodev(s); + if (s->ddesc.bNumConfigurations != 1) { + rc = libusb_set_configuration(s->dh, config); + if (rc != 0) { + usb_host_libusb_error("libusb_set_configuration", rc); + p->status = USB_RET_STALL; + if (rc == LIBUSB_ERROR_NO_DEVICE) { + usb_host_nodev(s); + } + return; } - return; } p->status = usb_host_claim_interfaces(s, config); if (p->status != USB_RET_SUCCESS) { @@ -1459,6 +1461,9 @@ static void usb_host_handle_reset(USBDevice *udev) if (!s->allow_guest_reset) { return; } + if (udev->addr == 0) { + return; + } trace_usb_host_reset(s->bus_num, s->addr); diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index 553319c7ac..4ca5b2551e 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -96,6 +96,7 @@ typedef enum VhostUserRequest { VHOST_USER_POSTCOPY_END = 30, VHOST_USER_GET_INFLIGHT_FD = 31, VHOST_USER_SET_INFLIGHT_FD = 32, + VHOST_USER_GPU_SET_SOCKET = 33, VHOST_USER_MAX } VhostUserRequest; @@ -353,6 +354,16 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, return 0; } +int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd) +{ + VhostUserMsg msg = { + .hdr.request = VHOST_USER_GPU_SET_SOCKET, + .hdr.flags = VHOST_USER_VERSION, + }; + + return vhost_user_write(dev, &msg, &fd, 1); +} + static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, struct vhost_log *log) { |
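[Editor's aside: VHOST_USER_GPU_SET_SOCKET above is a plain vhost-user control message whose only payload is a file descriptor, carried as SCM_RIGHTS ancillary data through the fd array argument of vhost_user_write(). A plausible caller sketch; the socketpair plumbing and gpu_attach_backend_socket() are illustrative assumptions, not code from this series:

static int gpu_attach_backend_socket(struct vhost_dev *dev)
{
    int sv[2];

    /* One end stays in QEMU for the GPU channel; the other is handed
     * to the vhost-user backend through the control socket. */
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
        return -errno;
    }

    if (vhost_user_gpu_set_socket(dev, sv[1]) < 0) {
        close(sv[0]);
        close(sv[1]);
        return -1;
    }

    /* The descriptor travels with the message; the local copy can go. */
    close(sv[1]);

    return sv[0]; /* QEMU-side end of the GPU channel */
}

End of aside.]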