Diffstat (limited to 'hw')
37 files changed, 1030 insertions, 194 deletions
diff --git a/hw/arm/allwinner-a10.c b/hw/arm/allwinner-a10.c index ff249af335..43dc0a12de 100644 --- a/hw/arm/allwinner-a10.c +++ b/hw/arm/allwinner-a10.c @@ -103,6 +103,12 @@ static void aw_a10_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = aw_a10_realize; + + /* + * Reason: creates an ARM CPU, thus use after free(), see + * arm_cpu_class_init() + */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo aw_a10_type_info = { diff --git a/hw/arm/digic.c b/hw/arm/digic.c index ec8c330602..90f8190c48 100644 --- a/hw/arm/digic.c +++ b/hw/arm/digic.c @@ -97,6 +97,12 @@ static void digic_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = digic_realize; + + /* + * Reason: creates an ARM CPU, thus use after free(), see + * arm_cpu_class_init() + */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo digic_type_info = { diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c index 86fde42e34..e1cadac997 100644 --- a/hw/arm/fsl-imx25.c +++ b/hw/arm/fsl-imx25.c @@ -284,6 +284,12 @@ static void fsl_imx25_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = fsl_imx25_realize; + + /* + * Reason: creates an ARM CPU, thus use after free(), see + * arm_cpu_class_init() + */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo fsl_imx25_type_info = { diff --git a/hw/arm/fsl-imx31.c b/hw/arm/fsl-imx31.c index 8e1ed4811b..53d4473250 100644 --- a/hw/arm/fsl-imx31.c +++ b/hw/arm/fsl-imx31.c @@ -258,6 +258,12 @@ static void fsl_imx31_class_init(ObjectClass *oc, void *data) DeviceClass *dc = DEVICE_CLASS(oc); dc->realize = fsl_imx31_realize; + + /* + * Reason: creates an ARM CPU, thus use after free(), see + * arm_cpu_class_init() + */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo fsl_imx31_type_info = { diff --git a/hw/arm/pxa2xx.c b/hw/arm/pxa2xx.c index 164260a9b6..79d22d91e5 100644 --- a/hw/arm/pxa2xx.c +++ b/hw/arm/pxa2xx.c @@ -1958,7 +1958,7 @@ static void pxa2xx_fir_instance_init(Object *obj) PXA2xxFIrState *s = PXA2XX_FIR(obj); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); - memory_region_init_io(&s->iomem, NULL, &pxa2xx_fir_ops, s, + memory_region_init_io(&s->iomem, obj, &pxa2xx_fir_ops, s, "pxa2xx-fir", 0x1000); sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); diff --git a/hw/arm/xlnx-zynqmp.c b/hw/arm/xlnx-zynqmp.c index a9097f9b72..b36ca3da74 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -271,6 +271,12 @@ static void xlnx_zynqmp_class_init(ObjectClass *oc, void *data) dc->props = xlnx_zynqmp_props; dc->realize = xlnx_zynqmp_realize; + + /* + * Reason: creates an ARM CPU, thus use after free(), see + * arm_cpu_class_init() + */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo xlnx_zynqmp_type_info = { diff --git a/hw/char/etraxfs_ser.c b/hw/char/etraxfs_ser.c index 857c13621c..562021e28b 100644 --- a/hw/char/etraxfs_ser.c +++ b/hw/char/etraxfs_ser.c @@ -182,15 +182,13 @@ static void serial_receive(void *opaque, const uint8_t *buf, int size) static int serial_can_receive(void *opaque) { ETRAXSerial *s = opaque; - int r; /* Is the receiver enabled? 
*/ if (!(s->regs[RW_REC_CTRL] & (1 << 3))) { return 0; } - r = sizeof(s->rx_fifo) - s->rx_fifo_len; - return r; + return sizeof(s->rx_fifo) - s->rx_fifo_len; } static void serial_event(void *opaque, int event) diff --git a/hw/char/imx_serial.c b/hw/char/imx_serial.c index e8f32c46c2..f0c4c722d6 100644 --- a/hw/char/imx_serial.c +++ b/hw/char/imx_serial.c @@ -66,7 +66,9 @@ static void imx_update(IMXSerialState *s) uint32_t flags; flags = (s->usr1 & s->ucr1) & (USR1_TRDY|USR1_RRDY); - if (!(s->ucr1 & UCR1_TXMPTYEN)) { + if (s->ucr1 & UCR1_TXMPTYEN) { + flags |= (s->uts1 & UTS1_TXEMPTY); + } else { flags &= ~USR1_TRDY; } diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs index dd8ea76d17..f0cf431a0f 100644 --- a/hw/display/Makefile.objs +++ b/hw/display/Makefile.objs @@ -35,6 +35,10 @@ obj-$(CONFIG_VGA) += vga.o common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o -obj-$(CONFIG_VIRTIO) += virtio-gpu.o +obj-$(CONFIG_VIRTIO) += virtio-gpu.o virtio-gpu-3d.o obj-$(CONFIG_VIRTIO_PCI) += virtio-gpu-pci.o obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o +virtio-gpu.o-cflags := $(VIRGL_CFLAGS) +virtio-gpu.o-libs += $(VIRGL_LIBS) +virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS) +virtio-gpu-3d.o-libs += $(VIRGL_LIBS) diff --git a/hw/display/cg3.c b/hw/display/cg3.c index d2a0d97320..e309fbe92e 100644 --- a/hw/display/cg3.c +++ b/hw/display/cg3.c @@ -280,12 +280,12 @@ static void cg3_initfn(Object *obj) SysBusDevice *sbd = SYS_BUS_DEVICE(obj); CG3State *s = CG3(obj); - memory_region_init_ram(&s->rom, NULL, "cg3.prom", FCODE_MAX_ROM_SIZE, + memory_region_init_ram(&s->rom, obj, "cg3.prom", FCODE_MAX_ROM_SIZE, &error_fatal); memory_region_set_readonly(&s->rom, true); sysbus_init_mmio(sbd, &s->rom); - memory_region_init_io(&s->reg, NULL, &cg3_reg_ops, s, "cg3.reg", + memory_region_init_io(&s->reg, obj, &cg3_reg_ops, s, "cg3.reg", CG3_REG_SIZE); sysbus_init_mmio(sbd, &s->reg); } diff --git a/hw/display/tcx.c b/hw/display/tcx.c index 463580094a..bf119bc89a 100644 --- a/hw/display/tcx.c +++ b/hw/display/tcx.c @@ -944,7 +944,7 @@ static void tcx_initfn(Object *obj) SysBusDevice *sbd = SYS_BUS_DEVICE(obj); TCXState *s = TCX(obj); - memory_region_init_ram(&s->rom, NULL, "tcx.prom", FCODE_MAX_ROM_SIZE, + memory_region_init_ram(&s->rom, OBJECT(s), "tcx.prom", FCODE_MAX_ROM_SIZE, &error_fatal); memory_region_set_readonly(&s->rom, true); sysbus_init_mmio(sbd, &s->rom); diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c new file mode 100644 index 0000000000..28dccfdeca --- /dev/null +++ b/hw/display/virtio-gpu-3d.c @@ -0,0 +1,598 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu-common.h" +#include "qemu/iov.h" +#include "trace.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-gpu.h" + +#ifdef CONFIG_VIRGL + +#include "virglrenderer.h" + +static struct virgl_renderer_callbacks virtio_gpu_3d_cbs; + +static void virgl_cmd_create_resource_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_create_2d c2d; + struct virgl_renderer_resource_create_args args; + + VIRTIO_GPU_FILL_CMD(c2d); + trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, + c2d.width, c2d.height); + + args.handle = c2d.resource_id; + args.target = 2; + args.format = c2d.format; + args.bind = (1 << 1); + args.width = c2d.width; + args.height = c2d.height; + args.depth = 1; + args.array_size = 1; + args.last_level = 0; + args.nr_samples = 0; + args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP; + virgl_renderer_resource_create(&args, NULL, 0); +} + +static void virgl_cmd_create_resource_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_create_3d c3d; + struct virgl_renderer_resource_create_args args; + + VIRTIO_GPU_FILL_CMD(c3d); + trace_virtio_gpu_cmd_res_create_3d(c3d.resource_id, c3d.format, + c3d.width, c3d.height, c3d.depth); + + args.handle = c3d.resource_id; + args.target = c3d.target; + args.format = c3d.format; + args.bind = c3d.bind; + args.width = c3d.width; + args.height = c3d.height; + args.depth = c3d.depth; + args.array_size = c3d.array_size; + args.last_level = c3d.last_level; + args.nr_samples = c3d.nr_samples; + args.flags = c3d.flags; + virgl_renderer_resource_create(&args, NULL, 0); +} + +static void virgl_cmd_resource_unref(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_unref unref; + + VIRTIO_GPU_FILL_CMD(unref); + trace_virtio_gpu_cmd_res_unref(unref.resource_id); + + virgl_renderer_resource_unref(unref.resource_id); +} + +static void virgl_cmd_context_create(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_create cc; + + VIRTIO_GPU_FILL_CMD(cc); + trace_virtio_gpu_cmd_ctx_create(cc.hdr.ctx_id, + cc.debug_name); + + virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen, + cc.debug_name); +} + +static void virgl_cmd_context_destroy(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_destroy cd; + + VIRTIO_GPU_FILL_CMD(cd); + trace_virtio_gpu_cmd_ctx_destroy(cd.hdr.ctx_id); + + virgl_renderer_context_destroy(cd.hdr.ctx_id); +} + +static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y, + int width, int height) +{ + if (!g->scanout[idx].con) { + return; + } + + dpy_gl_update(g->scanout[idx].con, x, y, width, height); +} + +static void virgl_cmd_resource_flush(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_flush rf; + int i; + + VIRTIO_GPU_FILL_CMD(rf); + trace_virtio_gpu_cmd_res_flush(rf.resource_id, + rf.r.width, rf.r.height, rf.r.x, rf.r.y); + + for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) { + if (g->scanout[i].resource_id != rf.resource_id) { + continue; + } + virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height); + } +} + +static void virgl_cmd_set_scanout(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_set_scanout ss; + struct virgl_renderer_resource_info info; + int ret; + + VIRTIO_GPU_FILL_CMD(ss); + trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, ss.r.y); + + if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT) { + 
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + g->enable = 1; + + memset(&info, 0, sizeof(info)); + + if (ss.resource_id && ss.r.width && ss.r.height) { + ret = virgl_renderer_resource_get_info(ss.resource_id, &info); + if (ret == -1) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: illegal resource specified %d\n", + __func__, ss.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + qemu_console_resize(g->scanout[ss.scanout_id].con, + ss.r.width, ss.r.height); + virgl_renderer_force_ctx_0(); + dpy_gl_scanout(g->scanout[ss.scanout_id].con, info.tex_id, + info.flags & 1 /* FIXME: Y_0_TOP */, + ss.r.x, ss.r.y, ss.r.width, ss.r.height); + } else { + if (ss.scanout_id != 0) { + dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL); + } + dpy_gl_scanout(g->scanout[ss.scanout_id].con, 0, false, + 0, 0, 0, 0); + } + g->scanout[ss.scanout_id].resource_id = ss.resource_id; +} + +static void virgl_cmd_submit_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_cmd_submit cs; + void *buf; + size_t s; + + VIRTIO_GPU_FILL_CMD(cs); + trace_virtio_gpu_cmd_ctx_submit(cs.hdr.ctx_id, cs.size); + + buf = g_malloc(cs.size); + s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, + sizeof(cs), buf, cs.size); + if (s != cs.size) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: size mismatch (%zd/%d)", + __func__, s, cs.size); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + if (virtio_gpu_stats_enabled(g->conf)) { + g->stats.req_3d++; + g->stats.bytes_3d += cs.size; + } + + virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4); + + g_free(buf); +} + +static void virgl_cmd_transfer_to_host_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_to_host_2d t2d; + struct virtio_gpu_box box; + + VIRTIO_GPU_FILL_CMD(t2d); + trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); + + box.x = t2d.r.x; + box.y = t2d.r.y; + box.z = 0; + box.w = t2d.r.width; + box.h = t2d.r.height; + box.d = 1; + + virgl_renderer_transfer_write_iov(t2d.resource_id, + 0, + 0, + 0, + 0, + (struct virgl_box *)&box, + t2d.offset, NULL, 0); +} + +static void virgl_cmd_transfer_to_host_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_host_3d t3d; + + VIRTIO_GPU_FILL_CMD(t3d); + trace_virtio_gpu_cmd_res_xfer_toh_3d(t3d.resource_id); + + virgl_renderer_transfer_write_iov(t3d.resource_id, + t3d.hdr.ctx_id, + t3d.level, + t3d.stride, + t3d.layer_stride, + (struct virgl_box *)&t3d.box, + t3d.offset, NULL, 0); +} + +static void +virgl_cmd_transfer_from_host_3d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_transfer_host_3d tf3d; + + VIRTIO_GPU_FILL_CMD(tf3d); + trace_virtio_gpu_cmd_res_xfer_fromh_3d(tf3d.resource_id); + + virgl_renderer_transfer_read_iov(tf3d.resource_id, + tf3d.hdr.ctx_id, + tf3d.level, + tf3d.stride, + tf3d.layer_stride, + (struct virgl_box *)&tf3d.box, + tf3d.offset, NULL, 0); +} + + +static void virgl_resource_attach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_attach_backing att_rb; + struct iovec *res_iovs; + int ret; + + VIRTIO_GPU_FILL_CMD(att_rb); + trace_virtio_gpu_cmd_res_back_attach(att_rb.resource_id); + + ret = virtio_gpu_create_mapping_iov(&att_rb, cmd, &res_iovs); + if (ret != 0) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + 
virgl_renderer_resource_attach_iov(att_rb.resource_id, + res_iovs, att_rb.nr_entries); +} + +static void virgl_resource_detach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resource_detach_backing detach_rb; + struct iovec *res_iovs = NULL; + int num_iovs = 0; + + VIRTIO_GPU_FILL_CMD(detach_rb); + trace_virtio_gpu_cmd_res_back_detach(detach_rb.resource_id); + + virgl_renderer_resource_detach_iov(detach_rb.resource_id, + &res_iovs, + &num_iovs); + if (res_iovs == NULL || num_iovs == 0) { + return; + } + virtio_gpu_cleanup_mapping_iov(res_iovs, num_iovs); +} + + +static void virgl_cmd_ctx_attach_resource(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_resource att_res; + + VIRTIO_GPU_FILL_CMD(att_res); + trace_virtio_gpu_cmd_ctx_res_attach(att_res.hdr.ctx_id, + att_res.resource_id); + + virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id); +} + +static void virgl_cmd_ctx_detach_resource(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_ctx_resource det_res; + + VIRTIO_GPU_FILL_CMD(det_res); + trace_virtio_gpu_cmd_ctx_res_detach(det_res.hdr.ctx_id, + det_res.resource_id); + + virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id); +} + +static void virgl_cmd_get_capset_info(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_get_capset_info info; + struct virtio_gpu_resp_capset_info resp; + + VIRTIO_GPU_FILL_CMD(info); + + if (info.capset_index == 0) { + resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL; + virgl_renderer_get_cap_set(resp.capset_id, + &resp.capset_max_version, + &resp.capset_max_size); + } else { + resp.capset_max_version = 0; + resp.capset_max_size = 0; + } + resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO; + virtio_gpu_ctrl_response(g, cmd, &resp.hdr, sizeof(resp)); +} + +static void virgl_cmd_get_capset(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_get_capset gc; + struct virtio_gpu_resp_capset *resp; + uint32_t max_ver, max_size; + VIRTIO_GPU_FILL_CMD(gc); + + virgl_renderer_get_cap_set(gc.capset_id, &max_ver, + &max_size); + resp = g_malloc(sizeof(*resp) + max_size); + + resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET; + virgl_renderer_fill_caps(gc.capset_id, + gc.capset_version, + (void *)resp->capset_data); + virtio_gpu_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size); + g_free(resp); +} + +void virtio_gpu_virgl_process_cmd(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); + + virgl_renderer_force_ctx_0(); + switch (cmd->cmd_hdr.type) { + case VIRTIO_GPU_CMD_CTX_CREATE: + virgl_cmd_context_create(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_DESTROY: + virgl_cmd_context_destroy(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: + virgl_cmd_create_resource_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D: + virgl_cmd_create_resource_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_SUBMIT_3D: + virgl_cmd_submit_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: + virgl_cmd_transfer_to_host_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D: + virgl_cmd_transfer_to_host_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D: + virgl_cmd_transfer_from_host_3d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: + virgl_resource_attach_backing(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: + virgl_resource_detach_backing(g, cmd); + break; + case 
VIRTIO_GPU_CMD_SET_SCANOUT: + virgl_cmd_set_scanout(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virgl_cmd_resource_flush(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNREF: + virgl_cmd_resource_unref(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE: + /* TODO add security */ + virgl_cmd_ctx_attach_resource(g, cmd); + break; + case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE: + /* TODO add security */ + virgl_cmd_ctx_detach_resource(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_CAPSET_INFO: + virgl_cmd_get_capset_info(g, cmd); + break; + case VIRTIO_GPU_CMD_GET_CAPSET: + virgl_cmd_get_capset(g, cmd); + break; + + case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: + virtio_gpu_get_display_info(g, cmd); + break; + default: + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + break; + } + + if (cmd->finished) { + return; + } + if (cmd->error) { + fprintf(stderr, "%s: ctrl 0x%x, error 0x%x\n", __func__, + cmd->cmd_hdr.type, cmd->error); + virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error); + return; + } + if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) { + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + return; + } + + trace_virtio_gpu_fence_ctrl(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); + virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type); +} + +static void virgl_write_fence(void *opaque, uint32_t fence) +{ + VirtIOGPU *g = opaque; + struct virtio_gpu_ctrl_command *cmd, *tmp; + + QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) { + /* + * the guest can end up emitting fences out of order + * so we should check all fenced cmds not just the first one. + */ + if (cmd->cmd_hdr.fence_id > fence) { + continue; + } + trace_virtio_gpu_fence_resp(cmd->cmd_hdr.fence_id); + virtio_gpu_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA); + QTAILQ_REMOVE(&g->fenceq, cmd, next); + g_free(cmd); + g->inflight--; + if (virtio_gpu_stats_enabled(g->conf)) { + fprintf(stderr, "inflight: %3d (-)\r", g->inflight); + } + } +} + +static virgl_renderer_gl_context +virgl_create_context(void *opaque, int scanout_idx, + struct virgl_renderer_gl_ctx_param *params) +{ + VirtIOGPU *g = opaque; + QEMUGLContext ctx; + QEMUGLParams qparams; + + qparams.major_ver = params->major_ver; + qparams.minor_ver = params->minor_ver; + + ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams); + return (virgl_renderer_gl_context)ctx; +} + +static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx) +{ + VirtIOGPU *g = opaque; + QEMUGLContext qctx = (QEMUGLContext)ctx; + + dpy_gl_ctx_destroy(g->scanout[0].con, qctx); +} + +static int virgl_make_context_current(void *opaque, int scanout_idx, + virgl_renderer_gl_context ctx) +{ + VirtIOGPU *g = opaque; + QEMUGLContext qctx = (QEMUGLContext)ctx; + + return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx); +} + +static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = { + .version = 1, + .write_fence = virgl_write_fence, + .create_gl_context = virgl_create_context, + .destroy_gl_context = virgl_destroy_context, + .make_current = virgl_make_context_current, +}; + +static void virtio_gpu_print_stats(void *opaque) +{ + VirtIOGPU *g = opaque; + + if (g->stats.requests) { + fprintf(stderr, "stats: vq req %4d, %3d -- 3D %4d (%5d)\n", + g->stats.requests, + g->stats.max_inflight, + g->stats.req_3d, + g->stats.bytes_3d); + g->stats.requests = 0; + g->stats.max_inflight = 0; + g->stats.req_3d = 0; + g->stats.bytes_3d = 0; + } else { + fprintf(stderr, "stats: idle\r"); + } + timer_mod(g->print_stats, 
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); +} + +static void virtio_gpu_fence_poll(void *opaque) +{ + VirtIOGPU *g = opaque; + + virgl_renderer_poll(); + if (g->inflight) { + timer_mod(g->fence_poll, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 10); + } +} + +void virtio_gpu_virgl_fence_poll(VirtIOGPU *g) +{ + virtio_gpu_fence_poll(g); +} + +void virtio_gpu_virgl_reset(VirtIOGPU *g) +{ + int i; + + /* virgl_renderer_reset() ??? */ + for (i = 0; i < g->conf.max_outputs; i++) { + if (i != 0) { + dpy_gfx_replace_surface(g->scanout[i].con, NULL); + } + dpy_gl_scanout(g->scanout[i].con, 0, false, 0, 0, 0, 0); + } +} + +int virtio_gpu_virgl_init(VirtIOGPU *g) +{ + int ret; + + ret = virgl_renderer_init(g, 0, &virtio_gpu_3d_cbs); + if (ret != 0) { + return ret; + } + + g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_fence_poll, g); + + if (virtio_gpu_stats_enabled(g->conf)) { + g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL, + virtio_gpu_print_stats, g); + timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000); + } + return 0; +} + +#endif /* CONFIG_VIRGL */ diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c index 5bc62cf344..eef137f813 100644 --- a/hw/display/virtio-gpu-pci.c +++ b/hw/display/virtio-gpu-pci.c @@ -6,8 +6,8 @@ * Authors: * Dave Airlie * - * This work is licensed under the terms of the GNU GPL, version 2. See - * the COPYING file in the top-level directory. + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. * */ #include "hw/pci/pci.h" diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index a67d927f56..a836ce3859 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c @@ -7,7 +7,7 @@ * Dave Airlie <airlied@redhat.com> * Gerd Hoffmann <kraxel@redhat.com> * - * This work is licensed under the terms of the GNU GPL, version 2. + * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ @@ -22,6 +22,23 @@ static struct virtio_gpu_simple_resource* virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); +#ifdef CONFIG_VIRGL +#include "virglrenderer.h" +#define VIRGL(_g, _virgl, _simple, ...) \ + do { \ + if (_g->use_virgl_renderer) { \ + _virgl(__VA_ARGS__); \ + } else { \ + _simple(__VA_ARGS__); \ + } \ + } while (0) +#else +#define VIRGL(_g, _virgl, _simple, ...) 
\ + do { \ + _simple(__VA_ARGS__); \ + } while (0) +#endif + static void update_cursor_data_simple(VirtIOGPU *g, struct virtio_gpu_scanout *s, uint32_t resource_id) @@ -45,16 +62,49 @@ static void update_cursor_data_simple(VirtIOGPU *g, pixels * sizeof(uint32_t)); } +#ifdef CONFIG_VIRGL + +static void update_cursor_data_virgl(VirtIOGPU *g, + struct virtio_gpu_scanout *s, + uint32_t resource_id) +{ + uint32_t width, height; + uint32_t pixels, *data; + + data = virgl_renderer_get_cursor_data(resource_id, &width, &height); + if (!data) { + return; + } + + if (width != s->current_cursor->width || + height != s->current_cursor->height) { + return; + } + + pixels = s->current_cursor->width * s->current_cursor->height; + memcpy(s->current_cursor->data, data, pixels * sizeof(uint32_t)); + free(data); +} + +#endif + static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) { struct virtio_gpu_scanout *s; + bool move = cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR; if (cursor->pos.scanout_id >= g->conf.max_outputs) { return; } s = &g->scanout[cursor->pos.scanout_id]; - if (cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR) { + trace_virtio_gpu_update_cursor(cursor->pos.scanout_id, + cursor->pos.x, + cursor->pos.y, + move ? "move" : "update", + cursor->resource_id); + + if (move) { if (!s->current_cursor) { s->current_cursor = cursor_alloc(64, 64); } @@ -63,7 +113,8 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) s->current_cursor->hot_y = cursor->hot_y; if (cursor->resource_id > 0) { - update_cursor_data_simple(g, s, cursor->resource_id); + VIRGL(g, update_cursor_data_virgl, update_cursor_data_simple, + g, s, cursor->resource_id); } dpy_cursor_define(s->con, s->current_cursor); } @@ -92,9 +143,23 @@ static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features, Error **errp) { + VirtIOGPU *g = VIRTIO_GPU(vdev); + + if (virtio_gpu_virgl_enabled(g->conf)) { + features |= (1 << VIRTIO_GPU_FEATURE_VIRGL); + } return features; } +static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features) +{ + static const uint32_t virgl = (1 << VIRTIO_GPU_FEATURE_VIRGL); + VirtIOGPU *g = VIRTIO_GPU(vdev); + + g->use_virgl_renderer = ((features & virgl) == virgl); + trace_virtio_gpu_features(g->use_virgl_renderer); +} + static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type) { g->virtio_config.events_read |= event_type; @@ -563,7 +628,6 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab, __func__, ab->resource_id, i); virtio_gpu_cleanup_mapping_iov(*iov, i); g_free(ents); - g_free(*iov); *iov = NULL; return -1; } @@ -580,12 +644,12 @@ void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count) cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1, iov[i].iov_len); } + g_free(iov); } static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res) { virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt); - g_free(res->iov); res->iov = NULL; res->iov_cnt = 0; } @@ -699,25 +763,43 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) return; } +#ifdef CONFIG_VIRGL + if (!g->renderer_inited && g->use_virgl_renderer) { + virtio_gpu_virgl_init(g); + g->renderer_inited = true; + } +#endif + cmd = g_new(struct virtio_gpu_ctrl_command, 1); while (virtqueue_pop(vq, &cmd->elem)) { cmd->vq = vq; cmd->error = 0; cmd->finished = false; - g->stats.requests++; + if 
(virtio_gpu_stats_enabled(g->conf)) { + g->stats.requests++; + } - virtio_gpu_simple_process_cmd(g, cmd); + VIRGL(g, virtio_gpu_virgl_process_cmd, virtio_gpu_simple_process_cmd, + g, cmd); if (!cmd->finished) { QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next); - g->stats.inflight++; - if (g->stats.max_inflight < g->stats.inflight) { - g->stats.max_inflight = g->stats.inflight; + g->inflight++; + if (virtio_gpu_stats_enabled(g->conf)) { + if (g->stats.max_inflight < g->inflight) { + g->stats.max_inflight = g->inflight; + } + fprintf(stderr, "inflight: %3d (+)\r", g->inflight); } - fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight); cmd = g_new(struct virtio_gpu_ctrl_command, 1); } } g_free(cmd); + +#ifdef CONFIG_VIRGL + if (g->use_virgl_renderer) { + virtio_gpu_virgl_fence_poll(g); + } +#endif } static void virtio_gpu_ctrl_bh(void *opaque) @@ -804,6 +886,7 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(qdev); VirtIOGPU *g = VIRTIO_GPU(qdev); + bool have_virgl; int i; g->config_size = sizeof(struct virtio_gpu_config); @@ -814,8 +897,25 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->req_state[0].width = 1024; g->req_state[0].height = 768; - g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); - g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); + g->use_virgl_renderer = false; +#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN) + have_virgl = false; +#else + have_virgl = display_opengl; +#endif + if (!have_virgl) { + g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED); + } + + if (virtio_gpu_virgl_enabled(g->conf)) { + /* use larger control queue in 3d mode */ + g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb); + g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); + g->virtio_config.num_capsets = 1; + } else { + g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); + g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); + } g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); @@ -869,10 +969,23 @@ static void virtio_gpu_reset(VirtIODevice *vdev) g->scanout[i].ds = NULL; } g->enabled_output_bitmask = 1; + +#ifdef CONFIG_VIRGL + if (g->use_virgl_renderer) { + virtio_gpu_virgl_reset(g); + g->use_virgl_renderer = 0; + } +#endif } static Property virtio_gpu_properties[] = { DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1), +#ifdef CONFIG_VIRGL + DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags, + VIRTIO_GPU_FLAG_VIRGL_ENABLED, true), + DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags, + VIRTIO_GPU_FLAG_STATS_ENABLED, false), +#endif DEFINE_PROP_END_OF_LIST(), }; @@ -885,6 +998,7 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data) vdc->get_config = virtio_gpu_get_config; vdc->set_config = virtio_gpu_set_config; vdc->get_features = virtio_gpu_get_features; + vdc->set_features = virtio_gpu_set_features; vdc->reset = virtio_gpu_reset; @@ -917,3 +1031,14 @@ QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72); +QEMU_BUILD_BUG_ON(sizeof(struct 
virtio_gpu_ctx_create) != 96); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24); diff --git a/hw/i386/kvm/pci-assign.c b/hw/i386/kvm/pci-assign.c index b1beaa66b2..44beee3a05 100644 --- a/hw/i386/kvm/pci-assign.c +++ b/hw/i386/kvm/pci-assign.c @@ -22,7 +22,6 @@ */ #include <stdio.h> #include <unistd.h> -#include <sys/io.h> #include <sys/mman.h> #include <sys/types.h> #include <sys/stat.h> diff --git a/hw/i386/pc.c b/hw/i386/pc.c index 9275297adc..682867a8a9 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -985,6 +985,10 @@ static void load_linux(PCMachineState *pcms, setup_size = 4; } setup_size = (setup_size+1)*512; + if (setup_size > kernel_size) { + fprintf(stderr, "qemu: invalid kernel header\n"); + exit(1); + } kernel_size -= setup_size; setup = g_malloc(setup_size); diff --git a/hw/input/Makefile.objs b/hw/input/Makefile.objs index 624ba7ea40..7715d7230d 100644 --- a/hw/input/Makefile.objs +++ b/hw/input/Makefile.objs @@ -8,9 +8,9 @@ common-obj-$(CONFIG_STELLARIS_INPUT) += stellaris_input.o common-obj-$(CONFIG_TSC2005) += tsc2005.o common-obj-$(CONFIG_VMMOUSE) += vmmouse.o -ifeq ($(CONFIG_LINUX),y) common-obj-$(CONFIG_VIRTIO) += virtio-input.o common-obj-$(CONFIG_VIRTIO) += virtio-input-hid.o +ifeq ($(CONFIG_LINUX),y) common-obj-$(CONFIG_VIRTIO) += virtio-input-host.o endif diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 67881c7109..9ff5796414 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -848,7 +848,7 @@ static target_ulong h_xirr_x(PowerPCCPU *cpu, sPAPRMachineState *spapr, uint32_t xirr = icp_accept(ss); args[0] = xirr; - args[1] = cpu_get_real_ticks(); + args[1] = cpu_get_host_ticks(); return H_SUCCESS; } diff --git a/hw/mem/pc-dimm.c b/hw/mem/pc-dimm.c index 6cc6ac30e7..506fe0d2a8 100644 --- a/hw/mem/pc-dimm.c +++ b/hw/mem/pc-dimm.c @@ -417,10 +417,11 @@ static void pc_dimm_realize(DeviceState *dev, Error **errp) error_setg(errp, "'" PC_DIMM_MEMDEV_PROP "' property is not set"); return; } - if ((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) { + if (((nb_numa_nodes > 0) && (dimm->node >= nb_numa_nodes)) || + (!nb_numa_nodes && dimm->node)) { error_setg(errp, "'DIMM property " PC_DIMM_NODE_PROP " has value %" PRIu32 "' which exceeds the number of numa nodes: %d", - dimm->node, nb_numa_nodes); + dimm->node, nb_numa_nodes ? 
nb_numa_nodes : 1); return; } } diff --git a/hw/misc/arm_integrator_debug.c b/hw/misc/arm_integrator_debug.c index 99b720fbb9..6d9dd74e38 100644 --- a/hw/misc/arm_integrator_debug.c +++ b/hw/misc/arm_integrator_debug.c @@ -79,7 +79,7 @@ static void intdbg_control_init(Object *obj) SysBusDevice *sd = SYS_BUS_DEVICE(obj); IntegratorDebugState *s = INTEGRATOR_DEBUG(obj); - memory_region_init_io(&s->iomem, NULL, &intdbg_control_ops, + memory_region_init_io(&s->iomem, obj, &intdbg_control_ops, NULL, "dbg-leds", 0x1000000); sysbus_init_mmio(sd, &s->iomem); } diff --git a/hw/misc/macio/cuda.c b/hw/misc/macio/cuda.c index f3984e3a20..5d7043e99c 100644 --- a/hw/misc/macio/cuda.c +++ b/hw/misc/macio/cuda.c @@ -713,7 +713,7 @@ static void cuda_initfn(Object *obj) CUDAState *s = CUDA(obj); int i; - memory_region_init_io(&s->mem, NULL, &cuda_ops, s, "cuda", 0x2000); + memory_region_init_io(&s->mem, obj, &cuda_ops, s, "cuda", 0x2000); sysbus_init_mmio(d, &s->mem); sysbus_init_irq(d, &s->irq); diff --git a/hw/misc/macio/macio.c b/hw/misc/macio/macio.c index e3c0242d41..c661f86c21 100644 --- a/hw/misc/macio/macio.c +++ b/hw/misc/macio/macio.c @@ -105,10 +105,10 @@ static void macio_escc_legacy_setup(MacIOState *macio_state) 0xF0, 0xE0, }; - memory_region_init(escc_legacy, NULL, "escc-legacy", 256); + memory_region_init(escc_legacy, OBJECT(macio_state), "escc-legacy", 256); for (i = 0; i < ARRAY_SIZE(maps); i += 2) { MemoryRegion *port = g_new(MemoryRegion, 1); - memory_region_init_alias(port, NULL, "escc-legacy-port", + memory_region_init_alias(port, OBJECT(macio_state), "escc-legacy-port", macio_state->escc_mem, maps[i+1], 0x2); memory_region_add_subregion(escc_legacy, maps[i], port); } @@ -131,6 +131,10 @@ static void macio_common_realize(PCIDevice *d, Error **errp) MacIOState *s = MACIO(d); SysBusDevice *sysbus_dev; Error *err = NULL; + MemoryRegion *dbdma_mem; + + s->dbdma = DBDMA_init(&dbdma_mem); + memory_region_add_subregion(&s->bar, 0x08000, dbdma_mem); object_property_set_bool(OBJECT(&s->cuda), true, "realized", &err); if (err) { @@ -328,16 +332,12 @@ static void macio_newworld_init(Object *obj) static void macio_instance_init(Object *obj) { MacIOState *s = MACIO(obj); - MemoryRegion *dbdma_mem; - memory_region_init(&s->bar, NULL, "macio", 0x80000); + memory_region_init(&s->bar, obj, "macio", 0x80000); object_initialize(&s->cuda, sizeof(s->cuda), TYPE_CUDA); qdev_set_parent_bus(DEVICE(&s->cuda), sysbus_get_default()); object_property_add_child(obj, "cuda", OBJECT(&s->cuda), NULL); - - s->dbdma = DBDMA_init(&dbdma_mem); - memory_region_add_subregion(&s->bar, 0x08000, dbdma_mem); } static const VMStateDescription vmstate_macio_oldworld = { diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c index 7e7bda4987..bb6fdc364d 100644 --- a/hw/net/rocker/rocker.c +++ b/hw/net/rocker/rocker.c @@ -1362,7 +1362,7 @@ static int pci_rocker_init(PCIDevice *dev) r->fp_ports = ROCKER_FP_PORTS_MAX; } - r->rings = g_malloc(sizeof(DescRing *) * rocker_pci_ring_count(r)); + r->rings = g_new(DescRing *, rocker_pci_ring_count(r)); if (!r->rings) { goto err_rings_alloc; } diff --git a/hw/net/rocker/rocker_desc.c b/hw/net/rocker/rocker_desc.c index b5c0b4a241..5e697b19e3 100644 --- a/hw/net/rocker/rocker_desc.c +++ b/hw/net/rocker/rocker_desc.c @@ -142,7 +142,7 @@ bool desc_ring_set_size(DescRing *ring, uint32_t size) ring->size = size; ring->head = ring->tail = 0; - ring->info = g_realloc(ring->info, size * sizeof(DescInfo)); + ring->info = g_renew(DescInfo, ring->info, size); if (!ring->info) { return false; } 
@@ -345,7 +345,7 @@ DescRing *desc_ring_alloc(Rocker *r, int index) { DescRing *ring; - ring = g_malloc0(sizeof(DescRing)); + ring = g_new0(DescRing, 1); if (!ring) { return NULL; } diff --git a/hw/net/rocker/rocker_fp.c b/hw/net/rocker/rocker_fp.c index c693ae5081..5906396b9d 100644 --- a/hw/net/rocker/rocker_fp.c +++ b/hw/net/rocker/rocker_fp.c @@ -218,7 +218,7 @@ FpPort *fp_port_alloc(Rocker *r, char *sw_name, MACAddr *start_mac, unsigned int index, NICPeers *peers) { - FpPort *port = g_malloc0(sizeof(FpPort)); + FpPort *port = g_new0(FpPort, 1); if (!port) { return NULL; diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c index 874fb01d69..1ad2791965 100644 --- a/hw/net/rocker/rocker_of_dpa.c +++ b/hw/net/rocker/rocker_of_dpa.c @@ -367,7 +367,7 @@ static OfDpaFlow *of_dpa_flow_alloc(uint64_t cookie) OfDpaFlow *flow; int64_t now = qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) / 1000; - flow = g_malloc0(sizeof(OfDpaFlow)); + flow = g_new0(OfDpaFlow, 1); if (!flow) { return NULL; } @@ -811,7 +811,7 @@ static int of_dpa_group_get_stats(OfDpa *of_dpa, uint32_t id) static OfDpaGroup *of_dpa_group_alloc(uint32_t id) { - OfDpaGroup *group = g_malloc0(sizeof(OfDpaGroup)); + OfDpaGroup *group = g_new0(OfDpaGroup, 1); if (!group) { return NULL; @@ -2039,15 +2039,14 @@ static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group, group->l2_flood.group_count = rocker_tlv_get_le16(group_tlvs[ROCKER_TLV_OF_DPA_GROUP_COUNT]); - tlvs = g_malloc0((group->l2_flood.group_count + 1) * - sizeof(RockerTlv *)); + tlvs = g_new0(RockerTlv *, group->l2_flood.group_count + 1); if (!tlvs) { return -ROCKER_ENOMEM; } g_free(group->l2_flood.group_ids); group->l2_flood.group_ids = - g_malloc0(group->l2_flood.group_count * sizeof(uint32_t)); + g_new0(uint32_t, group->l2_flood.group_count); if (!group->l2_flood.group_ids) { err = -ROCKER_ENOMEM; goto err_out; diff --git a/hw/pci-host/versatile.c b/hw/pci-host/versatile.c index 6d23553094..7172b90958 100644 --- a/hw/pci-host/versatile.c +++ b/hw/pci-host/versatile.c @@ -500,6 +500,8 @@ static void pci_vpb_class_init(ObjectClass *klass, void *data) dc->reset = pci_vpb_reset; dc->vmsd = &pci_vpb_vmstate; dc->props = pci_vpb_properties; + /* Reason: object_unref() hangs */ + dc->cannot_destroy_with_object_finalize_yet = true; } static const TypeInfo pci_vpb_info = { @@ -521,10 +523,19 @@ static void pci_realview_init(Object *obj) s->mem_win_size[2] = 0x08000000; } +static void pci_realview_class_init(ObjectClass *class, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(class); + + /* Reason: object_unref() hangs */ + dc->cannot_destroy_with_object_finalize_yet = true; +} + static const TypeInfo pci_realview_info = { .name = "realview_pci", .parent = TYPE_VERSATILE_PCI, .instance_init = pci_realview_init, + .class_init = pci_realview_class_init, }; static void versatile_pci_register_types(void) diff --git a/hw/pcmcia/pxa2xx.c b/hw/pcmcia/pxa2xx.c index a7e187743d..812716e1c8 100644 --- a/hw/pcmcia/pxa2xx.c +++ b/hw/pcmcia/pxa2xx.c @@ -163,7 +163,7 @@ static void pxa2xx_pcmcia_initfn(Object *obj) sysbus_init_mmio(sbd, &s->container_mem); /* Socket I/O Memory Space */ - memory_region_init_io(&s->iomem, NULL, &pxa2xx_pcmcia_io_ops, s, + memory_region_init_io(&s->iomem, obj, &pxa2xx_pcmcia_io_ops, s, "pxa2xx-pcmcia-io", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x00000000, &s->iomem); @@ -171,13 +171,13 @@ static void pxa2xx_pcmcia_initfn(Object *obj) /* Then next 64 MB is reserved */ /* Socket Attribute Memory Space */ - 
memory_region_init_io(&s->attr_iomem, NULL, &pxa2xx_pcmcia_attr_ops, s, + memory_region_init_io(&s->attr_iomem, obj, &pxa2xx_pcmcia_attr_ops, s, "pxa2xx-pcmcia-attribute", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x08000000, &s->attr_iomem); /* Socket Common Memory Space */ - memory_region_init_io(&s->common_iomem, NULL, &pxa2xx_pcmcia_common_ops, s, + memory_region_init_io(&s->common_iomem, obj, &pxa2xx_pcmcia_common_ops, s, "pxa2xx-pcmcia-common", 0x04000000); memory_region_add_subregion(&s->container_mem, 0x0c000000, &s->common_iomem); diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index b77e30357a..2c604ef49d 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -834,7 +834,7 @@ static void cpu_ppc_set_tb_clk (void *opaque, uint32_t freq) static void timebase_pre_save(void *opaque) { PPCTimebase *tb = opaque; - uint64_t ticks = cpu_get_real_ticks(); + uint64_t ticks = cpu_get_host_ticks(); PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu); if (!first_ppc_cpu->env.tb_env) { @@ -878,7 +878,7 @@ static int timebase_post_load(void *opaque, int version_id) NANOSECONDS_PER_SECOND); guest_tb = tb_remote->guest_timebase + MIN(0, migration_duration_tb); - tb_off_adj = guest_tb - cpu_get_real_ticks(); + tb_off_adj = guest_tb - cpu_get_host_ticks(); tb_off = first_ppc_cpu->env.tb_env->tb_offset; trace_ppc_tb_adjust(tb_off, tb_off_adj, tb_off_adj - tb_off, diff --git a/hw/s390x/event-facility.c b/hw/s390x/event-facility.c index ef2a05160a..907b48560c 100644 --- a/hw/s390x/event-facility.c +++ b/hw/s390x/event-facility.c @@ -27,8 +27,6 @@ typedef struct SCLPEventsBus { struct SCLPEventFacility { SysBusDevice parent_obj; SCLPEventsBus sbus; - SCLPEvent quiesce_event; - SCLPEvent cpu_hotplug_event; /* guest' receive mask */ unsigned int receive_mask; }; @@ -347,19 +345,21 @@ static void init_event_facility(Object *obj) { SCLPEventFacility *event_facility = EVENT_FACILITY(obj); DeviceState *sdev = DEVICE(obj); + Object *new; /* Spawn a new bus for SCLP events */ qbus_create_inplace(&event_facility->sbus, sizeof(event_facility->sbus), TYPE_SCLP_EVENTS_BUS, sdev, NULL); - object_initialize(&event_facility->quiesce_event, sizeof(SCLPEvent), - TYPE_SCLP_QUIESCE); - qdev_set_parent_bus(DEVICE(&event_facility->quiesce_event), - &event_facility->sbus.qbus); - object_initialize(&event_facility->cpu_hotplug_event, sizeof(SCLPEvent), - TYPE_SCLP_CPU_HOTPLUG); - qdev_set_parent_bus(DEVICE(&event_facility->cpu_hotplug_event), - &event_facility->sbus.qbus); + new = object_new(TYPE_SCLP_QUIESCE); + object_property_add_child(obj, TYPE_SCLP_QUIESCE, new, NULL); + object_unref(new); + qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus); + + new = object_new(TYPE_SCLP_CPU_HOTPLUG); + object_property_add_child(obj, TYPE_SCLP_CPU_HOTPLUG, new, NULL); + object_unref(new); + qdev_set_parent_bus(DEVICE(new), &event_facility->sbus.qbus); /* the facility will automatically realize the devices via the bus */ } diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index c53ebc1ae1..6195f132fc 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -35,7 +35,7 @@ typedef struct S390CcwMachineState { bool dea_key_wrap; } S390CcwMachineState; -void io_subsystem_reset(void) +void subsystem_reset(void) { DeviceState *css, *sclp, *flic, *diag288; diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index 65304cff5e..8b0f9f0df5 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -22,6 +22,7 @@ * with this program; if not, see <http://www.gnu.org/licenses/>. 
*/ +#include <inttypes.h> #include "hw/hw.h" #include "sysemu/block-backend.h" #include "sysemu/blockdev.h" @@ -36,24 +37,24 @@ #define SDHC_DEBUG 0 #endif -#if SDHC_DEBUG == 0 - #define DPRINT_L1(fmt, args...) do { } while (0) - #define DPRINT_L2(fmt, args...) do { } while (0) - #define ERRPRINT(fmt, args...) do { } while (0) -#elif SDHC_DEBUG == 1 - #define DPRINT_L1(fmt, args...) \ - do {fprintf(stderr, "QEMU SDHC: "fmt, ## args); } while (0) - #define DPRINT_L2(fmt, args...) do { } while (0) - #define ERRPRINT(fmt, args...) \ - do {fprintf(stderr, "QEMU SDHC ERROR: "fmt, ## args); } while (0) -#else - #define DPRINT_L1(fmt, args...) \ - do {fprintf(stderr, "QEMU SDHC: "fmt, ## args); } while (0) - #define DPRINT_L2(fmt, args...) \ - do {fprintf(stderr, "QEMU SDHC: "fmt, ## args); } while (0) - #define ERRPRINT(fmt, args...) \ - do {fprintf(stderr, "QEMU SDHC ERROR: "fmt, ## args); } while (0) -#endif +#define DPRINT_L1(fmt, args...) \ + do { \ + if (SDHC_DEBUG) { \ + fprintf(stderr, "QEMU SDHC: " fmt, ## args); \ + } \ + } while (0) +#define DPRINT_L2(fmt, args...) \ + do { \ + if (SDHC_DEBUG > 1) { \ + fprintf(stderr, "QEMU SDHC: " fmt, ## args); \ + } \ + } while (0) +#define ERRPRINT(fmt, args...) \ + do { \ + if (SDHC_DEBUG) { \ + fprintf(stderr, "QEMU SDHC ERROR: " fmt, ## args); \ + } \ + } while (0) /* Default SD/MMC host controller features information, which will be * presented in CAPABILITIES register of generic SD host controller at reset. @@ -719,7 +720,8 @@ static void sdhci_do_adma(SDHCIState *s) break; case SDHC_ADMA_ATTR_ACT_LINK: /* link to next descriptor table */ s->admasysaddr = dscr.addr; - DPRINT_L1("ADMA link: admasysaddr=0x%lx\n", s->admasysaddr); + DPRINT_L1("ADMA link: admasysaddr=0x%" PRIx64 "\n", + s->admasysaddr); break; default: s->admasysaddr += dscr.incr; @@ -727,7 +729,8 @@ static void sdhci_do_adma(SDHCIState *s) } if (dscr.attr & SDHC_ADMA_ATTR_INT) { - DPRINT_L1("ADMA interrupt: admasysaddr=0x%lx\n", s->admasysaddr); + DPRINT_L1("ADMA interrupt: admasysaddr=0x%" PRIx64 "\n", + s->admasysaddr); if (s->norintstsen & SDHC_NISEN_DMA) { s->norintsts |= SDHC_NIS_DMA; } diff --git a/hw/timer/m48t59.c b/hw/timer/m48t59.c index 8ab683ddac..b3df8f9ec9 100644 --- a/hw/timer/m48t59.c +++ b/hw/timer/m48t59.c @@ -590,10 +590,8 @@ static void nvram_writel (void *opaque, hwaddr addr, uint32_t value) static uint32_t nvram_readb (void *opaque, hwaddr addr) { M48t59State *NVRAM = opaque; - uint32_t retval; - retval = m48t59_read(NVRAM, addr); - return retval; + return m48t59_read(NVRAM, addr); } static uint32_t nvram_readw (void *opaque, hwaddr addr) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 0d341a33ff..6797208cc1 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -312,11 +312,15 @@ out: rcu_read_unlock(); } +static hwaddr vfio_container_granularity(VFIOContainer *container) +{ + return (hwaddr)1 << ctz64(container->iova_pgsizes); +} + static void vfio_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - iommu_data.type1.listener); + VFIOContainer *container = container_of(listener, VFIOContainer, listener); hwaddr iova, end; Int128 llend; void *vaddr; @@ -344,14 +348,22 @@ static void vfio_listener_region_add(MemoryListener *listener, if (int128_ge(int128_make64(iova), llend)) { return; } + end = int128_get64(llend); + + if ((iova < container->min_iova) || ((end - 1) > container->max_iova)) { + error_report("vfio: IOMMU container %p can't map guest IOVA 
region" + " 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, + container, iova, end - 1); + ret = -EFAULT; + goto fail; + } memory_region_ref(section->mr); if (memory_region_is_iommu(section->mr)) { VFIOGuestIOMMU *giommu; - trace_vfio_listener_region_add_iommu(iova, - int128_get64(int128_sub(llend, int128_one()))); + trace_vfio_listener_region_add_iommu(iova, end - 1); /* * FIXME: We should do some checking to see if the * capabilities of the host VFIO IOMMU are adequate to model @@ -362,33 +374,22 @@ static void vfio_listener_region_add(MemoryListener *listener, * would be the right place to wire that up (tell the KVM * device emulation the VFIO iommu handles to use). */ - /* - * This assumes that the guest IOMMU is empty of - * mappings at this point. - * - * One way of doing this is: - * 1. Avoid sharing IOMMUs between emulated devices or different - * IOMMU groups. - * 2. Implement VFIO_IOMMU_ENABLE in the host kernel to fail if - * there are some mappings in IOMMU. - * - * VFIO on SPAPR does that. Other IOMMU models may do that different, - * they must make sure there are no existing mappings or - * loop through existing mappings to map them into VFIO. - */ giommu = g_malloc0(sizeof(*giommu)); giommu->iommu = section->mr; giommu->container = container; giommu->n.notify = vfio_iommu_map_notify; QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next); + memory_region_register_iommu_notifier(giommu->iommu, &giommu->n); + memory_region_iommu_replay(giommu->iommu, &giommu->n, + vfio_container_granularity(container), + false); return; } /* Here we assume that memory_region_is_ram(section->mr)==true */ - end = int128_get64(llend); vaddr = memory_region_get_ram_ptr(section->mr) + section->offset_within_region + (iova - section->offset_within_address_space); @@ -400,27 +401,30 @@ static void vfio_listener_region_add(MemoryListener *listener, error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", " "0x%"HWADDR_PRIx", %p) = %d (%m)", container, iova, end - iova, vaddr, ret); + goto fail; + } - /* - * On the initfn path, store the first error in the container so we - * can gracefully fail. Runtime, there's not much we can do other - * than throw a hardware error. - */ - if (!container->iommu_data.type1.initialized) { - if (!container->iommu_data.type1.error) { - container->iommu_data.type1.error = ret; - } - } else { - hw_error("vfio: DMA mapping failed, unable to continue"); + return; + +fail: + /* + * On the initfn path, store the first error in the container so we + * can gracefully fail. Runtime, there's not much we can do other + * than throw a hardware error. 
+ */ + if (!container->initialized) { + if (!container->error) { + container->error = ret; } + } else { + hw_error("vfio: DMA mapping failed, unable to continue"); } } static void vfio_listener_region_del(MemoryListener *listener, MemoryRegionSection *section) { - VFIOContainer *container = container_of(listener, VFIOContainer, - iommu_data.type1.listener); + VFIOContainer *container = container_of(listener, VFIOContainer, listener); hwaddr iova, end; int ret; @@ -485,7 +489,7 @@ static const MemoryListener vfio_memory_listener = { static void vfio_listener_release(VFIOContainer *container) { - memory_listener_unregister(&container->iommu_data.type1.listener); + memory_listener_unregister(&container->listener); } int vfio_mmap_region(Object *obj, VFIORegion *region, @@ -668,6 +672,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU) || ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU)) { bool v2 = !!ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1v2_IOMMU); + struct vfio_iommu_type1_info info; ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); if (ret) { @@ -684,21 +689,27 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) goto free_container_exit; } - container->iommu_data.type1.listener = vfio_memory_listener; - container->iommu_data.release = vfio_listener_release; - - memory_listener_register(&container->iommu_data.type1.listener, - container->space->as); - - if (container->iommu_data.type1.error) { - ret = container->iommu_data.type1.error; - error_report("vfio: memory listener initialization failed for container"); - goto listener_release_exit; + /* + * FIXME: This assumes that a Type1 IOMMU can map any 64-bit + * IOVA whatsoever. That's not actually true, but the current + * kernel interface doesn't tell us what it can map, and the + * existing Type1 IOMMUs generally support any IOVA we're + * going to actually try in practice. + */ + container->min_iova = 0; + container->max_iova = (hwaddr)-1; + + /* Assume just 4K IOVA page size */ + container->iova_pgsizes = 0x1000; + info.argsz = sizeof(info); + ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info); + /* Ignore errors */ + if ((ret == 0) && (info.flags & VFIO_IOMMU_INFO_PGSIZES)) { + container->iova_pgsizes = info.iova_pgsizes; } - - container->iommu_data.type1.initialized = true; - } else if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_SPAPR_TCE_IOMMU)) { + struct vfio_iommu_spapr_tce_info info; + ret = ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &fd); if (ret) { error_report("vfio: failed to set group container: %m"); @@ -724,18 +735,41 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as) goto free_container_exit; } - container->iommu_data.type1.listener = vfio_memory_listener; - container->iommu_data.release = vfio_listener_release; - - memory_listener_register(&container->iommu_data.type1.listener, - container->space->as); + /* + * This only considers the host IOMMU's 32-bit window. 
At + * some point we need to add support for the optional 64-bit + * window and dynamic windows + */ + info.argsz = sizeof(info); + ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info); + if (ret) { + error_report("vfio: VFIO_IOMMU_SPAPR_TCE_GET_INFO failed: %m"); + ret = -errno; + goto free_container_exit; + } + container->min_iova = info.dma32_window_start; + container->max_iova = container->min_iova + info.dma32_window_size - 1; + /* Assume just 4K IOVA pages for now */ + container->iova_pgsizes = 0x1000; } else { error_report("vfio: No available IOMMU models"); ret = -EINVAL; goto free_container_exit; } + container->listener = vfio_memory_listener; + + memory_listener_register(&container->listener, container->space->as); + + if (container->error) { + ret = container->error; + error_report("vfio: memory listener initialization failed for container"); + goto listener_release_exit; + } + + container->initialized = true; + QLIST_INIT(&container->group_list); QLIST_INSERT_HEAD(&space->containers, container, next); @@ -774,9 +808,7 @@ static void vfio_disconnect_container(VFIOGroup *group) VFIOAddressSpace *space = container->space; VFIOGuestIOMMU *giommu, *tmp; - if (container->iommu_data.release) { - container->iommu_data.release(container); - } + vfio_listener_release(container); QLIST_REMOVE(container, next); QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) { diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c index a6726cd779..5c1156ca3b 100644 --- a/hw/vfio/platform.c +++ b/hw/vfio/platform.c @@ -32,6 +32,11 @@ * Functions used whatever the injection method */ +static inline bool vfio_irq_is_automasked(VFIOINTp *intp) +{ + return intp->flags & VFIO_IRQ_INFO_AUTOMASKED; +} + /** * vfio_init_intp - allocate, initialize the IRQ struct pointer * and add it into the list of IRQs @@ -57,18 +62,25 @@ static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, sysbus_init_irq(sbdev, &intp->qemuirq); /* Get an eventfd for trigger */ - ret = event_notifier_init(&intp->interrupt, 0); + intp->interrupt = g_malloc0(sizeof(EventNotifier)); + ret = event_notifier_init(intp->interrupt, 0); if (ret) { + g_free(intp->interrupt); g_free(intp); error_report("vfio: Error: trigger event_notifier_init failed "); return NULL; } - /* Get an eventfd for resample/unmask */ - ret = event_notifier_init(&intp->unmask, 0); - if (ret) { - g_free(intp); - error_report("vfio: Error: resamplefd event_notifier_init failed"); - return NULL; + if (vfio_irq_is_automasked(intp)) { + /* Get an eventfd for resample/unmask */ + intp->unmask = g_malloc0(sizeof(EventNotifier)); + ret = event_notifier_init(intp->unmask, 0); + if (ret) { + g_free(intp->interrupt); + g_free(intp->unmask); + g_free(intp); + error_report("vfio: Error: resamplefd event_notifier_init failed"); + return NULL; + } } QLIST_INSERT_HEAD(&vdev->intp_list, intp, next); @@ -100,7 +112,7 @@ static int vfio_set_trigger_eventfd(VFIOINTp *intp, irq_set->start = 0; irq_set->count = 1; pfd = (int32_t *)&irq_set->data; - *pfd = event_notifier_get_fd(&intp->interrupt); + *pfd = event_notifier_get_fd(intp->interrupt); qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp); ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set); g_free(irq_set); @@ -182,7 +194,7 @@ static void vfio_intp_mmap_enable(void *opaque) static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp) { trace_vfio_platform_intp_inject_pending_lockheld(intp->pin, - event_notifier_get_fd(&intp->interrupt)); + event_notifier_get_fd(intp->interrupt)); intp->state = 
VFIO_IRQ_ACTIVE; @@ -224,18 +236,18 @@ static void vfio_intp_interrupt(VFIOINTp *intp) trace_vfio_intp_interrupt_set_pending(intp->pin); QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue, intp, pqnext); - ret = event_notifier_test_and_clear(&intp->interrupt); + ret = event_notifier_test_and_clear(intp->interrupt); qemu_mutex_unlock(&vdev->intp_mutex); return; } trace_vfio_platform_intp_interrupt(intp->pin, - event_notifier_get_fd(&intp->interrupt)); + event_notifier_get_fd(intp->interrupt)); - ret = event_notifier_test_and_clear(&intp->interrupt); + ret = event_notifier_test_and_clear(intp->interrupt); if (!ret) { error_report("Error when clearing fd=%d (ret = %d)", - event_notifier_get_fd(&intp->interrupt), ret); + event_notifier_get_fd(intp->interrupt), ret); } intp->state = VFIO_IRQ_ACTIVE; @@ -283,13 +295,13 @@ static void vfio_platform_eoi(VFIODevice *vbasedev) QLIST_FOREACH(intp, &vdev->intp_list, next) { if (intp->state == VFIO_IRQ_ACTIVE) { trace_vfio_platform_eoi(intp->pin, - event_notifier_get_fd(&intp->interrupt)); + event_notifier_get_fd(intp->interrupt)); intp->state = VFIO_IRQ_INACTIVE; /* deassert the virtual IRQ */ qemu_set_irq(intp->qemuirq, 0); - if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) { + if (vfio_irq_is_automasked(intp)) { /* unmasks the physical level-sensitive IRQ */ vfio_unmask_single_irqindex(vbasedev, intp->pin); } @@ -310,18 +322,29 @@ static void vfio_platform_eoi(VFIODevice *vbasedev) /** * vfio_start_eventfd_injection - starts the virtual IRQ injection using * user-side handled eventfds - * @intp: the IRQ struct pointer + * @sbdev: the sysbus device handle + * @irq: the qemu irq handle */ -static int vfio_start_eventfd_injection(VFIOINTp *intp) +static void vfio_start_eventfd_injection(SysBusDevice *sbdev, qemu_irq irq) { int ret; + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); + VFIOINTp *intp; + + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->qemuirq == irq) { + break; + } + } + assert(intp); ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt); if (ret) { - error_report("vfio: Error: Failed to pass IRQ fd to the driver: %m"); + error_report("vfio: failed to start eventfd signaling for IRQ %d: %m", + intp->pin); + abort(); } - return ret; } /* @@ -349,7 +372,7 @@ static int vfio_set_resample_eventfd(VFIOINTp *intp) irq_set->start = 0; irq_set->count = 1; pfd = (int32_t *)&irq_set->data; - *pfd = event_notifier_get_fd(&intp->unmask); + *pfd = event_notifier_get_fd(intp->unmask); qemu_set_fd_handler(*pfd, NULL, NULL, NULL); ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set); g_free(irq_set); @@ -359,6 +382,15 @@ static int vfio_set_resample_eventfd(VFIOINTp *intp) return ret; } +/** + * vfio_start_irqfd_injection - starts the virtual IRQ injection using + * irqfd + * + * @sbdev: the sysbus device handle + * @irq: the qemu irq handle + * + * In case the irqfd setup fails, we fallback to userspace handled eventfd + */ static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) { VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(sbdev); @@ -366,7 +398,7 @@ static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) if (!kvm_irqfds_enabled() || !kvm_resamplefds_enabled() || !vdev->irqfd_allowed) { - return; + goto fail_irqfd; } QLIST_FOREACH(intp, &vdev->intp_list, next) { @@ -376,39 +408,36 @@ static void vfio_start_irqfd_injection(SysBusDevice *sbdev, qemu_irq irq) } assert(intp); - /* Get to a known interrupt state */ - qemu_set_fd_handler(event_notifier_get_fd(&intp->interrupt), - NULL, NULL, vdev); - 
- vfio_mask_single_irqindex(&vdev->vbasedev, intp->pin); - qemu_set_irq(intp->qemuirq, 0); - - if (kvm_irqchip_add_irqfd_notifier(kvm_state, &intp->interrupt, - &intp->unmask, irq) < 0) { + if (kvm_irqchip_add_irqfd_notifier(kvm_state, intp->interrupt, + intp->unmask, irq) < 0) { goto fail_irqfd; } if (vfio_set_trigger_eventfd(intp, NULL) < 0) { goto fail_vfio; } - if (vfio_set_resample_eventfd(intp) < 0) { - goto fail_vfio; + if (vfio_irq_is_automasked(intp)) { + if (vfio_set_resample_eventfd(intp) < 0) { + goto fail_vfio; + } + trace_vfio_platform_start_level_irqfd_injection(intp->pin, + event_notifier_get_fd(intp->interrupt), + event_notifier_get_fd(intp->unmask)); + } else { + trace_vfio_platform_start_edge_irqfd_injection(intp->pin, + event_notifier_get_fd(intp->interrupt)); } - /* Let's resume injection with irqfd setup */ - vfio_unmask_single_irqindex(&vdev->vbasedev, intp->pin); - intp->kvm_accel = true; - trace_vfio_platform_start_irqfd_injection(intp->pin, - event_notifier_get_fd(&intp->interrupt), - event_notifier_get_fd(&intp->unmask)); return; fail_vfio: - kvm_irqchip_remove_irqfd_notifier(kvm_state, &intp->interrupt, irq); + kvm_irqchip_remove_irqfd_notifier(kvm_state, intp->interrupt, irq); + error_report("vfio: failed to start eventfd signaling for IRQ %d: %m", + intp->pin); + abort(); fail_irqfd: - vfio_start_eventfd_injection(intp); - vfio_unmask_single_irqindex(&vdev->vbasedev, intp->pin); + vfio_start_eventfd_injection(sbdev, irq); return; } @@ -646,7 +675,6 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp) VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); SysBusDevice *sbdev = SYS_BUS_DEVICE(dev); VFIODevice *vbasedev = &vdev->vbasedev; - VFIOINTp *intp; int i, ret; vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM; @@ -665,10 +693,6 @@ static void vfio_platform_realize(DeviceState *dev, Error **errp) vfio_map_region(vdev, i); sysbus_init_mmio(sbdev, &vdev->regions[i]->mem); } - - QLIST_FOREACH(intp, &vdev->intp_list, next) { - vfio_start_eventfd_injection(intp); - } } static const VMStateDescription vfio_platform_vmstate = { diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 6703806f83..e5c406d1d2 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -2134,14 +2134,6 @@ static void virtio_tablet_initfn(Object *obj) TYPE_VIRTIO_TABLET); } -static void virtio_host_initfn(Object *obj) -{ - VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj); - - virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), - TYPE_VIRTIO_INPUT_HOST); -} - static const TypeInfo virtio_input_pci_info = { .name = TYPE_VIRTIO_INPUT_PCI, .parent = TYPE_VIRTIO_PCI, @@ -2180,12 +2172,22 @@ static const TypeInfo virtio_tablet_pci_info = { .instance_init = virtio_tablet_initfn, }; +#ifdef CONFIG_LINUX +static void virtio_host_initfn(Object *obj) +{ + VirtIOInputHostPCI *dev = VIRTIO_INPUT_HOST_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_INPUT_HOST); +} + static const TypeInfo virtio_host_pci_info = { .name = TYPE_VIRTIO_INPUT_HOST_PCI, .parent = TYPE_VIRTIO_INPUT_PCI, .instance_size = sizeof(VirtIOInputHostPCI), .instance_init = virtio_host_initfn, }; +#endif /* virtio-pci-bus */ @@ -2233,7 +2235,9 @@ static void virtio_pci_register_types(void) type_register_static(&virtio_keyboard_pci_info); type_register_static(&virtio_mouse_pci_info); type_register_static(&virtio_tablet_pci_info); +#ifdef CONFIG_LINUX type_register_static(&virtio_host_pci_info); +#endif type_register_static(&virtio_pci_bus_info); 
type_register_static(&virtio_pci_info); #ifdef CONFIG_VIRTFS diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h index b6c442f522..801c23aef3 100644 --- a/hw/virtio/virtio-pci.h +++ b/hw/virtio/virtio-pci.h @@ -267,6 +267,8 @@ struct VirtIOInputHIDPCI { VirtIOInputHID vdev; }; +#ifdef CONFIG_LINUX + #define TYPE_VIRTIO_INPUT_HOST_PCI "virtio-input-host-pci" #define VIRTIO_INPUT_HOST_PCI(obj) \ OBJECT_CHECK(VirtIOInputHostPCI, (obj), TYPE_VIRTIO_INPUT_HOST_PCI) @@ -276,6 +278,8 @@ struct VirtIOInputHostPCI { VirtIOInputHost vdev; }; +#endif + /* * virtio-gpu-pci: This extends VirtioPCIProxy. */ |
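A note on the VIRGL() helper introduced in hw/display/virtio-gpu.c: it routes each operation to the virglrenderer-backed 3D handler when the guest negotiated the VIRGL feature, and to the built-in 2D handler otherwise, while compiling the 3D branch out entirely when CONFIG_VIRGL is absent. A minimal standalone sketch of that dispatch pattern (all names below are illustrative, not from the patch):

    #include <stdbool.h>
    #include <stdio.h>

    #define USE_VIRGL 1                      /* stand-in for CONFIG_VIRGL */

    #if USE_VIRGL
    #define DISPATCH(use3d, virgl_fn, simple_fn, ...) \
        do {                                          \
            if (use3d) {                              \
                virgl_fn(__VA_ARGS__);                \
            } else {                                  \
                simple_fn(__VA_ARGS__);               \
            }                                         \
        } while (0)
    #else
    /* without virgl support the 3D handler is never referenced,
     * so it need not even be compiled */
    #define DISPATCH(use3d, virgl_fn, simple_fn, ...) \
        do {                                          \
            simple_fn(__VA_ARGS__);                   \
        } while (0)
    #endif

    static void cursor_3d(int id) { printf("virgl cursor, resource %d\n", id); }
    static void cursor_2d(int id) { printf("simple cursor, resource %d\n", id); }

    int main(void)
    {
        bool use_virgl_renderer = true;      /* set from feature negotiation */
        DISPATCH(use_virgl_renderer, cursor_3d, cursor_2d, 7);
        return 0;
    }

The real macro takes the VirtIOGPU pointer and tests g->use_virgl_renderer, which the new virtio_gpu_set_features() callback sets when the guest accepts VIRTIO_GPU_FEATURE_VIRGL.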
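The rocker changes that swap g_malloc0(sizeof(T)) for g_new0(T, 1) and g_realloc() for g_renew() are more than cosmetic: the g_new family derives the allocation size from the pointed-to type and checks the element-count multiplication for overflow. A small sketch, assuming only glib (the DescInfo layout here is invented for illustration):

    #include <glib.h>

    typedef struct {
        guint32 id;
        guint16 ref_count;
    } DescInfo;

    int main(void)
    {
        /* returns DescInfo * directly; no hand-written sizeof arithmetic */
        DescInfo *info = g_new0(DescInfo, 16);

        /* grow the array; the size is again computed from the type */
        info = g_renew(DescInfo, info, 32);

        g_free(info);
        return 0;
    }

Worth noting: glib's allocators abort on failure rather than returning NULL, so the if (!ring->info) and if (!flow) checks the patch leaves in place can never fire; they are harmless but dead.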
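Finally, the new QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_...) != N) lines pin the guest-visible command layouts at compile time, so an accidental field or padding change breaks the build instead of the wire ABI. The same idea in plain C11, with an illustrative struct mirroring the 24-byte ctx_destroy layout (a command that is just the common header):

    #include <stdint.h>

    struct demo_ctrl_hdr {               /* mirrors virtio_gpu_ctrl_hdr */
        uint32_t type;
        uint32_t flags;
        uint64_t fence_id;
        uint32_t ctx_id;
        uint32_t padding;
    };

    /* fails to compile if the layout drifts from the expected 24 bytes */
    _Static_assert(sizeof(struct demo_ctrl_hdr) == 24,
                   "demo_ctrl_hdr must stay 24 bytes");

    int main(void) { return 0; }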