53 files changed, 2675 insertions, 371 deletions
diff --git a/block/blkdebug.c b/block/blkdebug.c index 3c30edba73..1e92607ef3 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -216,10 +216,9 @@ static int get_event_by_name(const char *name, BlkDebugEvent *event) struct add_rule_data { BDRVBlkdebugState *s; int action; - Error **errp; }; -static int add_rule(QemuOpts *opts, void *opaque) +static int add_rule(void *opaque, QemuOpts *opts, Error **errp) { struct add_rule_data *d = opaque; BDRVBlkdebugState *s = d->s; @@ -230,10 +229,10 @@ static int add_rule(QemuOpts *opts, void *opaque) /* Find the right event for the rule */ event_name = qemu_opt_get(opts, "event"); if (!event_name) { - error_setg(d->errp, "Missing event name for rule"); + error_setg(errp, "Missing event name for rule"); return -1; } else if (get_event_by_name(event_name, &event) < 0) { - error_setg(d->errp, "Invalid event name \"%s\"", event_name); + error_setg(errp, "Invalid event name \"%s\"", event_name); return -1; } @@ -319,8 +318,7 @@ static int read_config(BDRVBlkdebugState *s, const char *filename, d.s = s; d.action = ACTION_INJECT_ERROR; - d.errp = &local_err; - qemu_opts_foreach(&inject_error_opts, add_rule, &d, 1); + qemu_opts_foreach(&inject_error_opts, add_rule, &d, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; @@ -328,7 +326,7 @@ static int read_config(BDRVBlkdebugState *s, const char *filename, } d.action = ACTION_SET_STATE; - qemu_opts_foreach(&set_state_opts, add_rule, &d, 1); + qemu_opts_foreach(&set_state_opts, add_rule, &d, &local_err); if (local_err) { error_propagate(errp, local_err); ret = -EINVAL; diff --git a/docs/specs/fw_cfg.txt b/docs/specs/fw_cfg.txt index 6accd924bd..74351dd18f 100644 --- a/docs/specs/fw_cfg.txt +++ b/docs/specs/fw_cfg.txt @@ -203,3 +203,24 @@ completes fully overwriting the item's data. NOTE: This function is deprecated, and will be completely removed starting with QEMU v2.4. + +== Externally Provided Items == + +As of v2.4, "file" fw_cfg items (i.e., items with selector keys above +FW_CFG_FILE_FIRST, and with a corresponding entry in the fw_cfg file +directory structure) may be inserted via the QEMU command line, using +the following syntax: + + -fw_cfg [name=]<item_name>,file=<path> + +where <item_name> is the fw_cfg item name, and <path> is the location +on the host file system of a file containing the data to be inserted. + +NOTE: Users *SHOULD* choose item names beginning with the prefix "opt/" +when using the "-fw_cfg" command line option, to avoid conflicting with +item names used internally by QEMU. For instance: + + -fw_cfg name=opt/my_item_name,file=./my_blob.bin + +Similarly, QEMU developers *SHOULD NOT* use item names prefixed with +"opt/" when inserting items programmatically, e.g. via fw_cfg_add_file(). 
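The blkdebug conversion above and the qdev-properties-system.c hunk below both track the same API change: qemu_opts_foreach() now puts the opaque pointer first in its callback, hands each callback its own Error **, and replaces the old "abort on failure" integer flag with an Error ** argument on the caller side. A minimal sketch of the new contract, assuming QEMU's "qemu/option.h" and "qapi/error.h" headers; the helper names (validate_one, validate_all) and the "event" key are illustrative, not part of this patch:

    #include "qemu/option.h"   /* QemuOpts, QemuOptsList, qemu_opts_foreach() */
    #include "qapi/error.h"    /* Error, error_setg() */

    /* New-style loop callback: opaque first, per-iteration errp last.
     * Returning non-zero stops the iteration, and the value is
     * propagated back to the qemu_opts_foreach() caller. */
    static int validate_one(void *opaque, QemuOpts *opts, Error **errp)
    {
        const char *event = qemu_opt_get(opts, "event");

        if (!event) {
            error_setg(errp, "Missing event name for rule");
            return -1;          /* abort; errp carries the reason */
        }
        return 0;               /* continue with the next QemuOpts */
    }

    /* Caller side: the fourth argument is now an Error **, not an
     * int flag, so errors no longer have to be smuggled through the
     * opaque pointer the way the deleted struct add_rule_data did. */
    static int validate_all(QemuOptsList *list, Error **errp)
    {
        return qemu_opts_foreach(list, validate_one, NULL, errp);
    }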
diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index c413226a97..0309fe5767 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -389,7 +389,7 @@ void qdev_set_nic_properties(DeviceState *dev, NICInfo *nd) nd->instantiated = 1; } -static int qdev_add_one_global(QemuOpts *opts, void *opaque) +static int qdev_add_one_global(void *opaque, QemuOpts *opts, Error **errp) { GlobalProperty *g; @@ -404,5 +404,6 @@ static int qdev_add_one_global(QemuOpts *opts, void *opaque) void qemu_add_globals(void) { - qemu_opts_foreach(qemu_find_opts("global"), qdev_add_one_global, NULL, 0); + qemu_opts_foreach(qemu_find_opts("global"), + qdev_add_one_global, NULL, NULL); } diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs index 3ea106d9f3..61c80f319d 100644 --- a/hw/display/Makefile.objs +++ b/hw/display/Makefile.objs @@ -34,3 +34,5 @@ obj-$(CONFIG_CG3) += cg3.o obj-$(CONFIG_VGA) += vga.o common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o + +obj-$(CONFIG_VIRTIO) += virtio-gpu.o diff --git a/hw/display/vga-pci.c b/hw/display/vga-pci.c index ff5dfb2c23..0ed44c7831 100644 --- a/hw/display/vga-pci.c +++ b/hw/display/vga-pci.c @@ -54,9 +54,7 @@ typedef struct PCIVGAState { VGACommonState vga; uint32_t flags; MemoryRegion mmio; - MemoryRegion ioport; - MemoryRegion bochs; - MemoryRegion qext; + MemoryRegion mrs[3]; } PCIVGAState; #define TYPE_PCI_VGA "pci-vga" @@ -76,16 +74,16 @@ static const VMStateDescription vmstate_vga_pci = { static uint64_t pci_vga_ioport_read(void *ptr, hwaddr addr, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; uint64_t ret = 0; switch (size) { case 1: - ret = vga_ioport_read(&d->vga, addr); + ret = vga_ioport_read(s, addr + 0x3c0); break; case 2: - ret = vga_ioport_read(&d->vga, addr); - ret |= vga_ioport_read(&d->vga, addr+1) << 8; + ret = vga_ioport_read(s, addr + 0x3c0); + ret |= vga_ioport_read(s, addr + 0x3c1) << 8; break; } return ret; @@ -94,11 +92,11 @@ static uint64_t pci_vga_ioport_read(void *ptr, hwaddr addr, static void pci_vga_ioport_write(void *ptr, hwaddr addr, uint64_t val, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; switch (size) { case 1: - vga_ioport_write(&d->vga, addr + 0x3c0, val); + vga_ioport_write(s, addr + 0x3c0, val); break; case 2: /* @@ -106,8 +104,8 @@ static void pci_vga_ioport_write(void *ptr, hwaddr addr, * indexed registers with a single word write because the * index byte is updated first. 
*/ - vga_ioport_write(&d->vga, addr + 0x3c0, val & 0xff); - vga_ioport_write(&d->vga, addr + 0x3c1, (val >> 8) & 0xff); + vga_ioport_write(s, addr + 0x3c0, val & 0xff); + vga_ioport_write(s, addr + 0x3c1, (val >> 8) & 0xff); break; } } @@ -125,21 +123,21 @@ static const MemoryRegionOps pci_vga_ioport_ops = { static uint64_t pci_vga_bochs_read(void *ptr, hwaddr addr, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; int index = addr >> 1; - vbe_ioport_write_index(&d->vga, 0, index); - return vbe_ioport_read_data(&d->vga, 0); + vbe_ioport_write_index(s, 0, index); + return vbe_ioport_read_data(s, 0); } static void pci_vga_bochs_write(void *ptr, hwaddr addr, uint64_t val, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; int index = addr >> 1; - vbe_ioport_write_index(&d->vga, 0, index); - vbe_ioport_write_data(&d->vga, 0, val); + vbe_ioport_write_index(s, 0, index); + vbe_ioport_write_data(s, 0, val); } static const MemoryRegionOps pci_vga_bochs_ops = { @@ -154,13 +152,13 @@ static const MemoryRegionOps pci_vga_bochs_ops = { static uint64_t pci_vga_qext_read(void *ptr, hwaddr addr, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; switch (addr) { case PCI_VGA_QEXT_REG_SIZE: return PCI_VGA_QEXT_SIZE; case PCI_VGA_QEXT_REG_BYTEORDER: - return d->vga.big_endian_fb ? + return s->big_endian_fb ? PCI_VGA_QEXT_BIG_ENDIAN : PCI_VGA_QEXT_LITTLE_ENDIAN; default: return 0; @@ -170,15 +168,15 @@ static uint64_t pci_vga_qext_read(void *ptr, hwaddr addr, unsigned size) static void pci_vga_qext_write(void *ptr, hwaddr addr, uint64_t val, unsigned size) { - PCIVGAState *d = ptr; + VGACommonState *s = ptr; switch (addr) { case PCI_VGA_QEXT_REG_BYTEORDER: if (val == PCI_VGA_QEXT_BIG_ENDIAN) { - d->vga.big_endian_fb = true; + s->big_endian_fb = true; } if (val == PCI_VGA_QEXT_LITTLE_ENDIAN) { - d->vga.big_endian_fb = false; + s->big_endian_fb = false; } break; } @@ -206,10 +204,34 @@ static const MemoryRegionOps pci_vga_qext_ops = { .endianness = DEVICE_LITTLE_ENDIAN, }; +static void pci_std_vga_mmio_region_init(VGACommonState *s, + MemoryRegion *parent, + MemoryRegion *subs, + bool qext) +{ + memory_region_init_io(&subs[0], NULL, &pci_vga_ioport_ops, s, + "vga ioports remapped", PCI_VGA_IOPORT_SIZE); + memory_region_add_subregion(parent, PCI_VGA_IOPORT_OFFSET, + &subs[0]); + + memory_region_init_io(&subs[1], NULL, &pci_vga_bochs_ops, s, + "bochs dispi interface", PCI_VGA_BOCHS_SIZE); + memory_region_add_subregion(parent, PCI_VGA_BOCHS_OFFSET, + &subs[1]); + + if (qext) { + memory_region_init_io(&subs[2], NULL, &pci_vga_qext_ops, s, + "qemu extended regs", PCI_VGA_QEXT_SIZE); + memory_region_add_subregion(parent, PCI_VGA_QEXT_OFFSET, + &subs[2]); + } +} + static void pci_std_vga_realize(PCIDevice *dev, Error **errp) { PCIVGAState *d = PCI_VGA(dev); VGACommonState *s = &d->vga; + bool qext = false; /* vga + console init */ vga_common_init(s, OBJECT(dev), true); @@ -224,23 +246,12 @@ static void pci_std_vga_realize(PCIDevice *dev, Error **errp) /* mmio bar for vga register access */ if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_MMIO)) { memory_region_init(&d->mmio, NULL, "vga.mmio", 4096); - memory_region_init_io(&d->ioport, NULL, &pci_vga_ioport_ops, d, - "vga ioports remapped", PCI_VGA_IOPORT_SIZE); - memory_region_init_io(&d->bochs, NULL, &pci_vga_bochs_ops, d, - "bochs dispi interface", PCI_VGA_BOCHS_SIZE); - - memory_region_add_subregion(&d->mmio, PCI_VGA_IOPORT_OFFSET, - &d->ioport); - memory_region_add_subregion(&d->mmio, PCI_VGA_BOCHS_OFFSET, - &d->bochs); 
if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) { - memory_region_init_io(&d->qext, NULL, &pci_vga_qext_ops, d, - "qemu extended regs", PCI_VGA_QEXT_SIZE); - memory_region_add_subregion(&d->mmio, PCI_VGA_QEXT_OFFSET, - &d->qext); + qext = true; pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2); } + pci_std_vga_mmio_region_init(s, &d->mmio, d->mrs, qext); pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); } @@ -262,6 +273,7 @@ static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp) { PCIVGAState *d = PCI_VGA(dev); VGACommonState *s = &d->vga; + bool qext = false; /* vga + console init */ vga_common_init(s, OBJECT(dev), false); @@ -269,23 +281,12 @@ static void pci_secondary_vga_realize(PCIDevice *dev, Error **errp) /* mmio bar */ memory_region_init(&d->mmio, OBJECT(dev), "vga.mmio", 4096); - memory_region_init_io(&d->ioport, OBJECT(dev), &pci_vga_ioport_ops, d, - "vga ioports remapped", PCI_VGA_IOPORT_SIZE); - memory_region_init_io(&d->bochs, OBJECT(dev), &pci_vga_bochs_ops, d, - "bochs dispi interface", PCI_VGA_BOCHS_SIZE); - - memory_region_add_subregion(&d->mmio, PCI_VGA_IOPORT_OFFSET, - &d->ioport); - memory_region_add_subregion(&d->mmio, PCI_VGA_BOCHS_OFFSET, - &d->bochs); if (d->flags & (1 << PCI_VGA_FLAG_ENABLE_QEXT)) { - memory_region_init_io(&d->qext, NULL, &pci_vga_qext_ops, d, - "qemu extended regs", PCI_VGA_QEXT_SIZE); - memory_region_add_subregion(&d->mmio, PCI_VGA_QEXT_OFFSET, - &d->qext); + qext = true; pci_set_byte(&d->dev.config[PCI_REVISION_ID], 2); } + pci_std_vga_mmio_region_init(s, &d->mmio, d->mrs, qext); pci_register_bar(&d->dev, 0, PCI_BASE_ADDRESS_MEM_PREFETCH, &s->vram); pci_register_bar(&d->dev, 2, PCI_BASE_ADDRESS_SPACE_MEMORY, &d->mmio); diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c new file mode 100644 index 0000000000..4b10ca1ebb --- /dev/null +++ b/hw/display/virtio-gpu.c @@ -0,0 +1,918 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#include "qemu-common.h" +#include "qemu/iov.h" +#include "ui/console.h" +#include "trace.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-gpu.h" +#include "hw/virtio/virtio-bus.h" + +static struct virtio_gpu_simple_resource* +virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id); + +static void update_cursor_data_simple(VirtIOGPU *g, + struct virtio_gpu_scanout *s, + uint32_t resource_id) +{ + struct virtio_gpu_simple_resource *res; + uint32_t pixels; + + res = virtio_gpu_find_resource(g, resource_id); + if (!res) { + return; + } + + if (pixman_image_get_width(res->image) != s->current_cursor->width || + pixman_image_get_height(res->image) != s->current_cursor->height) { + return; + } + + pixels = s->current_cursor->width * s->current_cursor->height; + memcpy(s->current_cursor->data, + pixman_image_get_data(res->image), + pixels * sizeof(uint32_t)); +} + +static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor) +{ + struct virtio_gpu_scanout *s; + + if (cursor->pos.scanout_id >= g->conf.max_outputs) { + return; + } + s = &g->scanout[cursor->pos.scanout_id]; + + if (cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR) { + if (!s->current_cursor) { + s->current_cursor = cursor_alloc(64, 64); + } + + s->current_cursor->hot_x = cursor->hot_x; + s->current_cursor->hot_y = cursor->hot_y; + + if (cursor->resource_id > 0) { + update_cursor_data_simple(g, s, cursor->resource_id); + } + dpy_cursor_define(s->con, s->current_cursor); + } + dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y, + cursor->resource_id ? 1 : 0); +} + +static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + memcpy(config, &g->virtio_config, sizeof(g->virtio_config)); +} + +static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + struct virtio_gpu_config vgconfig; + + memcpy(&vgconfig, config, sizeof(g->virtio_config)); + + if (vgconfig.events_clear) { + g->virtio_config.events_read &= ~vgconfig.events_clear; + } +} + +static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features) +{ + return features; +} + +static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type) +{ + g->virtio_config.events_read |= event_type; + virtio_notify_config(&g->parent_obj); +} + +static struct virtio_gpu_simple_resource * +virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id) +{ + struct virtio_gpu_simple_resource *res; + + QTAILQ_FOREACH(res, &g->reslist, next) { + if (res->resource_id == resource_id) { + return res; + } + } + return NULL; +} + +void virtio_gpu_ctrl_response(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + struct virtio_gpu_ctrl_hdr *resp, + size_t resp_len) +{ + size_t s; + + if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) { + resp->flags |= VIRTIO_GPU_FLAG_FENCE; + resp->fence_id = cmd->cmd_hdr.fence_id; + resp->ctx_id = cmd->cmd_hdr.ctx_id; + } + s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len); + if (s != resp_len) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: response size incorrect %zu vs %zu\n", + __func__, s, resp_len); + } + virtqueue_push(cmd->vq, &cmd->elem, s); + virtio_notify(VIRTIO_DEVICE(g), cmd->vq); + cmd->finished = true; +} + +void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + enum virtio_gpu_ctrl_type type) +{ + struct virtio_gpu_ctrl_hdr resp; + + memset(&resp, 0, sizeof(resp)); + resp.type = type; + virtio_gpu_ctrl_response(g, cmd, &resp, 
sizeof(resp)); +} + +static void +virtio_gpu_fill_display_info(VirtIOGPU *g, + struct virtio_gpu_resp_display_info *dpy_info) +{ + int i; + + for (i = 0; i < g->conf.max_outputs; i++) { + if (g->enabled_output_bitmask & (1 << i)) { + dpy_info->pmodes[i].enabled = 1; + dpy_info->pmodes[i].r.width = g->req_state[i].width; + dpy_info->pmodes[i].r.height = g->req_state[i].height; + } + } +} + +void virtio_gpu_get_display_info(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_resp_display_info display_info; + + trace_virtio_gpu_cmd_get_display_info(); + memset(&display_info, 0, sizeof(display_info)); + display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO; + virtio_gpu_fill_display_info(g, &display_info); + virtio_gpu_ctrl_response(g, cmd, &display_info.hdr, + sizeof(display_info)); +} + +static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format) +{ + switch (virtio_gpu_format) { +#ifdef HOST_WORDS_BIGENDIAN + case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: + return PIXMAN_b8g8r8x8; + case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: + return PIXMAN_b8g8r8a8; + case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: + return PIXMAN_x8r8g8b8; + case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: + return PIXMAN_a8r8g8b8; + case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: + return PIXMAN_r8g8b8x8; + case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: + return PIXMAN_r8g8b8a8; + case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: + return PIXMAN_x8b8g8r8; + case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: + return PIXMAN_a8b8g8r8; +#else + case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM: + return PIXMAN_x8r8g8b8; + case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM: + return PIXMAN_a8r8g8b8; + case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM: + return PIXMAN_b8g8r8x8; + case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM: + return PIXMAN_b8g8r8a8; + case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM: + return PIXMAN_x8b8g8r8; + case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM: + return PIXMAN_a8b8g8r8; + case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM: + return PIXMAN_r8g8b8x8; + case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM: + return PIXMAN_r8g8b8a8; +#endif + default: + return 0; + } +} + +static void virtio_gpu_resource_create_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + pixman_format_code_t pformat; + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_create_2d c2d; + + VIRTIO_GPU_FILL_CMD(c2d); + trace_virtio_gpu_cmd_res_create_2d(c2d.resource_id, c2d.format, + c2d.width, c2d.height); + + if (c2d.resource_id == 0) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource id 0 is not allowed\n", + __func__); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = virtio_gpu_find_resource(g, c2d.resource_id); + if (res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: resource already exists %d\n", + __func__, c2d.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + res = g_new0(struct virtio_gpu_simple_resource, 1); + + res->width = c2d.width; + res->height = c2d.height; + res->format = c2d.format; + res->resource_id = c2d.resource_id; + + pformat = get_pixman_format(c2d.format); + if (!pformat) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: host couldn't handle guest format %d\n", + __func__, c2d.format); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + res->image = pixman_image_create_bits(pformat, + c2d.width, + c2d.height, + NULL, 0); + + if (!res->image) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: resource creation failed %d %d %d\n", + __func__, c2d.resource_id, c2d.width, c2d.height); + g_free(res); + cmd->error = 
VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY; + return; + } + + QTAILQ_INSERT_HEAD(&g->reslist, res, next); +} + +static void virtio_gpu_resource_destroy(VirtIOGPU *g, + struct virtio_gpu_simple_resource *res) +{ + pixman_image_unref(res->image); + QTAILQ_REMOVE(&g->reslist, res, next); + g_free(res); +} + +static void virtio_gpu_resource_unref(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_unref unref; + + VIRTIO_GPU_FILL_CMD(unref); + trace_virtio_gpu_cmd_res_unref(unref.resource_id); + + res = virtio_gpu_find_resource(g, unref.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, unref.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + virtio_gpu_resource_destroy(g, res); +} + +static void virtio_gpu_transfer_to_host_2d(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + int h; + uint32_t src_offset, dst_offset, stride; + int bpp; + pixman_format_code_t format; + struct virtio_gpu_transfer_to_host_2d t2d; + + VIRTIO_GPU_FILL_CMD(t2d); + trace_virtio_gpu_cmd_res_xfer_toh_2d(t2d.resource_id); + + res = virtio_gpu_find_resource(g, t2d.resource_id); + if (!res || !res->iov) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, t2d.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + if (t2d.r.x > res->width || + t2d.r.y > res->height || + t2d.r.width > res->width || + t2d.r.height > res->height || + t2d.r.x + t2d.r.width > res->width || + t2d.r.y + t2d.r.height > res->height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: transfer bounds outside resource" + " bounds for resource %d: %d %d %d %d vs %d %d\n", + __func__, t2d.resource_id, t2d.r.x, t2d.r.y, + t2d.r.width, t2d.r.height, res->width, res->height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + format = pixman_image_get_format(res->image); + bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8; + stride = pixman_image_get_stride(res->image); + + if (t2d.offset || t2d.r.x || t2d.r.y || + t2d.r.width != pixman_image_get_width(res->image)) { + void *img_data = pixman_image_get_data(res->image); + for (h = 0; h < t2d.r.height; h++) { + src_offset = t2d.offset + stride * h; + dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp); + + iov_to_buf(res->iov, res->iov_cnt, src_offset, + (uint8_t *)img_data + + dst_offset, t2d.r.width * bpp); + } + } else { + iov_to_buf(res->iov, res->iov_cnt, 0, + pixman_image_get_data(res->image), + pixman_image_get_stride(res->image) + * pixman_image_get_height(res->image)); + } +} + +static void virtio_gpu_resource_flush(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_flush rf; + pixman_region16_t flush_region; + int i; + + VIRTIO_GPU_FILL_CMD(rf); + trace_virtio_gpu_cmd_res_flush(rf.resource_id, + rf.r.width, rf.r.height, rf.r.x, rf.r.y); + + res = virtio_gpu_find_resource(g, rf.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, rf.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + if (rf.r.x > res->width || + rf.r.y > res->height || + rf.r.width > res->width || + rf.r.height > res->height || + rf.r.x + rf.r.width > res->width || + rf.r.y + rf.r.height > res->height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: flush bounds outside resource" + " bounds for 
resource %d: %d %d %d %d vs %d %d\n", + __func__, rf.resource_id, rf.r.x, rf.r.y, + rf.r.width, rf.r.height, res->width, res->height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + pixman_region_init_rect(&flush_region, + rf.r.x, rf.r.y, rf.r.width, rf.r.height); + for (i = 0; i < VIRTIO_GPU_MAX_SCANOUT; i++) { + struct virtio_gpu_scanout *scanout; + pixman_region16_t region, finalregion; + pixman_box16_t *extents; + + if (!(res->scanout_bitmask & (1 << i))) { + continue; + } + scanout = &g->scanout[i]; + + pixman_region_init(&finalregion); + pixman_region_init_rect(&region, scanout->x, scanout->y, + scanout->width, scanout->height); + + pixman_region_intersect(&finalregion, &flush_region, &region); + pixman_region_translate(&finalregion, -scanout->x, -scanout->y); + extents = pixman_region_extents(&finalregion); + /* work out the area we need to update for each console */ + dpy_gfx_update(g->scanout[i].con, + extents->x1, extents->y1, + extents->x2 - extents->x1, + extents->y2 - extents->y1); + + pixman_region_fini(&region); + pixman_region_fini(&finalregion); + } + pixman_region_fini(&flush_region); +} + +static void virtio_gpu_set_scanout(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_scanout *scanout; + pixman_format_code_t format; + uint32_t offset; + int bpp; + struct virtio_gpu_set_scanout ss; + + VIRTIO_GPU_FILL_CMD(ss); + trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id, + ss.r.width, ss.r.height, ss.r.x, ss.r.y); + + g->enable = 1; + if (ss.resource_id == 0) { + scanout = &g->scanout[ss.scanout_id]; + if (scanout->resource_id) { + res = virtio_gpu_find_resource(g, scanout->resource_id); + if (res) { + res->scanout_bitmask &= ~(1 << ss.scanout_id); + } + } + if (ss.scanout_id == 0 || + ss.scanout_id >= g->conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: illegal scanout id specified %d\n", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL); + scanout->ds = NULL; + scanout->width = 0; + scanout->height = 0; + return; + } + + /* create a surface for this scanout */ + if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUT || + ss.scanout_id >= g->conf.max_outputs) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d\n", + __func__, ss.scanout_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID; + return; + } + + res = virtio_gpu_find_resource(g, ss.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, ss.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + if (ss.r.x > res->width || + ss.r.y > res->height || + ss.r.width > res->width || + ss.r.height > res->height || + ss.r.x + ss.r.width > res->width || + ss.r.y + ss.r.height > res->height) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout %d bounds for" + " resource %d, (%d,%d)+%d,%d vs %d %d\n", + __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y, + ss.r.width, ss.r.height, res->width, res->height); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER; + return; + } + + scanout = &g->scanout[ss.scanout_id]; + + format = pixman_image_get_format(res->image); + bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8; + offset = (ss.r.x * bpp) + ss.r.y * pixman_image_get_stride(res->image); + if (!scanout->ds || surface_data(scanout->ds) + != ((uint8_t *)pixman_image_get_data(res->image) + offset) || + scanout->width
!= ss.r.width || + scanout->height != ss.r.height) { + /* realloc the surface ptr */ + scanout->ds = qemu_create_displaysurface_from + (ss.r.width, ss.r.height, format, + pixman_image_get_stride(res->image), + (uint8_t *)pixman_image_get_data(res->image) + offset); + if (!scanout->ds) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds); + } + + res->scanout_bitmask |= (1 << ss.scanout_id); + scanout->resource_id = ss.resource_id; + scanout->x = ss.r.x; + scanout->y = ss.r.y; + scanout->width = ss.r.width; + scanout->height = ss.r.height; +} + +int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab, + struct virtio_gpu_ctrl_command *cmd, + struct iovec **iov) +{ + struct virtio_gpu_mem_entry *ents; + size_t esize, s; + int i; + + if (ab->nr_entries > 16384) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: nr_entries is too big (%d > 16384)\n", + __func__, ab->nr_entries); + return -1; + } + + esize = sizeof(*ents) * ab->nr_entries; + ents = g_malloc(esize); + s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, + sizeof(*ab), ents, esize); + if (s != esize) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: command data size incorrect %zu vs %zu\n", + __func__, s, esize); + g_free(ents); + return -1; + } + + *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries); + for (i = 0; i < ab->nr_entries; i++) { + hwaddr len = ents[i].length; + (*iov)[i].iov_len = ents[i].length; + (*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1); + if (!(*iov)[i].iov_base || len != ents[i].length) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" + " resource %d element %d\n", + __func__, ab->resource_id, i); + virtio_gpu_cleanup_mapping_iov(*iov, i); + g_free(ents); + g_free(*iov); + *iov = NULL; + return -1; + } + } + g_free(ents); + return 0; +} + +void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count) +{ + int i; + + for (i = 0; i < count; i++) { + cpu_physical_memory_unmap(iov[i].iov_base, iov[i].iov_len, 1, + iov[i].iov_len); + } +} + +static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res) +{ + virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt); + g_free(res->iov); + res->iov = NULL; + res->iov_cnt = 0; +} + +static void +virtio_gpu_resource_attach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_attach_backing ab; + int ret; + + VIRTIO_GPU_FILL_CMD(ab); + trace_virtio_gpu_cmd_res_back_attach(ab.resource_id); + + res = virtio_gpu_find_resource(g, ab.resource_id); + if (!res) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, ab.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + } + + ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov); + if (ret != 0) { + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + return; + } + + res->iov_cnt = ab.nr_entries; +} + +static void +virtio_gpu_resource_detach_backing(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + struct virtio_gpu_simple_resource *res; + struct virtio_gpu_resource_detach_backing detach; + + VIRTIO_GPU_FILL_CMD(detach); + trace_virtio_gpu_cmd_res_back_detach(detach.resource_id); + + res = virtio_gpu_find_resource(g, detach.resource_id); + if (!res || !res->iov) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal resource specified %d\n", + __func__, detach.resource_id); + cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID; + return; + }
+ virtio_gpu_cleanup_mapping(res); +} + +static void virtio_gpu_simple_process_cmd(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd) +{ + VIRTIO_GPU_FILL_CMD(cmd->cmd_hdr); + + switch (cmd->cmd_hdr.type) { + case VIRTIO_GPU_CMD_GET_DISPLAY_INFO: + virtio_gpu_get_display_info(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: + virtio_gpu_resource_create_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_UNREF: + virtio_gpu_resource_unref(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_FLUSH: + virtio_gpu_resource_flush(g, cmd); + break; + case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: + virtio_gpu_transfer_to_host_2d(g, cmd); + break; + case VIRTIO_GPU_CMD_SET_SCANOUT: + virtio_gpu_set_scanout(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING: + virtio_gpu_resource_attach_backing(g, cmd); + break; + case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING: + virtio_gpu_resource_detach_backing(g, cmd); + break; + default: + cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC; + break; + } + if (!cmd->finished) { + virtio_gpu_ctrl_response_nodata(g, cmd, cmd->error ? cmd->error : + VIRTIO_GPU_RESP_OK_NODATA); + } +} + +static void virtio_gpu_handle_ctrl_cb(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + qemu_bh_schedule(g->ctrl_bh); +} + +static void virtio_gpu_handle_cursor_cb(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + qemu_bh_schedule(g->cursor_bh); +} + +static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + struct virtio_gpu_ctrl_command *cmd; + + if (!virtio_queue_ready(vq)) { + return; + } + + cmd = g_new(struct virtio_gpu_ctrl_command, 1); + while (virtqueue_pop(vq, &cmd->elem)) { + cmd->vq = vq; + cmd->error = 0; + cmd->finished = false; + g->stats.requests++; + + virtio_gpu_simple_process_cmd(g, cmd); + if (!cmd->finished) { + QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next); + g->stats.inflight++; + if (g->stats.max_inflight < g->stats.inflight) { + g->stats.max_inflight = g->stats.inflight; + } + fprintf(stderr, "inflight: %3d (+)\r", g->stats.inflight); + cmd = g_new(struct virtio_gpu_ctrl_command, 1); + } + } + g_free(cmd); +} + +static void virtio_gpu_ctrl_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq); +} + +static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + VirtQueueElement elem; + size_t s; + struct virtio_gpu_update_cursor cursor_info; + + if (!virtio_queue_ready(vq)) { + return; + } + while (virtqueue_pop(vq, &elem)) { + s = iov_to_buf(elem.out_sg, elem.out_num, 0, + &cursor_info, sizeof(cursor_info)); + if (s != sizeof(cursor_info)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: cursor size incorrect %zu vs %zu\n", + __func__, s, sizeof(cursor_info)); + } else { + update_cursor(g, &cursor_info); + } + virtqueue_push(vq, &elem, 0); + virtio_notify(vdev, vq); + } +} + +static void virtio_gpu_cursor_bh(void *opaque) +{ + VirtIOGPU *g = opaque; + virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq); +} + +static void virtio_gpu_invalidate_display(void *opaque) +{ +} + +static void virtio_gpu_update_display(void *opaque) +{ +} + +static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata) +{ +} + +static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info) +{ + VirtIOGPU *g = opaque; + + if (idx > g->conf.max_outputs) { + return -1; + } + + g->req_state[idx].x = info->xoff; + g->req_state[idx].y = info->yoff; + g->req_state[idx].width = 
info->width; + g->req_state[idx].height = info->height; + + if (info->width && info->height) { + g->enabled_output_bitmask |= (1 << idx); + } else { + g->enabled_output_bitmask &= ~(1 << idx); + } + + /* send event to guest */ + virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY); + return 0; +} + +const GraphicHwOps virtio_gpu_ops = { + .invalidate = virtio_gpu_invalidate_display, + .gfx_update = virtio_gpu_update_display, + .text_update = virtio_gpu_text_update, + .ui_info = virtio_gpu_ui_info, +}; + +static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(qdev); + VirtIOGPU *g = VIRTIO_GPU(qdev); + int i; + + g->config_size = sizeof(struct virtio_gpu_config); + g->virtio_config.num_scanouts = g->conf.max_outputs; + virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU, + g->config_size); + + g->req_state[0].width = 1024; + g->req_state[0].height = 768; + + g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb); + g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb); + + g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); + g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); + QTAILQ_INIT(&g->reslist); + QTAILQ_INIT(&g->fenceq); + + g->enabled_output_bitmask = 1; + g->qdev = qdev; + + for (i = 0; i < g->conf.max_outputs; i++) { + g->scanout[i].con = + graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g); + if (i > 0) { + dpy_gfx_replace_surface(g->scanout[i].con, NULL); + } + } +} + +static void virtio_gpu_instance_init(Object *obj) +{ +} + +static void virtio_gpu_reset(VirtIODevice *vdev) +{ + VirtIOGPU *g = VIRTIO_GPU(vdev); + struct virtio_gpu_simple_resource *res, *tmp; + int i; + + g->enable = 0; + + QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) { + virtio_gpu_resource_destroy(g, res); + } + for (i = 0; i < g->conf.max_outputs; i++) { +#if 0 + g->req_state[i].x = 0; + g->req_state[i].y = 0; + if (i == 0) { + g->req_state[0].width = 1024; + g->req_state[0].height = 768; + } else { + g->req_state[i].width = 0; + g->req_state[i].height = 0; + } +#endif + g->scanout[i].resource_id = 0; + g->scanout[i].width = 0; + g->scanout[i].height = 0; + g->scanout[i].x = 0; + g->scanout[i].y = 0; + g->scanout[i].ds = NULL; + } + g->enabled_output_bitmask = 1; +} + +static Property virtio_gpu_properties[] = { + DEFINE_VIRTIO_GPU_PROPERTIES(VirtIOGPU, conf), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_gpu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + vdc->realize = virtio_gpu_device_realize; + vdc->get_config = virtio_gpu_get_config; + vdc->set_config = virtio_gpu_set_config; + vdc->get_features = virtio_gpu_get_features; + + vdc->reset = virtio_gpu_reset; + + dc->props = virtio_gpu_properties; +} + +static const TypeInfo virtio_gpu_info = { + .name = TYPE_VIRTIO_GPU, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VirtIOGPU), + .instance_init = virtio_gpu_instance_init, + .class_init = virtio_gpu_class_init, +}; + +static void virtio_register_types(void) +{ + type_register_static(&virtio_gpu_info); +} + +type_init(virtio_register_types) + +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48); 
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32); +QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408); diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index bdfd38f4ca..68b9981983 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -1241,6 +1241,14 @@ static void pcnet_transmit(PCNetState *s) } bcnt = 4096 - GET_FIELD(tmd.length, TMDL, BCNT); + + /* if a multi-tmd packet exceeds s->buffer then skip it silently. + Note: this is not what real hw does */ + if (s->xmit_pos + bcnt > sizeof(s->buffer)) { + s->xmit_pos = -1; + goto txdone; + } + s->phys_mem_read(s->dma_opaque, PHYSADDR(s, tmd.tbadr), s->buffer + s->xmit_pos, bcnt, CSR_BSWP(s)); s->xmit_pos += bcnt; diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index 68eff77983..88481b78c4 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -46,7 +46,6 @@ typedef struct FWCfgEntry { uint32_t len; uint8_t *data; void *callback_opaque; - FWCfgCallback callback; FWCfgReadCallback read_callback; } FWCfgEntry; @@ -232,19 +231,7 @@ static void fw_cfg_reboot(FWCfgState *s) static void fw_cfg_write(FWCfgState *s, uint8_t value) { - int arch = !!(s->cur_entry & FW_CFG_ARCH_LOCAL); - FWCfgEntry *e = &s->entries[arch][s->cur_entry & FW_CFG_ENTRY_MASK]; - - trace_fw_cfg_write(s, value); - - if (s->cur_entry & FW_CFG_WRITE_CHANNEL && e->callback && - s->cur_offset < e->len) { - e->data[s->cur_offset++] = value; - if (s->cur_offset == e->len) { - e->callback(e->callback_opaque, e->data); - s->cur_offset = 0; - } - } + /* nothing, write support removed in QEMU v2.4+ */ } static int fw_cfg_select(FWCfgState *s, uint16_t key) @@ -436,6 +423,7 @@ static void fw_cfg_add_bytes_read_callback(FWCfgState *s, uint16_t key, key &= FW_CFG_ENTRY_MASK; assert(key < FW_CFG_MAX_ENTRY && len < UINT32_MAX); + assert(s->entries[arch][key].data == NULL); /* avoid key conflict */ s->entries[arch][key].data = data; s->entries[arch][key].len = (uint32_t)len; @@ -458,7 +446,6 @@ static void *fw_cfg_modify_bytes_read(FWCfgState *s, uint16_t key, s->entries[arch][key].data = data; s->entries[arch][key].len = len; s->entries[arch][key].callback_opaque = NULL; - s->entries[arch][key].callback = NULL; return ptr; } @@ -484,6 +471,16 @@ void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value) fw_cfg_add_bytes(s, key, copy, sizeof(value)); } +void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value) +{ + uint16_t *copy, *old; + + copy = g_malloc(sizeof(value)); + *copy = cpu_to_le16(value); + old = fw_cfg_modify_bytes_read(s, key, copy, sizeof(value)); + g_free(old); +} + void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value) { uint32_t *copy; @@ -502,23 +499,6 @@ void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value) fw_cfg_add_bytes(s, key, copy, sizeof(value)); } -void fw_cfg_add_callback(FWCfgState *s, uint16_t key, FWCfgCallback callback, - void *callback_opaque, void *data, size_t len) -{ - int arch = !!(key & FW_CFG_ARCH_LOCAL); - - assert(key & FW_CFG_WRITE_CHANNEL); - - key &= FW_CFG_ENTRY_MASK; - - assert(key < FW_CFG_MAX_ENTRY && len <= UINT32_MAX); - - s->entries[arch][key].data = data; - s->entries[arch][key].len = (uint32_t)len; - s->entries[arch][key].callback_opaque = callback_opaque;
s->entries[arch][key].callback = callback; -} - void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, FWCfgReadCallback callback, void *callback_opaque, void *data, size_t len) @@ -535,18 +515,19 @@ void fw_cfg_add_file_callback(FWCfgState *s, const char *filename, index = be32_to_cpu(s->files->count); assert(index < FW_CFG_FILE_SLOTS); - fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index, - callback, callback_opaque, data, len); - pstrcpy(s->files->f[index].name, sizeof(s->files->f[index].name), filename); for (i = 0; i < index; i++) { if (strcmp(s->files->f[index].name, s->files->f[i].name) == 0) { - trace_fw_cfg_add_file_dupe(s, s->files->f[index].name); - return; + error_report("duplicate fw_cfg file name: %s", + s->files->f[index].name); + exit(1); } } + fw_cfg_add_bytes_read_callback(s, FW_CFG_FILE_FIRST + index, + callback, callback_opaque, data, len); + s->files->f[index].size = cpu_to_be32(len); s->files->f[index].select = cpu_to_be16(FW_CFG_FILE_FIRST + index); trace_fw_cfg_add_file(s, index, s->files->f[index].name, len); diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index a365bf9223..0f3e34122a 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -119,7 +119,7 @@ static const MemoryRegionOps unin_ops = { static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { - fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } static uint64_t translate_kernel_address(void *opaque, uint64_t addr) diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index f26133dedd..99879dd2d5 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -52,7 +52,7 @@ static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { - fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } static uint64_t translate_kernel_address(void *opaque, uint64_t addr) diff --git a/hw/s390x/virtio-ccw.c b/hw/s390x/virtio-ccw.c index ef90feddea..ab5fc7f40e 100644 --- a/hw/s390x/virtio-ccw.c +++ b/hw/s390x/virtio-ccw.c @@ -1,8 +1,9 @@ /* * virtio ccw target implementation * - * Copyright 2012,2014 IBM Corp. + * Copyright 2012,2015 IBM Corp. * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Pierre Morel <pmorel@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. 
See the COPYING file in the top-level @@ -1310,6 +1311,7 @@ static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); SubchDev *s = dev->sch; + VirtIODevice *vdev = virtio_ccw_get_vdev(s); subch_device_save(s, f); if (dev->indicators != NULL) { @@ -1333,6 +1335,7 @@ static void virtio_ccw_save_config(DeviceState *d, QEMUFile *f) qemu_put_be32(f, 0); qemu_put_be64(f, 0UL); } + qemu_put_be16(f, vdev->config_vector); qemu_put_be64(f, dev->routes.adapter.ind_offset); qemu_put_byte(f, dev->thinint_isc); } @@ -1341,6 +1344,7 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) { VirtioCcwDevice *dev = VIRTIO_CCW_DEVICE(d); SubchDev *s = dev->sch; + VirtIODevice *vdev = virtio_ccw_get_vdev(s); int len; s->driver_data = dev; @@ -1366,6 +1370,7 @@ static int virtio_ccw_load_config(DeviceState *d, QEMUFile *f) qemu_get_be64(f); dev->summary_indicator = NULL; } + qemu_get_be16s(f, &vdev->config_vector); dev->routes.adapter.ind_offset = qemu_get_be64(f); dev->thinint_isc = qemu_get_byte(f); if (s->thinint_active) { @@ -1730,6 +1735,56 @@ static const TypeInfo virtio_ccw_bus_info = { .class_init = virtio_ccw_bus_class_init, }; +#ifdef CONFIG_VIRTFS +static Property virtio_ccw_9p_properties[] = { + DEFINE_PROP_STRING("devno", VirtioCcwDevice, bus_id), + DEFINE_PROP_BIT("ioeventfd", VirtioCcwDevice, flags, + VIRTIO_CCW_FLAG_USE_IOEVENTFD_BIT, true), + DEFINE_PROP_END_OF_LIST(), +}; + +static void virtio_ccw_9p_realize(VirtioCcwDevice *ccw_dev, Error **errp) +{ + V9fsCCWState *dev = VIRTIO_9P_CCW(ccw_dev); + DeviceState *vdev = DEVICE(&dev->vdev); + Error *err = NULL; + + qdev_set_parent_bus(vdev, BUS(&ccw_dev->bus)); + object_property_set_bool(OBJECT(vdev), true, "realized", &err); + if (err) { + error_propagate(errp, err); + } +} + +static void virtio_ccw_9p_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_CLASS(klass); + + k->exit = virtio_ccw_exit; + k->realize = virtio_ccw_9p_realize; + dc->reset = virtio_ccw_reset; + dc->props = virtio_ccw_9p_properties; + set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); +} + +static void virtio_ccw_9p_instance_init(Object *obj) +{ + V9fsCCWState *dev = VIRTIO_9P_CCW(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VIRTIO_9P); +} + +static const TypeInfo virtio_ccw_9p_info = { + .name = TYPE_VIRTIO_9P_CCW, + .parent = TYPE_VIRTIO_CCW_DEVICE, + .instance_size = sizeof(V9fsCCWState), + .instance_init = virtio_ccw_9p_instance_init, + .class_init = virtio_ccw_9p_class_init, +}; +#endif + static void virtio_ccw_register(void) { type_register_static(&virtio_ccw_bus_info); @@ -1745,6 +1800,9 @@ static void virtio_ccw_register(void) #endif type_register_static(&virtio_ccw_rng); type_register_static(&virtual_css_bridge_info); +#ifdef CONFIG_VIRTFS + type_register_static(&virtio_ccw_9p_info); +#endif } type_init(virtio_ccw_register) diff --git a/hw/s390x/virtio-ccw.h b/hw/s390x/virtio-ccw.h index ad3af7626a..d729263960 100644 --- a/hw/s390x/virtio-ccw.h +++ b/hw/s390x/virtio-ccw.h @@ -1,8 +1,9 @@ /* * virtio ccw target definitions * - * Copyright 2012 IBM Corp. + * Copyright 2012,2015 IBM Corp. * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> + * Pierre Morel <pmorel@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. 
See the COPYING file in the top-level @@ -189,4 +190,19 @@ typedef struct VirtIORNGCcw { VirtualCssBus *virtual_css_bus_init(void); void virtio_ccw_device_update_status(SubchDev *sch); VirtIODevice *virtio_ccw_get_vdev(SubchDev *sch); + +#ifdef CONFIG_VIRTFS +#include "hw/9pfs/virtio-9p.h" + +#define TYPE_VIRTIO_9P_CCW "virtio-9p-ccw" +#define VIRTIO_9P_CCW(obj) \ + OBJECT_CHECK(V9fsCCWState, (obj), TYPE_VIRTIO_9P_CCW) + +typedef struct V9fsCCWState { + VirtioCcwDevice parent_obj; + V9fsState vdev; +} V9fsCCWState; + +#endif /* CONFIG_VIRTFS */ + #endif diff --git a/hw/sparc/sun4m.c b/hw/sparc/sun4m.c index 8a3599c403..68ac4d8bba 100644 --- a/hw/sparc/sun4m.c +++ b/hw/sparc/sun4m.c @@ -124,7 +124,7 @@ void DMA_register_channel (int nchan, static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { - fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } static void nvram_init(Nvram *nvram, uint8_t *macaddr, diff --git a/hw/sparc64/sun4u.c b/hw/sparc64/sun4u.c index 6f34e87935..30cfa0e0a0 100644 --- a/hw/sparc64/sun4u.c +++ b/hw/sparc64/sun4u.c @@ -127,7 +127,7 @@ void DMA_register_channel (int nchan, static void fw_cfg_boot_set(void *opaque, const char *boot_device, Error **errp) { - fw_cfg_add_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); } static int sun4u_NVRAM_set_params(Nvram *nvram, uint16_t NVRAM_size, diff --git a/hw/vfio/Makefile.objs b/hw/vfio/Makefile.objs index e31f30ec09..d540c9d140 100644 --- a/hw/vfio/Makefile.objs +++ b/hw/vfio/Makefile.objs @@ -1,4 +1,6 @@ ifeq ($(CONFIG_LINUX), y) obj-$(CONFIG_SOFTMMU) += common.o obj-$(CONFIG_PCI) += pci.o +obj-$(CONFIG_SOFTMMU) += platform.o +obj-$(CONFIG_SOFTMMU) += calxeda-xgmac.o endif diff --git a/hw/vfio/calxeda-xgmac.c b/hw/vfio/calxeda-xgmac.c new file mode 100644 index 0000000000..eb914f0d0b --- /dev/null +++ b/hw/vfio/calxeda-xgmac.c @@ -0,0 +1,55 @@ +/* + * calxeda xgmac VFIO device + * + * Copyright Linaro Limited, 2014 + * + * Authors: + * Eric Auger <eric.auger@linaro.org> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. 
+ * + */ + +#include "hw/vfio/vfio-calxeda-xgmac.h" + +static void calxeda_xgmac_realize(DeviceState *dev, Error **errp) +{ + VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev); + VFIOCalxedaXgmacDeviceClass *k = VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(dev); + + vdev->compat = g_strdup("calxeda,hb-xgmac"); + + k->parent_realize(dev, errp); +} + +static const VMStateDescription vfio_platform_calxeda_xgmac_vmstate = { + .name = TYPE_VFIO_CALXEDA_XGMAC, + .unmigratable = 1, +}; + +static void vfio_calxeda_xgmac_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VFIOCalxedaXgmacDeviceClass *vcxc = + VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass); + vcxc->parent_realize = dc->realize; + dc->realize = calxeda_xgmac_realize; + dc->desc = "VFIO Calxeda XGMAC"; + dc->vmsd = &vfio_platform_calxeda_xgmac_vmstate; +} + +static const TypeInfo vfio_calxeda_xgmac_dev_info = { + .name = TYPE_VFIO_CALXEDA_XGMAC, + .parent = TYPE_VFIO_PLATFORM, + .instance_size = sizeof(VFIOCalxedaXgmacDevice), + .class_init = vfio_calxeda_xgmac_class_init, + .class_size = sizeof(VFIOCalxedaXgmacDeviceClass), +}; + +static void register_calxeda_xgmac_dev_type(void) +{ + type_register_static(&vfio_calxeda_xgmac_dev_info); +} + +type_init(register_calxeda_xgmac_dev_type) diff --git a/hw/vfio/platform.c b/hw/vfio/platform.c new file mode 100644 index 0000000000..9382bb7f36 --- /dev/null +++ b/hw/vfio/platform.c @@ -0,0 +1,615 @@ +/* + * vfio based device assignment support - platform devices + * + * Copyright Linaro Limited, 2014 + * + * Authors: + * Kim Phillips <kim.phillips@linaro.org> + * Eric Auger <eric.auger@linaro.org> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on vfio based PCI device assignment support: + * Copyright Red Hat, Inc. 
2012 + */ + +#include <linux/vfio.h> +#include <sys/ioctl.h> + +#include "hw/vfio/vfio-platform.h" +#include "qemu/error-report.h" +#include "qemu/range.h" +#include "sysemu/sysemu.h" +#include "exec/memory.h" +#include "qemu/queue.h" +#include "hw/sysbus.h" +#include "trace.h" +#include "hw/platform-bus.h" + +/* + * Functions used regardless of the injection method + */ + +/** + * vfio_init_intp - allocate, initialize the IRQ struct pointer + * and add it into the list of IRQs + * @vbasedev: the VFIO device handle + * @info: irq info struct retrieved from VFIO driver + */ +static VFIOINTp *vfio_init_intp(VFIODevice *vbasedev, + struct vfio_irq_info info) +{ + int ret; + VFIOPlatformDevice *vdev = + container_of(vbasedev, VFIOPlatformDevice, vbasedev); + SysBusDevice *sbdev = SYS_BUS_DEVICE(vdev); + VFIOINTp *intp; + + intp = g_malloc0(sizeof(*intp)); + intp->vdev = vdev; + intp->pin = info.index; + intp->flags = info.flags; + intp->state = VFIO_IRQ_INACTIVE; + + sysbus_init_irq(sbdev, &intp->qemuirq); + + /* Get an eventfd for trigger */ + ret = event_notifier_init(&intp->interrupt, 0); + if (ret) { + g_free(intp); + error_report("vfio: Error: trigger event_notifier_init failed"); + return NULL; + } + + QLIST_INSERT_HEAD(&vdev->intp_list, intp, next); + return intp; +} + +/** + * vfio_set_trigger_eventfd - set VFIO eventfd handling + * + * @intp: IRQ struct handle + * @handler: handler to be called on eventfd signaling + * + * Set up VFIO signaling and attach an optional user-side handler + * to the eventfd + */ +static int vfio_set_trigger_eventfd(VFIOINTp *intp, + eventfd_user_side_handler_t handler) +{ + VFIODevice *vbasedev = &intp->vdev->vbasedev; + struct vfio_irq_set *irq_set; + int argsz, ret; + int32_t *pfd; + + argsz = sizeof(*irq_set) + sizeof(*pfd); + irq_set = g_malloc0(argsz); + irq_set->argsz = argsz; + irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER; + irq_set->index = intp->pin; + irq_set->start = 0; + irq_set->count = 1; + pfd = (int32_t *)&irq_set->data; + *pfd = event_notifier_get_fd(&intp->interrupt); + qemu_set_fd_handler(*pfd, (IOHandler *)handler, NULL, intp); + ret = ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set); + g_free(irq_set); + if (ret < 0) { + error_report("vfio: Failed to set trigger eventfd: %m"); + qemu_set_fd_handler(*pfd, NULL, NULL, NULL); + } + return ret; +} + +/* + * Functions only used when eventfds are handled on user-side, + * i.e. without irqfd + */ + +/** + * vfio_mmap_set_enabled - enable/disable the fast path mode + * @vdev: the VFIO platform device + * @enabled: the target mmap state + * + * enabled = true ~ fast path = MMIO region is mmapped (no KVM TRAP); + * enabled = false ~ slow path = MMIO region is trapped and region callbacks + * are called; the slow path makes it possible to trap the device IRQ status + * register reset +*/ + +static void vfio_mmap_set_enabled(VFIOPlatformDevice *vdev, bool enabled) +{ + int i; + + trace_vfio_platform_mmap_set_enabled(enabled); + + for (i = 0; i < vdev->vbasedev.num_regions; i++) { + VFIORegion *region = vdev->regions[i]; + + memory_region_set_enabled(&region->mmap_mem, enabled); + } +} + +/** + * vfio_intp_mmap_enable - timer function, restores the fast path + * if there is no more active IRQ + * @opaque: actually points to the VFIO platform device + * + * Called on mmap timer timeout, this function checks whether the + * IRQ is still active and if not, restores the fast path. + * By construction a single eventfd is handled at a time. + * If the IRQ is still active, the timer is re-programmed.
+ */ +static void vfio_intp_mmap_enable(void *opaque) +{ + VFIOINTp *tmp; + VFIOPlatformDevice *vdev = (VFIOPlatformDevice *)opaque; + + qemu_mutex_lock(&vdev->intp_mutex); + QLIST_FOREACH(tmp, &vdev->intp_list, next) { + if (tmp->state == VFIO_IRQ_ACTIVE) { + trace_vfio_platform_intp_mmap_enable(tmp->pin); + /* re-program the timer to check active status later */ + timer_mod(vdev->mmap_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + vdev->mmap_timeout); + qemu_mutex_unlock(&vdev->intp_mutex); + return; + } + } + vfio_mmap_set_enabled(vdev, true); + qemu_mutex_unlock(&vdev->intp_mutex); +} + +/** + * vfio_intp_inject_pending_lockheld - Injects a pending IRQ + * @opaque: opaque pointer, in practice the VFIOINTp handle + * + * The function is called on a previous IRQ completion, from + * vfio_platform_eoi, while the intp_mutex is locked. + * Also, in such a situation, the slow path is already set and + * the mmap timer was already programmed. + */ +static void vfio_intp_inject_pending_lockheld(VFIOINTp *intp) +{ + trace_vfio_platform_intp_inject_pending_lockheld(intp->pin, + event_notifier_get_fd(&intp->interrupt)); + + intp->state = VFIO_IRQ_ACTIVE; + + /* trigger the virtual IRQ */ + qemu_set_irq(intp->qemuirq, 1); +} + +/** + * vfio_intp_interrupt - The user-side eventfd handler + * @opaque: opaque pointer which in practice is the VFIOINTp handle + * + * The function is entered in event handler context: + * the vIRQ is injected into the guest if there is no other active + * or pending IRQ. + */ +static void vfio_intp_interrupt(VFIOINTp *intp) +{ + int ret; + VFIOINTp *tmp; + VFIOPlatformDevice *vdev = intp->vdev; + bool delay_handling = false; + + qemu_mutex_lock(&vdev->intp_mutex); + if (intp->state == VFIO_IRQ_INACTIVE) { + QLIST_FOREACH(tmp, &vdev->intp_list, next) { + if (tmp->state == VFIO_IRQ_ACTIVE || + tmp->state == VFIO_IRQ_PENDING) { + delay_handling = true; + break; + } + } + } + if (delay_handling) { + /* + * the new IRQ gets a pending status and is pushed in + * the pending queue + */ + intp->state = VFIO_IRQ_PENDING; + trace_vfio_intp_interrupt_set_pending(intp->pin); + QSIMPLEQ_INSERT_TAIL(&vdev->pending_intp_queue, + intp, pqnext); + ret = event_notifier_test_and_clear(&intp->interrupt); + qemu_mutex_unlock(&vdev->intp_mutex); + return; + } + + trace_vfio_platform_intp_interrupt(intp->pin, + event_notifier_get_fd(&intp->interrupt)); + + ret = event_notifier_test_and_clear(&intp->interrupt); + if (!ret) { + error_report("Error when clearing fd=%d (ret = %d)", + event_notifier_get_fd(&intp->interrupt), ret); + } + + intp->state = VFIO_IRQ_ACTIVE; + + /* sets slow path */ + vfio_mmap_set_enabled(vdev, false); + + /* trigger the virtual IRQ */ + qemu_set_irq(intp->qemuirq, 1); + + /* + * Schedule the mmap timer which will restore fastpath when no IRQ + * is active anymore + */ + if (vdev->mmap_timeout) { + timer_mod(vdev->mmap_timer, + qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + + vdev->mmap_timeout); + } + qemu_mutex_unlock(&vdev->intp_mutex); +} + +/** + * vfio_platform_eoi - IRQ completion routine + * @vbasedev: the VFIO device handle + * + * De-asserts the active virtual IRQ and unmasks the physical IRQ + * (effective for level sensitive IRQ auto-masked by the VFIO driver). + * Then it handles the next pending IRQ, if any. + * The eoi function is called on the first access to any MMIO region + * after an IRQ was triggered, trapped since the slow path was set. + * It is assumed this access corresponds to the IRQ status + * register reset.
With such a mechanism, a single IRQ can be + handled at a time since there is no way to know which IRQ + was completed by the guest (we would need additional details + about the IRQ status register mask). + */ +static void vfio_platform_eoi(VFIODevice *vbasedev) +{ + VFIOINTp *intp; + VFIOPlatformDevice *vdev = + container_of(vbasedev, VFIOPlatformDevice, vbasedev); + + qemu_mutex_lock(&vdev->intp_mutex); + QLIST_FOREACH(intp, &vdev->intp_list, next) { + if (intp->state == VFIO_IRQ_ACTIVE) { + trace_vfio_platform_eoi(intp->pin, + event_notifier_get_fd(&intp->interrupt)); + intp->state = VFIO_IRQ_INACTIVE; + + /* deassert the virtual IRQ */ + qemu_set_irq(intp->qemuirq, 0); + + if (intp->flags & VFIO_IRQ_INFO_AUTOMASKED) { + /* unmasks the physical level-sensitive IRQ */ + vfio_unmask_single_irqindex(vbasedev, intp->pin); + } + + /* a single IRQ can be active at a time */ + break; + } + } + /* in case there are pending IRQs, handle the first one */ + if (!QSIMPLEQ_EMPTY(&vdev->pending_intp_queue)) { + intp = QSIMPLEQ_FIRST(&vdev->pending_intp_queue); + vfio_intp_inject_pending_lockheld(intp); + QSIMPLEQ_REMOVE_HEAD(&vdev->pending_intp_queue, pqnext); + } + qemu_mutex_unlock(&vdev->intp_mutex); +} + +/** + * vfio_start_eventfd_injection - starts the virtual IRQ injection using + * user-side handled eventfds + * @intp: the IRQ struct pointer + */ + +static int vfio_start_eventfd_injection(VFIOINTp *intp) +{ + int ret; + + ret = vfio_set_trigger_eventfd(intp, vfio_intp_interrupt); + if (ret) { + error_report("vfio: Error: Failed to pass IRQ fd to the driver: %m"); + } + return ret; +} + +/* VFIO skeleton */ + +static void vfio_platform_compute_needs_reset(VFIODevice *vbasedev) +{ + vbasedev->needs_reset = true; +} + +/* not implemented yet */ +static int vfio_platform_hot_reset_multi(VFIODevice *vbasedev) +{ + return -1; +} + +/** + * vfio_populate_device - Allocate and populate MMIO region + * and IRQ structs according to driver-returned information + * @vbasedev: the VFIO device handle + * + */ +static int vfio_populate_device(VFIODevice *vbasedev) +{ + VFIOINTp *intp, *tmp; + int i, ret = -1; + VFIOPlatformDevice *vdev = + container_of(vbasedev, VFIOPlatformDevice, vbasedev); + + if (!(vbasedev->flags & VFIO_DEVICE_FLAGS_PLATFORM)) { + error_report("vfio: Um, this isn't a platform device"); + return ret; + } + + vdev->regions = g_new0(VFIORegion *, vbasedev->num_regions); + + for (i = 0; i < vbasedev->num_regions; i++) { + struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) }; + VFIORegion *ptr; + + vdev->regions[i] = g_malloc0(sizeof(VFIORegion)); + ptr = vdev->regions[i]; + reg_info.index = i; + ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info); + if (ret) { + error_report("vfio: Error getting region %d info: %m", i); + goto reg_error; + } + ptr->flags = reg_info.flags; + ptr->size = reg_info.size; + ptr->fd_offset = reg_info.offset; + ptr->nr = i; + ptr->vbasedev = vbasedev; + + trace_vfio_platform_populate_regions(ptr->nr, + (unsigned long)ptr->flags, + (unsigned long)ptr->size, + ptr->vbasedev->fd, + (unsigned long)ptr->fd_offset); + } + + vdev->mmap_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, + vfio_intp_mmap_enable, vdev); + + QSIMPLEQ_INIT(&vdev->pending_intp_queue); + + for (i = 0; i < vbasedev->num_irqs; i++) { + struct vfio_irq_info irq = { .argsz = sizeof(irq) }; + + irq.index = i; + ret = ioctl(vbasedev->fd, VFIO_DEVICE_GET_IRQ_INFO, &irq); + if (ret) { + error_printf("vfio: error getting device %s irq info\n", + vbasedev->name); + goto irq_err; + }
+            trace_vfio_platform_populate_interrupts(irq.index,
+                                                    irq.count,
+                                                    irq.flags);
+            intp = vfio_init_intp(vbasedev, irq);
+            if (!intp) {
+                error_report("vfio: Error setting up IRQ %d", i);
+                goto irq_err;
+            }
+        }
+    }
+    return 0;
+irq_err:
+    timer_del(vdev->mmap_timer);
+    QLIST_FOREACH_SAFE(intp, &vdev->intp_list, next, tmp) {
+        QLIST_REMOVE(intp, next);
+        g_free(intp);
+    }
+reg_error:
+    for (i = 0; i < vbasedev->num_regions; i++) {
+        g_free(vdev->regions[i]);
+    }
+    g_free(vdev->regions);
+    return ret;
+}
+
+/* specialized functions for VFIO Platform devices */
+static VFIODeviceOps vfio_platform_ops = {
+    .vfio_compute_needs_reset = vfio_platform_compute_needs_reset,
+    .vfio_hot_reset_multi = vfio_platform_hot_reset_multi,
+    .vfio_eoi = vfio_platform_eoi,
+};
+
+/**
+ * vfio_base_device_init - perform preliminary VFIO setup
+ * @vbasedev: the VFIO device handle
+ *
+ * Implement the VFIO command sequence that allows discovery of the
+ * assigned device resources: group extraction, device fd retrieval,
+ * resource query.
+ * Precondition: the device name must be initialized
+ */
+static int vfio_base_device_init(VFIODevice *vbasedev)
+{
+    VFIOGroup *group;
+    VFIODevice *vbasedev_iter;
+    char path[PATH_MAX], iommu_group_path[PATH_MAX], *group_name;
+    ssize_t len;
+    struct stat st;
+    int groupid;
+    int ret;
+
+    /* name must be set prior to the call */
+    if (!vbasedev->name || strchr(vbasedev->name, '/')) {
+        return -EINVAL;
+    }
+
+    /* Check that the host device exists */
+    g_snprintf(path, sizeof(path), "/sys/bus/platform/devices/%s/",
+               vbasedev->name);
+
+    if (stat(path, &st) < 0) {
+        error_report("vfio: error: no such host device: %s", path);
+        return -errno;
+    }
+
+    g_strlcat(path, "iommu_group", sizeof(path));
+    len = readlink(path, iommu_group_path, sizeof(iommu_group_path));
+    if (len < 0 || len >= sizeof(iommu_group_path)) {
+        error_report("vfio: error: no iommu_group for device");
+        return len < 0 ? -errno : -ENAMETOOLONG;
+    }
+
+    iommu_group_path[len] = 0;
+    group_name = basename(iommu_group_path);
+
+    if (sscanf(group_name, "%d", &groupid) != 1) {
+        error_report("vfio: error reading %s: %m", path);
+        return -errno;
+    }
+
+    trace_vfio_platform_base_device_init(vbasedev->name, groupid);
+
+    group = vfio_get_group(groupid, &address_space_memory);
+    if (!group) {
+        error_report("vfio: failed to get group %d", groupid);
+        return -ENOENT;
+    }
+
+    g_snprintf(path, sizeof(path), "%s", vbasedev->name);
+
+    QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+        if (strcmp(vbasedev_iter->name, vbasedev->name) == 0) {
+            error_report("vfio: error: device %s is already attached", path);
+            vfio_put_group(group);
+            return -EBUSY;
+        }
+    }
+    ret = vfio_get_device(group, path, vbasedev);
+    if (ret) {
+        error_report("vfio: failed to get device %s", path);
+        vfio_put_group(group);
+        return ret;
+    }
+
+    ret = vfio_populate_device(vbasedev);
+    if (ret) {
+        error_report("vfio: failed to populate device %s", path);
+        vfio_put_group(group);
+    }
+
+    return ret;
+}
+
+/**
+ * vfio_map_region - initialize the 2 memory regions for a given
+ * MMIO region index
+ * @vdev: the VFIO platform device handle
+ * @nr: the index of the region
+ *
+ * Init the top memory region and the mmapped memory region beneath it.
+ * VFIOPlatformDevice is used since VFIODevice is not a QOM object
+ * and cannot be passed to memory region functions.
+ */
+static void vfio_map_region(VFIOPlatformDevice *vdev, int nr)
+{
+    VFIORegion *region = vdev->regions[nr];
+    uint64_t size = region->size;
+    char name[64];
+
+    if (!size) {
+        return;
+    }
+
+    g_snprintf(name, sizeof(name), "VFIO %s region %d",
+               vdev->vbasedev.name, nr);
+
+    /* A "slow" read/write mapping underlies all regions */
+    memory_region_init_io(&region->mem, OBJECT(vdev), &vfio_region_ops,
+                          region, name, size);
+
+    g_strlcat(name, " mmap", sizeof(name));
+
+    if (vfio_mmap_region(OBJECT(vdev), region, &region->mem,
+                         &region->mmap_mem, &region->mmap, size, 0, name)) {
+        error_report("%s unsupported. Performance may be slow", name);
+    }
+}
+
+/**
+ * vfio_platform_realize - the device realize function
+ * @dev: device state pointer
+ * @errp: error object
+ *
+ * Initialize the device, its memory regions and IRQ structures.
+ * IRQs are started separately.
+ */
+static void vfio_platform_realize(DeviceState *dev, Error **errp)
+{
+    VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(dev);
+    SysBusDevice *sbdev = SYS_BUS_DEVICE(dev);
+    VFIODevice *vbasedev = &vdev->vbasedev;
+    VFIOINTp *intp;
+    int i, ret;
+
+    vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
+    vbasedev->ops = &vfio_platform_ops;
+
+    trace_vfio_platform_realize(vbasedev->name, vdev->compat);
+
+    ret = vfio_base_device_init(vbasedev);
+    if (ret) {
+        error_setg(errp, "vfio: vfio_base_device_init failed for %s",
+                   vbasedev->name);
+        return;
+    }
+
+    for (i = 0; i < vbasedev->num_regions; i++) {
+        vfio_map_region(vdev, i);
+        sysbus_init_mmio(sbdev, &vdev->regions[i]->mem);
+    }
+
+    QLIST_FOREACH(intp, &vdev->intp_list, next) {
+        vfio_start_eventfd_injection(intp);
+    }
+}
+
+static const VMStateDescription vfio_platform_vmstate = {
+    .name = TYPE_VFIO_PLATFORM,
+    .unmigratable = 1,
+};
+
+static Property vfio_platform_dev_properties[] = {
+    DEFINE_PROP_STRING("host", VFIOPlatformDevice, vbasedev.name),
+    DEFINE_PROP_BOOL("x-mmap", VFIOPlatformDevice, vbasedev.allow_mmap, true),
+    DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
+                       mmap_timeout, 1100),
+    DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vfio_platform_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->realize = vfio_platform_realize;
+    dc->props = vfio_platform_dev_properties;
+    dc->vmsd = &vfio_platform_vmstate;
+    dc->desc = "VFIO-based platform device assignment";
+    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+}
+
+static const TypeInfo vfio_platform_dev_info = {
+    .name = TYPE_VFIO_PLATFORM,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(VFIOPlatformDevice),
+    .class_init = vfio_platform_class_init,
+    .class_size = sizeof(VFIOPlatformDeviceClass),
+    .abstract = true,
+};
+
+static void register_vfio_platform_dev_type(void)
+{
+    type_register_static(&vfio_platform_dev_info);
+}
+
+type_init(register_vfio_platform_dev_type)
diff --git a/include/hw/nvram/fw_cfg.h b/include/hw/nvram/fw_cfg.h
index 6d8a8ac564..e60d3ca212 100644
--- a/include/hw/nvram/fw_cfg.h
+++ b/include/hw/nvram/fw_cfg.h
@@ -67,10 +67,9 @@ typedef void (*FWCfgReadCallback)(void *opaque, uint32_t offset);
 void fw_cfg_add_bytes(FWCfgState *s, uint16_t key, void *data, size_t len);
 void fw_cfg_add_string(FWCfgState *s, uint16_t key, const char *value);
 void fw_cfg_add_i16(FWCfgState *s, uint16_t key, uint16_t value);
+void fw_cfg_modify_i16(FWCfgState *s, uint16_t key, uint16_t value);
 void fw_cfg_add_i32(FWCfgState *s, uint16_t key, uint32_t value);
 void fw_cfg_add_i64(FWCfgState *s, uint16_t key, uint64_t value);
-void fw_cfg_add_callback(FWCfgState *s, uint16_t key, FWCfgCallback callback,
-                         void *callback_opaque, void *data, size_t len);
 void fw_cfg_add_file(FWCfgState *s, const char *filename, void *data,
                      size_t len);
 void fw_cfg_add_file_callback(FWCfgState *s, const char *filename,
diff --git a/include/hw/vfio/vfio-calxeda-xgmac.h b/include/hw/vfio/vfio-calxeda-xgmac.h
new file mode 100644
index 0000000000..f994775c09
--- /dev/null
+++ b/include/hw/vfio/vfio-calxeda-xgmac.h
@@ -0,0 +1,46 @@
+/*
+ * VFIO calxeda xgmac device
+ *
+ * Copyright Linaro Limited, 2014
+ *
+ * Authors:
+ *  Eric Auger <eric.auger@linaro.org>
+ *
+ * This work is licensed
under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + */ + +#ifndef HW_VFIO_VFIO_CALXEDA_XGMAC_H +#define HW_VFIO_VFIO_CALXEDA_XGMAC_H + +#include "hw/vfio/vfio-platform.h" + +#define TYPE_VFIO_CALXEDA_XGMAC "vfio-calxeda-xgmac" + +/** + * This device exposes: + * - a single MMIO region corresponding to its register space + * - 3 IRQS (main and 2 power related IRQs) + */ +typedef struct VFIOCalxedaXgmacDevice { + VFIOPlatformDevice vdev; +} VFIOCalxedaXgmacDevice; + +typedef struct VFIOCalxedaXgmacDeviceClass { + /*< private >*/ + VFIOPlatformDeviceClass parent_class; + /*< public >*/ + DeviceRealize parent_realize; +} VFIOCalxedaXgmacDeviceClass; + +#define VFIO_CALXEDA_XGMAC_DEVICE(obj) \ + OBJECT_CHECK(VFIOCalxedaXgmacDevice, (obj), TYPE_VFIO_CALXEDA_XGMAC) +#define VFIO_CALXEDA_XGMAC_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(VFIOCalxedaXgmacDeviceClass, (klass), \ + TYPE_VFIO_CALXEDA_XGMAC) +#define VFIO_CALXEDA_XGMAC_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(VFIOCalxedaXgmacDeviceClass, (obj), \ + TYPE_VFIO_CALXEDA_XGMAC) + +#endif diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 0d1fb805bb..59a321d479 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -42,6 +42,7 @@ enum { VFIO_DEVICE_TYPE_PCI = 0, + VFIO_DEVICE_TYPE_PLATFORM = 1, }; typedef struct VFIORegion { diff --git a/include/hw/vfio/vfio-platform.h b/include/hw/vfio/vfio-platform.h new file mode 100644 index 0000000000..26b2ad6f4e --- /dev/null +++ b/include/hw/vfio/vfio-platform.h @@ -0,0 +1,75 @@ +/* + * vfio based device assignment support - platform devices + * + * Copyright Linaro Limited, 2014 + * + * Authors: + * Kim Phillips <kim.phillips@linaro.org> + * + * This work is licensed under the terms of the GNU GPL, version 2. See + * the COPYING file in the top-level directory. + * + * Based on vfio based PCI device assignment support: + * Copyright Red Hat, Inc. 
2012 + */ + +#ifndef HW_VFIO_VFIO_PLATFORM_H +#define HW_VFIO_VFIO_PLATFORM_H + +#include "hw/sysbus.h" +#include "hw/vfio/vfio-common.h" +#include "qemu/event_notifier.h" +#include "qemu/queue.h" +#include "hw/irq.h" + +#define TYPE_VFIO_PLATFORM "vfio-platform" + +enum { + VFIO_IRQ_INACTIVE = 0, + VFIO_IRQ_PENDING = 1, + VFIO_IRQ_ACTIVE = 2, + /* VFIO_IRQ_ACTIVE_AND_PENDING cannot happen with VFIO */ +}; + +typedef struct VFIOINTp { + QLIST_ENTRY(VFIOINTp) next; /* entry for IRQ list */ + QSIMPLEQ_ENTRY(VFIOINTp) pqnext; /* entry for pending IRQ queue */ + EventNotifier interrupt; /* eventfd triggered on interrupt */ + EventNotifier unmask; /* eventfd for unmask on QEMU bypass */ + qemu_irq qemuirq; + struct VFIOPlatformDevice *vdev; /* back pointer to device */ + int state; /* inactive, pending, active */ + uint8_t pin; /* index */ + uint32_t flags; /* IRQ info flags */ +} VFIOINTp; + +/* function type for user side eventfd handler */ +typedef void (*eventfd_user_side_handler_t)(VFIOINTp *intp); + +typedef struct VFIOPlatformDevice { + SysBusDevice sbdev; + VFIODevice vbasedev; /* not a QOM object */ + VFIORegion **regions; + QLIST_HEAD(, VFIOINTp) intp_list; /* list of IRQs */ + /* queue of pending IRQs */ + QSIMPLEQ_HEAD(pending_intp_queue, VFIOINTp) pending_intp_queue; + char *compat; /* compatibility string */ + uint32_t mmap_timeout; /* delay to re-enable mmaps after interrupt */ + QEMUTimer *mmap_timer; /* allows fast-path resume after IRQ hit */ + QemuMutex intp_mutex; /* protect the intp_list IRQ state */ +} VFIOPlatformDevice; + +typedef struct VFIOPlatformDeviceClass { + /*< private >*/ + SysBusDeviceClass parent_class; + /*< public >*/ +} VFIOPlatformDeviceClass; + +#define VFIO_PLATFORM_DEVICE(obj) \ + OBJECT_CHECK(VFIOPlatformDevice, (obj), TYPE_VFIO_PLATFORM) +#define VFIO_PLATFORM_DEVICE_CLASS(klass) \ + OBJECT_CLASS_CHECK(VFIOPlatformDeviceClass, (klass), TYPE_VFIO_PLATFORM) +#define VFIO_PLATFORM_DEVICE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(VFIOPlatformDeviceClass, (obj), TYPE_VFIO_PLATFORM) + +#endif /*HW_VFIO_VFIO_PLATFORM_H*/ diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h new file mode 100644 index 0000000000..b8c9244b21 --- /dev/null +++ b/include/hw/virtio/virtio-gpu.h @@ -0,0 +1,145 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * This work is licensed under the terms of the GNU GPL, version 2. + * See the COPYING file in the top-level directory. 
+ */ + +#ifndef _QEMU_VIRTIO_VGA_H +#define _QEMU_VIRTIO_VGA_H + +#include "qemu/queue.h" +#include "ui/qemu-pixman.h" +#include "ui/console.h" +#include "hw/virtio/virtio.h" +#include "hw/pci/pci.h" + +#include "standard-headers/linux/virtio_gpu.h" +#define TYPE_VIRTIO_GPU "virtio-gpu-device" +#define VIRTIO_GPU(obj) \ + OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU) + +#define VIRTIO_ID_GPU 16 + +#define VIRTIO_GPU_MAX_SCANOUT 4 + +struct virtio_gpu_simple_resource { + uint32_t resource_id; + uint32_t width; + uint32_t height; + uint32_t format; + struct iovec *iov; + unsigned int iov_cnt; + uint32_t scanout_bitmask; + pixman_image_t *image; + QTAILQ_ENTRY(virtio_gpu_simple_resource) next; +}; + +struct virtio_gpu_scanout { + QemuConsole *con; + DisplaySurface *ds; + uint32_t width, height; + int x, y; + int invalidate; + uint32_t resource_id; + QEMUCursor *current_cursor; +}; + +struct virtio_gpu_requested_state { + uint32_t width, height; + int x, y; +}; + +struct virtio_gpu_conf { + uint32_t max_outputs; +}; + +struct virtio_gpu_ctrl_command { + VirtQueueElement elem; + VirtQueue *vq; + struct virtio_gpu_ctrl_hdr cmd_hdr; + uint32_t error; + bool finished; + QTAILQ_ENTRY(virtio_gpu_ctrl_command) next; +}; + +typedef struct VirtIOGPU { + VirtIODevice parent_obj; + + QEMUBH *ctrl_bh; + QEMUBH *cursor_bh; + VirtQueue *ctrl_vq; + VirtQueue *cursor_vq; + + int enable; + + int config_size; + DeviceState *qdev; + + QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist; + QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq; + + struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUT]; + struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUT]; + + struct virtio_gpu_conf conf; + int enabled_output_bitmask; + struct virtio_gpu_config virtio_config; + + QEMUTimer *fence_poll; + QEMUTimer *print_stats; + + struct { + uint32_t inflight; + uint32_t max_inflight; + uint32_t requests; + uint32_t req_3d; + uint32_t bytes_3d; + } stats; +} VirtIOGPU; + +extern const GraphicHwOps virtio_gpu_ops; + +/* to share between PCI and VGA */ +#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \ + DEFINE_PROP_BIT("ioeventfd", _state, flags, \ + VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \ + DEFINE_PROP_UINT32("vectors", _state, nvectors, 3) + +#define DEFINE_VIRTIO_GPU_PROPERTIES(_state, _conf_field) \ + DEFINE_PROP_UINT32("max_outputs", _state, _conf_field.max_outputs, 1) + +#define VIRTIO_GPU_FILL_CMD(out) do { \ + size_t s; \ + s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \ + &out, sizeof(out)); \ + if (s != sizeof(out)) { \ + qemu_log_mask(LOG_GUEST_ERROR, \ + "%s: command size incorrect %zu vs %zu\n", \ + __func__, s, sizeof(out)); \ + return; \ + } \ + } while (0) + +/* virtio-gpu.c */ +void virtio_gpu_ctrl_response(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + struct virtio_gpu_ctrl_hdr *resp, + size_t resp_len); +void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd, + enum virtio_gpu_ctrl_type type); +void virtio_gpu_get_display_info(VirtIOGPU *g, + struct virtio_gpu_ctrl_command *cmd); +int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab, + struct virtio_gpu_ctrl_command *cmd, + struct iovec **iov); +void virtio_gpu_cleanup_mapping_iov(struct iovec *iov, uint32_t count); + +#endif diff --git a/include/qemu/option.h b/include/qemu/option.h index f88b545dfc..ac0e43b7e5 100644 --- a/include/qemu/option.h +++ b/include/qemu/option.h @@ -100,9 +100,11 @@ void qemu_opt_set_bool(QemuOpts *opts, const char *name, bool val, Error 
**errp); void qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val, Error **errp); -typedef int (*qemu_opt_loopfunc)(const char *name, const char *value, void *opaque); +typedef int (*qemu_opt_loopfunc)(void *opaque, + const char *name, const char *value, + Error **errp); int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque, - int abort_on_failure); + Error **errp); QemuOpts *qemu_opts_find(QemuOptsList *list, const char *id); QemuOpts *qemu_opts_create(QemuOptsList *list, const char *id, @@ -125,10 +127,10 @@ QemuOpts *qemu_opts_from_qdict(QemuOptsList *list, const QDict *qdict, QDict *qemu_opts_to_qdict(QemuOpts *opts, QDict *qdict); void qemu_opts_absorb_qdict(QemuOpts *opts, QDict *qdict, Error **errp); -typedef int (*qemu_opts_loopfunc)(QemuOpts *opts, void *opaque); +typedef int (*qemu_opts_loopfunc)(void *opaque, QemuOpts *opts, Error **errp); +int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, + void *opaque, Error **errp); void qemu_opts_print(QemuOpts *opts, const char *sep); -int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, - int abort_on_failure); void qemu_opts_print_help(QemuOptsList *list); void qemu_opts_free(QemuOptsList *list); QemuOptsList *qemu_opts_append(QemuOptsList *dst, QemuOptsList *list); diff --git a/include/standard-headers/linux/virtio_gpu.h b/include/standard-headers/linux/virtio_gpu.h new file mode 100644 index 0000000000..cfcfb463fc --- /dev/null +++ b/include/standard-headers/linux/virtio_gpu.h @@ -0,0 +1,204 @@ +/* + * Virtio GPU Device + * + * Copyright Red Hat, Inc. 2013-2014 + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * This header is BSD licensed so anyone can use the definitions + * to implement compatible drivers/servers: + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of IBM nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL IBM OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF + * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT + * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ */ + +#ifndef VIRTIO_GPU_HW_H +#define VIRTIO_GPU_HW_H + +enum virtio_gpu_ctrl_type { + VIRTIO_GPU_UNDEFINED = 0, + + /* 2d commands */ + VIRTIO_GPU_CMD_GET_DISPLAY_INFO = 0x0100, + VIRTIO_GPU_CMD_RESOURCE_CREATE_2D, + VIRTIO_GPU_CMD_RESOURCE_UNREF, + VIRTIO_GPU_CMD_SET_SCANOUT, + VIRTIO_GPU_CMD_RESOURCE_FLUSH, + VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D, + VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING, + VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING, + + /* cursor commands */ + VIRTIO_GPU_CMD_UPDATE_CURSOR = 0x0300, + VIRTIO_GPU_CMD_MOVE_CURSOR, + + /* success responses */ + VIRTIO_GPU_RESP_OK_NODATA = 0x1100, + VIRTIO_GPU_RESP_OK_DISPLAY_INFO, + + /* error responses */ + VIRTIO_GPU_RESP_ERR_UNSPEC = 0x1200, + VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY, + VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID, + VIRTIO_GPU_RESP_ERR_INVALID_CONTEXT_ID, + VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER, +}; + +#define VIRTIO_GPU_FLAG_FENCE (1 << 0) + +struct virtio_gpu_ctrl_hdr { + uint32_t type; + uint32_t flags; + uint64_t fence_id; + uint32_t ctx_id; + uint32_t padding; +}; + +/* data passed in the cursor vq */ + +struct virtio_gpu_cursor_pos { + uint32_t scanout_id; + uint32_t x; + uint32_t y; + uint32_t padding; +}; + +/* VIRTIO_GPU_CMD_UPDATE_CURSOR, VIRTIO_GPU_CMD_MOVE_CURSOR */ +struct virtio_gpu_update_cursor { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_cursor_pos pos; /* update & move */ + uint32_t resource_id; /* update only */ + uint32_t hot_x; /* update only */ + uint32_t hot_y; /* update only */ + uint32_t padding; +}; + +/* data passed in the control vq, 2d related */ + +struct virtio_gpu_rect { + uint32_t x; + uint32_t y; + uint32_t width; + uint32_t height; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_UNREF */ +struct virtio_gpu_resource_unref { + struct virtio_gpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t padding; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_CREATE_2D: create a 2d resource with a format */ +struct virtio_gpu_resource_create_2d { + struct virtio_gpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t format; + uint32_t width; + uint32_t height; +}; + +/* VIRTIO_GPU_CMD_SET_SCANOUT */ +struct virtio_gpu_set_scanout { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + uint32_t scanout_id; + uint32_t resource_id; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_FLUSH */ +struct virtio_gpu_resource_flush { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + uint32_t resource_id; + uint32_t padding; +}; + +/* VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D: simple transfer to_host */ +struct virtio_gpu_transfer_to_host_2d { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_rect r; + uint64_t offset; + uint32_t resource_id; + uint32_t padding; +}; + +struct virtio_gpu_mem_entry { + uint64_t addr; + uint32_t length; + uint32_t padding; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING */ +struct virtio_gpu_resource_attach_backing { + struct virtio_gpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t nr_entries; +}; + +/* VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING */ +struct virtio_gpu_resource_detach_backing { + struct virtio_gpu_ctrl_hdr hdr; + uint32_t resource_id; + uint32_t padding; +}; + +/* VIRTIO_GPU_RESP_OK_DISPLAY_INFO */ +#define VIRTIO_GPU_MAX_SCANOUTS 16 +struct virtio_gpu_resp_display_info { + struct virtio_gpu_ctrl_hdr hdr; + struct virtio_gpu_display_one { + struct virtio_gpu_rect r; + uint32_t enabled; + uint32_t flags; + } pmodes[VIRTIO_GPU_MAX_SCANOUTS]; +}; + +#define VIRTIO_GPU_EVENT_DISPLAY (1 << 0) + +struct virtio_gpu_config { + uint32_t events_read; + uint32_t 
events_clear; + uint32_t num_scanouts; + uint32_t reserved; +}; + +/* simple formats for fbcon/X use */ +enum virtio_gpu_formats { + VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM = 1, + VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM = 2, + VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM = 3, + VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM = 4, + + VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM = 67, + VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM = 68, + + VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM = 121, + VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM = 134, +}; + +#endif diff --git a/include/standard-headers/linux/virtio_ids.h b/include/standard-headers/linux/virtio_ids.h index 5f60aa4be5..77925f587b 100644 --- a/include/standard-headers/linux/virtio_ids.h +++ b/include/standard-headers/linux/virtio_ids.h @@ -39,6 +39,7 @@ #define VIRTIO_ID_9P 9 /* 9p virtio console */ #define VIRTIO_ID_RPROC_SERIAL 11 /* virtio remoteproc serial link */ #define VIRTIO_ID_CAIF 12 /* Virtio caif */ +#define VIRTIO_ID_GPU 16 /* virtio GPU */ #define VIRTIO_ID_INPUT 18 /* virtio input */ #endif /* _LINUX_VIRTIO_IDS_H */ diff --git a/include/ui/console.h b/include/ui/console.h index 06e47399f1..de92523bbb 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -370,7 +370,7 @@ char *vnc_display_local_addr(const char *id); int vnc_display_password(const char *id, const char *password); int vnc_display_pw_expire(const char *id, time_t expires); QemuOpts *vnc_parse_func(const char *str); -int vnc_init_func(QemuOpts *opts, void *opaque); +int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp); #else static inline int vnc_display_password(const char *id, const char *password) { @@ -1329,7 +1329,7 @@ void net_check_clients(void) } } -static int net_init_client(QemuOpts *opts, void *dummy) +static int net_init_client(void *dummy, QemuOpts *opts, Error **errp) { Error *local_err = NULL; @@ -1342,7 +1342,7 @@ static int net_init_client(QemuOpts *opts, void *dummy) return 0; } -static int net_init_netdev(QemuOpts *opts, void *dummy) +static int net_init_netdev(void *dummy, QemuOpts *opts, Error **errp) { Error *local_err = NULL; int ret; @@ -1373,10 +1373,12 @@ int net_init_clients(void) QTAILQ_INIT(&net_clients); - if (qemu_opts_foreach(qemu_find_opts("netdev"), net_init_netdev, NULL, 1) == -1) + if (qemu_opts_foreach(qemu_find_opts("netdev"), + net_init_netdev, NULL, NULL)) { return -1; + } - if (qemu_opts_foreach(net, net_init_client, NULL, 1) == -1) { + if (qemu_opts_foreach(net, net_init_client, NULL, NULL)) { return -1; } diff --git a/net/vhost-user.c b/net/vhost-user.c index 8d2672846f..3930741fb6 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -157,8 +157,9 @@ static int net_vhost_user_init(NetClientState *peer, const char *device, return 0; } -static int net_vhost_chardev_opts(const char *name, const char *value, - void *opaque) +static int net_vhost_chardev_opts(void *opaque, + const char *name, const char *value, + Error **errp) { VhostUserChardevProps *props = opaque; @@ -169,33 +170,34 @@ static int net_vhost_chardev_opts(const char *name, const char *value, } else if (strcmp(name, "server") == 0) { props->is_server = true; } else { - error_report("vhost-user does not support a chardev" - " with the following option:\n %s = %s", - name, value); + error_setg(errp, + "vhost-user does not support a chardev with option %s=%s", + name, value); return -1; } return 0; } -static CharDriverState *net_vhost_parse_chardev(const NetdevVhostUserOptions *opts) +static CharDriverState *net_vhost_parse_chardev( + const NetdevVhostUserOptions *opts, Error **errp) { CharDriverState *chr = 
qemu_chr_find(opts->chardev); VhostUserChardevProps props; if (chr == NULL) { - error_report("chardev \"%s\" not found", opts->chardev); + error_setg(errp, "chardev \"%s\" not found", opts->chardev); return NULL; } /* inspect chardev opts */ memset(&props, 0, sizeof(props)); - if (qemu_opt_foreach(chr->opts, net_vhost_chardev_opts, &props, true) != 0) { + if (qemu_opt_foreach(chr->opts, net_vhost_chardev_opts, &props, errp)) { return NULL; } if (!props.is_socket || !props.is_unix) { - error_report("chardev \"%s\" is not a unix socket", - opts->chardev); + error_setg(errp, "chardev \"%s\" is not a unix socket", + opts->chardev); return NULL; } @@ -204,7 +206,7 @@ static CharDriverState *net_vhost_parse_chardev(const NetdevVhostUserOptions *op return chr; } -static int net_vhost_check_net(QemuOpts *opts, void *opaque) +static int net_vhost_check_net(void *opaque, QemuOpts *opts, Error **errp) { const char *name = opaque; const char *driver, *netdev; @@ -219,7 +221,7 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque) if (strcmp(netdev, name) == 0 && strncmp(driver, virtio_name, strlen(virtio_name)) != 0) { - error_report("vhost-user requires frontend driver virtio-net-*"); + error_setg(errp, "vhost-user requires frontend driver virtio-net-*"); return -1; } @@ -229,7 +231,6 @@ static int net_vhost_check_net(QemuOpts *opts, void *opaque) int net_init_vhost_user(const NetClientOptions *opts, const char *name, NetClientState *peer, Error **errp) { - /* FIXME error_setg(errp, ...) on failure */ uint32_t queues; const NetdevVhostUserOptions *vhost_user_opts; CharDriverState *chr; @@ -237,15 +238,14 @@ int net_init_vhost_user(const NetClientOptions *opts, const char *name, assert(opts->kind == NET_CLIENT_OPTIONS_KIND_VHOST_USER); vhost_user_opts = opts->vhost_user; - chr = net_vhost_parse_chardev(vhost_user_opts); + chr = net_vhost_parse_chardev(vhost_user_opts, errp); if (!chr) { - error_report("No suitable chardev found"); return -1; } /* verify net frontend */ if (qemu_opts_foreach(qemu_find_opts("device"), net_vhost_check_net, - (char *)name, true) == -1) { + (char *)name, errp)) { return -1; } @@ -125,7 +125,7 @@ static void numa_node_parse(NumaNodeOptions *node, QemuOpts *opts, Error **errp) max_numa_nodeid = MAX(max_numa_nodeid, nodenr + 1); } -static int parse_numa(QemuOpts *opts, void *opaque) +static int parse_numa(void *opaque, QemuOpts *opts, Error **errp) { NumaOptions *object = NULL; Error *err = NULL; @@ -216,8 +216,7 @@ void parse_numa_opts(MachineClass *mc) { int i; - if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, - NULL, 1) != 0) { + if (qemu_opts_foreach(qemu_find_opts("numa"), parse_numa, NULL, NULL)) { exit(1); } diff --git a/qdev-monitor.c b/qdev-monitor.c index 9f17c81d9f..7dd62dd094 100644 --- a/qdev-monitor.c +++ b/qdev-monitor.c @@ -143,7 +143,8 @@ static void qdev_print_devinfos(bool show_no_user) g_slist_free(list); } -static int set_property(const char *name, const char *value, void *opaque) +static int set_property(void *opaque, const char *name, const char *value, + Error **errp) { Object *obj = opaque; Error *err = NULL; @@ -564,7 +565,7 @@ DeviceState *qdev_device_add(QemuOpts *opts) } /* set properties */ - if (qemu_opt_foreach(opts, set_property, dev, 1) != 0) { + if (qemu_opt_foreach(opts, set_property, dev, NULL)) { object_unparent(OBJECT(dev)); object_unref(OBJECT(dev)); return NULL; diff --git a/qemu-options.hx b/qemu-options.hx index 4be98f71df..1d281f6818 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -2686,6 +2686,17 @@ STEXI 
@table @option ETEXI +DEF("fw_cfg", HAS_ARG, QEMU_OPTION_fwcfg, + "-fw_cfg [name=]<name>,file=<file>\n" + " add named fw_cfg entry from file\n", + QEMU_ARCH_ALL) +STEXI +@item -fw_cfg [name=]@var{name},file=@var{file} +@findex -fw_cfg +Add named fw_cfg entry from file. @var{name} determines the name of +the entry in the fw_cfg file directory exposed to the guest. +ETEXI + DEF("serial", HAS_ARG, QEMU_OPTION_serial, \ "-serial dev redirect the serial port to char device 'dev'\n", QEMU_ARCH_ALL) diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c index 2740ec4eef..f6f61b9619 100644 --- a/target-s390x/kvm.c +++ b/target-s390x/kvm.c @@ -2175,13 +2175,14 @@ int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu) struct kvm_s390_irq_state irq_state; int r; + if (cpu->irqstate_saved_size == 0) { + return 0; + } + if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) { return -ENOSYS; } - if (cpu->irqstate_saved_size == 0) { - return 0; - } irq_state.buf = (uint64_t) cpu->irqstate; irq_state.len = cpu->irqstate_saved_size; diff --git a/target-s390x/machine.c b/target-s390x/machine.c index e52d76032e..004474959a 100644 --- a/target-s390x/machine.c +++ b/target-s390x/machine.c @@ -70,6 +70,7 @@ const VMStateDescription vmstate_fpu = { static inline bool fpu_needed(void *opaque) { + /* This looks odd, but we might want to NOT transfer fprs in the future */ return true; } diff --git a/tcg/aarch64/tcg-target.c b/tcg/aarch64/tcg-target.c index b3be6f3177..fe44ad709c 100644 --- a/tcg/aarch64/tcg-target.c +++ b/tcg/aarch64/tcg-target.c @@ -1004,7 +1004,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X1, lb->addrlo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, oi); tcg_out_adr(s, TCG_REG_X3, lb->raddr); - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); if (opc & MO_SIGN) { tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); } else { @@ -1027,7 +1027,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_mov(s, size == MO_64, TCG_REG_X2, lb->datalo_reg); tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, oi); tcg_out_adr(s, TCG_REG_X4, lb->raddr); - tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tcg_out_goto(s, lb->raddr); } diff --git a/tcg/arm/tcg-target.c b/tcg/arm/tcg-target.c index 06a8064a9f..ae2ec7a922 100644 --- a/tcg/arm/tcg-target.c +++ b/tcg/arm/tcg-target.c @@ -1260,9 +1260,9 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) icache usage. For pre-armv6, use the signed helpers since we do not have a single insn sign-extend. */ if (use_armv6_instructions) { - func = qemu_ld_helpers[opc & ~MO_SIGN]; + func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]; } else { - func = qemu_ld_helpers[opc]; + func = qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]; if (opc & MO_SIGN) { opc = MO_UL; } @@ -1337,7 +1337,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) argreg = tcg_out_arg_reg32(s, argreg, TCG_REG_R14); /* Tail-call to the helper, which will return to the fast path. 
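The helper index below is masked down to the byteswap and size fields, since a TCGMemOp value can also carry MO_AMASK alignment bits that must not be used to index the helper table.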
*/ - tcg_out_goto(s, COND_AL, qemu_st_helpers[opc]); + tcg_out_goto(s, COND_AL, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); } #endif /* SOFTMMU */ diff --git a/tcg/i386/tcg-target.c b/tcg/i386/tcg-target.c index 2e4bf52aae..ff4d9cfec7 100644 --- a/tcg/i386/tcg-target.c +++ b/tcg/i386/tcg-target.c @@ -1307,7 +1307,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) (uintptr_t)l->raddr); } - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); data_reg = l->datalo_reg; switch (opc & MO_SSIZE) { @@ -1413,7 +1413,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) /* "Tail call" to the helper, with the return address back inline. */ tcg_out_push(s, retaddr); - tcg_out_jmp(s, qemu_st_helpers[opc]); + tcg_out_jmp(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); } #elif defined(__x86_64__) && defined(__linux__) # include <asm/prctl.h> diff --git a/tcg/mips/tcg-target.c b/tcg/mips/tcg-target.c index f64c89c3c0..f643eca3df 100644 --- a/tcg/mips/tcg-target.c +++ b/tcg/mips/tcg-target.c @@ -1031,7 +1031,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) } i = tcg_out_call_iarg_imm(s, i, oi); i = tcg_out_call_iarg_imm(s, i, (intptr_t)l->raddr); - tcg_out_call_int(s, qemu_ld_helpers[opc], false); + tcg_out_call_int(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)], false); /* delay slot */ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); @@ -1094,7 +1094,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) computation to take place in the return address register. */ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (intptr_t)l->raddr); i = tcg_out_call_iarg_reg(s, i, TCG_REG_RA); - tcg_out_call_int(s, qemu_st_helpers[opc], true); + tcg_out_call_int(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)], true); /* delay slot */ tcg_out_mov(s, TCG_TYPE_PTR, tcg_target_call_iarg_regs[0], TCG_AREG0); } diff --git a/tcg/optimize.c b/tcg/optimize.c index 585f1ed7bb..0f6f7008da 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -193,10 +193,42 @@ static bool temps_are_copies(TCGArg arg1, TCGArg arg2) return false; } +static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args, + TCGArg dst, TCGArg val) +{ + TCGOpcode new_op = op_to_movi(op->opc); + tcg_target_ulong mask; + + op->opc = new_op; + + reset_temp(dst); + temps[dst].state = TCG_TEMP_CONST; + temps[dst].val = val; + mask = val; + if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { + /* High bits of the destination are now garbage. 
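A 32-bit mov leaves the upper half of the 64-bit host register undefined, so those bits are flagged as unknown in the value mask.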
*/ + mask |= ~0xffffffffull; + } + temps[dst].mask = mask; + + args[0] = dst; + args[1] = val; +} + static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args, - TCGOpcode old_op, TCGArg dst, TCGArg src) + TCGArg dst, TCGArg src) { - TCGOpcode new_op = op_to_mov(old_op); + if (temps_are_copies(dst, src)) { + tcg_op_remove(s, op); + return; + } + + if (temps[src].state == TCG_TEMP_CONST) { + tcg_opt_gen_movi(s, op, args, dst, temps[src].val); + return; + } + + TCGOpcode new_op = op_to_mov(op->opc); tcg_target_ulong mask; op->opc = new_op; @@ -228,28 +260,6 @@ static void tcg_opt_gen_mov(TCGContext *s, TCGOp *op, TCGArg *args, args[1] = src; } -static void tcg_opt_gen_movi(TCGContext *s, TCGOp *op, TCGArg *args, - TCGOpcode old_op, TCGArg dst, TCGArg val) -{ - TCGOpcode new_op = op_to_movi(old_op); - tcg_target_ulong mask; - - op->opc = new_op; - - reset_temp(dst); - temps[dst].state = TCG_TEMP_CONST; - temps[dst].val = val; - mask = val; - if (TCG_TARGET_REG_BITS > 32 && new_op == INDEX_op_mov_i32) { - /* High bits of the destination are now garbage. */ - mask |= ~0xffffffffull; - } - temps[dst].mask = mask; - - args[0] = dst; - args[1] = val; -} - static TCGArg do_constant_folding_2(TCGOpcode op, TCGArg x, TCGArg y) { uint64_t l64, h64; @@ -564,7 +574,7 @@ static bool swap_commutative2(TCGArg *p1, TCGArg *p2) } /* Propagate constants and copies, fold constant expressions. */ -static void tcg_constant_folding(TCGContext *s) +void tcg_optimize(TCGContext *s) { int oi, oi_next, nb_temps, nb_globals; @@ -670,7 +680,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(rotr): if (temps[args[1]].state == TCG_TEMP_CONST && temps[args[1]].val == 0) { - tcg_opt_gen_movi(s, op, args, opc, args[0], 0); + tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; @@ -775,7 +785,8 @@ static void tcg_constant_folding(TCGContext *s) if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0) { - goto do_mov3; + tcg_opt_gen_mov(s, op, args, args[0], args[1]); + continue; } break; CASE_OP_32_64(and): @@ -784,16 +795,10 @@ static void tcg_constant_folding(TCGContext *s) if (temps[args[1]].state != TCG_TEMP_CONST && temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == -1) { - goto do_mov3; + tcg_opt_gen_mov(s, op, args, args[0], args[1]); + continue; } break; - do_mov3: - if (temps_are_copies(args[0], args[1])) { - tcg_op_remove(s, op); - } else { - tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]); - } - continue; default: break; } @@ -942,19 +947,12 @@ static void tcg_constant_folding(TCGContext *s) if (partmask == 0) { assert(nb_oargs == 1); - tcg_opt_gen_movi(s, op, args, opc, args[0], 0); + tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } if (affected == 0) { assert(nb_oargs == 1); - if (temps_are_copies(args[0], args[1])) { - tcg_op_remove(s, op); - } else if (temps[args[1]].state != TCG_TEMP_CONST) { - tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]); - } else { - tcg_opt_gen_movi(s, op, args, opc, - args[0], temps[args[1]].val); - } + tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } @@ -966,7 +964,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(mulsh): if ((temps[args[2]].state == TCG_TEMP_CONST && temps[args[2]].val == 0)) { - tcg_opt_gen_movi(s, op, args, opc, args[0], 0); + tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; @@ -979,14 +977,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(or): CASE_OP_32_64(and): if (temps_are_copies(args[1], 
args[2])) { - if (temps_are_copies(args[0], args[1])) { - tcg_op_remove(s, op); - } else if (temps[args[1]].state != TCG_TEMP_CONST) { - tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]); - } else { - tcg_opt_gen_movi(s, op, args, opc, - args[0], temps[args[1]].val); - } + tcg_opt_gen_mov(s, op, args, args[0], args[1]); continue; } break; @@ -1000,7 +991,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(sub): CASE_OP_32_64(xor): if (temps_are_copies(args[1], args[2])) { - tcg_opt_gen_movi(s, op, args, opc, args[0], 0); + tcg_opt_gen_movi(s, op, args, args[0], 0); continue; } break; @@ -1013,20 +1004,10 @@ static void tcg_constant_folding(TCGContext *s) allocator where needed and possible. Also detect copies. */ switch (opc) { CASE_OP_32_64(mov): - if (temps_are_copies(args[0], args[1])) { - tcg_op_remove(s, op); - break; - } - if (temps[args[1]].state != TCG_TEMP_CONST) { - tcg_opt_gen_mov(s, op, args, opc, args[0], args[1]); - break; - } - /* Source argument is constant. Rewrite the operation and - let movi case handle it. */ - args[1] = temps[args[1]].val; - /* fallthrough */ + tcg_opt_gen_mov(s, op, args, args[0], args[1]); + break; CASE_OP_32_64(movi): - tcg_opt_gen_movi(s, op, args, opc, args[0], args[1]); + tcg_opt_gen_movi(s, op, args, args[0], args[1]); break; CASE_OP_32_64(not): @@ -1039,7 +1020,7 @@ static void tcg_constant_folding(TCGContext *s) case INDEX_op_ext32u_i64: if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, 0); - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; @@ -1047,7 +1028,7 @@ static void tcg_constant_folding(TCGContext *s) case INDEX_op_trunc_shr_i32: if (temps[args[1]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, args[2]); - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; @@ -1078,7 +1059,7 @@ static void tcg_constant_folding(TCGContext *s) && temps[args[2]].state == TCG_TEMP_CONST) { tmp = do_constant_folding(opc, temps[args[1]].val, temps[args[2]].val); - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; @@ -1088,7 +1069,7 @@ static void tcg_constant_folding(TCGContext *s) && temps[args[2]].state == TCG_TEMP_CONST) { tmp = deposit64(temps[args[1]].val, args[3], args[4], temps[args[2]].val); - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; @@ -1096,7 +1077,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(setcond): tmp = do_constant_folding_cond(opc, args[1], args[2], args[3]); if (tmp != 2) { - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); break; } goto do_default; @@ -1118,14 +1099,7 @@ static void tcg_constant_folding(TCGContext *s) CASE_OP_32_64(movcond): tmp = do_constant_folding_cond(opc, args[1], args[2], args[5]); if (tmp != 2) { - if (temps_are_copies(args[0], args[4-tmp])) { - tcg_op_remove(s, op); - } else if (temps[args[4-tmp]].state == TCG_TEMP_CONST) { - tcg_opt_gen_movi(s, op, args, opc, - args[0], temps[args[4-tmp]].val); - } else { - tcg_opt_gen_mov(s, op, args, opc, args[0], args[4-tmp]); - } + tcg_opt_gen_mov(s, op, args, args[0], args[4-tmp]); break; } goto do_default; @@ -1154,8 +1128,8 @@ static void tcg_constant_folding(TCGContext *s) rl = args[0]; rh = args[1]; - 
tcg_opt_gen_movi(s, op, args, opc, rl, (uint32_t)a); - tcg_opt_gen_movi(s, op2, args2, opc, rh, (uint32_t)(a >> 32)); + tcg_opt_gen_movi(s, op, args, rl, (uint32_t)a); + tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(a >> 32)); /* We've done all we need to do with the movi. Skip it. */ oi_next = op2->next; @@ -1175,8 +1149,8 @@ static void tcg_constant_folding(TCGContext *s) rl = args[0]; rh = args[1]; - tcg_opt_gen_movi(s, op, args, opc, rl, (uint32_t)r); - tcg_opt_gen_movi(s, op2, args2, opc, rh, (uint32_t)(r >> 32)); + tcg_opt_gen_movi(s, op, args, rl, (uint32_t)r); + tcg_opt_gen_movi(s, op2, args2, rh, (uint32_t)(r >> 32)); /* We've done all we need to do with the movi. Skip it. */ oi_next = op2->next; @@ -1260,7 +1234,7 @@ static void tcg_constant_folding(TCGContext *s) tmp = do_constant_folding_cond2(&args[1], &args[3], args[5]); if (tmp != 2) { do_setcond_const: - tcg_opt_gen_movi(s, op, args, opc, args[0], tmp); + tcg_opt_gen_movi(s, op, args, args[0], tmp); } else if ((args[5] == TCG_COND_LT || args[5] == TCG_COND_GE) && temps[args[3]].state == TCG_TEMP_CONST && temps[args[4]].state == TCG_TEMP_CONST @@ -1354,8 +1328,3 @@ static void tcg_constant_folding(TCGContext *s) } } } - -void tcg_optimize(TCGContext *s) -{ - tcg_constant_folding(s); -} diff --git a/tcg/ppc/tcg-target.c b/tcg/ppc/tcg-target.c index d49c7d925f..2b6eafa03c 100644 --- a/tcg/ppc/tcg-target.c +++ b/tcg/ppc/tcg-target.c @@ -1495,7 +1495,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); - tcg_out_call(s, qemu_ld_helpers[opc & ~MO_SIGN]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); lo = lb->datalo_reg; hi = lb->datahi_reg; @@ -1565,7 +1565,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) tcg_out_movi(s, TCG_TYPE_I32, arg++, oi); tcg_out32(s, MFSPR | RT(arg) | LR); - tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tcg_out_b(s, 0, lb->raddr); } @@ -1624,7 +1624,7 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64) tcg_out32(s, LWZ | TAI(datalo, addrlo, 4)); } } else { - uint32_t insn = qemu_ldx_opc[opc]; + uint32_t insn = qemu_ldx_opc[opc & (MO_BSWAP | MO_SSIZE)]; if (!HAVE_ISA_2_06 && insn == LDBRX) { tcg_out32(s, ADDI | TAI(TCG_REG_R0, addrlo, 4)); tcg_out32(s, LWBRX | TAB(datalo, rbase, addrlo)); @@ -1696,7 +1696,7 @@ static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64) tcg_out32(s, STW | TAI(datalo, addrlo, 4)); } } else { - uint32_t insn = qemu_stx_opc[opc]; + uint32_t insn = qemu_stx_opc[opc & (MO_BSWAP | MO_SIZE)]; if (!HAVE_ISA_2_06 && insn == STDBRX) { tcg_out32(s, STWBRX | SAB(datalo, rbase, addrlo)); tcg_out32(s, ADDI | TAI(TCG_REG_TMP1, addrlo, 4)); diff --git a/tcg/s390/tcg-target.c b/tcg/s390/tcg-target.c index 46dedc9f82..669fafe24f 100644 --- a/tcg/s390/tcg-target.c +++ b/tcg/s390/tcg-target.c @@ -1573,7 +1573,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R4, oi); tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R5, (uintptr_t)lb->raddr); - tcg_out_call(s, qemu_ld_helpers[opc]); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_mov(s, TCG_TYPE_I64, data_reg, TCG_REG_R2); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); @@ -1610,7 +1610,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) } tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R5, oi); 
tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R6, (uintptr_t)lb->raddr); - tcg_out_call(s, qemu_st_helpers[opc]); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); tgen_gotoi(s, S390_CC_ALWAYS, lb->raddr); } diff --git a/tcg/sparc/tcg-target.c b/tcg/sparc/tcg-target.c index c1794a33ed..1a870a81d7 100644 --- a/tcg/sparc/tcg-target.c +++ b/tcg/sparc/tcg-target.c @@ -1075,12 +1075,11 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); - TCGMemOp s_bits = memop & MO_SIZE; TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; - addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE, offsetof(CPUTLBEntry, addr_read)); /* The fast path is exactly one insn. Thus we can perform the @@ -1092,7 +1091,8 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? BPCC_XCC : BPCC_ICC), 0); /* delay slot */ - tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_ld_opc[memop]); + tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); /* TLB Miss. */ @@ -1105,10 +1105,10 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, /* We use the helpers to extend SB and SW data, leaving the case of SL needing explicit extending below. */ - if ((memop & ~MO_BSWAP) == MO_SL) { - func = qemu_ld_trampoline[memop & ~MO_SIGN]; + if ((memop & MO_SSIZE) == MO_SL) { + func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SIZE)]; } else { - func = qemu_ld_trampoline[memop]; + func = qemu_ld_trampoline[memop & (MO_BSWAP | MO_SSIZE)]; } assert(func != NULL); tcg_out_call_nodelay(s, func); @@ -1119,13 +1119,13 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, Which complicates things for sparcv8plus. */ if (SPARC64) { /* We let the helper sign-extend SB and SW, but leave SL for here. */ - if (is_64 && (memop & ~MO_BSWAP) == MO_SL) { + if (is_64 && (memop & MO_SSIZE) == MO_SL) { tcg_out_arithi(s, data, TCG_REG_O0, 0, SHIFT_SRA); } else { tcg_out_mov(s, TCG_TYPE_REG, data, TCG_REG_O0); } } else { - if (s_bits == MO_64) { + if ((memop & MO_SIZE) == MO_64) { tcg_out_arithi(s, TCG_REG_O0, TCG_REG_O0, 32, SHIFT_SLLX); tcg_out_arithi(s, TCG_REG_O1, TCG_REG_O1, 0, SHIFT_SRL); tcg_out_arith(s, data, TCG_REG_O0, TCG_REG_O1, ARITH_OR); @@ -1147,7 +1147,7 @@ static void tcg_out_qemu_ld(TCGContext *s, TCGReg data, TCGReg addr, } tcg_out_ldst_rr(s, data, addr, (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0), - qemu_ld_opc[memop]); + qemu_ld_opc[memop & (MO_BSWAP | MO_SSIZE)]); #endif /* CONFIG_SOFTMMU */ } @@ -1157,12 +1157,11 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, TCGMemOp memop = get_memop(oi); #ifdef CONFIG_SOFTMMU unsigned memi = get_mmuidx(oi); - TCGMemOp s_bits = memop & MO_SIZE; TCGReg addrz, param; tcg_insn_unit *func; tcg_insn_unit *label_ptr; - addrz = tcg_out_tlb_load(s, addr, memi, s_bits, + addrz = tcg_out_tlb_load(s, addr, memi, memop & MO_SIZE, offsetof(CPUTLBEntry, addr_write)); /* The fast path is exactly one insn. Thus we can perform the entire @@ -1172,7 +1171,8 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr, tcg_out_bpcc0(s, COND_E, BPCC_A | BPCC_PT | (TARGET_LONG_BITS == 64 ? 
BPCC_XCC : BPCC_ICC), 0);
     /* delay slot */
-    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1, qemu_st_opc[memop]);
+    tcg_out_ldst_rr(s, data, addrz, TCG_REG_O1,
+                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 
     /* TLB Miss. */
 
@@ -1182,13 +1182,13 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
         param++;
     }
     tcg_out_mov(s, TCG_TYPE_REG, param++, addr);
-    if (!SPARC64 && s_bits == MO_64) {
+    if (!SPARC64 && (memop & MO_SIZE) == MO_64) {
         /* Skip the high-part; we'll perform the extract in the trampoline. */
         param++;
     }
     tcg_out_mov(s, TCG_TYPE_REG, param++, data);
 
-    func = qemu_st_trampoline[memop];
+    func = qemu_st_trampoline[memop & (MO_BSWAP | MO_SIZE)];
     assert(func != NULL);
     tcg_out_call_nodelay(s, func);
     /* delay slot */
@@ -1202,7 +1202,7 @@ static void tcg_out_qemu_st(TCGContext *s, TCGReg data, TCGReg addr,
     }
     tcg_out_ldst_rr(s, data, addr,
                     (GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_G0),
-                    qemu_st_opc[memop]);
+                    qemu_st_opc[memop & (MO_BSWAP | MO_SIZE)]);
 #endif /* CONFIG_SOFTMMU */
 }
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -1076,10 +1076,19 @@ void tcg_dump_ops(TCGContext *s)
                 TCGMemOp op = get_memop(oi);
                 unsigned ix = get_mmuidx(oi);
 
-                if (op < ARRAY_SIZE(ldst_name) && ldst_name[op]) {
-                    qemu_log(",%s,%u", ldst_name[op], ix);
-                } else {
+                if (op & ~(MO_AMASK | MO_BSWAP | MO_SSIZE)) {
                     qemu_log(",$0x%x,%u", op, ix);
+                } else {
+                    const char *s_al = "", *s_op;
+                    if (op & MO_AMASK) {
+                        if ((op & MO_AMASK) == MO_ALIGN) {
+                            s_al = "al+";
+                        } else {
+                            s_al = "un+";
+                        }
+                    }
+                    s_op = ldst_name[op & (MO_BSWAP | MO_SSIZE)];
+                    qemu_log(",%s%s,%u", s_al, s_op, ix);
                 }
                 i = 1;
             }
@@ -1378,16 +1387,20 @@ static void tcg_liveness_analysis(TCGContext *s)
                     memset(dead_temps, 1, s->nb_globals);
                 }
 
-                /* input args are live */
+                /* record arguments that die in this helper */
                 for (i = nb_oargs; i < nb_iargs + nb_oargs; i++) {
                     arg = args[i];
                     if (arg != TCG_CALL_DUMMY_ARG) {
                         if (dead_temps[arg]) {
                             dead_args |= (1 << i);
                         }
-                        dead_temps[arg] = 0;
                     }
                 }
+                /* input arguments are live for preceding opcodes */
+                for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+                    arg = args[i];
+                    dead_temps[arg] = 0;
+                }
                 s->op_dead_args[oi] = dead_args;
                 s->op_sync_args[oi] = sync_args;
             }
@@ -1522,12 +1535,16 @@ static void tcg_liveness_analysis(TCGContext *s)
                 memset(mem_temps, 1, s->nb_globals);
             }
 
-            /* input args are live */
+            /* record arguments that die in this opcode */
             for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
                 arg = args[i];
                 if (dead_temps[arg]) {
                     dead_args |= (1 << i);
                 }
+            }
+            /* input arguments are live for preceding opcodes */
+            for (i = nb_oargs; i < nb_oargs + nb_iargs; i++) {
+                arg = args[i];
                 dead_temps[arg] = 0;
             }
             s->op_dead_args[oi] = dead_args;
@@ -1998,6 +2015,16 @@ static void tcg_reg_alloc_op(TCGContext *s,
                 if (!IS_DEAD_ARG(i)) {
                     goto allocate_in_reg;
                 }
+                /* check if the current register has already been allocated
+                   for another input aliased to an output */
+                int k2, i2;
+                for (k2 = 0 ; k2 < k ; k2++) {
+                    i2 = def->sorted_args[nb_oargs + k2];
+                    if ((def->args_ct[i2].ct & TCG_CT_IALIAS) &&
+                        (new_args[i2] == ts->reg)) {
+                        goto allocate_in_reg;
+                    }
+                }
             }
         }
         reg = ts->reg;
diff --git a/tci.c b/tci.c
--- a/tci.c
+++ b/tci.c
@@ -1107,7 +1107,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
             t0 = *tb_ptr++;
             taddr = tci_read_ulong(&tb_ptr);
             oi = tci_read_i(&tb_ptr);
-            switch (get_memop(oi)) {
+            switch (get_memop(oi) & (MO_BSWAP | MO_SSIZE)) {
             case MO_UB:
                 tmp32 = qemu_ld_ub;
                 break;
@@ -1144,7 +1144,7 @@
             }
             taddr = tci_read_ulong(&tb_ptr);
             oi = tci_read_i(&tb_ptr);
-            switch (get_memop(oi)) {
+            switch (get_memop(oi) &
(MO_BSWAP | MO_SSIZE)) { case MO_UB: tmp64 = qemu_ld_ub; break; @@ -1193,7 +1193,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) t0 = tci_read_r(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); oi = tci_read_i(&tb_ptr); - switch (get_memop(oi)) { + switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) { case MO_UB: qemu_st_b(t0); break; @@ -1217,7 +1217,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr) tmp64 = tci_read_r64(&tb_ptr); taddr = tci_read_ulong(&tb_ptr); oi = tci_read_i(&tb_ptr); - switch (get_memop(oi)) { + switch (get_memop(oi) & (MO_BSWAP | MO_SIZE)) { case MO_UB: qemu_st_b(tmp64); break; diff --git a/tests/bios-tables-test.c b/tests/bios-tables-test.c index 7e85dc45e3..0de1742d7d 100644 --- a/tests/bios-tables-test.c +++ b/tests/bios-tables-test.c @@ -599,35 +599,15 @@ static void test_acpi_asl(test_data *data) free_test_data(&exp_data); } -static void test_smbios_ep_address(test_data *data) -{ - uint32_t off; - - /* find smbios entry point structure */ - for (off = 0xf0000; off < 0x100000; off += 0x10) { - uint8_t sig[] = "_SM_"; - int i; - - for (i = 0; i < sizeof sig - 1; ++i) { - sig[i] = readb(off + i); - } - - if (!memcmp(sig, "_SM_", sizeof sig)) { - break; - } - } - - g_assert_cmphex(off, <, 0x100000); - data->smbios_ep_addr = off; -} - -static void test_smbios_ep_table(test_data *data) +static bool smbios_ep_table_ok(test_data *data) { struct smbios_entry_point *ep_table = &data->smbios_ep_table; uint32_t addr = data->smbios_ep_addr; ACPI_READ_ARRAY(ep_table->anchor_string, addr); - g_assert(!memcmp(ep_table->anchor_string, "_SM_", 4)); + if (memcmp(ep_table->anchor_string, "_SM_", 4)) { + return false; + } ACPI_READ_FIELD(ep_table->checksum, addr); ACPI_READ_FIELD(ep_table->length, addr); ACPI_READ_FIELD(ep_table->smbios_major_version, addr); @@ -636,17 +616,50 @@ static void test_smbios_ep_table(test_data *data) ACPI_READ_FIELD(ep_table->entry_point_revision, addr); ACPI_READ_ARRAY(ep_table->formatted_area, addr); ACPI_READ_ARRAY(ep_table->intermediate_anchor_string, addr); - g_assert(!memcmp(ep_table->intermediate_anchor_string, "_DMI_", 5)); + if (memcmp(ep_table->intermediate_anchor_string, "_DMI_", 5)) { + return false; + } ACPI_READ_FIELD(ep_table->intermediate_checksum, addr); ACPI_READ_FIELD(ep_table->structure_table_length, addr); - g_assert_cmpuint(ep_table->structure_table_length, >, 0); + if (ep_table->structure_table_length == 0) { + return false; + } ACPI_READ_FIELD(ep_table->structure_table_address, addr); ACPI_READ_FIELD(ep_table->number_of_structures, addr); - g_assert_cmpuint(ep_table->number_of_structures, >, 0); + if (ep_table->number_of_structures == 0) { + return false; + } ACPI_READ_FIELD(ep_table->smbios_bcd_revision, addr); - g_assert(!acpi_checksum((uint8_t *)ep_table, sizeof *ep_table)); - g_assert(!acpi_checksum((uint8_t *)ep_table + 0x10, - sizeof *ep_table - 0x10)); + if (acpi_checksum((uint8_t *)ep_table, sizeof *ep_table) || + acpi_checksum((uint8_t *)ep_table + 0x10, sizeof *ep_table - 0x10)) { + return false; + } + return true; +} + +static void test_smbios_entry_point(test_data *data) +{ + uint32_t off; + + /* find smbios entry point structure */ + for (off = 0xf0000; off < 0x100000; off += 0x10) { + uint8_t sig[] = "_SM_"; + int i; + + for (i = 0; i < sizeof sig - 1; ++i) { + sig[i] = readb(off + i); + } + + if (!memcmp(sig, "_SM_", sizeof sig)) { + /* signature match, but is this a valid entry point? 
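Stray anchor byte patterns can occur in the scanned segment, so the address is accepted only if the full entry point table validates.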
*/ + data->smbios_ep_addr = off; + if (smbios_ep_table_ok(data)) { + break; + } + } + } + + g_assert_cmphex(off, <, 0x100000); } static inline bool smbios_single_instance(uint8_t type) @@ -767,8 +780,7 @@ static void test_acpi_one(const char *params, test_data *data) } } - test_smbios_ep_address(data); - test_smbios_ep_table(data); + test_smbios_entry_point(data); test_smbios_structs(data); qtest_quit(global_qtest); @@ -182,7 +182,7 @@ static int configure_tpm(QemuOpts *opts) return 0; } -static int tpm_init_tpmdev(QemuOpts *opts, void *dummy) +static int tpm_init_tpmdev(void *dummy, QemuOpts *opts, Error **errp) { return configure_tpm(opts); } @@ -208,12 +208,11 @@ void tpm_cleanup(void) int tpm_init(void) { if (qemu_opts_foreach(qemu_find_opts("tpmdev"), - tpm_init_tpmdev, NULL, 1) != 0) { + tpm_init_tpmdev, NULL, NULL)) { return -1; } atexit(tpm_cleanup); - return 0; } diff --git a/trace-events b/trace-events index a589650597..2662ffa850 100644 --- a/trace-events +++ b/trace-events @@ -193,10 +193,8 @@ ecc_diag_mem_writeb(uint64_t addr, uint32_t val) "Write diagnostic %"PRId64" = % ecc_diag_mem_readb(uint64_t addr, uint32_t ret) "Read diagnostic %"PRId64"= %02x" # hw/nvram/fw_cfg.c -fw_cfg_write(void *s, uint8_t value) "%p %d" fw_cfg_select(void *s, uint16_t key, int ret) "%p key %d = %d" fw_cfg_read(void *s, uint8_t ret) "%p = %d" -fw_cfg_add_file_dupe(void *s, char *name) "%p %s" fw_cfg_add_file(void *s, int index, char *name, size_t len) "%p #%d: %s (%zd bytes)" # hw/block/hd-geometry.c @@ -1167,6 +1165,20 @@ vmware_scratch_read(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_scratch_write(uint32_t index, uint32_t value) "index %d, value 0x%x" vmware_setmode(uint32_t w, uint32_t h, uint32_t bpp) "%dx%d @ %d bpp" +# hw/display/virtio-gpu.c +virtio_gpu_cmd_get_display_info(void) "" +virtio_gpu_cmd_get_caps(void) "" +virtio_gpu_cmd_set_scanout(uint32_t id, uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "id %d, res 0x%x, w %d, h %d, x %d, y %d" +virtio_gpu_cmd_res_create_2d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h) "res 0x%x, fmt 0x%x, w %d, h %d" +virtio_gpu_cmd_res_create_3d(uint32_t res, uint32_t fmt, uint32_t w, uint32_t h, uint32_t d) "res 0x%x, fmt 0x%x, w %d, h %d, d %d" +virtio_gpu_cmd_res_unref(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_back_attach(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_back_detach(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_xfer_toh_2d(uint32_t res) "res 0x%x" +virtio_gpu_cmd_res_flush(uint32_t res, uint32_t w, uint32_t h, uint32_t x, uint32_t y) "res 0x%x, w %d, h %d, x %d, y %d" +virtio_gpu_fence_ctrl(uint64_t fence, uint32_t type) "fence 0x%" PRIx64 ", type 0x%x" +virtio_gpu_fence_resp(uint64_t fence) "fence 0x%" PRIx64 + # savevm.c qemu_loadvm_state_section(unsigned int section_type) "%d" qemu_loadvm_state_section_partend(uint32_t section_id) "%u" @@ -1564,6 +1576,18 @@ vfio_put_group(int fd) "close group->fd=%d" vfio_get_device(const char * name, unsigned int flags, unsigned int num_regions, unsigned int num_irqs) "Device %s flags: %u, regions: %u, irqs: %u" vfio_put_base_device(int fd) "close vdev->fd=%d" +# hw/vfio/platform.c +vfio_platform_populate_regions(int region_index, unsigned long flag, unsigned long size, int fd, unsigned long offset) "- region %d flags = 0x%lx, size = 0x%lx, fd= %d, offset = 0x%lx" +vfio_platform_base_device_init(char *name, int groupid) "%s belongs to group #%d" +vfio_platform_realize(char *name, char *compat) "vfio device %s, compat = %s" +vfio_platform_eoi(int pin, int fd) "EOI 
IRQ pin %d (fd=%d)" +vfio_platform_mmap_set_enabled(bool enabled) "fast path = %d" +vfio_platform_intp_mmap_enable(int pin) "IRQ #%d still active, stay in slow path" +vfio_platform_intp_interrupt(int pin, int fd) "Inject IRQ #%d (fd = %d)" +vfio_platform_intp_inject_pending_lockheld(int pin, int fd) "Inject pending IRQ #%d (fd = %d)" +vfio_platform_populate_interrupts(int pin, int count, int flags) "- IRQ index %d: count %d, flags=0x%x" +vfio_intp_interrupt_set_pending(int index) "irq %d is set PENDING" + #hw/acpi/memory_hotplug.c mhp_acpi_invalid_slot_selected(uint32_t slot) "0x%"PRIx32 mhp_acpi_ejecting_invalid_slot(uint32_t slot) "0x%"PRIx32 @@ -1917,12 +1917,19 @@ static void gd_set_keycode_type(GtkDisplayState *s) #endif } +static gboolean gtkinit; + void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover) { GtkDisplayState *s = g_malloc0(sizeof(*s)); char *filename; GdkDisplay *window_display; + if (!gtkinit) { + fprintf(stderr, "gtk initialization failed\n"); + exit(1); + } + s->window = gtk_window_new(GTK_WINDOW_TOPLEVEL); #if GTK_CHECK_VERSION(3, 2, 0) s->vbox = gtk_box_new(GTK_ORIENTATION_VERTICAL, 0); @@ -2003,7 +2010,11 @@ void gtk_display_init(DisplayState *ds, bool full_screen, bool grab_on_hover) void early_gtk_display_init(int opengl) { - gtk_init(NULL, NULL); + gtkinit = gtk_init_check(NULL, NULL); + if (!gtkinit) { + /* don't exit yet, that'll break -help */ + return; + } switch (opengl) { case -1: /* default */ diff --git a/ui/spice-core.c b/ui/spice-core.c index 2e8384e653..a30da3cf9f 100644 --- a/ui/spice-core.c +++ b/ui/spice-core.c @@ -583,7 +583,8 @@ int qemu_spice_migrate_info(const char *hostname, int port, int tls_port, return ret; } -static int add_channel(const char *name, const char *value, void *opaque) +static int add_channel(void *opaque, const char *name, const char *value, + Error **errp) { int security = 0; int rc; @@ -782,7 +783,7 @@ void qemu_spice_init(void) spice_server_set_playback_compression (spice_server, qemu_opt_get_bool(opts, "playback-compression", 1)); - qemu_opt_foreach(opts, add_channel, &tls_port, 0); + qemu_opt_foreach(opts, add_channel, &tls_port, NULL); spice_server_set_name(spice_server, qemu_name); spice_server_set_uuid(spice_server, qemu_uuid); diff --git a/ui/spice-display.c b/ui/spice-display.c index 9c63132054..cc4a6ce98d 100644 --- a/ui/spice-display.c +++ b/ui/spice-display.c @@ -199,7 +199,7 @@ static void qemu_spice_create_update(SimpleSpiceDisplay *ssd) static const int blksize = 32; int blocks = (surface_width(ssd->ds) + blksize - 1) / blksize; int dirty_top[blocks]; - int y, yoff, x, xoff, blk, bw; + int y, yoff1, yoff2, x, xoff, blk, bw; int bpp = surface_bytes_per_pixel(ssd->ds); uint8_t *guest, *mirror; @@ -214,13 +214,14 @@ static void qemu_spice_create_update(SimpleSpiceDisplay *ssd) guest = surface_data(ssd->ds); mirror = (void *)pixman_image_get_data(ssd->mirror); for (y = ssd->dirty.top; y < ssd->dirty.bottom; y++) { - yoff = y * surface_stride(ssd->ds); + yoff1 = y * surface_stride(ssd->ds); + yoff2 = y * pixman_image_get_stride(ssd->mirror); for (x = ssd->dirty.left; x < ssd->dirty.right; x += blksize) { xoff = x * bpp; blk = x / blksize; bw = MIN(blksize, ssd->dirty.right - x); - if (memcmp(guest + yoff + xoff, - mirror + yoff + xoff, + if (memcmp(guest + yoff1 + xoff, + mirror + yoff2 + xoff, bw * bpp) == 0) { if (dirty_top[blk] != -1) { QXLRect update = { @@ -660,7 +661,10 @@ static int interface_client_monitors_config(QXLInstance *sin, { SimpleSpiceDisplay *ssd = container_of(sin, 
SimpleSpiceDisplay, qxl); QemuUIInfo info; - int rc; + + if (!dpy_ui_info_supported(ssd->dcl.con)) { + return 0; /* == not supported by guest */ + } if (!mc) { return 1; @@ -675,14 +679,10 @@ static int interface_client_monitors_config(QXLInstance *sin, info.width = mc->monitors[0].width; info.height = mc->monitors[0].height; } - rc = dpy_set_ui_info(ssd->dcl.con, &info); - dprint(1, "%s/%d: size %dx%d, rc %d <--- ==========================\n", - __func__, ssd->qxl.id, info.width, info.height, rc); - if (rc != 0) { - return 0; /* == not supported by guest */ - } else { - return 1; - } + dpy_set_ui_info(ssd->dcl.con, &info); + dprint(1, "%s/%d: size %dx%d\n", __func__, ssd->qxl.id, + info.width, info.height); + return 1; } static const QXLInterface dpy_interface = { @@ -3770,7 +3770,7 @@ QemuOpts *vnc_parse_func(const char *str) return opts; } -int vnc_init_func(QemuOpts *opts, void *opaque) +int vnc_init_func(void *opaque, QemuOpts *opts, Error **errp) { Error *local_err = NULL; char *id = (char *)qemu_opts_id(opts); diff --git a/util/qemu-config.c b/util/qemu-config.c index 30d6dcf526..6cfdd7204a 100644 --- a/util/qemu-config.c +++ b/util/qemu-config.c @@ -6,7 +6,7 @@ #include "qapi/error.h" #include "qmp-commands.h" -static QemuOptsList *vm_config_groups[32]; +static QemuOptsList *vm_config_groups[48]; static QemuOptsList *drive_config_groups[4]; static QemuOptsList *find_list(QemuOptsList **lists, const char *group, @@ -335,7 +335,8 @@ struct ConfigWriteData { FILE *fp; }; -static int config_write_opt(const char *name, const char *value, void *opaque) +static int config_write_opt(void *opaque, const char *name, const char *value, + Error **errp) { struct ConfigWriteData *data = opaque; @@ -343,7 +344,7 @@ static int config_write_opt(const char *name, const char *value, void *opaque) return 0; } -static int config_write_opts(QemuOpts *opts, void *opaque) +static int config_write_opts(void *opaque, QemuOpts *opts, Error **errp) { struct ConfigWriteData *data = opaque; const char *id = qemu_opts_id(opts); @@ -353,7 +354,7 @@ static int config_write_opts(QemuOpts *opts, void *opaque) } else { fprintf(data->fp, "[%s]\n", data->list->name); } - qemu_opt_foreach(opts, config_write_opt, data, 0); + qemu_opt_foreach(opts, config_write_opt, data, NULL); fprintf(data->fp, "\n"); return 0; } @@ -367,7 +368,7 @@ void qemu_config_write(FILE *fp) fprintf(fp, "# qemu config file\n\n"); for (i = 0; lists[i] != NULL; i++) { data.list = lists[i]; - qemu_opts_foreach(data.list, config_write_opts, &data, 0); + qemu_opts_foreach(data.list, config_write_opts, &data, NULL); } } diff --git a/util/qemu-option.c b/util/qemu-option.c index fda4e5fcbf..840f5f7a5b 100644 --- a/util/qemu-option.c +++ b/util/qemu-option.c @@ -596,18 +596,26 @@ void qemu_opt_set_number(QemuOpts *opts, const char *name, int64_t val, QTAILQ_INSERT_TAIL(&opts->head, opt, next); } +/** + * For each member of @opts, call @func(@opaque, name, value, @errp). + * @func() may store an Error through @errp, but must return non-zero then. + * When @func() returns non-zero, break the loop and return that value. + * Return zero when the loop completes. 
+ */ int qemu_opt_foreach(QemuOpts *opts, qemu_opt_loopfunc func, void *opaque, - int abort_on_failure) + Error **errp) { QemuOpt *opt; - int rc = 0; + int rc; QTAILQ_FOREACH(opt, &opts->head, next) { - rc = func(opt->name, opt->str, opaque); - if (abort_on_failure && rc != 0) - break; + rc = func(opaque, opt->name, opt->str, errp); + if (rc) { + return rc; + } + assert(!errp || !*errp); } - return rc; + return 0; } QemuOpts *qemu_opts_find(QemuOptsList *list, const char *id) @@ -1046,22 +1054,31 @@ void qemu_opts_validate(QemuOpts *opts, const QemuOptDesc *desc, Error **errp) } } -int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, void *opaque, - int abort_on_failure) +/** + * For each member of @list, call @func(@opaque, member, @errp). + * Call it with the current location temporarily set to the member's. + * @func() may store an Error through @errp, but must return non-zero then. + * When @func() returns non-zero, break the loop and return that value. + * Return zero when the loop completes. + */ +int qemu_opts_foreach(QemuOptsList *list, qemu_opts_loopfunc func, + void *opaque, Error **errp) { Location loc; QemuOpts *opts; int rc = 0; loc_push_none(&loc); QTAILQ_FOREACH(opts, &list->head, next) { loc_restore(&opts->loc); - rc |= func(opts, opaque); - if (abort_on_failure && rc != 0) - break; + rc = func(opaque, opts, errp); + if (rc) { + break; + } + assert(!errp || !*errp); } loc_pop(&loc); return rc; } static size_t count_opts_list(QemuOptsList *list) @@ -492,6 +492,25 @@ static QemuOptsList qemu_semihosting_config_opts = { }, }; +static QemuOptsList qemu_fw_cfg_opts = { + .name = "fw_cfg", + .implied_opt_name = "name", + .head = QTAILQ_HEAD_INITIALIZER(qemu_fw_cfg_opts.head), + .desc = { + { + .name = "name", + .type = QEMU_OPT_STRING, + .help = "Sets the fw_cfg name of the blob to be inserted", + }, { + .name = "file", + .type = QEMU_OPT_STRING, + .help = "Sets the name of the file from which\n" + "the fw_cfg blob will be loaded", + }, + { /* end of list */ } + }, +}; + /** * Get machine options * @@ -515,7 +534,7 @@ static void res_free(void) } } -static int default_driver_check(QemuOpts *opts, void *opaque) +static int default_driver_check(void *opaque, QemuOpts *opts, Error **errp) { const char *driver = qemu_opt_get(opts, "driver"); int i; @@ -961,7 +980,7 @@ static int bt_parse(const char *opt) return 1; } -static int parse_sandbox(QemuOpts *opts, void *opaque) +static int parse_sandbox(void *opaque, QemuOpts *opts, Error **errp) { /* FIXME: change this to true for 1.3 */ if (qemu_opt_get_bool(opts, "enable", false)) { @@ -981,7 +1000,7 @@ static int parse_sandbox(QemuOpts *opts, void *opaque) return 0; } -static int parse_name(QemuOpts *opts, void *opaque) +static int parse_name(void *opaque, QemuOpts *opts, Error **errp) { const char *proc_name; @@ -1009,7 +1028,7 @@ bool usb_enabled(void) } #ifndef _WIN32 -static int parse_add_fd(QemuOpts *opts, void *opaque) +static int parse_add_fd(void *opaque, QemuOpts *opts, Error **errp) { int fd, dupfd, flags; int64_t fdset_id; @@ -1071,7 +1090,7 @@ static int parse_add_fd(QemuOpts *opts, void *opaque) return 0; } -static int cleanup_add_fd(QemuOpts *opts, void *opaque) +static int cleanup_add_fd(void *opaque, QemuOpts *opts, Error **errp) { int fd; @@ -1092,14 +1111,14 @@ static int cleanup_add_fd(QemuOpts *opts, void *opaque) #define MTD_OPTS "" #define SD_OPTS "" -static int drive_init_func(QemuOpts *opts, void *opaque) +static int drive_init_func(void *opaque, QemuOpts *opts, Error 
**errp) { BlockInterfaceType *block_default_type = opaque; return drive_new(opts, *block_default_type) == NULL; } -static int drive_enable_snapshot(QemuOpts *opts, void *opaque) +static int drive_enable_snapshot(void *opaque, QemuOpts *opts, Error **errp) { if (qemu_opt_get(opts, "snapshot") == NULL) { qemu_opt_set(opts, "snapshot", "on", &error_abort); @@ -1119,7 +1138,7 @@ static void default_drive(int enable, int snapshot, BlockInterfaceType type, opts = drive_add(type, index, NULL, optstr); if (snapshot) { - drive_enable_snapshot(opts, NULL); + drive_enable_snapshot(NULL, opts, NULL); } dinfo = drive_new(opts, type); @@ -2127,12 +2146,44 @@ char *qemu_find_file(int type, const char *name) return NULL; } -static int device_help_func(QemuOpts *opts, void *opaque) +static int parse_fw_cfg(void *opaque, QemuOpts *opts, Error **errp) +{ + gchar *buf; + size_t size; + const char *name, *file; + + if (opaque == NULL) { + error_report("fw_cfg device not available"); + return -1; + } + name = qemu_opt_get(opts, "name"); + file = qemu_opt_get(opts, "file"); + if (name == NULL || *name == '\0' || file == NULL || *file == '\0') { + error_report("invalid argument value"); + return -1; + } + if (strlen(name) > FW_CFG_MAX_FILE_PATH - 1) { + error_report("name too long (max. %d char)", FW_CFG_MAX_FILE_PATH - 1); + return -1; + } + if (strncmp(name, "opt/", 4) != 0) { + error_report("WARNING: externally provided fw_cfg item names " + "should be prefixed with \"opt/\"!"); + } + if (!g_file_get_contents(file, &buf, &size, NULL)) { + error_report("can't load %s", file); + return -1; + } + fw_cfg_add_file((FWCfgState *)opaque, name, buf, size); + return 0; +} + +static int device_help_func(void *opaque, QemuOpts *opts, Error **errp) { return qdev_device_help(opts); } -static int device_init_func(QemuOpts *opts, void *opaque) +static int device_init_func(void *opaque, QemuOpts *opts, Error **errp) { DeviceState *dev; @@ -2143,7 +2194,7 @@ static int device_init_func(QemuOpts *opts, void *opaque) return 0; } -static int chardev_init_func(QemuOpts *opts, void *opaque) +static int chardev_init_func(void *opaque, QemuOpts *opts, Error **errp) { Error *local_err = NULL; @@ -2156,7 +2207,7 @@ static int chardev_init_func(QemuOpts *opts, void *opaque) } #ifdef CONFIG_VIRTFS -static int fsdev_init_func(QemuOpts *opts, void *opaque) +static int fsdev_init_func(void *opaque, QemuOpts *opts, Error **errp) { int ret; ret = qemu_fsdev_add(opts); @@ -2165,7 +2216,7 @@ static int fsdev_init_func(QemuOpts *opts, void *opaque) } #endif -static int mon_init_func(QemuOpts *opts, void *opaque) +static int mon_init_func(void *opaque, QemuOpts *opts, Error **errp) { CharDriverState *chr; const char *chardev; @@ -2576,8 +2627,9 @@ static void free_and_trace(gpointer mem) free(mem); } -static int machine_set_property(const char *name, const char *value, - void *opaque) +static int machine_set_property(void *opaque, + const char *name, const char *value, + Error **errp) { Object *obj = OBJECT(opaque); Error *local_err = NULL; @@ -2606,7 +2658,7 @@ static int machine_set_property(const char *name, const char *value, return 0; } -static int object_create(QemuOpts *opts, void *opaque) +static int object_create(void *opaque, QemuOpts *opts, Error **errp) { Error *err = NULL; char *type = NULL; @@ -2821,6 +2873,7 @@ int main(int argc, char **argv, char **envp) qemu_add_opts(&qemu_numa_opts); qemu_add_opts(&qemu_icount_opts); qemu_add_opts(&qemu_semihosting_config_opts); + qemu_add_opts(&qemu_fw_cfg_opts); runstate_init(); @@ -3437,6 
+3490,12 @@ int main(int argc, char **argv, char **envp) } do_smbios_option(opts); break; + case QEMU_OPTION_fwcfg: + opts = qemu_opts_parse(qemu_find_opts("fw_cfg"), optarg, 1); + if (opts == NULL) { + exit(1); + } + break; case QEMU_OPTION_enable_kvm: olist = qemu_find_opts("machine"); qemu_opts_parse(olist, "accel=kvm", 0); @@ -3797,20 +3856,24 @@ int main(int argc, char **argv, char **envp) exit(1); } - if (qemu_opts_foreach(qemu_find_opts("sandbox"), parse_sandbox, NULL, 0)) { + if (qemu_opts_foreach(qemu_find_opts("sandbox"), + parse_sandbox, NULL, NULL)) { exit(1); } - if (qemu_opts_foreach(qemu_find_opts("name"), parse_name, NULL, 1)) { + if (qemu_opts_foreach(qemu_find_opts("name"), + parse_name, NULL, NULL)) { exit(1); } #ifndef _WIN32 - if (qemu_opts_foreach(qemu_find_opts("add-fd"), parse_add_fd, NULL, 1)) { + if (qemu_opts_foreach(qemu_find_opts("add-fd"), + parse_add_fd, NULL, NULL)) { exit(1); } - if (qemu_opts_foreach(qemu_find_opts("add-fd"), cleanup_add_fd, NULL, 1)) { + if (qemu_opts_foreach(qemu_find_opts("add-fd"), + cleanup_add_fd, NULL, NULL)) { exit(1); } #endif @@ -3897,8 +3960,10 @@ int main(int argc, char **argv, char **envp) machine_class->default_machine_opts, 0); } - qemu_opts_foreach(qemu_find_opts("device"), default_driver_check, NULL, 0); - qemu_opts_foreach(qemu_find_opts("global"), default_driver_check, NULL, 0); + qemu_opts_foreach(qemu_find_opts("device"), + default_driver_check, NULL, NULL); + qemu_opts_foreach(qemu_find_opts("global"), + default_driver_check, NULL, NULL); if (!vga_model && !default_vga) { vga_interface_type = VGA_DEVICE; @@ -4036,10 +4101,14 @@ int main(int argc, char **argv, char **envp) socket_init(); - if (qemu_opts_foreach(qemu_find_opts("chardev"), chardev_init_func, NULL, 1) != 0) + if (qemu_opts_foreach(qemu_find_opts("chardev"), + chardev_init_func, NULL, NULL)) { exit(1); + } + #ifdef CONFIG_VIRTFS - if (qemu_opts_foreach(qemu_find_opts("fsdev"), fsdev_init_func, NULL, 1) != 0) { + if (qemu_opts_foreach(qemu_find_opts("fsdev"), + fsdev_init_func, NULL, NULL)) { exit(1); } #endif @@ -4049,19 +4118,19 @@ int main(int argc, char **argv, char **envp) exit(1); } - if (qemu_opts_foreach(qemu_find_opts("device"), device_help_func, NULL, 0) - != 0) { + if (qemu_opts_foreach(qemu_find_opts("device"), + device_help_func, NULL, NULL)) { exit(0); } if (qemu_opts_foreach(qemu_find_opts("object"), - object_create, NULL, 0) != 0) { + object_create, NULL, NULL)) { exit(1); } machine_opts = qemu_get_machine_opts(); if (qemu_opt_foreach(machine_opts, machine_set_property, current_machine, - 1) < 0) { + NULL)) { object_unref(OBJECT(current_machine)); exit(1); } @@ -4189,9 +4258,10 @@ int main(int argc, char **argv, char **envp) /* open the virtual block devices */ if (snapshot) - qemu_opts_foreach(qemu_find_opts("drive"), drive_enable_snapshot, NULL, 0); + qemu_opts_foreach(qemu_find_opts("drive"), + drive_enable_snapshot, NULL, NULL); if (qemu_opts_foreach(qemu_find_opts("drive"), drive_init_func, - &machine_class->block_default_type, 1) != 0) { + &machine_class->block_default_type, NULL)) { exit(1); } @@ -4202,7 +4272,8 @@ int main(int argc, char **argv, char **envp) parse_numa_opts(machine_class); - if (qemu_opts_foreach(qemu_find_opts("mon"), mon_init_func, NULL, 1) != 0) { + if (qemu_opts_foreach(qemu_find_opts("mon"), + mon_init_func, NULL, NULL)) { exit(1); } @@ -4261,6 +4332,11 @@ int main(int argc, char **argv, char **envp) numa_post_machine_init(); + if (qemu_opts_foreach(qemu_find_opts("fw_cfg"), + parse_fw_cfg, fw_cfg_find(), NULL) 
!= 0) { + exit(1); + } + /* init USB devices */ if (usb_enabled()) { if (foreach_device_config(DEV_USB, usb_parse) < 0) @@ -4268,8 +4344,10 @@ int main(int argc, char **argv, char **envp) } /* init generic devices */ - if (qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, 1) != 0) + if (qemu_opts_foreach(qemu_find_opts("device"), + device_init_func, NULL, NULL)) { exit(1); + } /* Did we create any drives that we failed to create a device for? */ drive_check_orphaned(); @@ -4321,7 +4399,8 @@ int main(int argc, char **argv, char **envp) #ifdef CONFIG_VNC /* init remote displays */ - qemu_opts_foreach(qemu_find_opts("vnc"), vnc_init_func, NULL, 0); + qemu_opts_foreach(qemu_find_opts("vnc"), + vnc_init_func, NULL, NULL); if (show_vnc_port) { char *ret = vnc_display_local_addr("default"); printf("VNC server running on `%s'\n", ret);
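
For reference, the conversion above gives every QemuOpts loop callback the same shape: the opaque pointer comes first, the Error ** comes last, and a callback that stores an error must also return non-zero so the loop stops. Below is a minimal sketch written against the new signatures; it is not part of the patch. The callback name, the "example" option group, and the "value" option are made up for illustration, while qemu_opts_foreach(), qemu_find_opts(), qemu_opt_get() and error_setg() are the real APIs used throughout this series:

    #include "qemu/option.h"
    #include "qemu/config-file.h"
    #include "qapi/error.h"

    /* Hypothetical callback, shown only to illustrate the new contract. */
    static int example_opts_cb(void *opaque, QemuOpts *opts, Error **errp)
    {
        const char *value = qemu_opt_get(opts, "value");

        if (!value) {
            /* on failure: set the error AND return non-zero */
            error_setg(errp, "Missing 'value' option");
            return -1;                  /* stops the loop */
        }
        /* ... consume value ... */
        return 0;                       /* keep iterating */
    }

    /* Caller side: the old abort_on_failure flag is gone; errors travel
     * through the Error ** instead, and the first non-zero callback
     * return value is passed straight back.
     */
    static int example_caller(Error **errp)
    {
        return qemu_opts_foreach(qemu_find_opts("example"),
                                 example_opts_cb, NULL, errp);
    }

The assert(!errp || !*errp) added to both loops enforces exactly this contract: a callback that returns zero must not have stored an error.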