author     Gerd Hoffmann <kraxel@redhat.com>  2016-05-23 15:22:07 +0200
committer  Gerd Hoffmann <kraxel@redhat.com>  2016-06-06 09:04:34 +0200
commit     0c244e50ee12311037efd507ee37df0e846e4a18 (patch)
tree       6122f7c296e79cbe6f4b327018b80e154c6f42b2 /hw/display/virtio-gpu.c
parent     4e68a0ee17dad7b8d870df0081d4ab2e079016c2 (diff)
virtio-gpu: add live migration support
Store some additional state for cursor and resource backing storage, so we
can write out and reload things.

Implement vmsave+vmload for 2d mode.

Continue blocking live migration in 3d/virgl mode.

Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
Message-id: 1464009727-7753-1-git-send-email-kraxel@redhat.com
Diffstat (limited to 'hw/display/virtio-gpu.c')
-rw-r--r--   hw/display/virtio-gpu.c   188
1 file changed, 182 insertions(+), 6 deletions(-)
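
Note: the diffstat is limited to hw/display/virtio-gpu.c, but the hunks below reference new per-resource and per-scanout state (res->addrs, s->cursor) that has to be declared in the device structs, presumably in the matching virtio-gpu header. A minimal sketch of what those declarations would look like, with field names inferred from their use in the diff and the surrounding layout purely illustrative:

/* Sketch only: field names are taken from their use in the diff below;
 * the real struct layout lives in the virtio-gpu header, not here. */
#include <stdint.h>
#include <sys/uio.h>                            /* struct iovec */
#include "standard-headers/linux/virtio_gpu.h"  /* struct virtio_gpu_update_cursor */

struct virtio_gpu_simple_resource {
    uint32_t resource_id;
    uint32_t width, height, format;
    uint64_t *addrs;        /* guest physical address of each backing entry;
                             * written out by virtio_gpu_save() and used by
                             * virtio_gpu_load() to re-map the iovec */
    struct iovec *iov;
    unsigned int iov_cnt;
    /* ... pixman image, scanout bitmask, list linkage ... */
};

struct virtio_gpu_scanout {
    /* ... console, surface, geometry ... */
    struct virtio_gpu_update_cursor cursor;  /* last cursor command, migrated
                                              * via vmstate_virtio_gpu_scanout
                                              * and replayed by update_cursor()
                                              * after load */
};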
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index a836e3571d..136c095b7d 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -22,6 +22,8 @@
#include "qemu/log.h"
#include "qapi/error.h"
+#define VIRTIO_GPU_VM_VERSION 1
+
static struct virtio_gpu_simple_resource*
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
@@ -94,7 +96,7 @@ static void update_cursor_data_virgl(VirtIOGPU *g,
static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
{
struct virtio_gpu_scanout *s;
- bool move = cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR;
+ bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
if (cursor->pos.scanout_id >= g->conf.max_outputs) {
return;
@@ -107,7 +109,7 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
move ? "move" : "update",
cursor->resource_id);
- if (move) {
+ if (!move) {
if (!s->current_cursor) {
s->current_cursor = cursor_alloc(64, 64);
}
@@ -120,6 +122,11 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
g, s, cursor->resource_id);
}
dpy_cursor_define(s->con, s->current_cursor);
+
+ s->cursor = *cursor;
+ } else {
+ s->cursor.pos.x = cursor->pos.x;
+ s->cursor.pos.y = cursor->pos.y;
}
dpy_mouse_set(s->con, cursor->pos.x, cursor->pos.y,
cursor->resource_id ? 1 : 0);
@@ -602,7 +609,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
struct virtio_gpu_ctrl_command *cmd,
- struct iovec **iov)
+ uint64_t **addr, struct iovec **iov)
{
struct virtio_gpu_mem_entry *ents;
size_t esize, s;
@@ -628,10 +635,16 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
}
*iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
+ if (addr) {
+ *addr = g_malloc0(sizeof(uint64_t) * ab->nr_entries);
+ }
for (i = 0; i < ab->nr_entries; i++) {
hwaddr len = ents[i].length;
(*iov)[i].iov_len = ents[i].length;
(*iov)[i].iov_base = cpu_physical_memory_map(ents[i].addr, &len, 1);
+ if (addr) {
+ (*addr)[i] = ents[i].addr;
+ }
if (!(*iov)[i].iov_base || len != ents[i].length) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for"
" resource %d element %d\n",
@@ -639,6 +652,10 @@ int virtio_gpu_create_mapping_iov(struct virtio_gpu_resource_attach_backing *ab,
virtio_gpu_cleanup_mapping_iov(*iov, i);
g_free(ents);
*iov = NULL;
+ if (addr) {
+ g_free(*addr);
+ *addr = NULL;
+ }
return -1;
}
}
@@ -662,6 +679,8 @@ static void virtio_gpu_cleanup_mapping(struct virtio_gpu_simple_resource *res)
virtio_gpu_cleanup_mapping_iov(res->iov, res->iov_cnt);
res->iov = NULL;
res->iov_cnt = 0;
+ g_free(res->addrs);
+ res->addrs = NULL;
}
static void
@@ -683,7 +702,7 @@ virtio_gpu_resource_attach_backing(VirtIOGPU *g,
return;
}
- ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->iov);
+ ret = virtio_gpu_create_mapping_iov(&ab, cmd, &res->addrs, &res->iov);
if (ret != 0) {
cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
return;
@@ -929,11 +948,163 @@ const GraphicHwOps virtio_gpu_ops = {
.gl_block = virtio_gpu_gl_block,
};
+static const VMStateDescription vmstate_virtio_gpu_scanout = {
+ .name = "virtio-gpu-one-scanout",
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT32(resource_id, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(width, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(height, struct virtio_gpu_scanout),
+ VMSTATE_INT32(x, struct virtio_gpu_scanout),
+ VMSTATE_INT32(y, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(cursor.resource_id, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(cursor.hot_x, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(cursor.hot_y, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(cursor.pos.x, struct virtio_gpu_scanout),
+ VMSTATE_UINT32(cursor.pos.y, struct virtio_gpu_scanout),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static const VMStateDescription vmstate_virtio_gpu_scanouts = {
+ .name = "virtio-gpu-scanouts",
+ .version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_INT32(enable, struct VirtIOGPU),
+ VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU),
+ VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
+ conf.max_outputs, 1,
+ vmstate_virtio_gpu_scanout,
+ struct virtio_gpu_scanout),
+ VMSTATE_END_OF_LIST()
+ },
+};
+
static const VMStateDescription vmstate_virtio_gpu_unmigratable = {
- .name = "virtio-gpu",
+ .name = "virtio-gpu-with-virgl",
.unmigratable = 1,
};
+static void virtio_gpu_save(QEMUFile *f, void *opaque)
+{
+ VirtIOGPU *g = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(g);
+ struct virtio_gpu_simple_resource *res;
+ int i;
+
+ virtio_save(vdev, f);
+
+ /* in 2d mode we should never find unprocessed commands here */
+ assert(QTAILQ_EMPTY(&g->cmdq));
+
+ QTAILQ_FOREACH(res, &g->reslist, next) {
+ qemu_put_be32(f, res->resource_id);
+ qemu_put_be32(f, res->width);
+ qemu_put_be32(f, res->height);
+ qemu_put_be32(f, res->format);
+ qemu_put_be32(f, res->iov_cnt);
+ for (i = 0; i < res->iov_cnt; i++) {
+ qemu_put_be64(f, res->addrs[i]);
+ qemu_put_be32(f, res->iov[i].iov_len);
+ }
+ qemu_put_buffer(f, (void *)pixman_image_get_data(res->image),
+ pixman_image_get_stride(res->image) * res->height);
+ }
+ qemu_put_be32(f, 0); /* end of list */
+
+ vmstate_save_state(f, &vmstate_virtio_gpu_scanouts, g, NULL);
+}
+
+static int virtio_gpu_load(QEMUFile *f, void *opaque, int version_id)
+{
+ VirtIOGPU *g = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(g);
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_scanout *scanout;
+ uint32_t resource_id, pformat;
+ int i, ret;
+
+ if (version_id != VIRTIO_GPU_VM_VERSION) {
+ return -EINVAL;
+ }
+
+ ret = virtio_load(vdev, f, version_id);
+ if (ret) {
+ return ret;
+ }
+
+ resource_id = qemu_get_be32(f);
+ while (resource_id != 0) {
+ res = g_new0(struct virtio_gpu_simple_resource, 1);
+ res->resource_id = resource_id;
+ res->width = qemu_get_be32(f);
+ res->height = qemu_get_be32(f);
+ res->format = qemu_get_be32(f);
+ res->iov_cnt = qemu_get_be32(f);
+
+ /* allocate */
+ pformat = get_pixman_format(res->format);
+ if (!pformat) {
+ return -EINVAL;
+ }
+ res->image = pixman_image_create_bits(pformat,
+ res->width, res->height,
+ NULL, 0);
+ if (!res->image) {
+ return -EINVAL;
+ }
+
+ res->addrs = g_new(uint64_t, res->iov_cnt);
+ res->iov = g_new(struct iovec, res->iov_cnt);
+
+ /* read data */
+ for (i = 0; i < res->iov_cnt; i++) {
+ res->addrs[i] = qemu_get_be64(f);
+ res->iov[i].iov_len = qemu_get_be32(f);
+ }
+ qemu_get_buffer(f, (void *)pixman_image_get_data(res->image),
+ pixman_image_get_stride(res->image) * res->height);
+
+ /* restore mapping */
+ for (i = 0; i < res->iov_cnt; i++) {
+ hwaddr len = res->iov[i].iov_len;
+ res->iov[i].iov_base =
+ cpu_physical_memory_map(res->addrs[i], &len, 1);
+ if (!res->iov[i].iov_base || len != res->iov[i].iov_len) {
+ return -EINVAL;
+ }
+ }
+
+ QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+
+ resource_id = qemu_get_be32(f);
+ }
+
+ /* load & apply scanout state */
+ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ scanout = &g->scanout[i];
+ if (!scanout->resource_id) {
+ continue;
+ }
+ res = virtio_gpu_find_resource(g, scanout->resource_id);
+ if (!res) {
+ return -EINVAL;
+ }
+ scanout->ds = qemu_create_displaysurface_pixman(res->image);
+ if (!scanout->ds) {
+ return -EINVAL;
+ }
+
+ dpy_gfx_replace_surface(scanout->con, scanout->ds);
+ dpy_gfx_update(scanout->con, 0, 0, scanout->width, scanout->height);
+ update_cursor(g, &scanout->cursor);
+ res->scanout_bitmask |= (1 << i);
+ }
+
+ return 0;
+}
+
static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
{
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
@@ -991,7 +1162,12 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
}
}
- vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ vmstate_register(qdev, -1, &vmstate_virtio_gpu_unmigratable, g);
+ } else {
+ register_savevm(qdev, "virtio-gpu", -1, VIRTIO_GPU_VM_VERSION,
+ virtio_gpu_save, virtio_gpu_load, g);
+ }
}
static void virtio_gpu_instance_init(Object *obj)
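
For reference when reasoning about compatibility between QEMU versions, the stream layout produced by virtio_gpu_save() and expected by virtio_gpu_load() can be read off the code above. A summary, informational only and derived from the diff:

/*
 * savevm section "virtio-gpu", version VIRTIO_GPU_VM_VERSION (1):
 *
 *   virtio core state                    virtio_save() / virtio_load()
 *   for each resource on g->reslist:
 *       be32 resource_id                 non-zero
 *       be32 width, height, format
 *       be32 iov_cnt
 *       iov_cnt x { be64 guest address, be32 length }
 *       pixel data                       stride * height bytes
 *   be32 0                               end-of-resource-list marker
 *   vmstate "virtio-gpu-scanouts"        enable, max_outputs (must match),
 *                                        one vmstate_virtio_gpu_scanout each
 *
 * On load, backing pages are re-mapped from the saved guest addresses, the
 * display surfaces are recreated from the restored pixman images, and the
 * saved cursor command is replayed through update_cursor().
 */

The savevm section is only registered when virgl is disabled; with virgl enabled the renamed "virtio-gpu-with-virgl" unmigratable vmstate section keeps blocking live migration, as before.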