author     Peter Maydell <peter.maydell@linaro.org>    2019-05-30 13:10:00 +0100
committer  Peter Maydell <peter.maydell@linaro.org>    2019-05-30 13:10:00 +0100
commit     95172e24051ce48237f371849498bfc1027df578
tree       a081b5ffc86495a129e31bcdd04d92191695686b
parent     62f6849e7a74fe34f5e376fe41389298100a2d1e
parent     267f664658fee4138f80013fa25354191e8091cb
Merge remote-tracking branch 'remotes/kraxel/tags/vga-20190529-pull-request' into staging
vga: add vhost-user-gpu.

# gpg: Signature made Wed 29 May 2019 05:40:02 BST
# gpg:                using RSA key 4CB6D8EED3E87138
# gpg: Good signature from "Gerd Hoffmann (work) <kraxel@redhat.com>" [full]
# gpg:                 aka "Gerd Hoffmann <gerd@kraxel.org>" [full]
# gpg:                 aka "Gerd Hoffmann (private) <kraxel@gmail.com>" [full]
# Primary key fingerprint: A032 8CFF B93A 17A7 9901 FE7D 4CB6 D8EE D3E8 7138

* remotes/kraxel/tags/vga-20190529-pull-request:
  hw/display: add vhost-user-vga & gpu-pci
  virtio-gpu: split virtio-gpu-pci & virtio-vga
  virtio-gpu: split virtio-gpu, introduce virtio-gpu-base
  spice-app: fix running when !CONFIG_OPENGL
  contrib: add vhost-user-gpu
  util: compile drm.o on posix
  virtio-gpu: add a pixman helper header
  virtio-gpu: add bswap helpers header
  vhost-user: add vhost_user_gpu_set_socket()
  virtio-gpu: add sanity check

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
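For orientation only (not part of the commit): the series adds a standalone vhost-user-gpu helper under contrib/ plus vhost-user-gpu-pci and vhost-user-vga devices in hw/display. Below is a minimal usage sketch; the --socket-path and --virgl flags come from the helper's option table in this diff, while the socket path, the -chardev/-device wiring, and the shared-memory requirement are illustrative assumptions rather than something spelled out in this patch.

    # backend side: the contrib helper added by this series (socket path assumed)
    ./vhost-user-gpu --socket-path=/tmp/vgpu.sock --virgl &

    # QEMU side: illustrative wiring of the new vhost-user-gpu-pci device;
    # vhost-user additionally needs a shareable guest memory backend, omitted here
    qemu-system-x86_64 \
        -chardev socket,id=vgpu,path=/tmp/vgpu.sock \
        -device vhost-user-gpu-pci,chardev=vgpu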
-rw-r--r--  MAINTAINERS                                  10
-rw-r--r--  Makefile                                     24
-rw-r--r--  Makefile.objs                                 1
-rwxr-xr-x  configure                                    17
-rw-r--r--  contrib/libvhost-user/libvhost-user.c         1
-rw-r--r--  contrib/libvhost-user/libvhost-user.h         1
-rw-r--r--  contrib/vhost-user-gpu/50-qemu-gpu.json.in    5
-rw-r--r--  contrib/vhost-user-gpu/Makefile.objs         10
-rw-r--r--  contrib/vhost-user-gpu/main.c              1185
-rw-r--r--  contrib/vhost-user-gpu/virgl.c              579
-rw-r--r--  contrib/vhost-user-gpu/virgl.h               25
-rw-r--r--  contrib/vhost-user-gpu/vugbm.c              328
-rw-r--r--  contrib/vhost-user-gpu/vugbm.h               67
-rw-r--r--  contrib/vhost-user-gpu/vugpu.h              177
-rw-r--r--  docs/interop/index.rst                        1
-rw-r--r--  docs/interop/vhost-user-gpu.rst             242
-rw-r--r--  docs/interop/vhost-user.rst                   9
-rw-r--r--  hw/display/Kconfig                           10
-rw-r--r--  hw/display/Makefile.objs                      5
-rw-r--r--  hw/display/vhost-user-gpu-pci.c              51
-rw-r--r--  hw/display/vhost-user-gpu.c                 607
-rw-r--r--  hw/display/vhost-user-vga.c                  52
-rw-r--r--  hw/display/virtio-gpu-3d.c                   49
-rw-r--r--  hw/display/virtio-gpu-base.c                268
-rw-r--r--  hw/display/virtio-gpu-pci.c                  55
-rw-r--r--  hw/display/virtio-gpu.c                     450
-rw-r--r--  hw/display/virtio-vga.c                     138
-rw-r--r--  hw/display/virtio-vga.h                      32
-rw-r--r--  hw/virtio/vhost-user.c                       11
-rw-r--r--  include/hw/virtio/vhost-backend.h             2
-rw-r--r--  include/hw/virtio/virtio-gpu-bswap.h         61
-rw-r--r--  include/hw/virtio/virtio-gpu-pci.h           40
-rw-r--r--  include/hw/virtio/virtio-gpu-pixman.h        45
-rw-r--r--  include/hw/virtio/virtio-gpu.h               92
-rw-r--r--  rules.mak                                     9
-rw-r--r--  ui/spice-app.c                                3
-rw-r--r--  util/Makefile.objs                            2
-rw-r--r--  vl.c                                          1
38 files changed, 4178 insertions, 487 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 1f5f8b7a2c..67dcffdc22 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1675,9 +1675,17 @@ virtio-gpu
M: Gerd Hoffmann <kraxel@redhat.com>
S: Maintained
F: hw/display/virtio-gpu*
-F: hw/display/virtio-vga.c
+F: hw/display/virtio-vga.*
F: include/hw/virtio/virtio-gpu.h
+vhost-user-gpu
+M: Marc-André Lureau <marcandre.lureau@redhat.com>
+M: Gerd Hoffmann <kraxel@redhat.com>
+S: Maintained
+F: docs/interop/vhost-user-gpu.rst
+F: contrib/vhost-user-gpu
+F: hw/display/vhost-user-*
+
Cirrus VGA
M: Gerd Hoffmann <kraxel@redhat.com>
S: Odd Fixes
diff --git a/Makefile b/Makefile
index f0be624f47..8e2fc6624c 100644
--- a/Makefile
+++ b/Makefile
@@ -314,8 +314,20 @@ $(call set-vpath, $(SRC_PATH))
LIBS+=-lz $(LIBS_TOOLS)
+vhost-user-json-y =
+HELPERS-y =
+
HELPERS-$(call land,$(CONFIG_SOFTMMU),$(CONFIG_LINUX)) = qemu-bridge-helper$(EXESUF)
+ifdef CONFIG_LINUX
+ifdef CONFIG_VIRGL
+ifdef CONFIG_GBM
+HELPERS-y += vhost-user-gpu$(EXESUF)
+vhost-user-json-y += contrib/vhost-user-gpu/50-qemu-gpu.json
+endif
+endif
+endif
+
ifdef BUILD_DOCS
DOCS=qemu-doc.html qemu-doc.txt qemu.1 qemu-img.1 qemu-nbd.8 qemu-ga.8
DOCS+=docs/interop/qemu-qmp-ref.html docs/interop/qemu-qmp-ref.txt docs/interop/qemu-qmp-ref.7
@@ -409,6 +421,7 @@ dummy := $(call unnest-vars,, \
vhost-user-scsi-obj-y \
vhost-user-blk-obj-y \
vhost-user-input-obj-y \
+ vhost-user-gpu-obj-y \
qga-vss-dll-obj-y \
block-obj-y \
block-obj-m \
@@ -426,7 +439,7 @@ dummy := $(call unnest-vars,, \
include $(SRC_PATH)/tests/Makefile.include
-all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules
+all: $(DOCS) $(if $(BUILD_DOCS),sphinxdocs) $(TOOLS) $(HELPERS-y) recurse-all modules $(vhost-user-json-y)
qemu-version.h: FORCE
$(call quiet-command, \
@@ -619,6 +632,9 @@ rdmacm-mux$(EXESUF): LIBS += "-libumad"
rdmacm-mux$(EXESUF): $(rdmacm-mux-obj-y) $(COMMON_LDADDS)
$(call LINK, $^)
+vhost-user-gpu$(EXESUF): $(vhost-user-gpu-obj-y) $(libvhost-user-obj-y) libqemuutil.a libqemustub.a
+ $(call LINK, $^)
+
ifdef CONFIG_VHOST_USER_INPUT
ifdef CONFIG_LINUX
vhost-user-input$(EXESUF): $(vhost-user-input-obj-y) libvhost-user.a libqemuutil.a
@@ -827,6 +843,12 @@ endif
ifneq ($(HELPERS-y),)
$(call install-prog,$(HELPERS-y),$(DESTDIR)$(libexecdir))
endif
+ifneq ($(vhost-user-json-y),)
+ $(INSTALL_DIR) "$(DESTDIR)$(qemu_datadir)/vhost-user/"
+ for x in $(vhost-user-json-y); do \
+ $(INSTALL_DATA) $$x "$(DESTDIR)$(qemu_datadir)/vhost-user/"; \
+ done
+endif
ifdef CONFIG_TRACE_SYSTEMTAP
$(INSTALL_PROG) "scripts/qemu-trace-stap" $(DESTDIR)$(bindir)
endif
diff --git a/Makefile.objs b/Makefile.objs
index b61568ad06..c8337fa34b 100644
--- a/Makefile.objs
+++ b/Makefile.objs
@@ -123,6 +123,7 @@ vhost-user-scsi-obj-y = contrib/vhost-user-scsi/
vhost-user-blk-obj-y = contrib/vhost-user-blk/
rdmacm-mux-obj-y = contrib/rdmacm-mux/
vhost-user-input-obj-y = contrib/vhost-user-input/
+vhost-user-gpu-obj-y = contrib/vhost-user-gpu/
######################################################################
trace-events-subdirs =
diff --git a/configure b/configure
index 528b9ff705..8148ac6cae 100755
--- a/configure
+++ b/configure
@@ -4099,6 +4099,13 @@ libs_softmmu="$libs_softmmu $fdt_libs"
##########################################
# opengl probe (for sdl2, gtk, milkymist-tmu2)
+gbm="no"
+if $pkg_config gbm; then
+ gbm_cflags="$($pkg_config --cflags gbm)"
+ gbm_libs="$($pkg_config --libs gbm)"
+ gbm="yes"
+fi
+
if test "$opengl" != "no" ; then
opengl_pkgs="epoxy gbm"
if $pkg_config $opengl_pkgs; then
@@ -6964,6 +6971,13 @@ if test "$opengl" = "yes" ; then
fi
fi
+if test "$gbm" = "yes" ; then
+ echo "CONFIG_GBM=y" >> $config_host_mak
+ echo "GBM_LIBS=$gbm_libs" >> $config_host_mak
+ echo "GBM_CFLAGS=$gbm_cflags" >> $config_host_mak
+fi
+
+
if test "$malloc_trim" = "yes" ; then
echo "CONFIG_MALLOC_TRIM=y" >> $config_host_mak
fi
@@ -7859,6 +7873,9 @@ echo "QEMU_CFLAGS+=$cflags" >> $config_target_mak
done # for target in $targets
+echo "PIXMAN_CFLAGS=$pixman_cflags" >> $config_host_mak
+echo "PIXMAN_LIBS=$pixman_libs" >> $config_host_mak
+
if test -n "$enabled_cross_compilers"; then
echo
echo "NOTE: cross-compilers enabled: $enabled_cross_compilers"
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index c56f2dfe44..443b7e08c3 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -130,6 +130,7 @@ vu_request_to_string(unsigned int req)
REQ(VHOST_USER_POSTCOPY_END),
REQ(VHOST_USER_GET_INFLIGHT_FD),
REQ(VHOST_USER_SET_INFLIGHT_FD),
+ REQ(VHOST_USER_GPU_SET_SOCKET),
REQ(VHOST_USER_MAX),
};
#undef REQ
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index 78b33306e8..3b888ff0a5 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -94,6 +94,7 @@ typedef enum VhostUserRequest {
VHOST_USER_POSTCOPY_END = 30,
VHOST_USER_GET_INFLIGHT_FD = 31,
VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
VHOST_USER_MAX
} VhostUserRequest;
diff --git a/contrib/vhost-user-gpu/50-qemu-gpu.json.in b/contrib/vhost-user-gpu/50-qemu-gpu.json.in
new file mode 100644
index 0000000000..658b545864
--- /dev/null
+++ b/contrib/vhost-user-gpu/50-qemu-gpu.json.in
@@ -0,0 +1,5 @@
+{
+ "description": "QEMU vhost-user-gpu",
+ "type": "gpu",
+ "binary": "@libexecdir@/vhost-user-gpu",
+}
diff --git a/contrib/vhost-user-gpu/Makefile.objs b/contrib/vhost-user-gpu/Makefile.objs
new file mode 100644
index 0000000000..6170c919e4
--- /dev/null
+++ b/contrib/vhost-user-gpu/Makefile.objs
@@ -0,0 +1,10 @@
+vhost-user-gpu-obj-y = main.o virgl.o vugbm.o
+
+main.o-cflags := $(PIXMAN_CFLAGS) $(GBM_CFLAGS)
+main.o-libs := $(PIXMAN_LIBS)
+
+virgl.o-cflags := $(VIRGL_CFLAGS) $(GBM_CFLAGS)
+virgl.o-libs := $(VIRGL_LIBS)
+
+vugbm.o-cflags := $(GBM_CFLAGS)
+vugbm.o-libs := $(GBM_LIBS)
diff --git a/contrib/vhost-user-gpu/main.c b/contrib/vhost-user-gpu/main.c
new file mode 100644
index 0000000000..f9e2146b69
--- /dev/null
+++ b/contrib/vhost-user-gpu/main.c
@@ -0,0 +1,1185 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2018
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/drm.h"
+#include "qapi/error.h"
+#include "qemu/sockets.h"
+
+#include <pixman.h>
+#include <glib-unix.h>
+
+#include "vugpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
+#include "virgl.h"
+#include "vugbm.h"
+
+struct virtio_gpu_simple_resource {
+ uint32_t resource_id;
+ uint32_t width;
+ uint32_t height;
+ uint32_t format;
+ struct iovec *iov;
+ unsigned int iov_cnt;
+ uint32_t scanout_bitmask;
+ pixman_image_t *image;
+ struct vugbm_buffer buffer;
+ QTAILQ_ENTRY(virtio_gpu_simple_resource) next;
+};
+
+static gboolean opt_print_caps;
+static int opt_fdnum = -1;
+static char *opt_socket_path;
+static char *opt_render_node;
+static gboolean opt_virgl;
+
+static void vg_handle_ctrl(VuDev *dev, int qidx);
+
+static const char *
+vg_cmd_to_string(int cmd)
+{
+#define CMD(cmd) [cmd] = #cmd
+ static const char *vg_cmd_str[] = {
+ CMD(VIRTIO_GPU_UNDEFINED),
+
+ /* 2d commands */
+ CMD(VIRTIO_GPU_CMD_GET_DISPLAY_INFO),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_UNREF),
+ CMD(VIRTIO_GPU_CMD_SET_SCANOUT),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_FLUSH),
+ CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING),
+ CMD(VIRTIO_GPU_CMD_GET_CAPSET_INFO),
+ CMD(VIRTIO_GPU_CMD_GET_CAPSET),
+
+ /* 3d commands */
+ CMD(VIRTIO_GPU_CMD_CTX_CREATE),
+ CMD(VIRTIO_GPU_CMD_CTX_DESTROY),
+ CMD(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE),
+ CMD(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE),
+ CMD(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D),
+ CMD(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D),
+ CMD(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D),
+ CMD(VIRTIO_GPU_CMD_SUBMIT_3D),
+
+ /* cursor commands */
+ CMD(VIRTIO_GPU_CMD_UPDATE_CURSOR),
+ CMD(VIRTIO_GPU_CMD_MOVE_CURSOR),
+ };
+#undef CMD
+
+ if (cmd >= 0 && cmd < G_N_ELEMENTS(vg_cmd_str)) {
+ return vg_cmd_str[cmd];
+ } else {
+ return "unknown";
+ }
+}
+
+static int
+vg_sock_fd_read(int sock, void *buf, ssize_t buflen)
+{
+ int ret;
+
+ do {
+ ret = read(sock, buf, buflen);
+ } while (ret < 0 && (errno == EINTR || errno == EAGAIN));
+
+ g_warn_if_fail(ret == buflen);
+ return ret;
+}
+
+static void
+vg_sock_fd_close(VuGpu *g)
+{
+ if (g->sock_fd >= 0) {
+ close(g->sock_fd);
+ g->sock_fd = -1;
+ }
+}
+
+static gboolean
+source_wait_cb(gint fd, GIOCondition condition, gpointer user_data)
+{
+ VuGpu *g = user_data;
+
+ if (!vg_recv_msg(g, VHOST_USER_GPU_DMABUF_UPDATE, 0, NULL)) {
+ return G_SOURCE_CONTINUE;
+ }
+
+ /* resume */
+ g->wait_ok = 0;
+ vg_handle_ctrl(&g->dev.parent, 0);
+
+ return G_SOURCE_REMOVE;
+}
+
+void
+vg_wait_ok(VuGpu *g)
+{
+ assert(g->wait_ok == 0);
+ g->wait_ok = g_unix_fd_add(g->sock_fd, G_IO_IN | G_IO_HUP,
+ source_wait_cb, g);
+}
+
+static int
+vg_sock_fd_write(int sock, const void *buf, ssize_t buflen, int fd)
+{
+ ssize_t ret;
+ struct msghdr msg;
+ struct iovec iov;
+ union {
+ struct cmsghdr cmsghdr;
+ char control[CMSG_SPACE(sizeof(int))];
+ } cmsgu;
+ struct cmsghdr *cmsg;
+
+ iov.iov_base = (void *)buf;
+ iov.iov_len = buflen;
+
+ msg.msg_name = NULL;
+ msg.msg_namelen = 0;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+
+ if (fd != -1) {
+ msg.msg_control = cmsgu.control;
+ msg.msg_controllen = sizeof(cmsgu.control);
+
+ cmsg = CMSG_FIRSTHDR(&msg);
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+
+ *((int *)CMSG_DATA(cmsg)) = fd;
+ } else {
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ }
+
+ do {
+ ret = sendmsg(sock, &msg, 0);
+ } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
+
+ g_warn_if_fail(ret == buflen);
+ return ret;
+}
+
+void
+vg_send_msg(VuGpu *vg, const VhostUserGpuMsg *msg, int fd)
+{
+ if (vg_sock_fd_write(vg->sock_fd, msg,
+ VHOST_USER_GPU_HDR_SIZE + msg->size, fd) < 0) {
+ vg_sock_fd_close(vg);
+ }
+}
+
+bool
+vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
+ gpointer payload)
+{
+ uint32_t req, flags, size;
+
+ if (vg_sock_fd_read(g->sock_fd, &req, sizeof(req)) < 0 ||
+ vg_sock_fd_read(g->sock_fd, &flags, sizeof(flags)) < 0 ||
+ vg_sock_fd_read(g->sock_fd, &size, sizeof(size)) < 0) {
+ goto err;
+ }
+
+ g_return_val_if_fail(req == expect_req, false);
+ g_return_val_if_fail(flags & VHOST_USER_GPU_MSG_FLAG_REPLY, false);
+ g_return_val_if_fail(size == expect_size, false);
+
+ if (size && vg_sock_fd_read(g->sock_fd, payload, size) != size) {
+ goto err;
+ }
+
+ return true;
+
+err:
+ vg_sock_fd_close(g);
+ return false;
+}
+
+static struct virtio_gpu_simple_resource *
+virtio_gpu_find_resource(VuGpu *g, uint32_t resource_id)
+{
+ struct virtio_gpu_simple_resource *res;
+
+ QTAILQ_FOREACH(res, &g->reslist, next) {
+ if (res->resource_id == resource_id) {
+ return res;
+ }
+ }
+ return NULL;
+}
+
+void
+vg_ctrl_response(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct virtio_gpu_ctrl_hdr *resp,
+ size_t resp_len)
+{
+ size_t s;
+
+ if (cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE) {
+ resp->flags |= VIRTIO_GPU_FLAG_FENCE;
+ resp->fence_id = cmd->cmd_hdr.fence_id;
+ resp->ctx_id = cmd->cmd_hdr.ctx_id;
+ }
+ virtio_gpu_ctrl_hdr_bswap(resp);
+ s = iov_from_buf(cmd->elem.in_sg, cmd->elem.in_num, 0, resp, resp_len);
+ if (s != resp_len) {
+ g_critical("%s: response size incorrect %zu vs %zu",
+ __func__, s, resp_len);
+ }
+ vu_queue_push(&g->dev.parent, cmd->vq, &cmd->elem, s);
+ vu_queue_notify(&g->dev.parent, cmd->vq);
+ cmd->finished = true;
+}
+
+void
+vg_ctrl_response_nodata(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ enum virtio_gpu_ctrl_type type)
+{
+ struct virtio_gpu_ctrl_hdr resp = {
+ .type = type,
+ };
+
+ vg_ctrl_response(g, cmd, &resp, sizeof(resp));
+}
+
+void
+vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resp_display_info dpy_info = { {} };
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_GET_DISPLAY_INFO,
+ .size = 0,
+ };
+
+ assert(vg->wait_ok == 0);
+
+ vg_send_msg(vg, &msg, -1);
+ if (!vg_recv_msg(vg, msg.request, sizeof(dpy_info), &dpy_info)) {
+ return;
+ }
+
+ vg_ctrl_response(vg, cmd, &dpy_info.hdr, sizeof(dpy_info));
+}
+
+static void
+vg_resource_create_2d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ pixman_format_code_t pformat;
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_create_2d c2d;
+
+ VUGPU_FILL_CMD(c2d);
+ virtio_gpu_bswap_32(&c2d, sizeof(c2d));
+
+ if (c2d.resource_id == 0) {
+ g_critical("%s: resource id 0 is not allowed", __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, c2d.resource_id);
+ if (res) {
+ g_critical("%s: resource already exists %d", __func__, c2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ res = g_new0(struct virtio_gpu_simple_resource, 1);
+ res->width = c2d.width;
+ res->height = c2d.height;
+ res->format = c2d.format;
+ res->resource_id = c2d.resource_id;
+
+ pformat = virtio_gpu_get_pixman_format(c2d.format);
+ if (!pformat) {
+ g_critical("%s: host couldn't handle guest format %d",
+ __func__, c2d.format);
+ g_free(res);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+ vugbm_buffer_create(&res->buffer, &g->gdev, c2d.width, c2d.height);
+ res->image = pixman_image_create_bits(pformat,
+ c2d.width,
+ c2d.height,
+ (uint32_t *)res->buffer.mmap,
+ res->buffer.stride);
+ if (!res->image) {
+ g_critical("%s: resource creation failed %d %d %d",
+ __func__, c2d.resource_id, c2d.width, c2d.height);
+ g_free(res);
+ cmd->error = VIRTIO_GPU_RESP_ERR_OUT_OF_MEMORY;
+ return;
+ }
+
+ QTAILQ_INSERT_HEAD(&g->reslist, res, next);
+}
+
+static void
+vg_disable_scanout(VuGpu *g, int scanout_id)
+{
+ struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
+ struct virtio_gpu_simple_resource *res;
+
+ if (scanout->resource_id == 0) {
+ return;
+ }
+
+ res = virtio_gpu_find_resource(g, scanout->resource_id);
+ if (res) {
+ res->scanout_bitmask &= ~(1 << scanout_id);
+ }
+
+ scanout->width = 0;
+ scanout->height = 0;
+
+ {
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_SCANOUT,
+ .size = sizeof(VhostUserGpuScanout),
+ .payload.scanout.scanout_id = scanout_id,
+ };
+ vg_send_msg(g, &msg, -1);
+ }
+}
+
+static void
+vg_resource_destroy(VuGpu *g,
+ struct virtio_gpu_simple_resource *res)
+{
+ int i;
+
+ if (res->scanout_bitmask) {
+ for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
+ if (res->scanout_bitmask & (1 << i)) {
+ vg_disable_scanout(g, i);
+ }
+ }
+ }
+
+ vugbm_buffer_destroy(&res->buffer);
+ pixman_image_unref(res->image);
+ QTAILQ_REMOVE(&g->reslist, res, next);
+ g_free(res);
+}
+
+static void
+vg_resource_unref(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_unref unref;
+
+ VUGPU_FILL_CMD(unref);
+ virtio_gpu_bswap_32(&unref, sizeof(unref));
+
+ res = virtio_gpu_find_resource(g, unref.resource_id);
+ if (!res) {
+ g_critical("%s: illegal resource specified %d",
+ __func__, unref.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+ vg_resource_destroy(g, res);
+}
+
+int
+vg_create_mapping_iov(VuGpu *g,
+ struct virtio_gpu_resource_attach_backing *ab,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct iovec **iov)
+{
+ struct virtio_gpu_mem_entry *ents;
+ size_t esize, s;
+ int i;
+
+ if (ab->nr_entries > 16384) {
+ g_critical("%s: nr_entries is too big (%d > 16384)",
+ __func__, ab->nr_entries);
+ return -1;
+ }
+
+ esize = sizeof(*ents) * ab->nr_entries;
+ ents = g_malloc(esize);
+ s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
+ sizeof(*ab), ents, esize);
+ if (s != esize) {
+ g_critical("%s: command data size incorrect %zu vs %zu",
+ __func__, s, esize);
+ g_free(ents);
+ return -1;
+ }
+
+ *iov = g_malloc0(sizeof(struct iovec) * ab->nr_entries);
+ for (i = 0; i < ab->nr_entries; i++) {
+ uint64_t len = ents[i].length;
+ (*iov)[i].iov_len = ents[i].length;
+ (*iov)[i].iov_base = vu_gpa_to_va(&g->dev.parent, &len, ents[i].addr);
+ if (!(*iov)[i].iov_base || len != ents[i].length) {
+ g_critical("%s: resource %d element %d",
+ __func__, ab->resource_id, i);
+ g_free(*iov);
+ g_free(ents);
+ *iov = NULL;
+ return -1;
+ }
+ }
+ g_free(ents);
+ return 0;
+}
+
+static void
+vg_resource_attach_backing(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_attach_backing ab;
+ int ret;
+
+ VUGPU_FILL_CMD(ab);
+ virtio_gpu_bswap_32(&ab, sizeof(ab));
+
+ res = virtio_gpu_find_resource(g, ab.resource_id);
+ if (!res) {
+ g_critical("%s: illegal resource specified %d",
+ __func__, ab.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ ret = vg_create_mapping_iov(g, &ab, cmd, &res->iov);
+ if (ret != 0) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ res->iov_cnt = ab.nr_entries;
+}
+
+static void
+vg_resource_detach_backing(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_detach_backing detach;
+
+ VUGPU_FILL_CMD(detach);
+ virtio_gpu_bswap_32(&detach, sizeof(detach));
+
+ res = virtio_gpu_find_resource(g, detach.resource_id);
+ if (!res || !res->iov) {
+ g_critical("%s: illegal resource specified %d",
+ __func__, detach.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ g_free(res->iov);
+ res->iov = NULL;
+ res->iov_cnt = 0;
+}
+
+static void
+vg_transfer_to_host_2d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ int h;
+ uint32_t src_offset, dst_offset, stride;
+ int bpp;
+ pixman_format_code_t format;
+ struct virtio_gpu_transfer_to_host_2d t2d;
+
+ VUGPU_FILL_CMD(t2d);
+ virtio_gpu_t2d_bswap(&t2d);
+
+ res = virtio_gpu_find_resource(g, t2d.resource_id);
+ if (!res || !res->iov) {
+ g_critical("%s: illegal resource specified %d",
+ __func__, t2d.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (t2d.r.x > res->width ||
+ t2d.r.y > res->height ||
+ t2d.r.width > res->width ||
+ t2d.r.height > res->height ||
+ t2d.r.x + t2d.r.width > res->width ||
+ t2d.r.y + t2d.r.height > res->height) {
+ g_critical("%s: transfer bounds outside resource"
+ " bounds for resource %d: %d %d %d %d vs %d %d",
+ __func__, t2d.resource_id, t2d.r.x, t2d.r.y,
+ t2d.r.width, t2d.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ format = pixman_image_get_format(res->image);
+ bpp = (PIXMAN_FORMAT_BPP(format) + 7) / 8;
+ stride = pixman_image_get_stride(res->image);
+
+ if (t2d.offset || t2d.r.x || t2d.r.y ||
+ t2d.r.width != pixman_image_get_width(res->image)) {
+ void *img_data = pixman_image_get_data(res->image);
+ for (h = 0; h < t2d.r.height; h++) {
+ src_offset = t2d.offset + stride * h;
+ dst_offset = (t2d.r.y + h) * stride + (t2d.r.x * bpp);
+
+ iov_to_buf(res->iov, res->iov_cnt, src_offset,
+ img_data
+ + dst_offset, t2d.r.width * bpp);
+ }
+ } else {
+ iov_to_buf(res->iov, res->iov_cnt, 0,
+ pixman_image_get_data(res->image),
+ pixman_image_get_stride(res->image)
+ * pixman_image_get_height(res->image));
+ }
+}
+
+static void
+vg_set_scanout(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res, *ores;
+ struct virtio_gpu_scanout *scanout;
+ struct virtio_gpu_set_scanout ss;
+ int fd;
+
+ VUGPU_FILL_CMD(ss);
+ virtio_gpu_bswap_32(&ss, sizeof(ss));
+
+ if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
+ g_critical("%s: illegal scanout id specified %d",
+ __func__, ss.scanout_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+ return;
+ }
+
+ if (ss.resource_id == 0) {
+ vg_disable_scanout(g, ss.scanout_id);
+ return;
+ }
+
+ /* create a surface for this scanout */
+ res = virtio_gpu_find_resource(g, ss.resource_id);
+ if (!res) {
+ g_critical("%s: illegal resource specified %d",
+ __func__, ss.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (ss.r.x > res->width ||
+ ss.r.y > res->height ||
+ ss.r.width > res->width ||
+ ss.r.height > res->height ||
+ ss.r.x + ss.r.width > res->width ||
+ ss.r.y + ss.r.height > res->height) {
+ g_critical("%s: illegal scanout %d bounds for"
+ " resource %d, (%d,%d)+%d,%d vs %d %d",
+ __func__, ss.scanout_id, ss.resource_id, ss.r.x, ss.r.y,
+ ss.r.width, ss.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ scanout = &g->scanout[ss.scanout_id];
+
+ ores = virtio_gpu_find_resource(g, scanout->resource_id);
+ if (ores) {
+ ores->scanout_bitmask &= ~(1 << ss.scanout_id);
+ }
+
+ res->scanout_bitmask |= (1 << ss.scanout_id);
+ scanout->resource_id = ss.resource_id;
+ scanout->x = ss.r.x;
+ scanout->y = ss.r.y;
+ scanout->width = ss.r.width;
+ scanout->height = ss.r.height;
+
+ struct vugbm_buffer *buffer = &res->buffer;
+
+ if (vugbm_buffer_can_get_dmabuf_fd(buffer)) {
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_SCANOUT,
+ .size = sizeof(VhostUserGpuDMABUFScanout),
+ .payload.dmabuf_scanout = (VhostUserGpuDMABUFScanout) {
+ .scanout_id = ss.scanout_id,
+ .x = ss.r.x,
+ .y = ss.r.y,
+ .width = ss.r.width,
+ .height = ss.r.height,
+ .fd_width = buffer->width,
+ .fd_height = buffer->height,
+ .fd_stride = buffer->stride,
+ .fd_drm_fourcc = buffer->format
+ }
+ };
+
+ if (vugbm_buffer_get_dmabuf_fd(buffer, &fd)) {
+ vg_send_msg(g, &msg, fd);
+ close(fd);
+ }
+ } else {
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_SCANOUT,
+ .size = sizeof(VhostUserGpuScanout),
+ .payload.scanout = (VhostUserGpuScanout) {
+ .scanout_id = ss.scanout_id,
+ .width = scanout->width,
+ .height = scanout->height
+ }
+ };
+ vg_send_msg(g, &msg, -1);
+ }
+}
+
+static void
+vg_resource_flush(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_simple_resource *res;
+ struct virtio_gpu_resource_flush rf;
+ pixman_region16_t flush_region;
+ int i;
+
+ VUGPU_FILL_CMD(rf);
+ virtio_gpu_bswap_32(&rf, sizeof(rf));
+
+ res = virtio_gpu_find_resource(g, rf.resource_id);
+ if (!res) {
+ g_critical("%s: illegal resource specified %d\n",
+ __func__, rf.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ if (rf.r.x > res->width ||
+ rf.r.y > res->height ||
+ rf.r.width > res->width ||
+ rf.r.height > res->height ||
+ rf.r.x + rf.r.width > res->width ||
+ rf.r.y + rf.r.height > res->height) {
+ g_critical("%s: flush bounds outside resource"
+ " bounds for resource %d: %d %d %d %d vs %d %d\n",
+ __func__, rf.resource_id, rf.r.x, rf.r.y,
+ rf.r.width, rf.r.height, res->width, res->height);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ return;
+ }
+
+ pixman_region_init_rect(&flush_region,
+ rf.r.x, rf.r.y, rf.r.width, rf.r.height);
+ for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
+ struct virtio_gpu_scanout *scanout;
+ pixman_region16_t region, finalregion;
+ pixman_box16_t *extents;
+
+ if (!(res->scanout_bitmask & (1 << i))) {
+ continue;
+ }
+ scanout = &g->scanout[i];
+
+ pixman_region_init(&finalregion);
+ pixman_region_init_rect(&region, scanout->x, scanout->y,
+ scanout->width, scanout->height);
+
+ pixman_region_intersect(&finalregion, &flush_region, &region);
+
+ extents = pixman_region_extents(&finalregion);
+ size_t width = extents->x2 - extents->x1;
+ size_t height = extents->y2 - extents->y1;
+
+ if (vugbm_buffer_can_get_dmabuf_fd(&res->buffer)) {
+ VhostUserGpuMsg vmsg = {
+ .request = VHOST_USER_GPU_DMABUF_UPDATE,
+ .size = sizeof(VhostUserGpuUpdate),
+ .payload.update = (VhostUserGpuUpdate) {
+ .scanout_id = i,
+ .x = extents->x1,
+ .y = extents->y1,
+ .width = width,
+ .height = height,
+ }
+ };
+ vg_send_msg(g, &vmsg, -1);
+ vg_wait_ok(g);
+ } else {
+ size_t bpp =
+ PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) / 8;
+ size_t size = width * height * bpp;
+
+ void *p = g_malloc(VHOST_USER_GPU_HDR_SIZE +
+ sizeof(VhostUserGpuUpdate) + size);
+ VhostUserGpuMsg *msg = p;
+ msg->request = VHOST_USER_GPU_UPDATE;
+ msg->size = sizeof(VhostUserGpuUpdate) + size;
+ msg->payload.update = (VhostUserGpuUpdate) {
+ .scanout_id = i,
+ .x = extents->x1,
+ .y = extents->y1,
+ .width = width,
+ .height = height,
+ };
+ pixman_image_t *i =
+ pixman_image_create_bits(pixman_image_get_format(res->image),
+ msg->payload.update.width,
+ msg->payload.update.height,
+ p + offsetof(VhostUserGpuMsg,
+ payload.update.data),
+ width * bpp);
+ pixman_image_composite(PIXMAN_OP_SRC,
+ res->image, NULL, i,
+ extents->x1, extents->y1,
+ 0, 0, 0, 0,
+ width, height);
+ pixman_image_unref(i);
+ vg_send_msg(g, msg, -1);
+ g_free(msg);
+ }
+ pixman_region_fini(&region);
+ pixman_region_fini(&finalregion);
+ }
+ pixman_region_fini(&flush_region);
+}
+
+static void
+vg_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd)
+{
+ switch (cmd->cmd_hdr.type) {
+ case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
+ vg_get_display_info(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
+ vg_resource_create_2d(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNREF:
+ vg_resource_unref(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
+ vg_resource_flush(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
+ vg_transfer_to_host_2d(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_SET_SCANOUT:
+ vg_set_scanout(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
+ vg_resource_attach_backing(vg, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
+ vg_resource_detach_backing(vg, cmd);
+ break;
+ /* case VIRTIO_GPU_CMD_GET_EDID: */
+ /* break */
+ default:
+ g_warning("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ break;
+ }
+ if (!cmd->finished) {
+ vg_ctrl_response_nodata(vg, cmd, cmd->error ? cmd->error :
+ VIRTIO_GPU_RESP_OK_NODATA);
+ }
+}
+
+static void
+vg_handle_ctrl(VuDev *dev, int qidx)
+{
+ VuGpu *vg = container_of(dev, VuGpu, dev.parent);
+ VuVirtq *vq = vu_get_queue(dev, qidx);
+ struct virtio_gpu_ctrl_command *cmd = NULL;
+ size_t len;
+
+ for (;;) {
+ if (vg->wait_ok != 0) {
+ return;
+ }
+
+ cmd = vu_queue_pop(dev, vq, sizeof(struct virtio_gpu_ctrl_command));
+ if (!cmd) {
+ break;
+ }
+ cmd->vq = vq;
+ cmd->error = 0;
+ cmd->finished = false;
+
+ len = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
+ 0, &cmd->cmd_hdr, sizeof(cmd->cmd_hdr));
+ if (len != sizeof(cmd->cmd_hdr)) {
+ g_warning("%s: command size incorrect %zu vs %zu\n",
+ __func__, len, sizeof(cmd->cmd_hdr));
+ }
+
+ virtio_gpu_ctrl_hdr_bswap(&cmd->cmd_hdr);
+ g_debug("%d %s\n", cmd->cmd_hdr.type,
+ vg_cmd_to_string(cmd->cmd_hdr.type));
+
+ if (vg->virgl) {
+ vg_virgl_process_cmd(vg, cmd);
+ } else {
+ vg_process_cmd(vg, cmd);
+ }
+
+ if (!cmd->finished) {
+ QTAILQ_INSERT_TAIL(&vg->fenceq, cmd, next);
+ vg->inflight++;
+ } else {
+ g_free(cmd);
+ }
+ }
+}
+
+static void
+update_cursor_data_simple(VuGpu *g, uint32_t resource_id, gpointer data)
+{
+ struct virtio_gpu_simple_resource *res;
+
+ res = virtio_gpu_find_resource(g, resource_id);
+ g_return_if_fail(res != NULL);
+ g_return_if_fail(pixman_image_get_width(res->image) == 64);
+ g_return_if_fail(pixman_image_get_height(res->image) == 64);
+ g_return_if_fail(
+ PIXMAN_FORMAT_BPP(pixman_image_get_format(res->image)) == 32);
+
+ memcpy(data, pixman_image_get_data(res->image), 64 * 64 * sizeof(uint32_t));
+}
+
+static void
+vg_process_cursor_cmd(VuGpu *g, struct virtio_gpu_update_cursor *cursor)
+{
+ bool move = cursor->hdr.type != VIRTIO_GPU_CMD_MOVE_CURSOR;
+
+ g_debug("%s move:%d\n", G_STRFUNC, move);
+
+ if (move) {
+ VhostUserGpuMsg msg = {
+ .request = cursor->resource_id ?
+ VHOST_USER_GPU_CURSOR_POS : VHOST_USER_GPU_CURSOR_POS_HIDE,
+ .size = sizeof(VhostUserGpuCursorPos),
+ .payload.cursor_pos = {
+ .scanout_id = cursor->pos.scanout_id,
+ .x = cursor->pos.x,
+ .y = cursor->pos.y,
+ }
+ };
+ vg_send_msg(g, &msg, -1);
+ } else {
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_CURSOR_UPDATE,
+ .size = sizeof(VhostUserGpuCursorUpdate),
+ .payload.cursor_update = {
+ .pos = {
+ .scanout_id = cursor->pos.scanout_id,
+ .x = cursor->pos.x,
+ .y = cursor->pos.y,
+ },
+ .hot_x = cursor->hot_x,
+ .hot_y = cursor->hot_y,
+ }
+ };
+ if (g->virgl) {
+ vg_virgl_update_cursor_data(g, cursor->resource_id,
+ msg.payload.cursor_update.data);
+ } else {
+ update_cursor_data_simple(g, cursor->resource_id,
+ msg.payload.cursor_update.data);
+ }
+ vg_send_msg(g, &msg, -1);
+ }
+}
+
+static void
+vg_handle_cursor(VuDev *dev, int qidx)
+{
+ VuGpu *g = container_of(dev, VuGpu, dev.parent);
+ VuVirtq *vq = vu_get_queue(dev, qidx);
+ VuVirtqElement *elem;
+ size_t len;
+ struct virtio_gpu_update_cursor cursor;
+
+ for (;;) {
+ elem = vu_queue_pop(dev, vq, sizeof(VuVirtqElement));
+ if (!elem) {
+ break;
+ }
+ g_debug("cursor out:%d in:%d\n", elem->out_num, elem->in_num);
+
+ len = iov_to_buf(elem->out_sg, elem->out_num,
+ 0, &cursor, sizeof(cursor));
+ if (len != sizeof(cursor)) {
+ g_warning("%s: cursor size incorrect %zu vs %zu\n",
+ __func__, len, sizeof(cursor));
+ } else {
+ virtio_gpu_bswap_32(&cursor, sizeof(cursor));
+ vg_process_cursor_cmd(g, &cursor);
+ }
+ vu_queue_push(dev, vq, elem, 0);
+ vu_queue_notify(dev, vq);
+ g_free(elem);
+ }
+}
+
+static void
+vg_panic(VuDev *dev, const char *msg)
+{
+ g_critical("%s\n", msg);
+ exit(1);
+}
+
+static void
+vg_queue_set_started(VuDev *dev, int qidx, bool started)
+{
+ VuVirtq *vq = vu_get_queue(dev, qidx);
+
+ g_debug("queue started %d:%d\n", qidx, started);
+
+ switch (qidx) {
+ case 0:
+ vu_set_queue_handler(dev, vq, started ? vg_handle_ctrl : NULL);
+ break;
+ case 1:
+ vu_set_queue_handler(dev, vq, started ? vg_handle_cursor : NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+set_gpu_protocol_features(VuGpu *g)
+{
+ uint64_t u64;
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES
+ };
+
+ assert(g->wait_ok == 0);
+ vg_send_msg(g, &msg, -1);
+ if (!vg_recv_msg(g, msg.request, sizeof(u64), &u64)) {
+ return;
+ }
+
+ msg = (VhostUserGpuMsg) {
+ .request = VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
+ .size = sizeof(uint64_t),
+ .payload.u64 = 0
+ };
+ vg_send_msg(g, &msg, -1);
+}
+
+static int
+vg_process_msg(VuDev *dev, VhostUserMsg *msg, int *do_reply)
+{
+ VuGpu *g = container_of(dev, VuGpu, dev.parent);
+
+ switch (msg->request) {
+ case VHOST_USER_GPU_SET_SOCKET: {
+ g_return_val_if_fail(msg->fd_num == 1, 1);
+ g_return_val_if_fail(g->sock_fd == -1, 1);
+ g->sock_fd = msg->fds[0];
+ set_gpu_protocol_features(g);
+ return 1;
+ }
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+static uint64_t
+vg_get_features(VuDev *dev)
+{
+ uint64_t features = 0;
+
+ if (opt_virgl) {
+ features |= 1 << VIRTIO_GPU_F_VIRGL;
+ }
+
+ return features;
+}
+
+static void
+vg_set_features(VuDev *dev, uint64_t features)
+{
+ VuGpu *g = container_of(dev, VuGpu, dev.parent);
+ bool virgl = features & (1 << VIRTIO_GPU_F_VIRGL);
+
+ if (virgl && !g->virgl_inited) {
+ if (!vg_virgl_init(g)) {
+ vg_panic(dev, "Failed to initialize virgl");
+ }
+ g->virgl_inited = true;
+ }
+
+ g->virgl = virgl;
+}
+
+static int
+vg_get_config(VuDev *dev, uint8_t *config, uint32_t len)
+{
+ VuGpu *g = container_of(dev, VuGpu, dev.parent);
+
+ g_return_val_if_fail(len <= sizeof(struct virtio_gpu_config), -1);
+
+ if (opt_virgl) {
+ g->virtio_config.num_capsets = vg_virgl_get_num_capsets();
+ }
+
+ memcpy(config, &g->virtio_config, len);
+
+ return 0;
+}
+
+static int
+vg_set_config(VuDev *dev, const uint8_t *data,
+ uint32_t offset, uint32_t size,
+ uint32_t flags)
+{
+ VuGpu *g = container_of(dev, VuGpu, dev.parent);
+ struct virtio_gpu_config *config = (struct virtio_gpu_config *)data;
+
+ if (config->events_clear) {
+ g->virtio_config.events_read &= ~config->events_clear;
+ }
+
+ return 0;
+}
+
+static const VuDevIface vuiface = {
+ .set_features = vg_set_features,
+ .get_features = vg_get_features,
+ .queue_set_started = vg_queue_set_started,
+ .process_msg = vg_process_msg,
+ .get_config = vg_get_config,
+ .set_config = vg_set_config,
+};
+
+static void
+vg_destroy(VuGpu *g)
+{
+ struct virtio_gpu_simple_resource *res, *tmp;
+
+ vug_deinit(&g->dev);
+
+ vg_sock_fd_close(g);
+
+ QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
+ vg_resource_destroy(g, res);
+ }
+
+ vugbm_device_destroy(&g->gdev);
+}
+
+static GOptionEntry entries[] = {
+ { "print-capabilities", 'c', 0, G_OPTION_ARG_NONE, &opt_print_caps,
+ "Print capabilities", NULL },
+ { "fd", 'f', 0, G_OPTION_ARG_INT, &opt_fdnum,
+ "Use inherited fd socket", "FDNUM" },
+ { "socket-path", 's', 0, G_OPTION_ARG_FILENAME, &opt_socket_path,
+ "Use UNIX socket path", "PATH" },
+ { "render-node", 'r', 0, G_OPTION_ARG_FILENAME, &opt_render_node,
+ "Specify DRM render node", "PATH" },
+ { "virgl", 'v', 0, G_OPTION_ARG_NONE, &opt_virgl,
+ "Turn virgl rendering on", NULL },
+ { NULL, }
+};
+
+int
+main(int argc, char *argv[])
+{
+ GOptionContext *context;
+ GError *error = NULL;
+ GMainLoop *loop = NULL;
+ int fd;
+ VuGpu g = { .sock_fd = -1, .drm_rnode_fd = -1 };
+
+ QTAILQ_INIT(&g.reslist);
+ QTAILQ_INIT(&g.fenceq);
+
+ context = g_option_context_new("QEMU vhost-user-gpu");
+ g_option_context_add_main_entries(context, entries, NULL);
+ if (!g_option_context_parse(context, &argc, &argv, &error)) {
+ g_printerr("Option parsing failed: %s\n", error->message);
+ exit(EXIT_FAILURE);
+ }
+ g_option_context_free(context);
+
+ if (opt_print_caps) {
+ g_print("{\n");
+ g_print(" \"type\": \"gpu\",\n");
+ g_print(" \"features\": [\n");
+ g_print(" \"render-node\",\n");
+ g_print(" \"virgl\"\n");
+ g_print(" ]\n");
+ g_print("}\n");
+ exit(EXIT_SUCCESS);
+ }
+
+ g.drm_rnode_fd = qemu_drm_rendernode_open(opt_render_node);
+ if (opt_render_node && g.drm_rnode_fd == -1) {
+ g_printerr("Failed to open DRM rendernode.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (g.drm_rnode_fd >= 0) {
+ if (!vugbm_device_init(&g.gdev, g.drm_rnode_fd)) {
+ g_warning("Failed to init DRM device, using fallback path");
+ }
+ }
+
+ if ((!!opt_socket_path + (opt_fdnum != -1)) != 1) {
+ g_printerr("Please specify either --fd or --socket-path\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (opt_socket_path) {
+ int lsock = unix_listen(opt_socket_path, &error_fatal);
+ fd = accept(lsock, NULL, NULL);
+ close(lsock);
+ } else {
+ fd = opt_fdnum;
+ }
+ if (fd == -1) {
+ g_printerr("Invalid socket");
+ exit(EXIT_FAILURE);
+ }
+
+ vug_init(&g.dev, fd, vg_panic, &vuiface);
+
+ loop = g_main_loop_new(NULL, FALSE);
+ g_main_loop_run(loop);
+ g_main_loop_unref(loop);
+
+ vg_destroy(&g);
+ if (g.drm_rnode_fd >= 0) {
+ close(g.drm_rnode_fd);
+ }
+
+ return 0;
+}
diff --git a/contrib/vhost-user-gpu/virgl.c b/contrib/vhost-user-gpu/virgl.c
new file mode 100644
index 0000000000..43413e29df
--- /dev/null
+++ b/contrib/vhost-user-gpu/virgl.c
@@ -0,0 +1,579 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2018
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include <virglrenderer.h>
+#include "virgl.h"
+
+void
+vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
+ gpointer data)
+{
+ uint32_t width, height;
+ uint32_t *cursor;
+
+ cursor = virgl_renderer_get_cursor_data(resource_id, &width, &height);
+ g_return_if_fail(cursor != NULL);
+ g_return_if_fail(width == 64);
+ g_return_if_fail(height == 64);
+
+ memcpy(data, cursor, 64 * 64 * sizeof(uint32_t));
+ free(cursor);
+}
+
+static void
+virgl_cmd_context_create(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_ctx_create cc;
+
+ VUGPU_FILL_CMD(cc);
+
+ virgl_renderer_context_create(cc.hdr.ctx_id, cc.nlen,
+ cc.debug_name);
+}
+
+static void
+virgl_cmd_context_destroy(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_ctx_destroy cd;
+
+ VUGPU_FILL_CMD(cd);
+
+ virgl_renderer_context_destroy(cd.hdr.ctx_id);
+}
+
+static void
+virgl_cmd_create_resource_2d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_create_2d c2d;
+ struct virgl_renderer_resource_create_args args;
+
+ VUGPU_FILL_CMD(c2d);
+
+ args.handle = c2d.resource_id;
+ args.target = 2;
+ args.format = c2d.format;
+ args.bind = (1 << 1);
+ args.width = c2d.width;
+ args.height = c2d.height;
+ args.depth = 1;
+ args.array_size = 1;
+ args.last_level = 0;
+ args.nr_samples = 0;
+ args.flags = VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP;
+ virgl_renderer_resource_create(&args, NULL, 0);
+}
+
+static void
+virgl_cmd_create_resource_3d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_create_3d c3d;
+ struct virgl_renderer_resource_create_args args;
+
+ VUGPU_FILL_CMD(c3d);
+
+ args.handle = c3d.resource_id;
+ args.target = c3d.target;
+ args.format = c3d.format;
+ args.bind = c3d.bind;
+ args.width = c3d.width;
+ args.height = c3d.height;
+ args.depth = c3d.depth;
+ args.array_size = c3d.array_size;
+ args.last_level = c3d.last_level;
+ args.nr_samples = c3d.nr_samples;
+ args.flags = c3d.flags;
+ virgl_renderer_resource_create(&args, NULL, 0);
+}
+
+static void
+virgl_cmd_resource_unref(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_unref unref;
+
+ VUGPU_FILL_CMD(unref);
+
+ virgl_renderer_resource_unref(unref.resource_id);
+}
+
+/* Not yet(?) defined in standard-headers, remove when possible */
+#ifndef VIRTIO_GPU_CAPSET_VIRGL2
+#define VIRTIO_GPU_CAPSET_VIRGL2 2
+#endif
+
+static void
+virgl_cmd_get_capset_info(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_get_capset_info info;
+ struct virtio_gpu_resp_capset_info resp;
+
+ VUGPU_FILL_CMD(info);
+
+ if (info.capset_index == 0) {
+ resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL;
+ virgl_renderer_get_cap_set(resp.capset_id,
+ &resp.capset_max_version,
+ &resp.capset_max_size);
+ } else if (info.capset_index == 1) {
+ resp.capset_id = VIRTIO_GPU_CAPSET_VIRGL2;
+ virgl_renderer_get_cap_set(resp.capset_id,
+ &resp.capset_max_version,
+ &resp.capset_max_size);
+ } else {
+ resp.capset_max_version = 0;
+ resp.capset_max_size = 0;
+ }
+ resp.hdr.type = VIRTIO_GPU_RESP_OK_CAPSET_INFO;
+ vg_ctrl_response(g, cmd, &resp.hdr, sizeof(resp));
+}
+
+uint32_t
+vg_virgl_get_num_capsets(void)
+{
+ uint32_t capset2_max_ver, capset2_max_size;
+ virgl_renderer_get_cap_set(VIRTIO_GPU_CAPSET_VIRGL2,
+ &capset2_max_ver,
+ &capset2_max_size);
+
+ return capset2_max_ver ? 2 : 1;
+}
+
+static void
+virgl_cmd_get_capset(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_get_capset gc;
+ struct virtio_gpu_resp_capset *resp;
+ uint32_t max_ver, max_size;
+
+ VUGPU_FILL_CMD(gc);
+
+ virgl_renderer_get_cap_set(gc.capset_id, &max_ver,
+ &max_size);
+ resp = g_malloc0(sizeof(*resp) + max_size);
+
+ resp->hdr.type = VIRTIO_GPU_RESP_OK_CAPSET;
+ virgl_renderer_fill_caps(gc.capset_id,
+ gc.capset_version,
+ (void *)resp->capset_data);
+ vg_ctrl_response(g, cmd, &resp->hdr, sizeof(*resp) + max_size);
+ g_free(resp);
+}
+
+static void
+virgl_cmd_submit_3d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_cmd_submit cs;
+ void *buf;
+ size_t s;
+
+ VUGPU_FILL_CMD(cs);
+
+ buf = g_malloc(cs.size);
+ s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num,
+ sizeof(cs), buf, cs.size);
+ if (s != cs.size) {
+ g_critical("%s: size mismatch (%zd/%d)", __func__, s, cs.size);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
+ goto out;
+ }
+
+ virgl_renderer_submit_cmd(buf, cs.hdr.ctx_id, cs.size / 4);
+
+out:
+ g_free(buf);
+}
+
+static void
+virgl_cmd_transfer_to_host_2d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_transfer_to_host_2d t2d;
+ struct virtio_gpu_box box;
+
+ VUGPU_FILL_CMD(t2d);
+
+ box.x = t2d.r.x;
+ box.y = t2d.r.y;
+ box.z = 0;
+ box.w = t2d.r.width;
+ box.h = t2d.r.height;
+ box.d = 1;
+
+ virgl_renderer_transfer_write_iov(t2d.resource_id,
+ 0,
+ 0,
+ 0,
+ 0,
+ (struct virgl_box *)&box,
+ t2d.offset, NULL, 0);
+}
+
+static void
+virgl_cmd_transfer_to_host_3d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_transfer_host_3d t3d;
+
+ VUGPU_FILL_CMD(t3d);
+
+ virgl_renderer_transfer_write_iov(t3d.resource_id,
+ t3d.hdr.ctx_id,
+ t3d.level,
+ t3d.stride,
+ t3d.layer_stride,
+ (struct virgl_box *)&t3d.box,
+ t3d.offset, NULL, 0);
+}
+
+static void
+virgl_cmd_transfer_from_host_3d(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_transfer_host_3d tf3d;
+
+ VUGPU_FILL_CMD(tf3d);
+
+ virgl_renderer_transfer_read_iov(tf3d.resource_id,
+ tf3d.hdr.ctx_id,
+ tf3d.level,
+ tf3d.stride,
+ tf3d.layer_stride,
+ (struct virgl_box *)&tf3d.box,
+ tf3d.offset, NULL, 0);
+}
+
+static void
+virgl_resource_attach_backing(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_attach_backing att_rb;
+ struct iovec *res_iovs;
+ int ret;
+
+ VUGPU_FILL_CMD(att_rb);
+
+ ret = vg_create_mapping_iov(g, &att_rb, cmd, &res_iovs);
+ if (ret != 0) {
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ return;
+ }
+
+ virgl_renderer_resource_attach_iov(att_rb.resource_id,
+ res_iovs, att_rb.nr_entries);
+}
+
+static void
+virgl_resource_detach_backing(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_detach_backing detach_rb;
+ struct iovec *res_iovs = NULL;
+ int num_iovs = 0;
+
+ VUGPU_FILL_CMD(detach_rb);
+
+ virgl_renderer_resource_detach_iov(detach_rb.resource_id,
+ &res_iovs,
+ &num_iovs);
+ if (res_iovs == NULL || num_iovs == 0) {
+ return;
+ }
+ g_free(res_iovs);
+}
+
+static void
+virgl_cmd_set_scanout(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_set_scanout ss;
+ struct virgl_renderer_resource_info info;
+ int ret;
+
+ VUGPU_FILL_CMD(ss);
+
+ if (ss.scanout_id >= VIRTIO_GPU_MAX_SCANOUTS) {
+ g_critical("%s: illegal scanout id specified %d",
+ __func__, ss.scanout_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
+ return;
+ }
+
+ memset(&info, 0, sizeof(info));
+
+ if (ss.resource_id && ss.r.width && ss.r.height) {
+ ret = virgl_renderer_resource_get_info(ss.resource_id, &info);
+ if (ret == -1) {
+ g_critical("%s: illegal resource specified %d\n",
+ __func__, ss.resource_id);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+
+ int fd = -1;
+ if (virgl_renderer_get_fd_for_texture(info.tex_id, &fd) < 0) {
+ g_critical("%s: failed to get fd for texture\n", __func__);
+ cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
+ return;
+ }
+ assert(fd >= 0);
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_SCANOUT,
+ .size = sizeof(VhostUserGpuDMABUFScanout),
+ .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
+ .payload.dmabuf_scanout.x = ss.r.x,
+ .payload.dmabuf_scanout.y = ss.r.y,
+ .payload.dmabuf_scanout.width = ss.r.width,
+ .payload.dmabuf_scanout.height = ss.r.height,
+ .payload.dmabuf_scanout.fd_width = info.width,
+ .payload.dmabuf_scanout.fd_height = info.height,
+ .payload.dmabuf_scanout.fd_stride = info.stride,
+ .payload.dmabuf_scanout.fd_flags = info.flags,
+ .payload.dmabuf_scanout.fd_drm_fourcc = info.drm_fourcc
+ };
+ vg_send_msg(g, &msg, fd);
+ close(fd);
+ } else {
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_SCANOUT,
+ .size = sizeof(VhostUserGpuDMABUFScanout),
+ .payload.dmabuf_scanout.scanout_id = ss.scanout_id,
+ };
+ g_debug("disable scanout");
+ vg_send_msg(g, &msg, -1);
+ }
+ g->scanout[ss.scanout_id].resource_id = ss.resource_id;
+}
+
+static void
+virgl_cmd_resource_flush(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_resource_flush rf;
+ int i;
+
+ VUGPU_FILL_CMD(rf);
+
+ if (!rf.resource_id) {
+ g_debug("bad resource id for flush..?");
+ return;
+ }
+ for (i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
+ if (g->scanout[i].resource_id != rf.resource_id) {
+ continue;
+ }
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_UPDATE,
+ .size = sizeof(VhostUserGpuUpdate),
+ .payload.update.scanout_id = i,
+ .payload.update.x = rf.r.x,
+ .payload.update.y = rf.r.y,
+ .payload.update.width = rf.r.width,
+ .payload.update.height = rf.r.height
+ };
+ vg_send_msg(g, &msg, -1);
+ vg_wait_ok(g);
+ }
+}
+
+static void
+virgl_cmd_ctx_attach_resource(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_ctx_resource att_res;
+
+ VUGPU_FILL_CMD(att_res);
+
+ virgl_renderer_ctx_attach_resource(att_res.hdr.ctx_id, att_res.resource_id);
+}
+
+static void
+virgl_cmd_ctx_detach_resource(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd)
+{
+ struct virtio_gpu_ctx_resource det_res;
+
+ VUGPU_FILL_CMD(det_res);
+
+ virgl_renderer_ctx_detach_resource(det_res.hdr.ctx_id, det_res.resource_id);
+}
+
+void vg_virgl_process_cmd(VuGpu *g, struct virtio_gpu_ctrl_command *cmd)
+{
+ virgl_renderer_force_ctx_0();
+ switch (cmd->cmd_hdr.type) {
+ case VIRTIO_GPU_CMD_CTX_CREATE:
+ virgl_cmd_context_create(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_CTX_DESTROY:
+ virgl_cmd_context_destroy(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_2D:
+ virgl_cmd_create_resource_2d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_CREATE_3D:
+ virgl_cmd_create_resource_3d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_SUBMIT_3D:
+ virgl_cmd_submit_3d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D:
+ virgl_cmd_transfer_to_host_2d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D:
+ virgl_cmd_transfer_to_host_3d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D:
+ virgl_cmd_transfer_from_host_3d(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING:
+ virgl_resource_attach_backing(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING:
+ virgl_resource_detach_backing(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_SET_SCANOUT:
+ virgl_cmd_set_scanout(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_FLUSH:
+ virgl_cmd_resource_flush(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_RESOURCE_UNREF:
+ virgl_cmd_resource_unref(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE:
+ /* TODO add security */
+ virgl_cmd_ctx_attach_resource(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE:
+ /* TODO add security */
+ virgl_cmd_ctx_detach_resource(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_GET_CAPSET_INFO:
+ virgl_cmd_get_capset_info(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_GET_CAPSET:
+ virgl_cmd_get_capset(g, cmd);
+ break;
+ case VIRTIO_GPU_CMD_GET_DISPLAY_INFO:
+ vg_get_display_info(g, cmd);
+ break;
+ default:
+ g_debug("TODO handle ctrl %x\n", cmd->cmd_hdr.type);
+ cmd->error = VIRTIO_GPU_RESP_ERR_UNSPEC;
+ break;
+ }
+
+ if (cmd->finished) {
+ return;
+ }
+
+ if (cmd->error) {
+ g_warning("%s: ctrl 0x%x, error 0x%x\n", __func__,
+ cmd->cmd_hdr.type, cmd->error);
+ vg_ctrl_response_nodata(g, cmd, cmd->error);
+ return;
+ }
+
+ if (!(cmd->cmd_hdr.flags & VIRTIO_GPU_FLAG_FENCE)) {
+ vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+ return;
+ }
+
+ g_debug("Creating fence id:%" PRId64 " type:%d",
+ cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
+ virgl_renderer_create_fence(cmd->cmd_hdr.fence_id, cmd->cmd_hdr.type);
+}
+
+static void
+virgl_write_fence(void *opaque, uint32_t fence)
+{
+ VuGpu *g = opaque;
+ struct virtio_gpu_ctrl_command *cmd, *tmp;
+
+ QTAILQ_FOREACH_SAFE(cmd, &g->fenceq, next, tmp) {
+ /*
+ * the guest can end up emitting fences out of order
+ * so we should check all fenced cmds not just the first one.
+ */
+ if (cmd->cmd_hdr.fence_id > fence) {
+ continue;
+ }
+ g_debug("FENCE %" PRIu64, cmd->cmd_hdr.fence_id);
+ vg_ctrl_response_nodata(g, cmd, VIRTIO_GPU_RESP_OK_NODATA);
+ QTAILQ_REMOVE(&g->fenceq, cmd, next);
+ g_free(cmd);
+ g->inflight--;
+ }
+}
+
+#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
+ VIRGL_RENDERER_CALLBACKS_VERSION >= 2
+static int
+virgl_get_drm_fd(void *opaque)
+{
+ VuGpu *g = opaque;
+
+ return g->drm_rnode_fd;
+}
+#endif
+
+static struct virgl_renderer_callbacks virgl_cbs = {
+#if defined(VIRGL_RENDERER_CALLBACKS_VERSION) && \
+ VIRGL_RENDERER_CALLBACKS_VERSION >= 2
+ .get_drm_fd = virgl_get_drm_fd,
+ .version = 2,
+#else
+ .version = 1,
+#endif
+ .write_fence = virgl_write_fence,
+};
+
+static void
+vg_virgl_poll(VuDev *dev, int condition, void *data)
+{
+ virgl_renderer_poll();
+}
+
+bool
+vg_virgl_init(VuGpu *g)
+{
+ int ret;
+
+ if (g->drm_rnode_fd && virgl_cbs.version == 1) {
+ g_warning("virgl will use the default rendernode");
+ }
+
+ ret = virgl_renderer_init(g,
+ VIRGL_RENDERER_USE_EGL |
+ VIRGL_RENDERER_THREAD_SYNC,
+ &virgl_cbs);
+ if (ret != 0) {
+ return false;
+ }
+
+ ret = virgl_renderer_get_poll_fd();
+ if (ret != -1) {
+ g->renderer_source =
+ vug_source_new(&g->dev, ret, G_IO_IN, vg_virgl_poll, g);
+ }
+
+ return true;
+}
diff --git a/contrib/vhost-user-gpu/virgl.h b/contrib/vhost-user-gpu/virgl.h
new file mode 100644
index 0000000000..f952bc9d4f
--- /dev/null
+++ b/contrib/vhost-user-gpu/virgl.h
@@ -0,0 +1,25 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2018
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef VUGPU_VIRGL_H_
+#define VUGPU_VIRGL_H_
+
+#include "vugpu.h"
+
+bool vg_virgl_init(VuGpu *g);
+uint32_t vg_virgl_get_num_capsets(void);
+void vg_virgl_process_cmd(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
+void vg_virgl_update_cursor_data(VuGpu *g, uint32_t resource_id,
+ gpointer data);
+
+#endif
diff --git a/contrib/vhost-user-gpu/vugbm.c b/contrib/vhost-user-gpu/vugbm.c
new file mode 100644
index 0000000000..d3bb82ff0e
--- /dev/null
+++ b/contrib/vhost-user-gpu/vugbm.c
@@ -0,0 +1,328 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * DRM helpers
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "vugbm.h"
+
+static bool
+mem_alloc_bo(struct vugbm_buffer *buf)
+{
+ buf->mmap = g_malloc(buf->width * buf->height * 4);
+ buf->stride = buf->width * 4;
+ return true;
+}
+
+static void
+mem_free_bo(struct vugbm_buffer *buf)
+{
+ g_free(buf->mmap);
+}
+
+static bool
+mem_map_bo(struct vugbm_buffer *buf)
+{
+ return buf->mmap != NULL;
+}
+
+static void
+mem_unmap_bo(struct vugbm_buffer *buf)
+{
+}
+
+static void
+mem_device_destroy(struct vugbm_device *dev)
+{
+}
+
+#ifdef CONFIG_MEMFD
+struct udmabuf_create {
+ uint32_t memfd;
+ uint32_t flags;
+ uint64_t offset;
+ uint64_t size;
+};
+
+#define UDMABUF_CREATE _IOW('u', 0x42, struct udmabuf_create)
+
+static size_t
+udmabuf_get_size(struct vugbm_buffer *buf)
+{
+ return ROUND_UP(buf->width * buf->height * 4, getpagesize());
+}
+
+static bool
+udmabuf_alloc_bo(struct vugbm_buffer *buf)
+{
+ int ret;
+
+ buf->memfd = memfd_create("udmabuf-bo", MFD_ALLOW_SEALING);
+ if (buf->memfd < 0) {
+ return false;
+ }
+
+ ret = ftruncate(buf->memfd, udmabuf_get_size(buf));
+ if (ret < 0) {
+ close(buf->memfd);
+ return false;
+ }
+
+ ret = fcntl(buf->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ if (ret < 0) {
+ close(buf->memfd);
+ return false;
+ }
+
+ buf->stride = buf->width * 4;
+
+ return true;
+}
+
+static void
+udmabuf_free_bo(struct vugbm_buffer *buf)
+{
+ close(buf->memfd);
+}
+
+static bool
+udmabuf_map_bo(struct vugbm_buffer *buf)
+{
+ buf->mmap = mmap(NULL, udmabuf_get_size(buf),
+ PROT_READ | PROT_WRITE, MAP_SHARED, buf->memfd, 0);
+ if (buf->mmap == MAP_FAILED) {
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+udmabuf_get_fd(struct vugbm_buffer *buf, int *fd)
+{
+ struct udmabuf_create create = {
+ .memfd = buf->memfd,
+ .offset = 0,
+ .size = udmabuf_get_size(buf),
+ };
+
+ *fd = ioctl(buf->dev->fd, UDMABUF_CREATE, &create);
+
+ return *fd >= 0;
+}
+
+static void
+udmabuf_unmap_bo(struct vugbm_buffer *buf)
+{
+ munmap(buf->mmap, udmabuf_get_size(buf));
+}
+
+static void
+udmabuf_device_destroy(struct vugbm_device *dev)
+{
+ close(dev->fd);
+}
+#endif
+
+#ifdef CONFIG_GBM
+static bool
+alloc_bo(struct vugbm_buffer *buf)
+{
+ struct gbm_device *dev = buf->dev->dev;
+
+ assert(!buf->bo);
+
+ buf->bo = gbm_bo_create(dev, buf->width, buf->height,
+ buf->format,
+ GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR);
+
+ if (buf->bo) {
+ buf->stride = gbm_bo_get_stride(buf->bo);
+ return true;
+ }
+
+ return false;
+}
+
+static void
+free_bo(struct vugbm_buffer *buf)
+{
+ gbm_bo_destroy(buf->bo);
+}
+
+static bool
+map_bo(struct vugbm_buffer *buf)
+{
+ uint32_t stride;
+
+ buf->mmap = gbm_bo_map(buf->bo, 0, 0, buf->width, buf->height,
+ GBM_BO_TRANSFER_READ_WRITE, &stride,
+ &buf->mmap_data);
+
+ assert(stride == buf->stride);
+
+ return buf->mmap != NULL;
+}
+
+static void
+unmap_bo(struct vugbm_buffer *buf)
+{
+ gbm_bo_unmap(buf->bo, buf->mmap_data);
+}
+
+static bool
+get_fd(struct vugbm_buffer *buf, int *fd)
+{
+ *fd = gbm_bo_get_fd(buf->bo);
+
+ return *fd >= 0;
+}
+
+static void
+device_destroy(struct vugbm_device *dev)
+{
+ gbm_device_destroy(dev->dev);
+}
+#endif
+
+void
+vugbm_device_destroy(struct vugbm_device *dev)
+{
+ if (!dev->inited) {
+ return;
+ }
+
+ dev->device_destroy(dev);
+}
+
+bool
+vugbm_device_init(struct vugbm_device *dev, int fd)
+{
+ dev->fd = fd;
+
+#ifdef CONFIG_GBM
+ dev->dev = gbm_create_device(fd);
+#endif
+
+ if (0) {
+ /* nothing */
+ }
+#ifdef CONFIG_GBM
+ else if (dev->dev != NULL) {
+ dev->alloc_bo = alloc_bo;
+ dev->free_bo = free_bo;
+ dev->get_fd = get_fd;
+ dev->map_bo = map_bo;
+ dev->unmap_bo = unmap_bo;
+ dev->device_destroy = device_destroy;
+ }
+#endif
+#ifdef CONFIG_MEMFD
+ else if (g_file_test("/dev/udmabuf", G_FILE_TEST_EXISTS)) {
+ dev->fd = open("/dev/udmabuf", O_RDWR);
+ if (dev->fd < 0) {
+ return false;
+ }
+ g_debug("Using experimental udmabuf backend");
+ dev->alloc_bo = udmabuf_alloc_bo;
+ dev->free_bo = udmabuf_free_bo;
+ dev->get_fd = udmabuf_get_fd;
+ dev->map_bo = udmabuf_map_bo;
+ dev->unmap_bo = udmabuf_unmap_bo;
+ dev->device_destroy = udmabuf_device_destroy;
+ }
+#endif
+ else {
+ g_debug("Using mem fallback");
+ dev->alloc_bo = mem_alloc_bo;
+ dev->free_bo = mem_free_bo;
+ dev->map_bo = mem_map_bo;
+ dev->unmap_bo = mem_unmap_bo;
+ dev->device_destroy = mem_device_destroy;
+ return false;
+ }
+
+ dev->inited = true;
+ return true;
+}
+
+static bool
+vugbm_buffer_map(struct vugbm_buffer *buf)
+{
+ struct vugbm_device *dev = buf->dev;
+
+ return dev->map_bo(buf);
+}
+
+static void
+vugbm_buffer_unmap(struct vugbm_buffer *buf)
+{
+ struct vugbm_device *dev = buf->dev;
+
+ dev->unmap_bo(buf);
+}
+
+bool
+vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer)
+{
+ if (!buffer->dev->get_fd) {
+ return false;
+ }
+
+ return true;
+}
+
+bool
+vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd)
+{
+ if (!vugbm_buffer_can_get_dmabuf_fd(buffer) ||
+ !buffer->dev->get_fd(buffer, fd)) {
+ g_warning("Failed to get dmabuf");
+ return false;
+ }
+
+ if (*fd < 0) {
+ g_warning("error: dmabuf_fd < 0");
+ return false;
+ }
+
+ return true;
+}
+
+bool
+vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
+ uint32_t width, uint32_t height)
+{
+ buffer->dev = dev;
+ buffer->width = width;
+ buffer->height = height;
+ buffer->format = GBM_FORMAT_XRGB8888;
+ buffer->stride = 0; /* modified during alloc */
+ if (!dev->alloc_bo(buffer)) {
+ g_warning("alloc_bo failed");
+ return false;
+ }
+
+ if (!vugbm_buffer_map(buffer)) {
+ g_warning("map_bo failed");
+ goto err;
+ }
+
+ return true;
+
+err:
+ dev->free_bo(buffer);
+ return false;
+}
+
+void
+vugbm_buffer_destroy(struct vugbm_buffer *buffer)
+{
+ struct vugbm_device *dev = buffer->dev;
+
+ vugbm_buffer_unmap(buffer);
+ dev->free_bo(buffer);
+}
diff --git a/contrib/vhost-user-gpu/vugbm.h b/contrib/vhost-user-gpu/vugbm.h
new file mode 100644
index 0000000000..c0bf27af9b
--- /dev/null
+++ b/contrib/vhost-user-gpu/vugbm.h
@@ -0,0 +1,67 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * GBM helpers
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef VHOST_USER_GPU_GBM_H
+#define VHOST_USER_GPU_GBM_H
+
+#include "qemu/osdep.h"
+
+#ifdef CONFIG_MEMFD
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#endif
+
+#ifdef CONFIG_GBM
+#include <gbm.h>
+#endif
+
+struct vugbm_buffer;
+
+struct vugbm_device {
+ bool inited;
+ int fd;
+#ifdef CONFIG_GBM
+ struct gbm_device *dev;
+#endif
+
+ bool (*alloc_bo)(struct vugbm_buffer *buf);
+ void (*free_bo)(struct vugbm_buffer *buf);
+ bool (*get_fd)(struct vugbm_buffer *buf, int *fd);
+ bool (*map_bo)(struct vugbm_buffer *buf);
+ void (*unmap_bo)(struct vugbm_buffer *buf);
+ void (*device_destroy)(struct vugbm_device *dev);
+};
+
+struct vugbm_buffer {
+ struct vugbm_device *dev;
+
+#ifdef CONFIG_MEMFD
+ int memfd;
+#endif
+#ifdef CONFIG_GBM
+ struct gbm_bo *bo;
+ void *mmap_data;
+#endif
+
+ uint8_t *mmap;
+ uint32_t width;
+ uint32_t height;
+ uint32_t stride;
+ uint32_t format;
+};
+
+bool vugbm_device_init(struct vugbm_device *dev, int fd);
+void vugbm_device_destroy(struct vugbm_device *dev);
+
+bool vugbm_buffer_create(struct vugbm_buffer *buffer, struct vugbm_device *dev,
+ uint32_t width, uint32_t height);
+bool vugbm_buffer_can_get_dmabuf_fd(struct vugbm_buffer *buffer);
+bool vugbm_buffer_get_dmabuf_fd(struct vugbm_buffer *buffer, int *fd);
+void vugbm_buffer_destroy(struct vugbm_buffer *buffer);
+
+#endif
diff --git a/contrib/vhost-user-gpu/vugpu.h b/contrib/vhost-user-gpu/vugpu.h
new file mode 100644
index 0000000000..458e92a1b3
--- /dev/null
+++ b/contrib/vhost-user-gpu/vugpu.h
@@ -0,0 +1,177 @@
+/*
+ * Virtio vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2018
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+#ifndef VUGPU_H_
+#define VUGPU_H_
+
+#include "qemu/osdep.h"
+
+#include "contrib/libvhost-user/libvhost-user-glib.h"
+#include "standard-headers/linux/virtio_gpu.h"
+
+#include "qemu/queue.h"
+#include "qemu/iov.h"
+#include "qemu/bswap.h"
+#include "vugbm.h"
+
+typedef enum VhostUserGpuRequest {
+ VHOST_USER_GPU_NONE = 0,
+ VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_GET_DISPLAY_INFO,
+ VHOST_USER_GPU_CURSOR_POS,
+ VHOST_USER_GPU_CURSOR_POS_HIDE,
+ VHOST_USER_GPU_CURSOR_UPDATE,
+ VHOST_USER_GPU_SCANOUT,
+ VHOST_USER_GPU_UPDATE,
+ VHOST_USER_GPU_DMABUF_SCANOUT,
+ VHOST_USER_GPU_DMABUF_UPDATE,
+} VhostUserGpuRequest;
+
+typedef struct VhostUserGpuDisplayInfoReply {
+ struct virtio_gpu_resp_display_info info;
+} VhostUserGpuDisplayInfoReply;
+
+typedef struct VhostUserGpuCursorPos {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+} QEMU_PACKED VhostUserGpuCursorPos;
+
+typedef struct VhostUserGpuCursorUpdate {
+ VhostUserGpuCursorPos pos;
+ uint32_t hot_x;
+ uint32_t hot_y;
+ uint32_t data[64 * 64];
+} QEMU_PACKED VhostUserGpuCursorUpdate;
+
+typedef struct VhostUserGpuScanout {
+ uint32_t scanout_id;
+ uint32_t width;
+ uint32_t height;
+} QEMU_PACKED VhostUserGpuScanout;
+
+typedef struct VhostUserGpuUpdate {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint8_t data[];
+} QEMU_PACKED VhostUserGpuUpdate;
+
+typedef struct VhostUserGpuDMABUFScanout {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint32_t fd_width;
+ uint32_t fd_height;
+ uint32_t fd_stride;
+ uint32_t fd_flags;
+ int fd_drm_fourcc;
+} QEMU_PACKED VhostUserGpuDMABUFScanout;
+
+typedef struct VhostUserGpuMsg {
+ uint32_t request; /* VhostUserGpuRequest */
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+ VhostUserGpuCursorPos cursor_pos;
+ VhostUserGpuCursorUpdate cursor_update;
+ VhostUserGpuScanout scanout;
+ VhostUserGpuUpdate update;
+ VhostUserGpuDMABUFScanout dmabuf_scanout;
+ struct virtio_gpu_resp_display_info display_info;
+ uint64_t u64;
+ } payload;
+} QEMU_PACKED VhostUserGpuMsg;
+
+static VhostUserGpuMsg m __attribute__ ((unused));
+#define VHOST_USER_GPU_HDR_SIZE \
+ (sizeof(m.request) + sizeof(m.flags) + sizeof(m.size))
+
+#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
+
+struct virtio_gpu_scanout {
+ uint32_t width, height;
+ int x, y;
+ int invalidate;
+ uint32_t resource_id;
+};
+
+typedef struct VuGpu {
+ VugDev dev;
+ struct virtio_gpu_config virtio_config;
+ struct vugbm_device gdev;
+ int sock_fd;
+ int drm_rnode_fd;
+ GSource *renderer_source;
+ guint wait_ok;
+
+ bool virgl;
+ bool virgl_inited;
+ uint32_t inflight;
+
+ struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
+ QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
+ QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
+} VuGpu;
+
+struct virtio_gpu_ctrl_command {
+ VuVirtqElement elem;
+ VuVirtq *vq;
+ struct virtio_gpu_ctrl_hdr cmd_hdr;
+ uint32_t error;
+ bool finished;
+ QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
+};
+
+#define VUGPU_FILL_CMD(out) do { \
+ size_t s; \
+ s = iov_to_buf(cmd->elem.out_sg, cmd->elem.out_num, 0, \
+ &out, sizeof(out)); \
+ if (s != sizeof(out)) { \
+ g_critical("%s: command size incorrect %zu vs %zu", \
+ __func__, s, sizeof(out)); \
+ return; \
+ } \
+ } while (0)
+
+
+void vg_ctrl_response(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct virtio_gpu_ctrl_hdr *resp,
+ size_t resp_len);
+
+void vg_ctrl_response_nodata(VuGpu *g,
+ struct virtio_gpu_ctrl_command *cmd,
+ enum virtio_gpu_ctrl_type type);
+
+int vg_create_mapping_iov(VuGpu *g,
+ struct virtio_gpu_resource_attach_backing *ab,
+ struct virtio_gpu_ctrl_command *cmd,
+ struct iovec **iov);
+
+void vg_get_display_info(VuGpu *vg, struct virtio_gpu_ctrl_command *cmd);
+
+void vg_wait_ok(VuGpu *g);
+
+void vg_send_msg(VuGpu *g, const VhostUserGpuMsg *msg, int fd);
+
+bool vg_recv_msg(VuGpu *g, uint32_t expect_req, uint32_t expect_size,
+ gpointer payload);
+
+
+#endif
diff --git a/docs/interop/index.rst b/docs/interop/index.rst
index a037bd67ec..b4bfcab417 100644
--- a/docs/interop/index.rst
+++ b/docs/interop/index.rst
@@ -16,3 +16,4 @@ Contents:
live-block-operations
pr-helper
vhost-user
+ vhost-user-gpu
diff --git a/docs/interop/vhost-user-gpu.rst b/docs/interop/vhost-user-gpu.rst
new file mode 100644
index 0000000000..688f8b4259
--- /dev/null
+++ b/docs/interop/vhost-user-gpu.rst
@@ -0,0 +1,242 @@
+=======================
+Vhost-user-gpu Protocol
+=======================
+
+:Licence: This work is licensed under the terms of the GNU GPL,
+ version 2 or later. See the COPYING file in the top-level
+ directory.
+
+.. contents:: Table of Contents
+
+Introduction
+============
+
+The vhost-user-gpu protocol shares the rendering results of a
+virtio-gpu: a vhost-user slave process sends them to a vhost-user
+master process (such as QEMU). It bears a limited resemblance to a
+display server protocol, if you consider QEMU as the display server
+and the slave as its client. Typically, it works by setting a
+scanout/display configuration and then sending flush events for the
+display updates. It also updates the cursor shape and position.
+
+The protocol is sent over a UNIX domain stream socket, since it uses
+socket ancillary data to share opened file descriptors (DMABUF fds or
+shared memory). The socket is usually obtained via
+``VHOST_USER_GPU_SET_SOCKET``.
+
+Requests are sent by the *slave*, and the optional replies by the
+*master*.
+
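+For illustration, here is a minimal sketch (not QEMU code) of attaching
+a file descriptor to an outgoing message as ``SCM_RIGHTS`` ancillary
+data; the helper name and error handling are assumptions:
+
+.. code:: c
+
+    #include <string.h>
+    #include <sys/socket.h>
+    #include <sys/uio.h>
+
+    /* Illustrative only: send "len" bytes of message plus one fd. */
+    static int send_msg_with_fd(int sock, const void *msg, size_t len,
+                                int fd)
+    {
+        char control[CMSG_SPACE(sizeof(int))] = { 0 };
+        struct iovec iov = { .iov_base = (void *)msg, .iov_len = len };
+        struct msghdr mh = {
+            .msg_iov = &iov,
+            .msg_iovlen = 1,
+            .msg_control = control,
+            .msg_controllen = sizeof(control),
+        };
+        struct cmsghdr *cmsg = CMSG_FIRSTHDR(&mh);
+
+        cmsg->cmsg_level = SOL_SOCKET;
+        cmsg->cmsg_type = SCM_RIGHTS;
+        cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+        memcpy(CMSG_DATA(cmsg), &fd, sizeof(int));
+
+        return sendmsg(sock, &mh, 0) < 0 ? -1 : 0;
+    }
+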
+Wire format
+===========
+
+Unless specified otherwise, numbers are in the machine's native byte
+order.
+
+A vhost-user-gpu message (request and reply) consists of 3 header
+fields and a payload.
+
++---------+-------+------+---------+
+| request | flags | size | payload |
++---------+-------+------+---------+
+
+Header
+------
+
+:request: ``u32``, type of the request
+
+:flags: ``u32``, 32-bit bit field:
+
+ - Bit 2 is the reply flag - needs to be set on each reply
+
+:size: ``u32``, size of the payload
+
+Payload types
+-------------
+
+Depending on the request type, **payload** can be:
+
+VhostUserGpuCursorPos
+^^^^^^^^^^^^^^^^^^^^^
+
++------------+---+---+
+| scanout-id | x | y |
++------------+---+---+
+
+:scanout-id: ``u32``, the scanout where the cursor is located
+
+:x/y: ``u32``, the cursor position
+
+VhostUserGpuCursorUpdate
+^^^^^^^^^^^^^^^^^^^^^^^^
+
++-----+-------+-------+--------+
+| pos | hot_x | hot_y | cursor |
++-----+-------+-------+--------+
+
+:pos: a ``VhostUserGpuCursorPos``, the cursor location
+
+:hot_x/hot_y: ``u32``, the cursor hotspot location
+
+:cursor: ``[u32; 64 * 64]``, 64x64 RGBA cursor data (PIXMAN_a8r8g8b8 format)
+
+VhostUserGpuScanout
+^^^^^^^^^^^^^^^^^^^
+
++------------+---+---+
+| scanout-id | w | h |
++------------+---+---+
+
+:scanout-id: ``u32``, the scanout configuration to set
+
+:w/h: ``u32``, the scanout width/height size
+
+VhostUserGpuUpdate
+^^^^^^^^^^^^^^^^^^
+
++------------+---+---+---+---+------+
+| scanout-id | x | y | w | h | data |
++------------+---+---+---+---+------+
+
+:scanout-id: ``u32``, the scanout content to update
+
+:x/y/w/h: ``u32``, region of the update
+
+:data: RGB data (PIXMAN_x8r8g8b8 format)
+
+VhostUserGpuDMABUFScanout
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
++------------+---+---+---+---+-----+-----+--------+-------+--------+
+| scanout-id | x | y | w | h | fdw | fdh | stride | flags | fourcc |
++------------+---+---+---+---+-----+-----+--------+-------+--------+
+
+:scanout-id: ``u32``, the scanout configuration to set
+
+:x/y: ``u32``, the location of the scanout within the DMABUF
+
+:w/h: ``u32``, the scanout width/height size
+
+:fdw/fdh/stride/flags: ``u32``, the DMABUF width/height/stride/flags
+
+:fourcc: ``i32``, the DMABUF fourcc
+
+
+C structure
+-----------
+
+In QEMU the vhost-user-gpu message is implemented with the following struct:
+
+.. code:: c
+
+ typedef struct VhostUserGpuMsg {
+ uint32_t request; /* VhostUserGpuRequest */
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+ VhostUserGpuCursorPos cursor_pos;
+ VhostUserGpuCursorUpdate cursor_update;
+ VhostUserGpuScanout scanout;
+ VhostUserGpuUpdate update;
+ VhostUserGpuDMABUFScanout dmabuf_scanout;
+ struct virtio_gpu_resp_display_info display_info;
+ uint64_t u64;
+ } payload;
+ } QEMU_PACKED VhostUserGpuMsg;
+
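+A non-normative sketch of receiving one message according to this
+layout (``read_full()`` is an assumed helper that loops over
+``read(2)`` until the requested length has arrived):
+
+.. code:: c
+
+    /* Illustrative only: read the 12-byte header, then the payload. */
+    static int recv_gpu_msg(int sock, VhostUserGpuMsg *msg)
+    {
+        const size_t hdr = sizeof(msg->request) + sizeof(msg->flags) +
+                           sizeof(msg->size);
+
+        if (read_full(sock, msg, hdr) < 0) {
+            return -1;
+        }
+        if (msg->size > sizeof(msg->payload)) {
+            return -1; /* payload larger than any known type */
+        }
+        return read_full(sock, &msg->payload, msg->size);
+    }
+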
+Protocol features
+-----------------
+
+None yet.
+
+As the protocol may need to evolve, new messages and communication
+changes can be negotiated via the preliminary
+``VHOST_USER_GPU_GET_PROTOCOL_FEATURES`` and
+``VHOST_USER_GPU_SET_PROTOCOL_FEATURES`` requests.
+
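+For example, once protocol feature bits are defined, the master's reply
+to a ``VHOST_USER_GPU_GET_PROTOCOL_FEATURES`` request could be built as
+below (a hedged sketch; today the bitmask is simply 0):
+
+.. code:: c
+
+    /* Illustrative only: reply carrying the supported feature bitmask. */
+    VhostUserGpuMsg reply = {
+        .request = VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
+        .flags = 0x4,              /* bit 2: this message is a reply */
+        .size = sizeof(uint64_t),
+        .payload.u64 = 0,          /* no protocol features yet */
+    };
+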
+Communication
+=============
+
+Message types
+-------------
+
+``VHOST_USER_GPU_GET_PROTOCOL_FEATURES``
+ :id: 1
+ :request payload: N/A
+ :reply payload: ``u64``
+
+ Get the supported protocol features bitmask.
+
+``VHOST_USER_GPU_SET_PROTOCOL_FEATURES``
+ :id: 2
+ :request payload: ``u64``
+ :reply payload: N/A
+
+ Enable protocol features using a bitmask.
+
+``VHOST_USER_GPU_GET_DISPLAY_INFO``
+ :id: 3
+ :request payload: N/A
+ :reply payload: ``struct virtio_gpu_resp_display_info`` (from virtio specification)
+
+ Get the preferred display configuration.
+
+``VHOST_USER_GPU_CURSOR_POS``
+ :id: 4
+ :request payload: ``VhostUserGpuCursorPos``
+ :reply payload: N/A
+
+ Set/show the cursor position.
+
+``VHOST_USER_GPU_CURSOR_POS_HIDE``
+ :id: 5
+ :request payload: ``VhostUserGpuCursorPos``
+ :reply payload: N/A
+
+ Set/hide the cursor.
+
+``VHOST_USER_GPU_CURSOR_UPDATE``
+ :id: 6
+ :request payload: ``VhostUserGpuCursorUpdate``
+ :reply payload: N/A
+
+ Update the cursor shape and location.
+
+``VHOST_USER_GPU_SCANOUT``
+ :id: 7
+ :request payload: ``VhostUserGpuScanout``
+ :reply payload: N/A
+
+ Set the scanout resolution. To disable a scanout, the dimensions
+ width/height are set to 0.
+
+``VHOST_USER_GPU_UPDATE``
+ :id: 8
+ :request payload: ``VhostUserGpuUpdate``
+ :reply payload: N/A
+
+ Update the scanout content. The data payload contains the graphical bits.
+ The display should be flushed and presented.
+
+``VHOST_USER_GPU_DMABUF_SCANOUT``
+ :id: 9
+ :request payload: ``VhostUserGpuDMABUFScanout``
+ :reply payload: N/A
+
+  Set the scanout resolution/configuration, and share a DMABUF file
+  descriptor for the scanout content, which is passed as ancillary
+  data. To disable a scanout, the dimensions width/height are set
+  to 0, and no file descriptor is passed.
+
+``VHOST_USER_GPU_DMABUF_UPDATE``
+ :id: 10
+ :request payload: ``VhostUserGpuUpdate``
+ :reply payload: empty payload
+
+  The display should be flushed and presented according to the updated
+  region from ``VhostUserGpuUpdate``.
+
+  Note: there is no data payload, since the scanout is shared via a
+  DMABUF that must have been set up previously with
+  ``VHOST_USER_GPU_DMABUF_SCANOUT``.
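+
+Putting these together, the typical slave-side flow for a DMABUF-backed
+display is: configure the scanout once with
+``VHOST_USER_GPU_DMABUF_SCANOUT`` (the DMABUF fd travels as ancillary
+data), then flush each frame with ``VHOST_USER_GPU_DMABUF_UPDATE`` and
+wait for its empty reply before reusing the buffer. The sketch below is
+illustrative only; ``send_gpu_msg()`` (a negative fd meaning no
+ancillary data) and ``recv_gpu_msg()`` are assumed helpers along the
+lines of the earlier sketches, and the request constants are assumed to
+come from the ``VhostUserGpuRequest`` enum referenced in the C
+structure section above:
+
+.. code:: c
+
+    /* Illustrative only: present one frame on scanout 0 via a DMABUF. */
+    static bool present_dmabuf_frame(int sock, int dmabuf_fd,
+                                     uint32_t width, uint32_t height,
+                                     uint32_t stride, int fourcc)
+    {
+        VhostUserGpuMsg msg = {
+            .request = VHOST_USER_GPU_DMABUF_SCANOUT,
+            .size = sizeof(VhostUserGpuDMABUFScanout),
+            .payload.dmabuf_scanout = {
+                .width = width,    .height = height,
+                .fd_width = width, .fd_height = height,
+                .fd_stride = stride,
+                .fd_drm_fourcc = fourcc,
+            },
+        };
+
+        /* set the scanout configuration, passing the DMABUF fd along */
+        if (send_gpu_msg(sock, &msg, dmabuf_fd) < 0) {
+            return false;
+        }
+
+        /* ... render the frame into the DMABUF here ... */
+
+        msg = (VhostUserGpuMsg) {
+            .request = VHOST_USER_GPU_DMABUF_UPDATE,
+            .size = sizeof(VhostUserGpuUpdate),
+            .payload.update = { .width = width, .height = height },
+        };
+        if (send_gpu_msg(sock, &msg, -1) < 0) {
+            return false;
+        }
+
+        /* the master answers with an empty reply once it has flushed */
+        return recv_gpu_msg(sock, &msg) == 0 && (msg.flags & 0x4);
+    }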
diff --git a/docs/interop/vhost-user.rst b/docs/interop/vhost-user.rst
index 7f3232c798..dc0ff9211f 100644
--- a/docs/interop/vhost-user.rst
+++ b/docs/interop/vhost-user.rst
@@ -1163,6 +1163,15 @@ Master message types
send the shared inflight buffer back to slave so that slave could
get inflight I/O after a crash or restart.
+``VHOST_USER_GPU_SET_SOCKET``
+ :id: 33
+ :equivalent ioctl: N/A
+ :master payload: N/A
+
+ Sets the GPU protocol socket file descriptor, which is passed as
+ ancillary data. The GPU protocol is used to inform the master of
+ rendering state and updates. See vhost-user-gpu.rst for details.
+
Slave message types
-------------------
diff --git a/hw/display/Kconfig b/hw/display/Kconfig
index dc1f113df2..910dccb2f7 100644
--- a/hw/display/Kconfig
+++ b/hw/display/Kconfig
@@ -111,6 +111,16 @@ config VIRTIO_VGA
depends on VIRTIO_PCI
select VGA
+config VHOST_USER_GPU
+ bool
+ default y
+ depends on VIRTIO_GPU && VHOST_USER
+
+config VHOST_USER_VGA
+ bool
+ default y
+ depends on VIRTIO_VGA && VHOST_USER_GPU
+
config DPCD
bool
select AUX
diff --git a/hw/display/Makefile.objs b/hw/display/Makefile.objs
index 650031f725..a64998fc7b 100644
--- a/hw/display/Makefile.objs
+++ b/hw/display/Makefile.objs
@@ -43,9 +43,12 @@ obj-$(CONFIG_VGA) += vga.o
common-obj-$(CONFIG_QXL) += qxl.o qxl-logger.o qxl-render.o
-obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu.o virtio-gpu-3d.o
+obj-$(CONFIG_VIRTIO_GPU) += virtio-gpu-base.o virtio-gpu.o virtio-gpu-3d.o
+obj-$(CONFIG_VHOST_USER_GPU) += vhost-user-gpu.o
obj-$(call land,$(CONFIG_VIRTIO_GPU),$(CONFIG_VIRTIO_PCI)) += virtio-gpu-pci.o
+obj-$(call land,$(CONFIG_VHOST_USER_GPU),$(CONFIG_VIRTIO_PCI)) += vhost-user-gpu-pci.o
obj-$(CONFIG_VIRTIO_VGA) += virtio-vga.o
+obj-$(CONFIG_VHOST_USER_VGA) += vhost-user-vga.o
virtio-gpu.o-cflags := $(VIRGL_CFLAGS)
virtio-gpu.o-libs += $(VIRGL_LIBS)
virtio-gpu-3d.o-cflags := $(VIRGL_CFLAGS)
diff --git a/hw/display/vhost-user-gpu-pci.c b/hw/display/vhost-user-gpu-pci.c
new file mode 100644
index 0000000000..7d9b1f5a8c
--- /dev/null
+++ b/hw/display/vhost-user-gpu-pci.c
@@ -0,0 +1,51 @@
+/*
+ * vhost-user GPU PCI device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/virtio/virtio-gpu-pci.h"
+
+#define TYPE_VHOST_USER_GPU_PCI "vhost-user-gpu-pci"
+#define VHOST_USER_GPU_PCI(obj) \
+ OBJECT_CHECK(VhostUserGPUPCI, (obj), TYPE_VHOST_USER_GPU_PCI)
+
+typedef struct VhostUserGPUPCI {
+ VirtIOGPUPCIBase parent_obj;
+
+ VhostUserGPU vdev;
+} VhostUserGPUPCI;
+
+static void vhost_user_gpu_pci_initfn(Object *obj)
+{
+ VhostUserGPUPCI *dev = VHOST_USER_GPU_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_GPU);
+
+ VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
+
+ object_property_add_alias(obj, "chardev",
+ OBJECT(&dev->vdev), "chardev",
+ &error_abort);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_gpu_pci_info = {
+ .generic_name = TYPE_VHOST_USER_GPU_PCI,
+ .parent = TYPE_VIRTIO_GPU_PCI_BASE,
+ .instance_size = sizeof(VhostUserGPUPCI),
+ .instance_init = vhost_user_gpu_pci_initfn,
+};
+
+static void vhost_user_gpu_pci_register_types(void)
+{
+ virtio_pci_types_register(&vhost_user_gpu_pci_info);
+}
+
+type_init(vhost_user_gpu_pci_register_types)
diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c
new file mode 100644
index 0000000000..7181d9cdba
--- /dev/null
+++ b/hw/display/vhost-user-gpu.c
@@ -0,0 +1,607 @@
+/*
+ * vhost-user GPU Device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * Authors:
+ * Marc-André Lureau <marcandre.lureau@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+#include "hw/virtio/virtio-gpu.h"
+#include "chardev/char-fe.h"
+#include "qapi/error.h"
+#include "migration/blocker.h"
+
+#define VHOST_USER_GPU(obj) \
+ OBJECT_CHECK(VhostUserGPU, (obj), TYPE_VHOST_USER_GPU)
+
+typedef enum VhostUserGpuRequest {
+ VHOST_USER_GPU_NONE = 0,
+ VHOST_USER_GPU_GET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_SET_PROTOCOL_FEATURES,
+ VHOST_USER_GPU_GET_DISPLAY_INFO,
+ VHOST_USER_GPU_CURSOR_POS,
+ VHOST_USER_GPU_CURSOR_POS_HIDE,
+ VHOST_USER_GPU_CURSOR_UPDATE,
+ VHOST_USER_GPU_SCANOUT,
+ VHOST_USER_GPU_UPDATE,
+ VHOST_USER_GPU_DMABUF_SCANOUT,
+ VHOST_USER_GPU_DMABUF_UPDATE,
+} VhostUserGpuRequest;
+
+typedef struct VhostUserGpuDisplayInfoReply {
+ struct virtio_gpu_resp_display_info info;
+} VhostUserGpuDisplayInfoReply;
+
+typedef struct VhostUserGpuCursorPos {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+} QEMU_PACKED VhostUserGpuCursorPos;
+
+typedef struct VhostUserGpuCursorUpdate {
+ VhostUserGpuCursorPos pos;
+ uint32_t hot_x;
+ uint32_t hot_y;
+ uint32_t data[64 * 64];
+} QEMU_PACKED VhostUserGpuCursorUpdate;
+
+typedef struct VhostUserGpuScanout {
+ uint32_t scanout_id;
+ uint32_t width;
+ uint32_t height;
+} QEMU_PACKED VhostUserGpuScanout;
+
+typedef struct VhostUserGpuUpdate {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint8_t data[];
+} QEMU_PACKED VhostUserGpuUpdate;
+
+typedef struct VhostUserGpuDMABUFScanout {
+ uint32_t scanout_id;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint32_t fd_width;
+ uint32_t fd_height;
+ uint32_t fd_stride;
+ uint32_t fd_flags;
+ int fd_drm_fourcc;
+} QEMU_PACKED VhostUserGpuDMABUFScanout;
+
+typedef struct VhostUserGpuMsg {
+ uint32_t request; /* VhostUserGpuRequest */
+ uint32_t flags;
+ uint32_t size; /* the following payload size */
+ union {
+ VhostUserGpuCursorPos cursor_pos;
+ VhostUserGpuCursorUpdate cursor_update;
+ VhostUserGpuScanout scanout;
+ VhostUserGpuUpdate update;
+ VhostUserGpuDMABUFScanout dmabuf_scanout;
+ struct virtio_gpu_resp_display_info display_info;
+ uint64_t u64;
+ } payload;
+} QEMU_PACKED VhostUserGpuMsg;
+
+static VhostUserGpuMsg m __attribute__ ((unused));
+#define VHOST_USER_GPU_HDR_SIZE \
+ (sizeof(m.request) + sizeof(m.size) + sizeof(m.flags))
+
+#define VHOST_USER_GPU_MSG_FLAG_REPLY 0x4
+
+static void vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked);
+
+static void
+vhost_user_gpu_handle_cursor(VhostUserGPU *g, VhostUserGpuMsg *msg)
+{
+ VhostUserGpuCursorPos *pos = &msg->payload.cursor_pos;
+ struct virtio_gpu_scanout *s;
+
+ if (pos->scanout_id >= g->parent_obj.conf.max_outputs) {
+ return;
+ }
+ s = &g->parent_obj.scanout[pos->scanout_id];
+
+ if (msg->request == VHOST_USER_GPU_CURSOR_UPDATE) {
+ VhostUserGpuCursorUpdate *up = &msg->payload.cursor_update;
+ if (!s->current_cursor) {
+ s->current_cursor = cursor_alloc(64, 64);
+ }
+
+ s->current_cursor->hot_x = up->hot_x;
+ s->current_cursor->hot_y = up->hot_y;
+
+ memcpy(s->current_cursor->data, up->data,
+ 64 * 64 * sizeof(uint32_t));
+
+ dpy_cursor_define(s->con, s->current_cursor);
+ }
+
+ dpy_mouse_set(s->con, pos->x, pos->y,
+ msg->request != VHOST_USER_GPU_CURSOR_POS_HIDE);
+}
+
+static void
+vhost_user_gpu_send_msg(VhostUserGPU *g, const VhostUserGpuMsg *msg)
+{
+ qemu_chr_fe_write(&g->vhost_chr, (uint8_t *)msg,
+ VHOST_USER_GPU_HDR_SIZE + msg->size);
+}
+
+static void
+vhost_user_gpu_unblock(VhostUserGPU *g)
+{
+ VhostUserGpuMsg msg = {
+ .request = VHOST_USER_GPU_DMABUF_UPDATE,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ };
+
+ vhost_user_gpu_send_msg(g, &msg);
+}
+
+static void
+vhost_user_gpu_handle_display(VhostUserGPU *g, VhostUserGpuMsg *msg)
+{
+ QemuConsole *con = NULL;
+ struct virtio_gpu_scanout *s;
+
+ switch (msg->request) {
+ case VHOST_USER_GPU_GET_PROTOCOL_FEATURES: {
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(uint64_t),
+ };
+
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
+ case VHOST_USER_GPU_SET_PROTOCOL_FEATURES: {
+ break;
+ }
+ case VHOST_USER_GPU_GET_DISPLAY_INFO: {
+ struct virtio_gpu_resp_display_info display_info = { {} };
+ VhostUserGpuMsg reply = {
+ .request = msg->request,
+ .flags = VHOST_USER_GPU_MSG_FLAG_REPLY,
+ .size = sizeof(struct virtio_gpu_resp_display_info),
+ };
+
+ display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
+ virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
+ memcpy(&reply.payload.display_info, &display_info,
+ sizeof(display_info));
+ vhost_user_gpu_send_msg(g, &reply);
+ break;
+ }
+ case VHOST_USER_GPU_SCANOUT: {
+ VhostUserGpuScanout *m = &msg->payload.scanout;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ return;
+ }
+
+ g->parent_obj.enable = 1;
+ s = &g->parent_obj.scanout[m->scanout_id];
+ con = s->con;
+
+ if (m->scanout_id == 0 && m->width == 0) {
+ s->ds = qemu_create_message_surface(640, 480,
+ "Guest disabled display.");
+ dpy_gfx_replace_surface(con, s->ds);
+ } else {
+ s->ds = qemu_create_displaysurface(m->width, m->height);
+ /* replace surface on next update */
+ }
+
+ break;
+ }
+ case VHOST_USER_GPU_DMABUF_SCANOUT: {
+ VhostUserGpuDMABUFScanout *m = &msg->payload.dmabuf_scanout;
+ int fd = qemu_chr_fe_get_msgfd(&g->vhost_chr);
+ QemuDmaBuf *dmabuf;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ error_report("invalid scanout: %d", m->scanout_id);
+ if (fd >= 0) {
+ close(fd);
+ }
+ break;
+ }
+
+ g->parent_obj.enable = 1;
+ con = g->parent_obj.scanout[m->scanout_id].con;
+ dmabuf = &g->dmabuf[m->scanout_id];
+ if (dmabuf->fd >= 0) {
+ close(dmabuf->fd);
+ dmabuf->fd = -1;
+ }
+ if (!console_has_gl_dmabuf(con)) {
+ /* it would be nice to report that error earlier */
+ error_report("console doesn't support dmabuf!");
+ break;
+ }
+ dpy_gl_release_dmabuf(con, dmabuf);
+ if (fd == -1) {
+ dpy_gl_scanout_disable(con);
+ break;
+ }
+ *dmabuf = (QemuDmaBuf) {
+ .fd = fd,
+ .width = m->fd_width,
+ .height = m->fd_height,
+ .stride = m->fd_stride,
+ .fourcc = m->fd_drm_fourcc,
+ .y0_top = m->fd_flags & VIRTIO_GPU_RESOURCE_FLAG_Y_0_TOP,
+ };
+ dpy_gl_scanout_dmabuf(con, dmabuf);
+ break;
+ }
+ case VHOST_USER_GPU_DMABUF_UPDATE: {
+ VhostUserGpuUpdate *m = &msg->payload.update;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs ||
+ !g->parent_obj.scanout[m->scanout_id].con) {
+ error_report("invalid scanout update: %d", m->scanout_id);
+ vhost_user_gpu_unblock(g);
+ break;
+ }
+
+ con = g->parent_obj.scanout[m->scanout_id].con;
+ if (!console_has_gl(con)) {
+ error_report("console doesn't support GL!");
+ vhost_user_gpu_unblock(g);
+ break;
+ }
+ dpy_gl_update(con, m->x, m->y, m->width, m->height);
+ g->backend_blocked = true;
+ break;
+ }
+ case VHOST_USER_GPU_UPDATE: {
+ VhostUserGpuUpdate *m = &msg->payload.update;
+
+ if (m->scanout_id >= g->parent_obj.conf.max_outputs) {
+ break;
+ }
+ s = &g->parent_obj.scanout[m->scanout_id];
+ con = s->con;
+ pixman_image_t *image =
+ pixman_image_create_bits(PIXMAN_x8r8g8b8,
+ m->width,
+ m->height,
+ (uint32_t *)m->data,
+ m->width * 4);
+
+ pixman_image_composite(PIXMAN_OP_SRC,
+ image, NULL, s->ds->image,
+ 0, 0, 0, 0, m->x, m->y, m->width, m->height);
+
+ pixman_image_unref(image);
+ if (qemu_console_surface(con) != s->ds) {
+ dpy_gfx_replace_surface(con, s->ds);
+ } else {
+ dpy_gfx_update(con, m->x, m->y, m->width, m->height);
+ }
+ break;
+ }
+ default:
+ g_warning("unhandled message %d %d", msg->request, msg->size);
+ }
+
+ if (con && qemu_console_is_gl_blocked(con)) {
+ vhost_user_gpu_update_blocked(g, true);
+ }
+}
+
+static void
+vhost_user_gpu_chr_read(void *opaque)
+{
+ VhostUserGPU *g = opaque;
+ VhostUserGpuMsg *msg = NULL;
+ VhostUserGpuRequest request;
+ uint32_t size, flags;
+ int r;
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&request, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg header: %d, %d", r, errno);
+ goto end;
+ }
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&flags, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg flags");
+ goto end;
+ }
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&size, sizeof(uint32_t));
+ if (r != sizeof(uint32_t)) {
+ error_report("failed to read msg size");
+ goto end;
+ }
+
+ msg = g_malloc(VHOST_USER_GPU_HDR_SIZE + size);
+ g_return_if_fail(msg != NULL);
+
+ r = qemu_chr_fe_read_all(&g->vhost_chr,
+ (uint8_t *)&msg->payload, size);
+ if (r != size) {
+ error_report("failed to read msg payload %d != %d", r, size);
+ goto end;
+ }
+
+ msg->request = request;
+    msg->flags = flags;
+ msg->size = size;
+
+ if (request == VHOST_USER_GPU_CURSOR_UPDATE ||
+ request == VHOST_USER_GPU_CURSOR_POS ||
+ request == VHOST_USER_GPU_CURSOR_POS_HIDE) {
+ vhost_user_gpu_handle_cursor(g, msg);
+ } else {
+ vhost_user_gpu_handle_display(g, msg);
+ }
+
+end:
+ g_free(msg);
+}
+
+static void
+vhost_user_gpu_update_blocked(VhostUserGPU *g, bool blocked)
+{
+ qemu_set_fd_handler(g->vhost_gpu_fd,
+ blocked ? NULL : vhost_user_gpu_chr_read, NULL, g);
+}
+
+static void
+vhost_user_gpu_gl_unblock(VirtIOGPUBase *b)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(b);
+
+ if (g->backend_blocked) {
+ vhost_user_gpu_unblock(VHOST_USER_GPU(g));
+ g->backend_blocked = false;
+ }
+
+ vhost_user_gpu_update_blocked(VHOST_USER_GPU(g), false);
+}
+
+static bool
+vhost_user_gpu_do_set_socket(VhostUserGPU *g, Error **errp)
+{
+ Chardev *chr;
+ int sv[2];
+
+ if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) {
+ error_setg_errno(errp, errno, "socketpair() failed");
+ return false;
+ }
+
+ chr = CHARDEV(object_new(TYPE_CHARDEV_SOCKET));
+ if (!chr || qemu_chr_add_client(chr, sv[0]) == -1) {
+ error_setg(errp, "Failed to make socket chardev");
+ goto err;
+ }
+ if (!qemu_chr_fe_init(&g->vhost_chr, chr, errp)) {
+ goto err;
+ }
+ if (vhost_user_gpu_set_socket(&g->vhost->dev, sv[1]) < 0) {
+ error_setg(errp, "Failed to set vhost-user-gpu socket");
+ qemu_chr_fe_deinit(&g->vhost_chr, false);
+ goto err;
+ }
+
+ g->vhost_gpu_fd = sv[0];
+ vhost_user_gpu_update_blocked(g, false);
+ close(sv[1]);
+ return true;
+
+err:
+ close(sv[0]);
+ close(sv[1]);
+ if (chr) {
+ object_unref(OBJECT(chr));
+ }
+ return false;
+}
+
+static void
+vhost_user_gpu_get_config(VirtIODevice *vdev, uint8_t *config_data)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
+ struct virtio_gpu_config *vgconfig =
+ (struct virtio_gpu_config *)config_data;
+ int ret;
+
+ memset(config_data, 0, sizeof(struct virtio_gpu_config));
+
+ ret = vhost_dev_get_config(&g->vhost->dev,
+ config_data, sizeof(struct virtio_gpu_config));
+ if (ret) {
+ error_report("vhost-user-gpu: get device config space failed");
+ return;
+ }
+
+ /* those fields are managed by qemu */
+ vgconfig->num_scanouts = b->virtio_config.num_scanouts;
+ vgconfig->events_read = b->virtio_config.events_read;
+ vgconfig->events_clear = b->virtio_config.events_clear;
+}
+
+static void
+vhost_user_gpu_set_config(VirtIODevice *vdev,
+ const uint8_t *config_data)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(vdev);
+ const struct virtio_gpu_config *vgconfig =
+ (const struct virtio_gpu_config *)config_data;
+ int ret;
+
+ if (vgconfig->events_clear) {
+ b->virtio_config.events_read &= ~vgconfig->events_clear;
+ }
+
+ ret = vhost_dev_set_config(&g->vhost->dev, config_data,
+ 0, sizeof(struct virtio_gpu_config),
+ VHOST_SET_CONFIG_TYPE_MASTER);
+ if (ret) {
+ error_report("vhost-user-gpu: set device config space failed");
+ return;
+ }
+}
+
+static void
+vhost_user_gpu_set_status(VirtIODevice *vdev, uint8_t val)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+ Error *err = NULL;
+
+ if (val & VIRTIO_CONFIG_S_DRIVER_OK && vdev->vm_running) {
+ if (!vhost_user_gpu_do_set_socket(g, &err)) {
+ error_report_err(err);
+ return;
+ }
+ vhost_user_backend_start(g->vhost);
+ } else {
+ /* unblock any wait and stop processing */
+ if (g->vhost_gpu_fd != -1) {
+ vhost_user_gpu_update_blocked(g, true);
+ qemu_chr_fe_deinit(&g->vhost_chr, true);
+ g->vhost_gpu_fd = -1;
+ }
+ vhost_user_backend_stop(g->vhost);
+ }
+}
+
+static bool
+vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ return vhost_virtqueue_pending(&g->vhost->dev, idx);
+}
+
+static void
+vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask);
+}
+
+static void
+vhost_user_gpu_instance_init(Object *obj)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(obj);
+
+ g->vhost = VHOST_USER_BACKEND(object_new(TYPE_VHOST_USER_BACKEND));
+ object_property_add_alias(obj, "chardev",
+ OBJECT(g->vhost), "chardev", &error_abort);
+}
+
+static void
+vhost_user_gpu_instance_finalize(Object *obj)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(obj);
+
+ object_unref(OBJECT(g->vhost));
+}
+
+static void
+vhost_user_gpu_reset(VirtIODevice *vdev)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(vdev);
+
+ virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
+
+ vhost_user_backend_stop(g->vhost);
+}
+
+static int
+vhost_user_gpu_config_change(struct vhost_dev *dev)
+{
+ error_report("vhost-user-gpu: unhandled backend config change");
+ return -1;
+}
+
+static const VhostDevConfigOps config_ops = {
+ .vhost_dev_config_notifier = vhost_user_gpu_config_change,
+};
+
+static void
+vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp)
+{
+ VhostUserGPU *g = VHOST_USER_GPU(qdev);
+ VirtIODevice *vdev = VIRTIO_DEVICE(g);
+
+ vhost_dev_set_config_notifier(&g->vhost->dev, &config_ops);
+ if (vhost_user_backend_dev_init(g->vhost, vdev, 2, errp) < 0) {
+ return;
+ }
+
+ if (virtio_has_feature(g->vhost->dev.features, VIRTIO_GPU_F_VIRGL)) {
+ g->parent_obj.conf.flags |= 1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED;
+ }
+
+ if (!virtio_gpu_base_device_realize(qdev, NULL, NULL, errp)) {
+ return;
+ }
+
+ g->vhost_gpu_fd = -1;
+}
+
+static Property vhost_user_gpu_properties[] = {
+ VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void
+vhost_user_gpu_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
+
+ vgc->gl_unblock = vhost_user_gpu_gl_unblock;
+
+ vdc->realize = vhost_user_gpu_device_realize;
+ vdc->reset = vhost_user_gpu_reset;
+ vdc->set_status = vhost_user_gpu_set_status;
+ vdc->guest_notifier_mask = vhost_user_gpu_guest_notifier_mask;
+ vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending;
+ vdc->get_config = vhost_user_gpu_get_config;
+ vdc->set_config = vhost_user_gpu_set_config;
+
+ dc->props = vhost_user_gpu_properties;
+}
+
+static const TypeInfo vhost_user_gpu_info = {
+ .name = TYPE_VHOST_USER_GPU,
+ .parent = TYPE_VIRTIO_GPU_BASE,
+ .instance_size = sizeof(VhostUserGPU),
+ .instance_init = vhost_user_gpu_instance_init,
+ .instance_finalize = vhost_user_gpu_instance_finalize,
+ .class_init = vhost_user_gpu_class_init,
+};
+
+static void vhost_user_gpu_register_types(void)
+{
+ type_register_static(&vhost_user_gpu_info);
+}
+
+type_init(vhost_user_gpu_register_types)
diff --git a/hw/display/vhost-user-vga.c b/hw/display/vhost-user-vga.c
new file mode 100644
index 0000000000..a7195276d9
--- /dev/null
+++ b/hw/display/vhost-user-vga.c
@@ -0,0 +1,52 @@
+/*
+ * vhost-user VGA device
+ *
+ * Copyright Red Hat, Inc. 2018
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "virtio-vga.h"
+
+#define TYPE_VHOST_USER_VGA "vhost-user-vga"
+
+#define VHOST_USER_VGA(obj) \
+ OBJECT_CHECK(VhostUserVGA, (obj), TYPE_VHOST_USER_VGA)
+
+typedef struct VhostUserVGA {
+ VirtIOVGABase parent_obj;
+
+ VhostUserGPU vdev;
+} VhostUserVGA;
+
+static void vhost_user_vga_inst_initfn(Object *obj)
+{
+ VhostUserVGA *dev = VHOST_USER_VGA(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_GPU);
+
+ VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
+
+ object_property_add_alias(obj, "chardev",
+ OBJECT(&dev->vdev), "chardev",
+ &error_abort);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_vga_info = {
+ .generic_name = TYPE_VHOST_USER_VGA,
+ .parent = TYPE_VIRTIO_VGA_BASE,
+ .instance_size = sizeof(struct VhostUserVGA),
+ .instance_init = vhost_user_vga_inst_initfn,
+};
+
+static void vhost_user_vga_register_types(void)
+{
+ virtio_pci_types_register(&vhost_user_vga_info);
+}
+
+type_init(vhost_user_vga_register_types)
diff --git a/hw/display/virtio-gpu-3d.c b/hw/display/virtio-gpu-3d.c
index 2d302526ab..b918167aec 100644
--- a/hw/display/virtio-gpu-3d.c
+++ b/hw/display/virtio-gpu-3d.c
@@ -118,11 +118,11 @@ static void virgl_cmd_context_destroy(VirtIOGPU *g,
static void virtio_gpu_rect_update(VirtIOGPU *g, int idx, int x, int y,
int width, int height)
{
- if (!g->scanout[idx].con) {
+ if (!g->parent_obj.scanout[idx].con) {
return;
}
- dpy_gl_update(g->scanout[idx].con, x, y, width, height);
+ dpy_gl_update(g->parent_obj.scanout[idx].con, x, y, width, height);
}
static void virgl_cmd_resource_flush(VirtIOGPU *g,
@@ -135,8 +135,8 @@ static void virgl_cmd_resource_flush(VirtIOGPU *g,
trace_virtio_gpu_cmd_res_flush(rf.resource_id,
rf.r.width, rf.r.height, rf.r.x, rf.r.y);
- for (i = 0; i < g->conf.max_outputs; i++) {
- if (g->scanout[i].resource_id != rf.resource_id) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+ if (g->parent_obj.scanout[i].resource_id != rf.resource_id) {
continue;
}
virtio_gpu_rect_update(g, i, rf.r.x, rf.r.y, rf.r.width, rf.r.height);
@@ -154,13 +154,13 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
- if (ss.scanout_id >= g->conf.max_outputs) {
+ if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
- g->enable = 1;
+ g->parent_obj.enable = 1;
memset(&info, 0, sizeof(info));
@@ -173,20 +173,22 @@ static void virgl_cmd_set_scanout(VirtIOGPU *g,
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_RESOURCE_ID;
return;
}
- qemu_console_resize(g->scanout[ss.scanout_id].con,
+ qemu_console_resize(g->parent_obj.scanout[ss.scanout_id].con,
ss.r.width, ss.r.height);
virgl_renderer_force_ctx_0();
- dpy_gl_scanout_texture(g->scanout[ss.scanout_id].con, info.tex_id,
- info.flags & 1 /* FIXME: Y_0_TOP */,
- info.width, info.height,
- ss.r.x, ss.r.y, ss.r.width, ss.r.height);
+ dpy_gl_scanout_texture(
+ g->parent_obj.scanout[ss.scanout_id].con, info.tex_id,
+ info.flags & 1 /* FIXME: Y_0_TOP */,
+ info.width, info.height,
+ ss.r.x, ss.r.y, ss.r.width, ss.r.height);
} else {
if (ss.scanout_id != 0) {
- dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, NULL);
+ dpy_gfx_replace_surface(
+ g->parent_obj.scanout[ss.scanout_id].con, NULL);
}
- dpy_gl_scanout_disable(g->scanout[ss.scanout_id].con);
+ dpy_gl_scanout_disable(g->parent_obj.scanout[ss.scanout_id].con);
}
- g->scanout[ss.scanout_id].resource_id = ss.resource_id;
+ g->parent_obj.scanout[ss.scanout_id].resource_id = ss.resource_id;
}
static void virgl_cmd_submit_3d(VirtIOGPU *g,
@@ -209,7 +211,7 @@ static void virgl_cmd_submit_3d(VirtIOGPU *g,
goto out;
}
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->stats.req_3d++;
g->stats.bytes_3d += cs.size;
}
@@ -507,7 +509,7 @@ static void virgl_write_fence(void *opaque, uint32_t fence)
QTAILQ_REMOVE(&g->fenceq, cmd, next);
g_free(cmd);
g->inflight--;
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
fprintf(stderr, "inflight: %3d (-)\r", g->inflight);
}
}
@@ -524,7 +526,7 @@ virgl_create_context(void *opaque, int scanout_idx,
qparams.major_ver = params->major_ver;
qparams.minor_ver = params->minor_ver;
- ctx = dpy_gl_ctx_create(g->scanout[scanout_idx].con, &qparams);
+ ctx = dpy_gl_ctx_create(g->parent_obj.scanout[scanout_idx].con, &qparams);
return (virgl_renderer_gl_context)ctx;
}
@@ -533,7 +535,7 @@ static void virgl_destroy_context(void *opaque, virgl_renderer_gl_context ctx)
VirtIOGPU *g = opaque;
QEMUGLContext qctx = (QEMUGLContext)ctx;
- dpy_gl_ctx_destroy(g->scanout[0].con, qctx);
+ dpy_gl_ctx_destroy(g->parent_obj.scanout[0].con, qctx);
}
static int virgl_make_context_current(void *opaque, int scanout_idx,
@@ -542,7 +544,8 @@ static int virgl_make_context_current(void *opaque, int scanout_idx,
VirtIOGPU *g = opaque;
QEMUGLContext qctx = (QEMUGLContext)ctx;
- return dpy_gl_ctx_make_current(g->scanout[scanout_idx].con, qctx);
+ return dpy_gl_ctx_make_current(g->parent_obj.scanout[scanout_idx].con,
+ qctx);
}
static struct virgl_renderer_callbacks virtio_gpu_3d_cbs = {
@@ -594,11 +597,11 @@ void virtio_gpu_virgl_reset(VirtIOGPU *g)
int i;
/* virgl_renderer_reset() ??? */
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
if (i != 0) {
- dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+ dpy_gfx_replace_surface(g->parent_obj.scanout[i].con, NULL);
}
- dpy_gl_scanout_disable(g->scanout[i].con);
+ dpy_gl_scanout_disable(g->parent_obj.scanout[i].con);
}
}
@@ -614,7 +617,7 @@ int virtio_gpu_virgl_init(VirtIOGPU *g)
g->fence_poll = timer_new_ms(QEMU_CLOCK_VIRTUAL,
virtio_gpu_fence_poll, g);
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->print_stats = timer_new_ms(QEMU_CLOCK_VIRTUAL,
virtio_gpu_print_stats, g);
timer_mod(g->print_stats, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 1000);
diff --git a/hw/display/virtio-gpu-base.c b/hw/display/virtio-gpu-base.c
new file mode 100644
index 0000000000..55e07995fe
--- /dev/null
+++ b/hw/display/virtio-gpu-base.c
@@ -0,0 +1,268 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "qemu/osdep.h"
+
+#include "hw/virtio/virtio-gpu.h"
+#include "migration/blocker.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+
+void
+virtio_gpu_base_reset(VirtIOGPUBase *g)
+{
+ int i;
+
+ g->enable = 0;
+ g->use_virgl_renderer = false;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ g->scanout[i].resource_id = 0;
+ g->scanout[i].width = 0;
+ g->scanout[i].height = 0;
+ g->scanout[i].x = 0;
+ g->scanout[i].y = 0;
+ g->scanout[i].ds = NULL;
+ }
+}
+
+void
+virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
+ struct virtio_gpu_resp_display_info *dpy_info)
+{
+ int i;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ if (g->enabled_output_bitmask & (1 << i)) {
+ dpy_info->pmodes[i].enabled = 1;
+ dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
+ dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
+ }
+ }
+}
+
+static void virtio_gpu_invalidate_display(void *opaque)
+{
+}
+
+static void virtio_gpu_update_display(void *opaque)
+{
+}
+
+static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
+{
+}
+
+static void virtio_gpu_notify_event(VirtIOGPUBase *g, uint32_t event_type)
+{
+ g->virtio_config.events_read |= event_type;
+ virtio_notify_config(&g->parent_obj);
+}
+
+static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+{
+ VirtIOGPUBase *g = opaque;
+
+ if (idx >= g->conf.max_outputs) {
+ return -1;
+ }
+
+ g->req_state[idx].x = info->xoff;
+ g->req_state[idx].y = info->yoff;
+ g->req_state[idx].width = info->width;
+ g->req_state[idx].height = info->height;
+
+ if (info->width && info->height) {
+ g->enabled_output_bitmask |= (1 << idx);
+ } else {
+ g->enabled_output_bitmask &= ~(1 << idx);
+ }
+
+ /* send event to guest */
+ virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
+ return 0;
+}
+
+static void
+virtio_gpu_gl_block(void *opaque, bool block)
+{
+ VirtIOGPUBase *g = opaque;
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_GET_CLASS(g);
+
+ if (block) {
+ g->renderer_blocked++;
+ } else {
+ g->renderer_blocked--;
+ }
+ assert(g->renderer_blocked >= 0);
+
+ if (g->renderer_blocked == 0) {
+ vgc->gl_unblock(g);
+ }
+}
+
+const GraphicHwOps virtio_gpu_ops = {
+ .invalidate = virtio_gpu_invalidate_display,
+ .gfx_update = virtio_gpu_update_display,
+ .text_update = virtio_gpu_text_update,
+ .ui_info = virtio_gpu_ui_info,
+ .gl_block = virtio_gpu_gl_block,
+};
+
+bool
+virtio_gpu_base_device_realize(DeviceState *qdev,
+ VirtIOHandleOutput ctrl_cb,
+ VirtIOHandleOutput cursor_cb,
+ Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
+ Error *local_err = NULL;
+ int i;
+
+ if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
+ error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
+ return false;
+ }
+
+ g->use_virgl_renderer = false;
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ error_setg(&g->migration_blocker, "virgl is not yet migratable");
+ migrate_add_blocker(g->migration_blocker, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ error_free(g->migration_blocker);
+ return false;
+ }
+ }
+
+ g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
+ virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
+ sizeof(struct virtio_gpu_config));
+
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ /* use larger control queue in 3d mode */
+ virtio_add_queue(vdev, 256, ctrl_cb);
+ virtio_add_queue(vdev, 16, cursor_cb);
+ } else {
+ virtio_add_queue(vdev, 64, ctrl_cb);
+ virtio_add_queue(vdev, 16, cursor_cb);
+ }
+
+ g->enabled_output_bitmask = 1;
+
+ g->req_state[0].width = g->conf.xres;
+ g->req_state[0].height = g->conf.yres;
+
+ for (i = 0; i < g->conf.max_outputs; i++) {
+ g->scanout[i].con =
+ graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
+ if (i > 0) {
+ dpy_gfx_replace_surface(g->scanout[i].con, NULL);
+ }
+ }
+
+ return true;
+}
+
+static uint64_t
+virtio_gpu_base_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ if (virtio_gpu_virgl_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_VIRGL);
+ }
+ if (virtio_gpu_edid_enabled(g->conf)) {
+ features |= (1 << VIRTIO_GPU_F_EDID);
+ }
+
+ return features;
+}
+
+static void
+virtio_gpu_base_set_features(VirtIODevice *vdev, uint64_t features)
+{
+ static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ g->use_virgl_renderer = ((features & virgl) == virgl);
+ trace_virtio_gpu_features(g->use_virgl_renderer);
+}
+
+static void
+virtio_gpu_base_device_unrealize(DeviceState *qdev, Error **errp)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(qdev);
+
+ if (g->migration_blocker) {
+ migrate_del_blocker(g->migration_blocker);
+ error_free(g->migration_blocker);
+ }
+}
+
+static void
+virtio_gpu_base_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ vdc->unrealize = virtio_gpu_base_device_unrealize;
+ vdc->get_features = virtio_gpu_base_get_features;
+ vdc->set_features = virtio_gpu_base_set_features;
+
+ set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
+ dc->hotpluggable = false;
+}
+
+static const TypeInfo virtio_gpu_base_info = {
+ .name = TYPE_VIRTIO_GPU_BASE,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VirtIOGPUBase),
+ .class_size = sizeof(VirtIOGPUBaseClass),
+ .class_init = virtio_gpu_base_class_init,
+ .abstract = true
+};
+
+static void
+virtio_register_types(void)
+{
+ type_register_static(&virtio_gpu_base_info);
+}
+
+type_init(virtio_register_types)
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
+
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
+QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
diff --git a/hw/display/virtio-gpu-pci.c b/hw/display/virtio-gpu-pci.c
index 0bc4d9d424..206870cd4c 100644
--- a/hw/display/virtio-gpu-pci.c
+++ b/hw/display/virtio-gpu-pci.c
@@ -16,33 +16,18 @@
#include "hw/pci/pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
-#include "hw/virtio/virtio-pci.h"
-#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-pci.h"
-typedef struct VirtIOGPUPCI VirtIOGPUPCI;
-
-/*
- * virtio-gpu-pci: This extends VirtioPCIProxy.
- */
-#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
-#define VIRTIO_GPU_PCI(obj) \
- OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
-
-struct VirtIOGPUPCI {
- VirtIOPCIProxy parent_obj;
- VirtIOGPU vdev;
-};
-
-static Property virtio_gpu_pci_properties[] = {
+static Property virtio_gpu_pci_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+static void virtio_gpu_pci_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
- VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
- VirtIOGPU *g = &vgpu->vdev;
- DeviceState *vdev = DEVICE(&vgpu->vdev);
+ VirtIOGPUPCIBase *vgpu = VIRTIO_GPU_PCI_BASE(vpci_dev);
+ VirtIOGPUBase *g = vgpu->vgpu;
+ DeviceState *vdev = DEVICE(g);
int i;
Error *local_error = NULL;
@@ -64,36 +49,56 @@ static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
+static void virtio_gpu_pci_base_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_gpu_pci_properties;
+ dc->props = virtio_gpu_pci_base_properties;
dc->hotpluggable = false;
- k->realize = virtio_gpu_pci_realize;
+ k->realize = virtio_gpu_pci_base_realize;
pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
}
+static const TypeInfo virtio_gpu_pci_base_info = {
+ .name = TYPE_VIRTIO_GPU_PCI_BASE,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOGPUPCIBase),
+ .class_init = virtio_gpu_pci_base_class_init,
+ .abstract = true
+};
+
+#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
+#define VIRTIO_GPU_PCI(obj) \
+ OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)
+
+typedef struct VirtIOGPUPCI {
+ VirtIOGPUPCIBase parent_obj;
+ VirtIOGPU vdev;
+} VirtIOGPUPCI;
+
static void virtio_gpu_initfn(Object *obj)
{
VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
TYPE_VIRTIO_GPU);
+ VIRTIO_GPU_PCI_BASE(obj)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}
static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = {
.generic_name = TYPE_VIRTIO_GPU_PCI,
+ .parent = TYPE_VIRTIO_GPU_PCI_BASE,
.instance_size = sizeof(VirtIOGPUPCI),
.instance_init = virtio_gpu_initfn,
- .class_init = virtio_gpu_pci_class_init,
};
static void virtio_gpu_pci_register_types(void)
{
+ type_register_static(&virtio_gpu_pci_base_info);
virtio_pci_types_register(&virtio_gpu_pci_info);
}
+
type_init(virtio_gpu_pci_register_types)
diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c
index 9e37e0ac96..4a49da5ecb 100644
--- a/hw/display/virtio-gpu.c
+++ b/hw/display/virtio-gpu.c
@@ -20,11 +20,13 @@
#include "sysemu/dma.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-gpu.h"
+#include "hw/virtio/virtio-gpu-bswap.h"
+#include "hw/virtio/virtio-gpu-pixman.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/display/edid.h"
-#include "migration/blocker.h"
#include "qemu/log.h"
#include "qapi/error.h"
+#include "qemu/error-report.h"
#define VIRTIO_GPU_VM_VERSION 1
@@ -34,53 +36,11 @@ virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id);
static void virtio_gpu_cleanup_mapping(VirtIOGPU *g,
struct virtio_gpu_simple_resource *res);
-static void
-virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
-{
- le32_to_cpus(&hdr->type);
- le32_to_cpus(&hdr->flags);
- le64_to_cpus(&hdr->fence_id);
- le32_to_cpus(&hdr->ctx_id);
- le32_to_cpus(&hdr->padding);
-}
-
-static void virtio_gpu_bswap_32(void *ptr,
- size_t size)
-{
-#ifdef HOST_WORDS_BIGENDIAN
-
- size_t i;
- struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
-
- virtio_gpu_ctrl_hdr_bswap(hdr);
-
- i = sizeof(struct virtio_gpu_ctrl_hdr);
- while (i < size) {
- le32_to_cpus((uint32_t *)(ptr + i));
- i = i + sizeof(uint32_t);
- }
-
-#endif
-}
-
-static void
-virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
-{
- virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
- le32_to_cpus(&t2d->r.x);
- le32_to_cpus(&t2d->r.y);
- le32_to_cpus(&t2d->r.width);
- le32_to_cpus(&t2d->r.height);
- le64_to_cpus(&t2d->offset);
- le32_to_cpus(&t2d->resource_id);
- le32_to_cpus(&t2d->padding);
-}
-
#ifdef CONFIG_VIRGL
#include <virglrenderer.h>
#define VIRGL(_g, _virgl, _simple, ...) \
do { \
- if (_g->use_virgl_renderer) { \
+ if (_g->parent_obj.use_virgl_renderer) { \
_virgl(__VA_ARGS__); \
} else { \
_simple(__VA_ARGS__); \
@@ -148,10 +108,10 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
struct virtio_gpu_scanout *s;
bool move = cursor->hdr.type == VIRTIO_GPU_CMD_MOVE_CURSOR;
- if (cursor->pos.scanout_id >= g->conf.max_outputs) {
+ if (cursor->pos.scanout_id >= g->parent_obj.conf.max_outputs) {
return;
}
- s = &g->scanout[cursor->pos.scanout_id];
+ s = &g->parent_obj.scanout[cursor->pos.scanout_id];
trace_virtio_gpu_update_cursor(cursor->pos.scanout_id,
cursor->pos.x,
@@ -182,53 +142,6 @@ static void update_cursor(VirtIOGPU *g, struct virtio_gpu_update_cursor *cursor)
cursor->resource_id ? 1 : 0);
}
-static void virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
- memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
-}
-
-static void virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
- struct virtio_gpu_config vgconfig;
-
- memcpy(&vgconfig, config, sizeof(g->virtio_config));
-
- if (vgconfig.events_clear) {
- g->virtio_config.events_read &= ~vgconfig.events_clear;
- }
-}
-
-static uint64_t virtio_gpu_get_features(VirtIODevice *vdev, uint64_t features,
- Error **errp)
-{
- VirtIOGPU *g = VIRTIO_GPU(vdev);
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- features |= (1 << VIRTIO_GPU_F_VIRGL);
- }
- if (virtio_gpu_edid_enabled(g->conf)) {
- features |= (1 << VIRTIO_GPU_F_EDID);
- }
- return features;
-}
-
-static void virtio_gpu_set_features(VirtIODevice *vdev, uint64_t features)
-{
- static const uint32_t virgl = (1 << VIRTIO_GPU_F_VIRGL);
- VirtIOGPU *g = VIRTIO_GPU(vdev);
-
- g->use_virgl_renderer = ((features & virgl) == virgl);
- trace_virtio_gpu_features(g->use_virgl_renderer);
-}
-
-static void virtio_gpu_notify_event(VirtIOGPU *g, uint32_t event_type)
-{
- g->virtio_config.events_read |= event_type;
- virtio_notify_config(&g->parent_obj);
-}
-
static struct virtio_gpu_simple_resource *
virtio_gpu_find_resource(VirtIOGPU *g, uint32_t resource_id)
{
@@ -277,21 +190,6 @@ void virtio_gpu_ctrl_response_nodata(VirtIOGPU *g,
virtio_gpu_ctrl_response(g, cmd, &resp, sizeof(resp));
}
-static void
-virtio_gpu_fill_display_info(VirtIOGPU *g,
- struct virtio_gpu_resp_display_info *dpy_info)
-{
- int i;
-
- for (i = 0; i < g->conf.max_outputs; i++) {
- if (g->enabled_output_bitmask & (1 << i)) {
- dpy_info->pmodes[i].enabled = 1;
- dpy_info->pmodes[i].r.width = cpu_to_le32(g->req_state[i].width);
- dpy_info->pmodes[i].r.height = cpu_to_le32(g->req_state[i].height);
- }
- }
-}
-
void virtio_gpu_get_display_info(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd)
{
@@ -300,7 +198,7 @@ void virtio_gpu_get_display_info(VirtIOGPU *g,
trace_virtio_gpu_cmd_get_display_info();
memset(&display_info, 0, sizeof(display_info));
display_info.hdr.type = VIRTIO_GPU_RESP_OK_DISPLAY_INFO;
- virtio_gpu_fill_display_info(g, &display_info);
+ virtio_gpu_base_fill_display_info(VIRTIO_GPU_BASE(g), &display_info);
virtio_gpu_ctrl_response(g, cmd, &display_info.hdr,
sizeof(display_info));
}
@@ -309,9 +207,10 @@ static void
virtio_gpu_generate_edid(VirtIOGPU *g, int scanout,
struct virtio_gpu_resp_edid *edid)
{
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
qemu_edid_info info = {
- .prefx = g->req_state[scanout].width,
- .prefy = g->req_state[scanout].height,
+ .prefx = b->req_state[scanout].width,
+ .prefy = b->req_state[scanout].height,
};
edid->size = cpu_to_le32(sizeof(edid->edid));
@@ -323,11 +222,12 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
{
struct virtio_gpu_resp_edid edid;
struct virtio_gpu_cmd_get_edid get_edid;
+ VirtIOGPUBase *b = VIRTIO_GPU_BASE(g);
VIRTIO_GPU_FILL_CMD(get_edid);
virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));
- if (get_edid.scanout >= g->conf.max_outputs) {
+ if (get_edid.scanout >= b->conf.max_outputs) {
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
return;
}
@@ -339,30 +239,6 @@ void virtio_gpu_get_edid(VirtIOGPU *g,
virtio_gpu_ctrl_response(g, cmd, &edid.hdr, sizeof(edid));
}
-static pixman_format_code_t get_pixman_format(uint32_t virtio_gpu_format)
-{
- switch (virtio_gpu_format) {
- case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
- return PIXMAN_BE_b8g8r8x8;
- case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
- return PIXMAN_BE_b8g8r8a8;
- case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
- return PIXMAN_BE_x8r8g8b8;
- case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
- return PIXMAN_BE_a8r8g8b8;
- case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
- return PIXMAN_BE_r8g8b8x8;
- case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
- return PIXMAN_BE_r8g8b8a8;
- case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
- return PIXMAN_BE_x8b8g8r8;
- case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
- return PIXMAN_BE_a8b8g8r8;
- default:
- return 0;
- }
-}
-
static uint32_t calc_image_hostmem(pixman_format_code_t pformat,
uint32_t width, uint32_t height)
{
@@ -409,7 +285,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
res->format = c2d.format;
res->resource_id = c2d.resource_id;
- pformat = get_pixman_format(c2d.format);
+ pformat = virtio_gpu_get_pixman_format(c2d.format);
if (!pformat) {
qemu_log_mask(LOG_GUEST_ERROR,
"%s: host couldn't handle guest format %d\n",
@@ -420,7 +296,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
}
res->hostmem = calc_image_hostmem(pformat, c2d.width, c2d.height);
- if (res->hostmem + g->hostmem < g->conf.max_hostmem) {
+ if (res->hostmem + g->hostmem < g->conf_max_hostmem) {
res->image = pixman_image_create_bits(pformat,
c2d.width,
c2d.height,
@@ -442,7 +318,7 @@ static void virtio_gpu_resource_create_2d(VirtIOGPU *g,
static void virtio_gpu_disable_scanout(VirtIOGPU *g, int scanout_id)
{
- struct virtio_gpu_scanout *scanout = &g->scanout[scanout_id];
+ struct virtio_gpu_scanout *scanout = &g->parent_obj.scanout[scanout_id];
struct virtio_gpu_simple_resource *res;
DisplaySurface *ds = NULL;
@@ -474,7 +350,7 @@ static void virtio_gpu_resource_destroy(VirtIOGPU *g,
int i;
if (res->scanout_bitmask) {
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
if (res->scanout_bitmask & (1 << i)) {
virtio_gpu_disable_scanout(g, i);
}
@@ -604,7 +480,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
pixman_region_init_rect(&flush_region,
rf.r.x, rf.r.y, rf.r.width, rf.r.height);
- for (i = 0; i < g->conf.max_outputs; i++) {
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
struct virtio_gpu_scanout *scanout;
pixman_region16_t region, finalregion;
pixman_box16_t *extents;
@@ -612,7 +488,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
if (!(res->scanout_bitmask & (1 << i))) {
continue;
}
- scanout = &g->scanout[i];
+ scanout = &g->parent_obj.scanout[i];
pixman_region_init(&finalregion);
pixman_region_init_rect(&region, scanout->x, scanout->y,
@@ -622,7 +498,7 @@ static void virtio_gpu_resource_flush(VirtIOGPU *g,
pixman_region_translate(&finalregion, -scanout->x, -scanout->y);
extents = pixman_region_extents(&finalregion);
/* work out the area we need to update for each console */
- dpy_gfx_update(g->scanout[i].con,
+ dpy_gfx_update(g->parent_obj.scanout[i].con,
extents->x1, extents->y1,
extents->x2 - extents->x1,
extents->y2 - extents->y1);
@@ -653,14 +529,14 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
trace_virtio_gpu_cmd_set_scanout(ss.scanout_id, ss.resource_id,
ss.r.width, ss.r.height, ss.r.x, ss.r.y);
- if (ss.scanout_id >= g->conf.max_outputs) {
+ if (ss.scanout_id >= g->parent_obj.conf.max_outputs) {
qemu_log_mask(LOG_GUEST_ERROR, "%s: illegal scanout id specified %d",
__func__, ss.scanout_id);
cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_SCANOUT_ID;
return;
}
- g->enable = 1;
+ g->parent_obj.enable = 1;
if (ss.resource_id == 0) {
virtio_gpu_disable_scanout(g, ss.scanout_id);
return;
@@ -677,6 +553,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
if (ss.r.x > res->width ||
ss.r.y > res->height ||
+ ss.r.width < 16 ||
+ ss.r.height < 16 ||
ss.r.width > res->width ||
ss.r.height > res->height ||
ss.r.x + ss.r.width > res->width ||
@@ -689,7 +567,7 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
return;
}
- scanout = &g->scanout[ss.scanout_id];
+ scanout = &g->parent_obj.scanout[ss.scanout_id];
format = pixman_image_get_format(res->image);
bpp = DIV_ROUND_UP(PIXMAN_FORMAT_BPP(format), 8);
@@ -712,7 +590,8 @@ static void virtio_gpu_set_scanout(VirtIOGPU *g,
return;
}
pixman_image_unref(rect);
- dpy_gfx_replace_surface(g->scanout[ss.scanout_id].con, scanout->ds);
+ dpy_gfx_replace_surface(g->parent_obj.scanout[ss.scanout_id].con,
+ scanout->ds);
}
ores = virtio_gpu_find_resource(g, scanout->resource_id);
@@ -930,7 +809,7 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
while (!QTAILQ_EMPTY(&g->cmdq)) {
cmd = QTAILQ_FIRST(&g->cmdq);
- if (g->renderer_blocked) {
+ if (g->parent_obj.renderer_blocked) {
break;
}
@@ -939,14 +818,14 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
g, cmd);
QTAILQ_REMOVE(&g->cmdq, cmd, next);
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
g->stats.requests++;
}
if (!cmd->finished) {
QTAILQ_INSERT_TAIL(&g->fenceq, cmd, next);
g->inflight++;
- if (virtio_gpu_stats_enabled(g->conf)) {
+ if (virtio_gpu_stats_enabled(g->parent_obj.conf)) {
if (g->stats.max_inflight < g->inflight) {
g->stats.max_inflight = g->inflight;
}
@@ -958,6 +837,19 @@ void virtio_gpu_process_cmdq(VirtIOGPU *g)
}
}
+static void virtio_gpu_gl_unblock(VirtIOGPUBase *b)
+{
+ VirtIOGPU *g = VIRTIO_GPU(b);
+
+#ifdef CONFIG_VIRGL
+ if (g->renderer_reset) {
+ g->renderer_reset = false;
+ virtio_gpu_virgl_reset(g);
+ }
+#endif
+ virtio_gpu_process_cmdq(g);
+}
+
static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIOGPU *g = VIRTIO_GPU(vdev);
@@ -968,7 +860,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
}
#ifdef CONFIG_VIRGL
- if (!g->renderer_inited && g->use_virgl_renderer) {
+ if (!g->renderer_inited && g->parent_obj.use_virgl_renderer) {
virtio_gpu_virgl_init(g);
g->renderer_inited = true;
}
@@ -986,7 +878,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
virtio_gpu_process_cmdq(g);
#ifdef CONFIG_VIRGL
- if (g->use_virgl_renderer) {
+ if (g->parent_obj.use_virgl_renderer) {
virtio_gpu_virgl_fence_poll(g);
}
#endif
@@ -995,7 +887,7 @@ static void virtio_gpu_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
static void virtio_gpu_ctrl_bh(void *opaque)
{
VirtIOGPU *g = opaque;
- virtio_gpu_handle_ctrl(&g->parent_obj, g->ctrl_vq);
+ virtio_gpu_handle_ctrl(&g->parent_obj.parent_obj, g->ctrl_vq);
}
static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
@@ -1033,75 +925,9 @@ static void virtio_gpu_handle_cursor(VirtIODevice *vdev, VirtQueue *vq)
static void virtio_gpu_cursor_bh(void *opaque)
{
VirtIOGPU *g = opaque;
- virtio_gpu_handle_cursor(&g->parent_obj, g->cursor_vq);
+ virtio_gpu_handle_cursor(&g->parent_obj.parent_obj, g->cursor_vq);
}
-static void virtio_gpu_invalidate_display(void *opaque)
-{
-}
-
-static void virtio_gpu_update_display(void *opaque)
-{
-}
-
-static void virtio_gpu_text_update(void *opaque, console_ch_t *chardata)
-{
-}
-
-static int virtio_gpu_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
-{
- VirtIOGPU *g = opaque;
-
- if (idx >= g->conf.max_outputs) {
- return -1;
- }
-
- g->req_state[idx].x = info->xoff;
- g->req_state[idx].y = info->yoff;
- g->req_state[idx].width = info->width;
- g->req_state[idx].height = info->height;
-
- if (info->width && info->height) {
- g->enabled_output_bitmask |= (1 << idx);
- } else {
- g->enabled_output_bitmask &= ~(1 << idx);
- }
-
- /* send event to guest */
- virtio_gpu_notify_event(g, VIRTIO_GPU_EVENT_DISPLAY);
- return 0;
-}
-
-static void virtio_gpu_gl_block(void *opaque, bool block)
-{
- VirtIOGPU *g = opaque;
-
- if (block) {
- g->renderer_blocked++;
- } else {
- g->renderer_blocked--;
- }
- assert(g->renderer_blocked >= 0);
-
- if (g->renderer_blocked == 0) {
-#ifdef CONFIG_VIRGL
- if (g->renderer_reset) {
- g->renderer_reset = false;
- virtio_gpu_virgl_reset(g);
- }
-#endif
- virtio_gpu_process_cmdq(g);
- }
-}
-
-const GraphicHwOps virtio_gpu_ops = {
- .invalidate = virtio_gpu_invalidate_display,
- .gfx_update = virtio_gpu_update_display,
- .text_update = virtio_gpu_text_update,
- .ui_info = virtio_gpu_ui_info,
- .gl_block = virtio_gpu_gl_block,
-};
-
static const VMStateDescription vmstate_virtio_gpu_scanout = {
.name = "virtio-gpu-one-scanout",
.version_id = 1,
@@ -1124,10 +950,11 @@ static const VMStateDescription vmstate_virtio_gpu_scanouts = {
.name = "virtio-gpu-scanouts",
.version_id = 1,
.fields = (VMStateField[]) {
- VMSTATE_INT32(enable, struct VirtIOGPU),
- VMSTATE_UINT32_EQUAL(conf.max_outputs, struct VirtIOGPU, NULL),
- VMSTATE_STRUCT_VARRAY_UINT32(scanout, struct VirtIOGPU,
- conf.max_outputs, 1,
+ VMSTATE_INT32(parent_obj.enable, struct VirtIOGPU),
+ VMSTATE_UINT32_EQUAL(parent_obj.conf.max_outputs,
+ struct VirtIOGPU, NULL),
+ VMSTATE_STRUCT_VARRAY_UINT32(parent_obj.scanout, struct VirtIOGPU,
+ parent_obj.conf.max_outputs, 1,
vmstate_virtio_gpu_scanout,
struct virtio_gpu_scanout),
VMSTATE_END_OF_LIST()
@@ -1183,7 +1010,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
res->iov_cnt = qemu_get_be32(f);
/* allocate */
- pformat = get_pixman_format(res->format);
+ pformat = virtio_gpu_get_pixman_format(res->format);
if (!pformat) {
g_free(res);
return -EINVAL;
@@ -1242,8 +1069,8 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size,
/* load & apply scanout state */
vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1);
- for (i = 0; i < g->conf.max_outputs; i++) {
- scanout = &g->scanout[i];
+ for (i = 0; i < g->parent_obj.conf.max_outputs; i++) {
+ scanout = &g->parent_obj.scanout[i];
if (!scanout->resource_id) {
continue;
}
@@ -1272,84 +1099,35 @@ static void virtio_gpu_device_realize(DeviceState *qdev, Error **errp)
VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
VirtIOGPU *g = VIRTIO_GPU(qdev);
bool have_virgl;
- Error *local_err = NULL;
- int i;
-
- if (g->conf.max_outputs > VIRTIO_GPU_MAX_SCANOUTS) {
- error_setg(errp, "invalid max_outputs > %d", VIRTIO_GPU_MAX_SCANOUTS);
- return;
- }
- g->use_virgl_renderer = false;
#if !defined(CONFIG_VIRGL) || defined(HOST_WORDS_BIGENDIAN)
have_virgl = false;
#else
have_virgl = display_opengl;
#endif
if (!have_virgl) {
- g->conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
- }
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- error_setg(&g->migration_blocker, "virgl is not yet migratable");
- migrate_add_blocker(g->migration_blocker, &local_err);
- if (local_err) {
- error_propagate(errp, local_err);
- error_free(g->migration_blocker);
- return;
- }
- }
-
- g->virtio_config.num_scanouts = cpu_to_le32(g->conf.max_outputs);
- virtio_init(VIRTIO_DEVICE(g), "virtio-gpu", VIRTIO_ID_GPU,
- sizeof(struct virtio_gpu_config));
-
- g->req_state[0].width = g->conf.xres;
- g->req_state[0].height = g->conf.yres;
-
- if (virtio_gpu_virgl_enabled(g->conf)) {
- /* use larger control queue in 3d mode */
- g->ctrl_vq = virtio_add_queue(vdev, 256, virtio_gpu_handle_ctrl_cb);
- g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
-
+ g->parent_obj.conf.flags &= ~(1 << VIRTIO_GPU_FLAG_VIRGL_ENABLED);
+ } else {
#if defined(CONFIG_VIRGL)
- g->virtio_config.num_capsets = virtio_gpu_virgl_get_num_capsets(g);
-#else
- g->virtio_config.num_capsets = 0;
+ VIRTIO_GPU_BASE(g)->virtio_config.num_capsets =
+ virtio_gpu_virgl_get_num_capsets(g);
#endif
- } else {
- g->ctrl_vq = virtio_add_queue(vdev, 64, virtio_gpu_handle_ctrl_cb);
- g->cursor_vq = virtio_add_queue(vdev, 16, virtio_gpu_handle_cursor_cb);
}
+ if (!virtio_gpu_base_device_realize(qdev,
+ virtio_gpu_handle_ctrl_cb,
+ virtio_gpu_handle_cursor_cb,
+ errp)) {
+ return;
+ }
+
+ g->ctrl_vq = virtio_get_queue(vdev, 0);
+ g->cursor_vq = virtio_get_queue(vdev, 1);
g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g);
g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g);
QTAILQ_INIT(&g->reslist);
QTAILQ_INIT(&g->cmdq);
QTAILQ_INIT(&g->fenceq);
-
- g->enabled_output_bitmask = 1;
-
- for (i = 0; i < g->conf.max_outputs; i++) {
- g->scanout[i].con =
- graphic_console_init(DEVICE(g), i, &virtio_gpu_ops, g);
- if (i > 0) {
- dpy_gfx_replace_surface(g->scanout[i].con, NULL);
- }
- }
-}
-
-static void virtio_gpu_device_unrealize(DeviceState *qdev, Error **errp)
-{
- VirtIOGPU *g = VIRTIO_GPU(qdev);
- if (g->migration_blocker) {
- migrate_del_blocker(g->migration_blocker);
- error_free(g->migration_blocker);
- }
-}
-
-static void virtio_gpu_instance_init(Object *obj)
-{
}
static void virtio_gpu_reset(VirtIODevice *vdev)
@@ -1357,21 +1135,16 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
VirtIOGPU *g = VIRTIO_GPU(vdev);
struct virtio_gpu_simple_resource *res, *tmp;
struct virtio_gpu_ctrl_command *cmd;
- int i;
- g->enable = 0;
+#ifdef CONFIG_VIRGL
+ if (g->parent_obj.use_virgl_renderer) {
+ virtio_gpu_virgl_reset(g);
+ }
+#endif
QTAILQ_FOREACH_SAFE(res, &g->reslist, next, tmp) {
virtio_gpu_resource_destroy(g, res);
}
- for (i = 0; i < g->conf.max_outputs; i++) {
- g->scanout[i].resource_id = 0;
- g->scanout[i].width = 0;
- g->scanout[i].height = 0;
- g->scanout[i].x = 0;
- g->scanout[i].y = 0;
- g->scanout[i].ds = NULL;
- }
while (!QTAILQ_EMPTY(&g->cmdq)) {
cmd = QTAILQ_FIRST(&g->cmdq);
@@ -1387,15 +1160,37 @@ static void virtio_gpu_reset(VirtIODevice *vdev)
}
#ifdef CONFIG_VIRGL
- if (g->use_virgl_renderer) {
- if (g->renderer_blocked) {
+ if (g->parent_obj.use_virgl_renderer) {
+ if (g->parent_obj.renderer_blocked) {
g->renderer_reset = true;
} else {
virtio_gpu_virgl_reset(g);
}
- g->use_virgl_renderer = 0;
+ g->parent_obj.use_virgl_renderer = false;
}
#endif
+
+ virtio_gpu_base_reset(VIRTIO_GPU_BASE(vdev));
+}
+
+static void
+virtio_gpu_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+
+ memcpy(config, &g->virtio_config, sizeof(g->virtio_config));
+}
+
+static void
+virtio_gpu_set_config(VirtIODevice *vdev, const uint8_t *config)
+{
+ VirtIOGPUBase *g = VIRTIO_GPU_BASE(vdev);
+ const struct virtio_gpu_config *vgconfig =
+ (const struct virtio_gpu_config *)config;
+
+ if (vgconfig->events_clear) {
+ g->virtio_config.events_read &= ~vgconfig->events_clear;
+ }
}
/*
@@ -1426,18 +1221,15 @@ static const VMStateDescription vmstate_virtio_gpu = {
};
static Property virtio_gpu_properties[] = {
- DEFINE_PROP_UINT32("max_outputs", VirtIOGPU, conf.max_outputs, 1),
- DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf.max_hostmem, 256 * MiB),
+ VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
+ DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem,
+ 256 * MiB),
#ifdef CONFIG_VIRGL
- DEFINE_PROP_BIT("virgl", VirtIOGPU, conf.flags,
+ DEFINE_PROP_BIT("virgl", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_VIRGL_ENABLED, true),
- DEFINE_PROP_BIT("stats", VirtIOGPU, conf.flags,
+ DEFINE_PROP_BIT("stats", VirtIOGPU, parent_obj.conf.flags,
VIRTIO_GPU_FLAG_STATS_ENABLED, false),
#endif
- DEFINE_PROP_BIT("edid", VirtIOGPU, conf.flags,
- VIRTIO_GPU_FLAG_EDID_ENABLED, false),
- DEFINE_PROP_UINT32("xres", VirtIOGPU, conf.xres, 1024),
- DEFINE_PROP_UINT32("yres", VirtIOGPU, conf.yres, 768),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1445,27 +1237,22 @@ static void virtio_gpu_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+ VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);
+ vgc->gl_unblock = virtio_gpu_gl_unblock;
vdc->realize = virtio_gpu_device_realize;
- vdc->unrealize = virtio_gpu_device_unrealize;
+ vdc->reset = virtio_gpu_reset;
vdc->get_config = virtio_gpu_get_config;
vdc->set_config = virtio_gpu_set_config;
- vdc->get_features = virtio_gpu_get_features;
- vdc->set_features = virtio_gpu_set_features;
- vdc->reset = virtio_gpu_reset;
-
- set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_gpu_properties;
dc->vmsd = &vmstate_virtio_gpu;
- dc->hotpluggable = false;
+ dc->props = virtio_gpu_properties;
}
static const TypeInfo virtio_gpu_info = {
.name = TYPE_VIRTIO_GPU,
- .parent = TYPE_VIRTIO_DEVICE,
+ .parent = TYPE_VIRTIO_GPU_BASE,
.instance_size = sizeof(VirtIOGPU),
- .instance_init = virtio_gpu_instance_init,
.class_init = virtio_gpu_class_init,
};
@@ -1475,26 +1262,3 @@ static void virtio_register_types(void)
}
type_init(virtio_register_types)
-
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctrl_hdr) != 24);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_update_cursor) != 56);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_unref) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_2d) != 40);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_set_scanout) != 48);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_flush) != 48);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_to_host_2d) != 56);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_mem_entry) != 16);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_attach_backing) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_detach_backing) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_display_info) != 408);
-
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_transfer_host_3d) != 72);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resource_create_3d) != 72);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_create) != 96);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_destroy) != 24);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_ctx_resource) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_cmd_submit) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset_info) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset_info) != 40);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_get_capset) != 32);
-QEMU_BUILD_BUG_ON(sizeof(struct virtio_gpu_resp_capset) != 24);
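
A condensed view of the pattern virtio-gpu.c follows after this split (illustrative sketch only, assembled from the hunks above; the "my_gpu_*" names are placeholders, not part of the patch): the subclass lets virtio_gpu_base_device_realize() create the virtio device, virtqueues and consoles, then looks the queues up, and hooks the gl_unblock class callback instead of open-coding the unblock path.

    /* illustrative sketch, not part of the patch */
    static void my_gpu_device_realize(DeviceState *qdev, Error **errp)
    {
        VirtIODevice *vdev = VIRTIO_DEVICE(qdev);
        VirtIOGPU *g = VIRTIO_GPU(qdev);

        /* the base class registers the device, ctrl/cursor queues and consoles */
        if (!virtio_gpu_base_device_realize(qdev,
                                            virtio_gpu_handle_ctrl_cb,
                                            virtio_gpu_handle_cursor_cb,
                                            errp)) {
            return;
        }
        g->ctrl_vq = virtio_get_queue(vdev, 0);
        g->cursor_vq = virtio_get_queue(vdev, 1);
    }

    static void my_gpu_class_init(ObjectClass *klass, void *data)
    {
        VirtIOGPUBaseClass *vgc = VIRTIO_GPU_BASE_CLASS(klass);

        /* invoked by the base object when the console unblocks the renderer */
        vgc->gl_unblock = virtio_gpu_gl_unblock;
    }
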
diff --git a/hw/display/virtio-vga.c b/hw/display/virtio-vga.c
index 5d57bf5b0c..67e34935c2 100644
--- a/hw/display/virtio-vga.c
+++ b/hw/display/virtio-vga.c
@@ -1,63 +1,42 @@
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
-#include "vga_int.h"
-#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"
+#include "virtio-vga.h"
-/*
- * virtio-vga: This extends VirtioPCIProxy.
- */
-#define TYPE_VIRTIO_VGA "virtio-vga"
-#define VIRTIO_VGA(obj) \
- OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
-#define VIRTIO_VGA_GET_CLASS(obj) \
- OBJECT_GET_CLASS(VirtIOVGAClass, obj, TYPE_VIRTIO_VGA)
-#define VIRTIO_VGA_CLASS(klass) \
- OBJECT_CLASS_CHECK(VirtIOVGAClass, klass, TYPE_VIRTIO_VGA)
-
-typedef struct VirtIOVGA {
- VirtIOPCIProxy parent_obj;
- VirtIOGPU vdev;
- VGACommonState vga;
- MemoryRegion vga_mrs[3];
-} VirtIOVGA;
-
-typedef struct VirtIOVGAClass {
- VirtioPCIClass parent_class;
- DeviceReset parent_reset;
-} VirtIOVGAClass;
-
-static void virtio_vga_invalidate_display(void *opaque)
+static void virtio_vga_base_invalidate_display(void *opaque)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
- virtio_gpu_ops.invalidate(&vvga->vdev);
+ if (g->enable) {
+ virtio_gpu_ops.invalidate(g);
} else {
vvga->vga.hw_ops->invalidate(&vvga->vga);
}
}
-static void virtio_vga_update_display(void *opaque)
+static void virtio_vga_base_update_display(void *opaque)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
- virtio_gpu_ops.gfx_update(&vvga->vdev);
+ if (g->enable) {
+ virtio_gpu_ops.gfx_update(g);
} else {
vvga->vga.hw_ops->gfx_update(&vvga->vga);
}
}
-static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
+static void virtio_vga_base_text_update(void *opaque, console_ch_t *chardata)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
- if (vvga->vdev.enable) {
+ if (g->enable) {
if (virtio_gpu_ops.text_update) {
- virtio_gpu_ops.text_update(&vvga->vdev, chardata);
+ virtio_gpu_ops.text_update(g, chardata);
}
} else {
if (vvga->vga.hw_ops->text_update) {
@@ -66,49 +45,52 @@ static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
}
}
-static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
+static int virtio_vga_base_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
if (virtio_gpu_ops.ui_info) {
- return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info);
+ return virtio_gpu_ops.ui_info(g, idx, info);
}
return -1;
}
-static void virtio_vga_gl_block(void *opaque, bool block)
+static void virtio_vga_base_gl_block(void *opaque, bool block)
{
- VirtIOVGA *vvga = opaque;
+ VirtIOVGABase *vvga = opaque;
+ VirtIOGPUBase *g = vvga->vgpu;
if (virtio_gpu_ops.gl_block) {
- virtio_gpu_ops.gl_block(&vvga->vdev, block);
+ virtio_gpu_ops.gl_block(g, block);
}
}
-static const GraphicHwOps virtio_vga_ops = {
- .invalidate = virtio_vga_invalidate_display,
- .gfx_update = virtio_vga_update_display,
- .text_update = virtio_vga_text_update,
- .ui_info = virtio_vga_ui_info,
- .gl_block = virtio_vga_gl_block,
+static const GraphicHwOps virtio_vga_base_ops = {
+ .invalidate = virtio_vga_base_invalidate_display,
+ .gfx_update = virtio_vga_base_update_display,
+ .text_update = virtio_vga_base_text_update,
+ .ui_info = virtio_vga_base_ui_info,
+ .gl_block = virtio_vga_base_gl_block,
};
-static const VMStateDescription vmstate_virtio_vga = {
+static const VMStateDescription vmstate_virtio_vga_base = {
.name = "virtio-vga",
.version_id = 2,
.minimum_version_id = 2,
.fields = (VMStateField[]) {
/* no pci stuff here, saving the virtio device will handle that */
- VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState),
+ VMSTATE_STRUCT(vga, VirtIOVGABase, 0,
+ vmstate_vga_common, VGACommonState),
VMSTATE_END_OF_LIST()
}
};
/* VGA device wrapper around PCI device around virtio GPU */
-static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+static void virtio_vga_base_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
- VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev);
- VirtIOGPU *g = &vvga->vdev;
+ VirtIOVGABase *vvga = VIRTIO_VGA_BASE(vpci_dev);
+ VirtIOGPUBase *g = vvga->vgpu;
VGACommonState *vga = &vvga->vga;
Error *err = NULL;
uint32_t offset;
@@ -168,7 +150,7 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
vvga->vga_mrs, true, false);
vga->con = g->scanout[0].con;
- graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga);
+ graphic_console_set_hwops(vga->con, &virtio_vga_base_ops, vvga);
for (i = 0; i < g->conf.max_outputs; i++) {
object_property_set_link(OBJECT(g->scanout[i].con),
@@ -177,10 +159,10 @@ static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
}
}
-static void virtio_vga_reset(DeviceState *dev)
+static void virtio_vga_base_reset(DeviceState *dev)
{
- VirtIOVGAClass *klass = VIRTIO_VGA_GET_CLASS(dev);
- VirtIOVGA *vvga = VIRTIO_VGA(dev);
+ VirtIOVGABaseClass *klass = VIRTIO_VGA_BASE_GET_CLASS(dev);
+ VirtIOVGABase *vvga = VIRTIO_VGA_BASE(dev);
/* reset virtio-gpu */
klass->parent_reset(dev);
@@ -190,48 +172,70 @@ static void virtio_vga_reset(DeviceState *dev)
vga_dirty_log_start(&vvga->vga);
}
-static Property virtio_vga_properties[] = {
+static Property virtio_vga_base_properties[] = {
DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
DEFINE_PROP_END_OF_LIST(),
};
-static void virtio_vga_class_init(ObjectClass *klass, void *data)
+static void virtio_vga_base_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
- VirtIOVGAClass *v = VIRTIO_VGA_CLASS(klass);
+ VirtIOVGABaseClass *v = VIRTIO_VGA_BASE_CLASS(klass);
PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
- dc->props = virtio_vga_properties;
- dc->vmsd = &vmstate_virtio_vga;
+ dc->props = virtio_vga_base_properties;
+ dc->vmsd = &vmstate_virtio_vga_base;
dc->hotpluggable = false;
- device_class_set_parent_reset(dc, virtio_vga_reset,
+ device_class_set_parent_reset(dc, virtio_vga_base_reset,
&v->parent_reset);
- k->realize = virtio_vga_realize;
+ k->realize = virtio_vga_base_realize;
pcidev_k->romfile = "vgabios-virtio.bin";
pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA;
}
+static TypeInfo virtio_vga_base_info = {
+ .name = TYPE_VIRTIO_VGA_BASE,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(struct VirtIOVGABase),
+ .class_size = sizeof(struct VirtIOVGABaseClass),
+ .class_init = virtio_vga_base_class_init,
+ .abstract = true,
+};
+
+#define TYPE_VIRTIO_VGA "virtio-vga"
+
+#define VIRTIO_VGA(obj) \
+ OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
+
+typedef struct VirtIOVGA {
+ VirtIOVGABase parent_obj;
+
+ VirtIOGPU vdev;
+} VirtIOVGA;
+
static void virtio_vga_inst_initfn(Object *obj)
{
VirtIOVGA *dev = VIRTIO_VGA(obj);
virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
TYPE_VIRTIO_GPU);
+ VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
}
+
static VirtioPCIDeviceTypeInfo virtio_vga_info = {
.generic_name = TYPE_VIRTIO_VGA,
+ .parent = TYPE_VIRTIO_VGA_BASE,
.instance_size = sizeof(struct VirtIOVGA),
.instance_init = virtio_vga_inst_initfn,
- .class_size = sizeof(struct VirtIOVGAClass),
- .class_init = virtio_vga_class_init,
};
static void virtio_vga_register_types(void)
{
+ type_register_static(&virtio_vga_base_info);
virtio_pci_types_register(&virtio_vga_info);
}
diff --git a/hw/display/virtio-vga.h b/hw/display/virtio-vga.h
new file mode 100644
index 0000000000..c10bf390aa
--- /dev/null
+++ b/hw/display/virtio-vga.h
@@ -0,0 +1,32 @@
+#ifndef VIRTIO_VGA_H_
+#define VIRTIO_VGA_H_
+
+#include "hw/virtio/virtio-gpu-pci.h"
+#include "vga_int.h"
+
+/*
+ * virtio-vga-base: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_VGA_BASE "virtio-vga-base"
+#define VIRTIO_VGA_BASE(obj) \
+ OBJECT_CHECK(VirtIOVGABase, (obj), TYPE_VIRTIO_VGA_BASE)
+#define VIRTIO_VGA_BASE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOVGABaseClass, obj, TYPE_VIRTIO_VGA_BASE)
+#define VIRTIO_VGA_BASE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOVGABaseClass, klass, TYPE_VIRTIO_VGA_BASE)
+
+typedef struct VirtIOVGABase {
+ VirtIOPCIProxy parent_obj;
+
+ VirtIOGPUBase *vgpu;
+ VGACommonState vga;
+ MemoryRegion vga_mrs[3];
+} VirtIOVGABase;
+
+typedef struct VirtIOVGABaseClass {
+ VirtioPCIClass parent_class;
+
+ DeviceReset parent_reset;
+} VirtIOVGABaseClass;
+
+#endif /* VIRTIO_VGA_H_ */
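
The new VirtIOVGABase keeps only a VirtIOGPUBase pointer, so the VGA compatibility wrapper can be shared by virtio-vga and a vhost-user based variant. A concrete subtype embeds its GPU device and points the base at it, as virtio-vga does above; a sketch with placeholder names ("MyVGA", "MY_VGA", "my_vga_inst_initfn" are illustrative, not part of the patch):

    typedef struct MyVGA {
        VirtIOVGABase parent_obj;
        VirtIOGPU vdev;              /* or VhostUserGPU for the vhost-user flavour */
    } MyVGA;

    static void my_vga_inst_initfn(Object *obj)
    {
        MyVGA *dev = MY_VGA(obj);

        virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                    TYPE_VIRTIO_GPU);
        /* let the shared VGA code reach the GPU state through the base pointer */
        VIRTIO_VGA_BASE(dev)->vgpu = VIRTIO_GPU_BASE(&dev->vdev);
    }
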
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 553319c7ac..4ca5b2551e 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -96,6 +96,7 @@ typedef enum VhostUserRequest {
VHOST_USER_POSTCOPY_END = 30,
VHOST_USER_GET_INFLIGHT_FD = 31,
VHOST_USER_SET_INFLIGHT_FD = 32,
+ VHOST_USER_GPU_SET_SOCKET = 33,
VHOST_USER_MAX
} VhostUserRequest;
@@ -353,6 +354,16 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg,
return 0;
}
+int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd)
+{
+ VhostUserMsg msg = {
+ .hdr.request = VHOST_USER_GPU_SET_SOCKET,
+ .hdr.flags = VHOST_USER_VERSION,
+ };
+
+ return vhost_user_write(dev, &msg, &fd, 1);
+}
+
static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base,
struct vhost_log *log)
{
diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h
index d6632a18e6..6f6670783f 100644
--- a/include/hw/virtio/vhost-backend.h
+++ b/include/hw/virtio/vhost-backend.h
@@ -170,4 +170,6 @@ int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev,
int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *imsg);
+int vhost_user_gpu_set_socket(struct vhost_dev *dev, int fd);
+
#endif /* VHOST_BACKEND_H */
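
vhost_user_gpu_set_socket() sends VHOST_USER_GPU_SET_SOCKET with a file descriptor attached, giving the vhost-user-gpu backend a dedicated channel for display updates. A hypothetical caller sketch; the socketpair handling, error paths and fd ownership shown here are assumptions for illustration, not taken from this patch:

    /* hypothetical usage sketch: returns the QEMU-side fd, or -1 on error */
    static int example_set_gpu_socket(struct vhost_dev *dev, Error **errp)
    {
        int sv[2];

        if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) < 0) {
            error_setg_errno(errp, errno, "socketpair() failed");
            return -1;
        }
        if (vhost_user_gpu_set_socket(dev, sv[1]) < 0) {
            error_setg(errp, "Failed to set the GPU socket");
            close(sv[0]);
            close(sv[1]);
            return -1;
        }
        close(sv[1]);   /* sent to the backend as ancillary data */
        return sv[0];   /* kept on the QEMU side for the GPU protocol */
    }
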
diff --git a/include/hw/virtio/virtio-gpu-bswap.h b/include/hw/virtio/virtio-gpu-bswap.h
new file mode 100644
index 0000000000..38d12160f6
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-bswap.h
@@ -0,0 +1,61 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_BSWAP_H
+#define HW_VIRTIO_GPU_BSWAP_H
+
+#include "qemu/bswap.h"
+
+static inline void
+virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr)
+{
+ le32_to_cpus(&hdr->type);
+ le32_to_cpus(&hdr->flags);
+ le64_to_cpus(&hdr->fence_id);
+ le32_to_cpus(&hdr->ctx_id);
+ le32_to_cpus(&hdr->padding);
+}
+
+static inline void
+virtio_gpu_bswap_32(void *ptr, size_t size)
+{
+#ifdef HOST_WORDS_BIGENDIAN
+
+ size_t i;
+ struct virtio_gpu_ctrl_hdr *hdr = (struct virtio_gpu_ctrl_hdr *) ptr;
+
+ virtio_gpu_ctrl_hdr_bswap(hdr);
+
+ i = sizeof(struct virtio_gpu_ctrl_hdr);
+ while (i < size) {
+ le32_to_cpus((uint32_t *)(ptr + i));
+ i = i + sizeof(uint32_t);
+ }
+
+#endif
+}
+
+static inline void
+virtio_gpu_t2d_bswap(struct virtio_gpu_transfer_to_host_2d *t2d)
+{
+ virtio_gpu_ctrl_hdr_bswap(&t2d->hdr);
+ le32_to_cpus(&t2d->r.x);
+ le32_to_cpus(&t2d->r.y);
+ le32_to_cpus(&t2d->r.width);
+ le32_to_cpus(&t2d->r.height);
+ le64_to_cpus(&t2d->offset);
+ le32_to_cpus(&t2d->resource_id);
+ le32_to_cpus(&t2d->padding);
+}
+
+#endif
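
These helpers convert the little-endian wire format in place; on little-endian hosts virtio_gpu_bswap_32() compiles away to nothing. The usage pattern, as seen in virtio_gpu_get_edid() above:

    struct virtio_gpu_cmd_get_edid get_edid;

    VIRTIO_GPU_FILL_CMD(get_edid);                      /* copy out of the virtqueue */
    virtio_gpu_bswap_32(&get_edid, sizeof(get_edid));   /* LE wire format -> host order */
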
diff --git a/include/hw/virtio/virtio-gpu-pci.h b/include/hw/virtio/virtio-gpu-pci.h
new file mode 100644
index 0000000000..2f69b5a9cc
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-pci.h
@@ -0,0 +1,40 @@
+/*
+ * Virtio GPU PCI Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_PCI_H
+#define HW_VIRTIO_GPU_PCI_H
+
+#include "hw/virtio/virtio-pci.h"
+#include "hw/virtio/virtio-gpu.h"
+
+typedef struct VirtIOGPUPCIBase VirtIOGPUPCIBase;
+
+/*
+ * virtio-gpu-pci-base: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_GPU_PCI_BASE "virtio-gpu-pci-base"
+#define VIRTIO_GPU_PCI_BASE(obj) \
+ OBJECT_CHECK(VirtIOGPUPCIBase, (obj), TYPE_VIRTIO_GPU_PCI_BASE)
+
+struct VirtIOGPUPCIBase {
+ VirtIOPCIProxy parent_obj;
+ VirtIOGPUBase *vgpu;
+};
+
+/* to share between PCI and VGA */
+#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
+ DEFINE_PROP_BIT("ioeventfd", _state, flags, \
+ VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
+ DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)
+
+#endif /* HW_VIRTIO_GPU_PCI_H */
diff --git a/include/hw/virtio/virtio-gpu-pixman.h b/include/hw/virtio/virtio-gpu-pixman.h
new file mode 100644
index 0000000000..4dba782758
--- /dev/null
+++ b/include/hw/virtio/virtio-gpu-pixman.h
@@ -0,0 +1,45 @@
+/*
+ * Virtio GPU Device
+ *
+ * Copyright Red Hat, Inc. 2013-2014
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Gerd Hoffmann <kraxel@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef HW_VIRTIO_GPU_PIXMAN_H
+#define HW_VIRTIO_GPU_PIXMAN_H
+
+#include "ui/qemu-pixman.h"
+#include "standard-headers/linux/virtio_gpu.h"
+
+static inline pixman_format_code_t
+virtio_gpu_get_pixman_format(uint32_t virtio_gpu_format)
+{
+ switch (virtio_gpu_format) {
+ case VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM:
+ return PIXMAN_BE_b8g8r8x8;
+ case VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM:
+ return PIXMAN_BE_b8g8r8a8;
+ case VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM:
+ return PIXMAN_BE_x8r8g8b8;
+ case VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM:
+ return PIXMAN_BE_a8r8g8b8;
+ case VIRTIO_GPU_FORMAT_R8G8B8X8_UNORM:
+ return PIXMAN_BE_r8g8b8x8;
+ case VIRTIO_GPU_FORMAT_R8G8B8A8_UNORM:
+ return PIXMAN_BE_r8g8b8a8;
+ case VIRTIO_GPU_FORMAT_X8B8G8R8_UNORM:
+ return PIXMAN_BE_x8b8g8r8;
+ case VIRTIO_GPU_FORMAT_A8B8G8R8_UNORM:
+ return PIXMAN_BE_a8b8g8r8;
+ default:
+ return 0;
+ }
+}
+
+#endif
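
virtio_gpu_get_pixman_format() maps the virtio-gpu wire formats onto pixman codes and returns 0 for anything it does not know, so callers must check the result before creating an image, as virtio_gpu_resource_create_2d() does above (sketch reconstructed from the surrounding hunk; the error-code assignment is assumed from context):

    pixman_format_code_t pformat;

    pformat = virtio_gpu_get_pixman_format(c2d.format);
    if (!pformat) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: host couldn't handle guest format %d\n",
                      __func__, c2d.format);
        cmd->error = VIRTIO_GPU_RESP_ERR_INVALID_PARAMETER;
        return;
    }
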
diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h
index 60425c5d58..8ecac1987a 100644
--- a/include/hw/virtio/virtio-gpu.h
+++ b/include/hw/virtio/virtio-gpu.h
@@ -19,13 +19,24 @@
#include "ui/console.h"
#include "hw/virtio/virtio.h"
#include "qemu/log.h"
+#include "sysemu/vhost-user-backend.h"
#include "standard-headers/linux/virtio_gpu.h"
+#define TYPE_VIRTIO_GPU_BASE "virtio-gpu-base"
+#define VIRTIO_GPU_BASE(obj) \
+ OBJECT_CHECK(VirtIOGPUBase, (obj), TYPE_VIRTIO_GPU_BASE)
+#define VIRTIO_GPU_BASE_GET_CLASS(obj) \
+ OBJECT_GET_CLASS(VirtIOGPUBaseClass, obj, TYPE_VIRTIO_GPU_BASE)
+#define VIRTIO_GPU_BASE_CLASS(klass) \
+ OBJECT_CLASS_CHECK(VirtIOGPUBaseClass, klass, TYPE_VIRTIO_GPU_BASE)
+
#define TYPE_VIRTIO_GPU "virtio-gpu-device"
#define VIRTIO_GPU(obj) \
OBJECT_CHECK(VirtIOGPU, (obj), TYPE_VIRTIO_GPU)
+#define TYPE_VHOST_USER_GPU "vhost-user-gpu"
+
#define VIRTIO_ID_GPU 16
struct virtio_gpu_simple_resource {
@@ -58,7 +69,7 @@ struct virtio_gpu_requested_state {
int x, y;
};
-enum virtio_gpu_conf_flags {
+enum virtio_gpu_base_conf_flags {
VIRTIO_GPU_FLAG_VIRGL_ENABLED = 1,
VIRTIO_GPU_FLAG_STATS_ENABLED,
VIRTIO_GPU_FLAG_EDID_ENABLED,
@@ -71,8 +82,7 @@ enum virtio_gpu_conf_flags {
#define virtio_gpu_edid_enabled(_cfg) \
(_cfg.flags & (1 << VIRTIO_GPU_FLAG_EDID_ENABLED))
-struct virtio_gpu_conf {
- uint64_t max_hostmem;
+struct virtio_gpu_base_conf {
uint32_t max_outputs;
uint32_t flags;
uint32_t xres;
@@ -88,31 +98,55 @@ struct virtio_gpu_ctrl_command {
QTAILQ_ENTRY(virtio_gpu_ctrl_command) next;
};
-typedef struct VirtIOGPU {
+typedef struct VirtIOGPUBase {
VirtIODevice parent_obj;
- QEMUBH *ctrl_bh;
- QEMUBH *cursor_bh;
+ Error *migration_blocker;
+
+ struct virtio_gpu_base_conf conf;
+ struct virtio_gpu_config virtio_config;
+
+ bool use_virgl_renderer;
+ int renderer_blocked;
+ int enable;
+
+ struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
+
+ int enabled_output_bitmask;
+ struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
+} VirtIOGPUBase;
+
+typedef struct VirtIOGPUBaseClass {
+ VirtioDeviceClass parent;
+
+ void (*gl_unblock)(VirtIOGPUBase *g);
+} VirtIOGPUBaseClass;
+
+#define VIRTIO_GPU_BASE_PROPERTIES(_state, _conf) \
+ DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \
+ DEFINE_PROP_BIT("edid", _state, _conf.flags, \
+ VIRTIO_GPU_FLAG_EDID_ENABLED, false), \
+ DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024), \
+ DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768)
+
+typedef struct VirtIOGPU {
+ VirtIOGPUBase parent_obj;
+
+ uint64_t conf_max_hostmem;
+
VirtQueue *ctrl_vq;
VirtQueue *cursor_vq;
- int enable;
+ QEMUBH *ctrl_bh;
+ QEMUBH *cursor_bh;
QTAILQ_HEAD(, virtio_gpu_simple_resource) reslist;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) cmdq;
QTAILQ_HEAD(, virtio_gpu_ctrl_command) fenceq;
- struct virtio_gpu_scanout scanout[VIRTIO_GPU_MAX_SCANOUTS];
- struct virtio_gpu_requested_state req_state[VIRTIO_GPU_MAX_SCANOUTS];
-
- struct virtio_gpu_conf conf;
uint64_t hostmem;
- int enabled_output_bitmask;
- struct virtio_gpu_config virtio_config;
- bool use_virgl_renderer;
bool renderer_inited;
- int renderer_blocked;
bool renderer_reset;
QEMUTimer *fence_poll;
QEMUTimer *print_stats;
@@ -124,17 +158,19 @@ typedef struct VirtIOGPU {
uint32_t req_3d;
uint32_t bytes_3d;
} stats;
-
- Error *migration_blocker;
} VirtIOGPU;
-extern const GraphicHwOps virtio_gpu_ops;
+typedef struct VhostUserGPU {
+ VirtIOGPUBase parent_obj;
+
+ VhostUserBackend *vhost;
+ int vhost_gpu_fd; /* closed by the chardev */
+ CharBackend vhost_chr;
+ QemuDmaBuf dmabuf[VIRTIO_GPU_MAX_SCANOUTS];
+ bool backend_blocked;
+} VhostUserGPU;
-/* to share between PCI and VGA */
-#define DEFINE_VIRTIO_GPU_PCI_PROPERTIES(_state) \
- DEFINE_PROP_BIT("ioeventfd", _state, flags, \
- VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false), \
- DEFINE_PROP_UINT32("vectors", _state, nvectors, 3)
+extern const GraphicHwOps virtio_gpu_ops;
#define VIRTIO_GPU_FILL_CMD(out) do { \
size_t s; \
@@ -148,6 +184,15 @@ extern const GraphicHwOps virtio_gpu_ops;
} \
} while (0)
+/* virtio-gpu-base.c */
+bool virtio_gpu_base_device_realize(DeviceState *qdev,
+ VirtIOHandleOutput ctrl_cb,
+ VirtIOHandleOutput cursor_cb,
+ Error **errp);
+void virtio_gpu_base_reset(VirtIOGPUBase *g);
+void virtio_gpu_base_fill_display_info(VirtIOGPUBase *g,
+ struct virtio_gpu_resp_display_info *dpy_info);
+
/* virtio-gpu.c */
void virtio_gpu_ctrl_response(VirtIOGPU *g,
struct virtio_gpu_ctrl_command *cmd,
@@ -175,4 +220,5 @@ void virtio_gpu_virgl_fence_poll(VirtIOGPU *g);
void virtio_gpu_virgl_reset(VirtIOGPU *g);
int virtio_gpu_virgl_init(VirtIOGPU *g);
int virtio_gpu_virgl_get_num_capsets(VirtIOGPU *g);
+
#endif
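
The header now splits the state three ways: VirtIOGPUBase carries the shared configuration, virtio config space, scanouts and renderer-block state; VirtIOGPU keeps the 2D/virgl implementation details (queues, BHs, resource and command lists, max_hostmem); VhostUserGPU holds the vhost-user backend state. Derived devices pick up the common properties through the new macro and add their own, as the virtio-gpu.c hunk above shows:

    static Property virtio_gpu_properties[] = {
        VIRTIO_GPU_BASE_PROPERTIES(VirtIOGPU, parent_obj.conf),
        DEFINE_PROP_SIZE("max_hostmem", VirtIOGPU, conf_max_hostmem, 256 * MiB),
        DEFINE_PROP_END_OF_LIST(),
    };
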
diff --git a/rules.mak b/rules.mak
index df45bcffb4..967295dd2b 100644
--- a/rules.mak
+++ b/rules.mak
@@ -144,7 +144,7 @@ cc-option = $(if $(shell $(CC) $1 $2 -S -o /dev/null -xc /dev/null \
cc-c-option = $(if $(shell $(CC) $1 $2 -c -o /dev/null -xc /dev/null \
>/dev/null 2>&1 && echo OK), $2, $3)
-VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig%
+VPATH_SUFFIXES = %.c %.h %.S %.cc %.cpp %.m %.mak %.texi %.sh %.rc Kconfig% %.json.in
set-vpath = $(if $1,$(foreach PATTERN,$(VPATH_SUFFIXES),$(eval vpath $(PATTERN) $1)))
# install-prog list, dir
@@ -392,3 +392,10 @@ TEXI2MAN = $(call quiet-command, \
$(call TEXI2MAN)
%.8:
$(call TEXI2MAN)
+
+GEN_SUBST = $(call quiet-command, \
+ sed -e "s!@libexecdir@!$(libexecdir)!g" < $< > $@, \
+ "GEN","$@")
+
+%.json: %.json.in
+ $(call GEN_SUBST)
diff --git a/ui/spice-app.c b/ui/spice-app.c
index 925b27b708..30541b1022 100644
--- a/ui/spice-app.c
+++ b/ui/spice-app.c
@@ -157,9 +157,10 @@ static void spice_app_display_early_init(DisplayOptions *opts)
qemu_opt_set(qopts, "addr", sock_path, &error_abort);
qemu_opt_set(qopts, "image-compression", "off", &error_abort);
qemu_opt_set(qopts, "streaming-video", "off", &error_abort);
+#ifdef CONFIG_OPENGL
qemu_opt_set(qopts, "gl", opts->has_gl ? "on" : "off", &error_abort);
display_opengl = opts->has_gl;
-
+#endif
be->u.spiceport.data->fqdn = g_strdup("org.qemu.monitor.qmp.0");
qemu_chardev_new("org.qemu.monitor.qmp", TYPE_CHARDEV_SPICEPORT,
be, NULL, &error_abort);
diff --git a/util/Makefile.objs b/util/Makefile.objs
index c27a923dbe..38178201ff 100644
--- a/util/Makefile.objs
+++ b/util/Makefile.objs
@@ -53,7 +53,7 @@ util-obj-y += systemd.o
util-obj-y += iova-tree.o
util-obj-$(CONFIG_INOTIFY1) += filemonitor-inotify.o
util-obj-$(CONFIG_LINUX) += vfio-helpers.o
-util-obj-$(CONFIG_OPENGL) += drm.o
+util-obj-$(CONFIG_POSIX) += drm.o
util-obj-y += guest-random.o
stub-obj-y += filemonitor-stub.o
diff --git a/vl.c b/vl.c
index 2e69c9fef2..139658d8b3 100644
--- a/vl.c
+++ b/vl.c
@@ -239,6 +239,7 @@ static struct {
{ .driver = "qxl-vga", .flag = &default_vga },
{ .driver = "virtio-vga", .flag = &default_vga },
{ .driver = "ati-vga", .flag = &default_vga },
+ { .driver = "vhost-user-vga", .flag = &default_vga },
};
static QemuOptsList qemu_rtc_opts = {