Diffstat (limited to 'hw/virtio')
-rw-r--r--  hw/virtio/Kconfig                 5
-rw-r--r--  hw/virtio/meson.build             4
-rw-r--r--  hw/virtio/trace-events            9
-rw-r--r--  hw/virtio/vhost-user-fs.c         9
-rw-r--r--  hw/virtio/vhost-user-gpio-pci.c  69
-rw-r--r--  hw/virtio/vhost-user-gpio.c     411
-rw-r--r--  hw/virtio/vhost-user-i2c.c       10
-rw-r--r--  hw/virtio/vhost-user-rng.c       10
-rw-r--r--  hw/virtio/vhost-user-vsock.c      8
-rw-r--r--  hw/virtio/vhost-user.c           16
-rw-r--r--  hw/virtio/vhost-vsock-common.c    3
-rw-r--r--  hw/virtio/vhost-vsock.c           8
-rw-r--r--  hw/virtio/vhost.c                 6
-rw-r--r--  hw/virtio/virtio-stub.c          42
-rw-r--r--  hw/virtio/virtio.c             1049
15 files changed, 1618 insertions, 41 deletions
diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig
index e9ecae1f50..cbfd8c7173 100644
--- a/hw/virtio/Kconfig
+++ b/hw/virtio/Kconfig
@@ -80,3 +80,8 @@ config VHOST_USER_FS
bool
default y
depends on VIRTIO && VHOST_USER
+
+config VHOST_USER_GPIO
+ bool
+ default y
+ depends on VIRTIO && VHOST_USER
diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build
index 7e8877fd64..dfed1e7af5 100644
--- a/hw/virtio/meson.build
+++ b/hw/virtio/meson.build
@@ -29,6 +29,8 @@ virtio_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu.c'))
virtio_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c'))
virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c'))
+virtio_ss.add(when: 'CONFIG_VHOST_USER_GPIO', if_true: files('vhost-user-gpio.c'))
+virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_GPIO'], if_true: files('vhost-user-gpio-pci.c'))
virtio_pci_ss = ss.source_set()
virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c'))
@@ -60,4 +62,6 @@ virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss)
specific_ss.add_all(when: 'CONFIG_VIRTIO', if_true: virtio_ss)
softmmu_ss.add_all(when: 'CONFIG_VIRTIO', if_true: softmmu_virtio_ss)
softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('vhost-stub.c'))
+softmmu_ss.add(when: 'CONFIG_VIRTIO', if_false: files('virtio-stub.c'))
softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c'))
+softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('virtio-stub.c'))
diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 20af2e7ebd..820dadc26c 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -8,6 +8,10 @@ vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size,
vhost_section(const char *name) "%s"
vhost_reject_section(const char *name, int d) "%s:%d"
vhost_iotlb_miss(void *dev, int step) "%p step %d"
+vhost_dev_cleanup(void *dev) "%p"
+vhost_dev_start(void *dev, const char *name) "%p:%s"
+vhost_dev_stop(void *dev, const char *name) "%p:%s"
+
# vhost-user.c
vhost_user_postcopy_end_entry(void) ""
@@ -140,3 +144,8 @@ virtio_mem_state_response(uint16_t state) "state=%" PRIu16
virtio_pmem_flush_request(void) "flush request"
virtio_pmem_response(void) "flush response"
virtio_pmem_flush_done(int type) "fsync return=%d"
+
+# virtio-gpio.c
+virtio_gpio_start(void) "start"
+virtio_gpio_stop(void) "stop"
+virtio_gpio_set_status(uint8_t status) "0x%x"
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index e513e4fdda..ad0f91c607 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -20,6 +20,7 @@
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
+#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-user-fs.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
@@ -122,13 +123,9 @@ static void vuf_stop(VirtIODevice *vdev)
static void vuf_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserFS *fs = VHOST_USER_FS(vdev);
- bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+ bool should_start = virtio_device_started(vdev, status);
- if (!vdev->vm_running) {
- should_start = false;
- }
-
- if (fs->vhost_dev.started == should_start) {
+ if (vhost_dev_is_started(&fs->vhost_dev) == should_start) {
return;
}
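
[Editor's note] The hunk above replaces the open-coded "driver OK and VM running" test and the direct read of vhost_dev.started with the virtio_device_started() and vhost_dev_is_started() helpers; the same conversion is applied to vhost-user-i2c, vhost-user-rng, vhost-user-vsock and vhost-vsock further down. The helpers themselves live in headers outside this hw/virtio diff, so the following is only a minimal sketch of the logic they encapsulate, reconstructed from the open-coded checks being removed (the sketch_* names are deliberately not the real function names):

/* hedged sketch, not the actual include/hw/virtio implementations */
static inline bool sketch_virtio_device_started(VirtIODevice *vdev, uint8_t status)
{
    /* start only once the driver has set DRIVER_OK and the VM is running */
    return (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running;
}

static inline bool sketch_vhost_dev_is_started(struct vhost_dev *hdev)
{
    /* wrap the flag so callers stop poking the field directly */
    return hdev->started;
}
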
diff --git a/hw/virtio/vhost-user-gpio-pci.c b/hw/virtio/vhost-user-gpio-pci.c
new file mode 100644
index 0000000000..b3028a24a1
--- /dev/null
+++ b/hw/virtio/vhost-user-gpio-pci.c
@@ -0,0 +1,69 @@
+/*
+ * Vhost-user gpio virtio device PCI glue
+ *
+ * Copyright (c) 2022 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/vhost-user-gpio.h"
+#include "hw/virtio/virtio-pci.h"
+
+struct VHostUserGPIOPCI {
+ VirtIOPCIProxy parent_obj;
+ VHostUserGPIO vdev;
+};
+
+typedef struct VHostUserGPIOPCI VHostUserGPIOPCI;
+
+#define TYPE_VHOST_USER_GPIO_PCI "vhost-user-gpio-pci-base"
+
+DECLARE_INSTANCE_CHECKER(VHostUserGPIOPCI, VHOST_USER_GPIO_PCI,
+ TYPE_VHOST_USER_GPIO_PCI)
+
+static void vhost_user_gpio_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&dev->vdev);
+
+ vpci_dev->nvectors = 1;
+ qdev_realize(vdev, BUS(&vpci_dev->bus), errp);
+}
+
+static void vhost_user_gpio_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+ k->realize = vhost_user_gpio_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
+ pcidev_k->device_id = 0; /* Set by virtio-pci based on virtio id */
+ pcidev_k->revision = 0x00;
+ pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
+}
+
+static void vhost_user_gpio_pci_instance_init(Object *obj)
+{
+ VHostUserGPIOPCI *dev = VHOST_USER_GPIO_PCI(obj);
+
+ virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
+ TYPE_VHOST_USER_GPIO);
+}
+
+static const VirtioPCIDeviceTypeInfo vhost_user_gpio_pci_info = {
+ .base_name = TYPE_VHOST_USER_GPIO_PCI,
+ .non_transitional_name = "vhost-user-gpio-pci",
+ .instance_size = sizeof(VHostUserGPIOPCI),
+ .instance_init = vhost_user_gpio_pci_instance_init,
+ .class_init = vhost_user_gpio_pci_class_init,
+};
+
+static void vhost_user_gpio_pci_register(void)
+{
+ virtio_pci_types_register(&vhost_user_gpio_pci_info);
+}
+
+type_init(vhost_user_gpio_pci_register);
diff --git a/hw/virtio/vhost-user-gpio.c b/hw/virtio/vhost-user-gpio.c
new file mode 100644
index 0000000000..8b40fe450c
--- /dev/null
+++ b/hw/virtio/vhost-user-gpio.c
@@ -0,0 +1,411 @@
+/*
+ * Vhost-user GPIO virtio device
+ *
+ * Copyright (c) 2022 Viresh Kumar <viresh.kumar@linaro.org>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "hw/qdev-properties.h"
+#include "hw/virtio/virtio-bus.h"
+#include "hw/virtio/vhost-user-gpio.h"
+#include "qemu/error-report.h"
+#include "standard-headers/linux/virtio_ids.h"
+#include "trace.h"
+
+#define REALIZE_CONNECTION_RETRIES 3
+
+/* Features required from VirtIO */
+static const int feature_bits[] = {
+ VIRTIO_F_VERSION_1,
+ VIRTIO_F_NOTIFY_ON_EMPTY,
+ VIRTIO_RING_F_INDIRECT_DESC,
+ VIRTIO_RING_F_EVENT_IDX,
+ VIRTIO_GPIO_F_IRQ,
+ VHOST_INVALID_FEATURE_BIT
+};
+
+static void vu_gpio_get_config(VirtIODevice *vdev, uint8_t *config)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+
+ memcpy(config, &gpio->config, sizeof(gpio->config));
+}
+
+static int vu_gpio_config_notifier(struct vhost_dev *dev)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(dev->vdev);
+
+ memcpy(dev->vdev->config, &gpio->config, sizeof(gpio->config));
+ virtio_notify_config(dev->vdev);
+
+ return 0;
+}
+
+const VhostDevConfigOps gpio_ops = {
+ .vhost_dev_config_notifier = vu_gpio_config_notifier,
+};
+
+static int vu_gpio_start(VirtIODevice *vdev)
+{
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ struct vhost_dev *vhost_dev = &gpio->vhost_dev;
+ int ret, i;
+
+ if (!k->set_guest_notifiers) {
+ error_report("binding does not support guest notifiers");
+ return -ENOSYS;
+ }
+
+ ret = vhost_dev_enable_notifiers(vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error enabling host notifiers: %d", ret);
+ return ret;
+ }
+
+ ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, true);
+ if (ret < 0) {
+ error_report("Error binding guest notifier: %d", ret);
+ goto err_host_notifiers;
+ }
+
+ /*
+ * Before we start up we need to ensure we have the final feature
+ * set needed for the vhost configuration. The backend may also
+ * apply backend_features when the feature set is sent.
+ */
+ vhost_ack_features(&gpio->vhost_dev, feature_bits, vdev->guest_features);
+
+ ret = vhost_dev_start(&gpio->vhost_dev, vdev);
+ if (ret < 0) {
+ error_report("Error starting vhost-user-gpio: %d", ret);
+ goto err_guest_notifiers;
+ }
+
+ /*
+ * guest_notifier_mask/pending not used yet, so just unmask
+ * everything here. virtio-pci will do the right thing by
+ * enabling/disabling irqfd.
+ */
+ for (i = 0; i < gpio->vhost_dev.nvqs; i++) {
+ vhost_virtqueue_mask(&gpio->vhost_dev, vdev, i, false);
+ }
+
+ /*
+ * As we must have VHOST_USER_F_PROTOCOL_FEATURES (because
+ * VHOST_USER_GET_CONFIG requires it) we need to explicitly enable
+ * the vrings.
+ */
+ g_assert(vhost_dev->vhost_ops &&
+ vhost_dev->vhost_ops->vhost_set_vring_enable);
+ ret = vhost_dev->vhost_ops->vhost_set_vring_enable(vhost_dev, true);
+ if (ret == 0) {
+ return 0;
+ }
+
+ error_report("Failed to start vrings for vhost-user-gpio: %d", ret);
+
+err_guest_notifiers:
+ k->set_guest_notifiers(qbus->parent, gpio->vhost_dev.nvqs, false);
+err_host_notifiers:
+ vhost_dev_disable_notifiers(&gpio->vhost_dev, vdev);
+
+ return ret;
+}
+
+static void vu_gpio_stop(VirtIODevice *vdev)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev)));
+ VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ struct vhost_dev *vhost_dev = &gpio->vhost_dev;
+ int ret;
+
+ if (!k->set_guest_notifiers) {
+ return;
+ }
+
+ /*
+ * We can call vu_gpio_stop multiple times, for example from
+ * vm_state_notify and the final object finalisation. Check we
+ * aren't already stopped before doing so.
+ */
+ if (!vhost_dev_is_started(vhost_dev)) {
+ return;
+ }
+
+ vhost_dev_stop(vhost_dev, vdev);
+
+ ret = k->set_guest_notifiers(qbus->parent, vhost_dev->nvqs, false);
+ if (ret < 0) {
+ error_report("vhost guest notifier cleanup failed: %d", ret);
+ return;
+ }
+
+ vhost_dev_disable_notifiers(vhost_dev, vdev);
+}
+
+static void vu_gpio_set_status(VirtIODevice *vdev, uint8_t status)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ bool should_start = virtio_device_started(vdev, status);
+
+ trace_virtio_gpio_set_status(status);
+
+ if (!gpio->connected) {
+ return;
+ }
+
+ if (vhost_dev_is_started(&gpio->vhost_dev) == should_start) {
+ return;
+ }
+
+ if (should_start) {
+ if (vu_gpio_start(vdev)) {
+ qemu_chr_fe_disconnect(&gpio->chardev);
+ }
+ } else {
+ vu_gpio_stop(vdev);
+ }
+}
+
+static uint64_t vu_gpio_get_features(VirtIODevice *vdev, uint64_t features,
+ Error **errp)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+
+ return vhost_get_features(&gpio->vhost_dev, feature_bits, features);
+}
+
+static void vu_gpio_handle_output(VirtIODevice *vdev, VirtQueue *vq)
+{
+ /*
+ * Not normally called; it's the daemon that handles the queue;
+ * however virtio's cleanup path can call this.
+ */
+}
+
+static void vu_gpio_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask)
+{
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+
+ vhost_virtqueue_mask(&gpio->vhost_dev, vdev, idx, mask);
+}
+
+static void do_vhost_user_cleanup(VirtIODevice *vdev, VHostUserGPIO *gpio)
+{
+ virtio_delete_queue(gpio->command_vq);
+ virtio_delete_queue(gpio->interrupt_vq);
+ g_free(gpio->vhost_dev.vqs);
+ gpio->vhost_dev.vqs = NULL;
+ virtio_cleanup(vdev);
+ vhost_user_cleanup(&gpio->vhost_user);
+}
+
+static int vu_gpio_connect(DeviceState *dev, Error **errp)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ struct vhost_dev *vhost_dev = &gpio->vhost_dev;
+ int ret;
+
+ if (gpio->connected) {
+ return 0;
+ }
+ gpio->connected = true;
+
+ vhost_dev_set_config_notifier(vhost_dev, &gpio_ops);
+ gpio->vhost_user.supports_config = true;
+
+ ret = vhost_dev_init(vhost_dev, &gpio->vhost_user,
+ VHOST_BACKEND_TYPE_USER, 0, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /* restore vhost state */
+ if (virtio_device_started(vdev, vdev->status)) {
+ vu_gpio_start(vdev);
+ }
+
+ return 0;
+}
+
+static void vu_gpio_disconnect(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+
+ if (!gpio->connected) {
+ return;
+ }
+ gpio->connected = false;
+
+ vu_gpio_stop(vdev);
+ vhost_dev_cleanup(&gpio->vhost_dev);
+}
+
+static void vu_gpio_event(void *opaque, QEMUChrEvent event)
+{
+ DeviceState *dev = opaque;
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(vdev);
+ Error *local_err = NULL;
+
+ switch (event) {
+ case CHR_EVENT_OPENED:
+ if (vu_gpio_connect(dev, &local_err) < 0) {
+ qemu_chr_fe_disconnect(&gpio->chardev);
+ return;
+ }
+ break;
+ case CHR_EVENT_CLOSED:
+ vu_gpio_disconnect(dev);
+ break;
+ case CHR_EVENT_BREAK:
+ case CHR_EVENT_MUX_IN:
+ case CHR_EVENT_MUX_OUT:
+ /* Ignore */
+ break;
+ }
+}
+
+static int vu_gpio_realize_connect(VHostUserGPIO *gpio, Error **errp)
+{
+ VirtIODevice *vdev = &gpio->parent_obj;
+ DeviceState *dev = &vdev->parent_obj;
+ struct vhost_dev *vhost_dev = &gpio->vhost_dev;
+ int ret;
+
+ ret = qemu_chr_fe_wait_connected(&gpio->chardev, errp);
+ if (ret < 0) {
+ return ret;
+ }
+
+ /*
+ * vu_gpio_connect() may have already connected (via the event
+ * callback) in which case it will just report success.
+ */
+ ret = vu_gpio_connect(dev, errp);
+ if (ret < 0) {
+ qemu_chr_fe_disconnect(&gpio->chardev);
+ return ret;
+ }
+ g_assert(gpio->connected);
+
+ ret = vhost_dev_get_config(vhost_dev, (uint8_t *)&gpio->config,
+ sizeof(gpio->config), errp);
+
+ if (ret < 0) {
+ error_report("vhost-user-gpio: get config failed");
+
+ qemu_chr_fe_disconnect(&gpio->chardev);
+ vhost_dev_cleanup(vhost_dev);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void vu_gpio_device_realize(DeviceState *dev, Error **errp)
+{
+ ERRP_GUARD();
+
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(dev);
+ int retries, ret;
+
+ if (!gpio->chardev.chr) {
+ error_setg(errp, "vhost-user-gpio: chardev is mandatory");
+ return;
+ }
+
+ if (!vhost_user_init(&gpio->vhost_user, &gpio->chardev, errp)) {
+ return;
+ }
+
+ virtio_init(vdev, VIRTIO_ID_GPIO, sizeof(gpio->config));
+
+ gpio->vhost_dev.nvqs = 2;
+ gpio->command_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
+ gpio->interrupt_vq = virtio_add_queue(vdev, 256, vu_gpio_handle_output);
+ gpio->vhost_dev.vqs = g_new0(struct vhost_virtqueue, gpio->vhost_dev.nvqs);
+
+ gpio->connected = false;
+
+ qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, vu_gpio_event, NULL,
+ dev, NULL, true);
+
+ retries = REALIZE_CONNECTION_RETRIES;
+ g_assert(!*errp);
+ do {
+ if (*errp) {
+ error_prepend(errp, "Reconnecting after error: ");
+ error_report_err(*errp);
+ *errp = NULL;
+ }
+ ret = vu_gpio_realize_connect(gpio, errp);
+ } while (ret < 0 && retries--);
+
+ if (ret < 0) {
+ do_vhost_user_cleanup(vdev, gpio);
+ }
+
+ return;
+}
+
+static void vu_gpio_device_unrealize(DeviceState *dev)
+{
+ VirtIODevice *vdev = VIRTIO_DEVICE(dev);
+ VHostUserGPIO *gpio = VHOST_USER_GPIO(dev);
+
+ vu_gpio_set_status(vdev, 0);
+ qemu_chr_fe_set_handlers(&gpio->chardev, NULL, NULL, NULL, NULL, NULL, NULL,
+ false);
+ vhost_dev_cleanup(&gpio->vhost_dev);
+ do_vhost_user_cleanup(vdev, gpio);
+}
+
+static const VMStateDescription vu_gpio_vmstate = {
+ .name = "vhost-user-gpio",
+ .unmigratable = 1,
+};
+
+static Property vu_gpio_properties[] = {
+ DEFINE_PROP_CHR("chardev", VHostUserGPIO, chardev),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void vu_gpio_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
+
+ device_class_set_props(dc, vu_gpio_properties);
+ dc->vmsd = &vu_gpio_vmstate;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+ vdc->realize = vu_gpio_device_realize;
+ vdc->unrealize = vu_gpio_device_unrealize;
+ vdc->get_features = vu_gpio_get_features;
+ vdc->get_config = vu_gpio_get_config;
+ vdc->set_status = vu_gpio_set_status;
+ vdc->guest_notifier_mask = vu_gpio_guest_notifier_mask;
+}
+
+static const TypeInfo vu_gpio_info = {
+ .name = TYPE_VHOST_USER_GPIO,
+ .parent = TYPE_VIRTIO_DEVICE,
+ .instance_size = sizeof(VHostUserGPIO),
+ .class_init = vu_gpio_class_init,
+};
+
+static void vu_gpio_register_types(void)
+{
+ type_register_static(&vu_gpio_info);
+}
+
+type_init(vu_gpio_register_types)
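
[Editor's note] The device state used throughout this new file (gpio->vhost_dev, gpio->chardev, gpio->config, gpio->command_vq, gpio->interrupt_vq, gpio->connected, ...) is declared in include/hw/virtio/vhost-user-gpio.h, which is outside the hw/virtio scope of this diff. Below is a hypothetical sketch of that structure, inferred purely from how the fields are used above; the field order and the exact config type are assumptions:

struct VHostUserGPIO {
    VirtIODevice parent_obj;           /* accessed as &gpio->parent_obj in vu_gpio_realize_connect() */
    CharBackend chardev;               /* "chardev" property: vhost-user socket to the backend daemon */
    struct virtio_gpio_config config;  /* assumed type; copied to the guest in vu_gpio_get_config() */
    struct vhost_dev vhost_dev;        /* two virtqueues: command and interrupt */
    VhostUserState vhost_user;         /* supports_config is set before vhost_dev_init() */
    VirtQueue *command_vq;
    VirtQueue *interrupt_vq;
    bool connected;                    /* tracks CHR_EVENT_OPENED/CLOSED on the chardev */
};
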
diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c
index 6020eee093..bc58b6c0d1 100644
--- a/hw/virtio/vhost-user-i2c.c
+++ b/hw/virtio/vhost-user-i2c.c
@@ -93,13 +93,9 @@ static void vu_i2c_stop(VirtIODevice *vdev)
static void vu_i2c_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserI2C *i2c = VHOST_USER_I2C(vdev);
- bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+ bool should_start = virtio_device_started(vdev, status);
- if (!vdev->vm_running) {
- should_start = false;
- }
-
- if (i2c->vhost_dev.started == should_start) {
+ if (vhost_dev_is_started(&i2c->vhost_dev) == should_start) {
return;
}
@@ -178,7 +174,7 @@ static void vu_i2c_disconnect(DeviceState *dev)
}
i2c->connected = false;
- if (i2c->vhost_dev.started) {
+ if (vhost_dev_is_started(&i2c->vhost_dev)) {
vu_i2c_stop(vdev);
}
}
diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c
index 3a7bf8e32d..bc1f36c5ac 100644
--- a/hw/virtio/vhost-user-rng.c
+++ b/hw/virtio/vhost-user-rng.c
@@ -90,13 +90,9 @@ static void vu_rng_stop(VirtIODevice *vdev)
static void vu_rng_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostUserRNG *rng = VHOST_USER_RNG(vdev);
- bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+ bool should_start = virtio_device_started(vdev, status);
- if (!vdev->vm_running) {
- should_start = false;
- }
-
- if (rng->vhost_dev.started == should_start) {
+ if (vhost_dev_is_started(&rng->vhost_dev) == should_start) {
return;
}
@@ -164,7 +160,7 @@ static void vu_rng_disconnect(DeviceState *dev)
rng->connected = false;
- if (rng->vhost_dev.started) {
+ if (vhost_dev_is_started(&rng->vhost_dev)) {
vu_rng_stop(vdev);
}
}
diff --git a/hw/virtio/vhost-user-vsock.c b/hw/virtio/vhost-user-vsock.c
index 0f8ff99f85..7b67e29d83 100644
--- a/hw/virtio/vhost-user-vsock.c
+++ b/hw/virtio/vhost-user-vsock.c
@@ -55,13 +55,9 @@ const VhostDevConfigOps vsock_ops = {
static void vuv_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
- bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+ bool should_start = virtio_device_started(vdev, status);
- if (!vdev->vm_running) {
- should_start = false;
- }
-
- if (vvc->vhost_dev.started == should_start) {
+ if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {
return;
}
diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c
index 70748e61e0..03415b6c95 100644
--- a/hw/virtio/vhost-user.c
+++ b/hw/virtio/vhost-user.c
@@ -200,7 +200,7 @@ typedef struct {
VhostUserRequest request;
#define VHOST_USER_VERSION_MASK (0x3)
-#define VHOST_USER_REPLY_MASK (0x1<<2)
+#define VHOST_USER_REPLY_MASK (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
uint32_t flags;
uint32_t size; /* the following payload size */
@@ -208,7 +208,7 @@ typedef struct {
typedef union {
#define VHOST_USER_VRING_IDX_MASK (0xff)
-#define VHOST_USER_VRING_NOFD_MASK (0x1<<8)
+#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
uint64_t u64;
struct vhost_vring_state state;
struct vhost_vring_addr addr;
@@ -248,7 +248,8 @@ struct vhost_user {
size_t region_rb_len;
/* RAMBlock associated with a given region */
RAMBlock **region_rb;
- /* The offset from the start of the RAMBlock to the start of the
+ /*
+ * The offset from the start of the RAMBlock to the start of the
* vhost region.
*/
ram_addr_t *region_rb_offset;
@@ -1460,7 +1461,14 @@ static int vhost_user_set_features(struct vhost_dev *dev,
*/
bool log_enabled = features & (0x1ULL << VHOST_F_LOG_ALL);
- return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES, features,
+ /*
+ * We need to include any extra backend only feature bits that
+ * might be needed by our device. Currently this includes the
+ * VHOST_USER_F_PROTOCOL_FEATURES bit for enabling protocol
+ * features.
+ */
+ return vhost_user_set_u64(dev, VHOST_USER_SET_FEATURES,
+ features | dev->backend_features,
log_enabled);
}
diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c
index 7394818e00..29b9ab4f72 100644
--- a/hw/virtio/vhost-vsock-common.c
+++ b/hw/virtio/vhost-vsock-common.c
@@ -14,6 +14,7 @@
#include "hw/virtio/virtio-access.h"
#include "qemu/error-report.h"
#include "hw/qdev-properties.h"
+#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-vsock.h"
#include "qemu/iov.h"
#include "monitor/monitor.h"
@@ -199,7 +200,7 @@ int vhost_vsock_common_pre_save(void *opaque)
* At this point, backend must be stopped, otherwise
* it might keep writing to memory.
*/
- assert(!vvc->vhost_dev.started);
+ assert(!vhost_dev_is_started(&vvc->vhost_dev));
return 0;
}
diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c
index 0338de892f..7dc3c73931 100644
--- a/hw/virtio/vhost-vsock.c
+++ b/hw/virtio/vhost-vsock.c
@@ -70,14 +70,10 @@ static int vhost_vsock_set_running(VirtIODevice *vdev, int start)
static void vhost_vsock_set_status(VirtIODevice *vdev, uint8_t status)
{
VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev);
- bool should_start = status & VIRTIO_CONFIG_S_DRIVER_OK;
+ bool should_start = virtio_device_started(vdev, status);
int ret;
- if (!vdev->vm_running) {
- should_start = false;
- }
-
- if (vvc->vhost_dev.started == should_start) {
+ if (vhost_dev_is_started(&vvc->vhost_dev) == should_start) {
return;
}
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index f758f177bb..5185c15295 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -1477,6 +1477,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev)
{
int i;
+ trace_vhost_dev_cleanup(hdev);
+
for (i = 0; i < hdev->nvqs; ++i) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
@@ -1783,6 +1785,8 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
+ trace_vhost_dev_start(hdev, vdev->name);
+
vdev->vhost_started = true;
hdev->started = true;
hdev->vdev = vdev;
@@ -1869,6 +1873,8 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev)
/* should only be called after backend is connected */
assert(hdev->vhost_ops);
+ trace_vhost_dev_stop(hdev, vdev->name);
+
if (hdev->vhost_ops->vhost_dev_start) {
hdev->vhost_ops->vhost_dev_start(hdev, false);
}
diff --git a/hw/virtio/virtio-stub.c b/hw/virtio/virtio-stub.c
new file mode 100644
index 0000000000..7ddb22cc5e
--- /dev/null
+++ b/hw/virtio/virtio-stub.c
@@ -0,0 +1,42 @@
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qapi/qapi-commands-virtio.h"
+
+static void *qmp_virtio_unsupported(Error **errp)
+{
+ error_setg(errp, "Virtio is disabled");
+ return NULL;
+}
+
+VirtioInfoList *qmp_x_query_virtio(Error **errp)
+{
+ return qmp_virtio_unsupported(errp);
+}
+
+VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp)
+{
+ return qmp_virtio_unsupported(errp);
+}
+
+VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path,
+ uint16_t queue,
+ Error **errp)
+{
+ return qmp_virtio_unsupported(errp);
+}
+
+VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
+ uint16_t queue,
+ Error **errp)
+{
+ return qmp_virtio_unsupported(errp);
+}
+
+VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
+ uint16_t queue,
+ bool has_index,
+ uint16_t index,
+ Error **errp)
+{
+ return qmp_virtio_unsupported(errp);
+}
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 5d607aeaa0..808446b4c9 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -13,12 +13,18 @@
#include "qemu/osdep.h"
#include "qapi/error.h"
+#include "qapi/qmp/qdict.h"
+#include "qapi/qapi-commands-virtio.h"
+#include "qapi/qapi-commands-qom.h"
+#include "qapi/qapi-visit-virtio.h"
+#include "qapi/qmp/qjson.h"
#include "cpu.h"
#include "trace.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "qemu/module.h"
+#include "qom/object_interfaces.h"
#include "hw/virtio/virtio.h"
#include "migration/qemu-file-types.h"
#include "qemu/atomic.h"
@@ -28,6 +34,432 @@
#include "sysemu/dma.h"
#include "sysemu/runstate.h"
#include "standard-headers/linux/virtio_ids.h"
+#include "standard-headers/linux/vhost_types.h"
+#include "standard-headers/linux/virtio_blk.h"
+#include "standard-headers/linux/virtio_console.h"
+#include "standard-headers/linux/virtio_gpu.h"
+#include "standard-headers/linux/virtio_net.h"
+#include "standard-headers/linux/virtio_scsi.h"
+#include "standard-headers/linux/virtio_i2c.h"
+#include "standard-headers/linux/virtio_balloon.h"
+#include "standard-headers/linux/virtio_iommu.h"
+#include "standard-headers/linux/virtio_mem.h"
+#include "standard-headers/linux/virtio_vsock.h"
+#include CONFIG_DEVICES
+
+/* QAPI list of realized VirtIODevices */
+static QTAILQ_HEAD(, VirtIODevice) virtio_list;
+
+/*
+ * Maximum size of virtio device config space
+ */
+#define VHOST_USER_MAX_CONFIG_SIZE 256
+
+#define FEATURE_ENTRY(name, desc) (qmp_virtio_feature_map_t) \
+ { .virtio_bit = name, .feature_desc = desc }
+
+enum VhostUserProtocolFeature {
+ VHOST_USER_PROTOCOL_F_MQ = 0,
+ VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
+ VHOST_USER_PROTOCOL_F_RARP = 2,
+ VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
+ VHOST_USER_PROTOCOL_F_NET_MTU = 4,
+ VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
+ VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
+ VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
+ VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
+ VHOST_USER_PROTOCOL_F_CONFIG = 9,
+ VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
+ VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
+ VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
+ VHOST_USER_PROTOCOL_F_RESET_DEVICE = 13,
+ VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
+ VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+ VHOST_USER_PROTOCOL_F_MAX
+};
+
+/* Virtio transport features mapping */
+static qmp_virtio_feature_map_t virtio_transport_map[] = {
+ /* Virtio device transport features */
+#ifndef VIRTIO_CONFIG_NO_LEGACY
+ FEATURE_ENTRY(VIRTIO_F_NOTIFY_ON_EMPTY, \
+ "VIRTIO_F_NOTIFY_ON_EMPTY: Notify when device runs out of avail. "
+ "descs. on VQ"),
+ FEATURE_ENTRY(VIRTIO_F_ANY_LAYOUT, \
+ "VIRTIO_F_ANY_LAYOUT: Device accepts arbitrary desc. layouts"),
+#endif /* !VIRTIO_CONFIG_NO_LEGACY */
+ FEATURE_ENTRY(VIRTIO_F_VERSION_1, \
+ "VIRTIO_F_VERSION_1: Device compliant for v1 spec (legacy)"),
+ FEATURE_ENTRY(VIRTIO_F_IOMMU_PLATFORM, \
+ "VIRTIO_F_IOMMU_PLATFORM: Device can be used on IOMMU platform"),
+ FEATURE_ENTRY(VIRTIO_F_RING_PACKED, \
+ "VIRTIO_F_RING_PACKED: Device supports packed VQ layout"),
+ FEATURE_ENTRY(VIRTIO_F_IN_ORDER, \
+ "VIRTIO_F_IN_ORDER: Device uses buffers in same order as made "
+ "available by driver"),
+ FEATURE_ENTRY(VIRTIO_F_ORDER_PLATFORM, \
+ "VIRTIO_F_ORDER_PLATFORM: Memory accesses ordered by platform"),
+ FEATURE_ENTRY(VIRTIO_F_SR_IOV, \
+ "VIRTIO_F_SR_IOV: Device supports single root I/O virtualization"),
+ /* Virtio ring transport features */
+ FEATURE_ENTRY(VIRTIO_RING_F_INDIRECT_DESC, \
+ "VIRTIO_RING_F_INDIRECT_DESC: Indirect descriptors supported"),
+ FEATURE_ENTRY(VIRTIO_RING_F_EVENT_IDX, \
+ "VIRTIO_RING_F_EVENT_IDX: Used & avail. event fields enabled"),
+ { -1, "" }
+};
+
+/* Vhost-user protocol features mapping */
+static qmp_virtio_feature_map_t vhost_user_protocol_map[] = {
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_MQ, \
+ "VHOST_USER_PROTOCOL_F_MQ: Multiqueue protocol supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_LOG_SHMFD, \
+ "VHOST_USER_PROTOCOL_F_LOG_SHMFD: Shared log memory fd supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RARP, \
+ "VHOST_USER_PROTOCOL_F_RARP: Vhost-user back-end RARP broadcasting "
+ "supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_REPLY_ACK, \
+ "VHOST_USER_PROTOCOL_F_REPLY_ACK: Requested operation status ack. "
+ "supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_NET_MTU, \
+ "VHOST_USER_PROTOCOL_F_NET_MTU: Expose host MTU to guest supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_REQ, \
+ "VHOST_USER_PROTOCOL_F_SLAVE_REQ: Socket fd for back-end initiated "
+ "requests supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CROSS_ENDIAN, \
+ "VHOST_USER_PROTOCOL_F_CROSS_ENDIAN: Endianness of VQs for legacy "
+ "devices supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CRYPTO_SESSION, \
+ "VHOST_USER_PROTOCOL_F_CRYPTO_SESSION: Session creation for crypto "
+ "operations supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_PAGEFAULT, \
+ "VHOST_USER_PROTOCOL_F_PAGEFAULT: Request servicing on userfaultfd "
+ "for accessed pages supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIG, \
+ "VHOST_USER_PROTOCOL_F_CONFIG: Vhost-user messaging for virtio "
+ "device configuration space supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD, \
+ "VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD: Slave fd communication "
+ "channel supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_HOST_NOTIFIER, \
+ "VHOST_USER_PROTOCOL_F_HOST_NOTIFIER: Host notifiers for specified "
+ "VQs supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD, \
+ "VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD: Shared inflight I/O buffers "
+ "supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_RESET_DEVICE, \
+ "VHOST_USER_PROTOCOL_F_RESET_DEVICE: Disabling all rings and "
+ "resetting internal device state supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS, \
+ "VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS: In-band messaging "
+ "supported"),
+ FEATURE_ENTRY(VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS, \
+ "VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS: Configuration for "
+ "memory slots supported"),
+ { -1, "" }
+};
+
+/* virtio device configuration statuses */
+static qmp_virtio_feature_map_t virtio_config_status_map[] = {
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER_OK, \
+ "VIRTIO_CONFIG_S_DRIVER_OK: Driver setup and ready"),
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_FEATURES_OK, \
+ "VIRTIO_CONFIG_S_FEATURES_OK: Feature negotiation complete"),
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_DRIVER, \
+ "VIRTIO_CONFIG_S_DRIVER: Guest OS compatible with device"),
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_NEEDS_RESET, \
+ "VIRTIO_CONFIG_S_NEEDS_RESET: Irrecoverable error, device needs "
+ "reset"),
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_FAILED, \
+ "VIRTIO_CONFIG_S_FAILED: Error in guest, device failed"),
+ FEATURE_ENTRY(VIRTIO_CONFIG_S_ACKNOWLEDGE, \
+ "VIRTIO_CONFIG_S_ACKNOWLEDGE: Valid virtio device found"),
+ { -1, "" }
+};
+
+/* virtio-blk features mapping */
+qmp_virtio_feature_map_t virtio_blk_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_BLK_F_SIZE_MAX, \
+ "VIRTIO_BLK_F_SIZE_MAX: Max segment size is size_max"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_SEG_MAX, \
+ "VIRTIO_BLK_F_SEG_MAX: Max segments in a request is seg_max"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_GEOMETRY, \
+ "VIRTIO_BLK_F_GEOMETRY: Legacy geometry available"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_RO, \
+ "VIRTIO_BLK_F_RO: Device is read-only"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_BLK_SIZE, \
+ "VIRTIO_BLK_F_BLK_SIZE: Block size of disk available"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_TOPOLOGY, \
+ "VIRTIO_BLK_F_TOPOLOGY: Topology information available"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_MQ, \
+ "VIRTIO_BLK_F_MQ: Multiqueue supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_DISCARD, \
+ "VIRTIO_BLK_F_DISCARD: Discard command supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_WRITE_ZEROES, \
+ "VIRTIO_BLK_F_WRITE_ZEROES: Write zeroes command supported"),
+#ifndef VIRTIO_BLK_NO_LEGACY
+ FEATURE_ENTRY(VIRTIO_BLK_F_BARRIER, \
+ "VIRTIO_BLK_F_BARRIER: Request barriers supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_SCSI, \
+ "VIRTIO_BLK_F_SCSI: SCSI packet commands supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_FLUSH, \
+ "VIRTIO_BLK_F_FLUSH: Flush command supported"),
+ FEATURE_ENTRY(VIRTIO_BLK_F_CONFIG_WCE, \
+ "VIRTIO_BLK_F_CONFIG_WCE: Cache writeback and writethrough modes "
+ "supported"),
+#endif /* !VIRTIO_BLK_NO_LEGACY */
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio-serial features mapping */
+qmp_virtio_feature_map_t virtio_serial_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_CONSOLE_F_SIZE, \
+ "VIRTIO_CONSOLE_F_SIZE: Host providing console size"),
+ FEATURE_ENTRY(VIRTIO_CONSOLE_F_MULTIPORT, \
+ "VIRTIO_CONSOLE_F_MULTIPORT: Multiple ports for device supported"),
+ FEATURE_ENTRY(VIRTIO_CONSOLE_F_EMERG_WRITE, \
+ "VIRTIO_CONSOLE_F_EMERG_WRITE: Emergency write supported"),
+ { -1, "" }
+};
+
+/* virtio-gpu features mapping */
+qmp_virtio_feature_map_t virtio_gpu_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_GPU_F_VIRGL, \
+ "VIRTIO_GPU_F_VIRGL: Virgl 3D mode supported"),
+ FEATURE_ENTRY(VIRTIO_GPU_F_EDID, \
+ "VIRTIO_GPU_F_EDID: EDID metadata supported"),
+ FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_UUID, \
+ "VIRTIO_GPU_F_RESOURCE_UUID: Resource UUID assigning supported"),
+ FEATURE_ENTRY(VIRTIO_GPU_F_RESOURCE_BLOB, \
+ "VIRTIO_GPU_F_RESOURCE_BLOB: Size-based blob resources supported"),
+ FEATURE_ENTRY(VIRTIO_GPU_F_CONTEXT_INIT, \
+ "VIRTIO_GPU_F_CONTEXT_INIT: Context types and synchronization "
+ "timelines supported"),
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio-input features mapping */
+qmp_virtio_feature_map_t virtio_input_feature_map[] = {
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio-net features mapping */
+qmp_virtio_feature_map_t virtio_net_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_NET_F_CSUM, \
+ "VIRTIO_NET_F_CSUM: Device handling packets with partial checksum "
+ "supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_CSUM, \
+ "VIRTIO_NET_F_GUEST_CSUM: Driver handling packets with partial "
+ "checksum supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS, \
+ "VIRTIO_NET_F_CTRL_GUEST_OFFLOADS: Control channel offloading "
+ "reconfig. supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_MTU, \
+ "VIRTIO_NET_F_MTU: Device max MTU reporting supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_MAC, \
+ "VIRTIO_NET_F_MAC: Device has given MAC address"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO4, \
+ "VIRTIO_NET_F_GUEST_TSO4: Driver can receive TSOv4"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_TSO6, \
+ "VIRTIO_NET_F_GUEST_TSO6: Driver can receive TSOv6"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ECN, \
+ "VIRTIO_NET_F_GUEST_ECN: Driver can receive TSO with ECN"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_UFO, \
+ "VIRTIO_NET_F_GUEST_UFO: Driver can receive UFO"),
+ FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO4, \
+ "VIRTIO_NET_F_HOST_TSO4: Device can receive TSOv4"),
+ FEATURE_ENTRY(VIRTIO_NET_F_HOST_TSO6, \
+ "VIRTIO_NET_F_HOST_TSO6: Device can receive TSOv6"),
+ FEATURE_ENTRY(VIRTIO_NET_F_HOST_ECN, \
+ "VIRTIO_NET_F_HOST_ECN: Device can receive TSO with ECN"),
+ FEATURE_ENTRY(VIRTIO_NET_F_HOST_UFO, \
+ "VIRTIO_NET_F_HOST_UFO: Device can receive UFO"),
+ FEATURE_ENTRY(VIRTIO_NET_F_MRG_RXBUF, \
+ "VIRTIO_NET_F_MRG_RXBUF: Driver can merge receive buffers"),
+ FEATURE_ENTRY(VIRTIO_NET_F_STATUS, \
+ "VIRTIO_NET_F_STATUS: Configuration status field available"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VQ, \
+ "VIRTIO_NET_F_CTRL_VQ: Control channel available"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX, \
+ "VIRTIO_NET_F_CTRL_RX: Control channel RX mode supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_VLAN, \
+ "VIRTIO_NET_F_CTRL_VLAN: Control channel VLAN filtering supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_RX_EXTRA, \
+ "VIRTIO_NET_F_CTRL_RX_EXTRA: Extra RX mode control supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_GUEST_ANNOUNCE, \
+ "VIRTIO_NET_F_GUEST_ANNOUNCE: Driver sending gratuitous packets "
+ "supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_MQ, \
+ "VIRTIO_NET_F_MQ: Multiqueue with automatic receive steering "
+ "supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_CTRL_MAC_ADDR, \
+ "VIRTIO_NET_F_CTRL_MAC_ADDR: MAC address set through control "
+ "channel"),
+ FEATURE_ENTRY(VIRTIO_NET_F_HASH_REPORT, \
+ "VIRTIO_NET_F_HASH_REPORT: Hash reporting supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_RSS, \
+ "VIRTIO_NET_F_RSS: RSS RX steering supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_RSC_EXT, \
+ "VIRTIO_NET_F_RSC_EXT: Extended coalescing info supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_STANDBY, \
+ "VIRTIO_NET_F_STANDBY: Device acting as standby for primary "
+ "device with same MAC addr. supported"),
+ FEATURE_ENTRY(VIRTIO_NET_F_SPEED_DUPLEX, \
+ "VIRTIO_NET_F_SPEED_DUPLEX: Device set linkspeed and duplex"),
+#ifndef VIRTIO_NET_NO_LEGACY
+ FEATURE_ENTRY(VIRTIO_NET_F_GSO, \
+ "VIRTIO_NET_F_GSO: Handling GSO-type packets supported"),
+#endif /* !VIRTIO_NET_NO_LEGACY */
+ FEATURE_ENTRY(VHOST_NET_F_VIRTIO_NET_HDR, \
+ "VHOST_NET_F_VIRTIO_NET_HDR: Virtio-net headers for RX and TX "
+ "packets supported"),
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio-scsi features mapping */
+qmp_virtio_feature_map_t virtio_scsi_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_SCSI_F_INOUT, \
+ "VIRTIO_SCSI_F_INOUT: Requests including read and writable data "
+ "buffers suppoted"),
+ FEATURE_ENTRY(VIRTIO_SCSI_F_HOTPLUG, \
+ "VIRTIO_SCSI_F_HOTPLUG: Reporting and handling hot-plug events "
+ "supported"),
+ FEATURE_ENTRY(VIRTIO_SCSI_F_CHANGE, \
+ "VIRTIO_SCSI_F_CHANGE: Reporting and handling LUN changes "
+ "supported"),
+ FEATURE_ENTRY(VIRTIO_SCSI_F_T10_PI, \
+ "VIRTIO_SCSI_F_T10_PI: T10 info included in request header"),
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio/vhost-user-fs features mapping */
+qmp_virtio_feature_map_t virtio_fs_feature_map[] = {
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio/vhost-user-i2c features mapping */
+qmp_virtio_feature_map_t virtio_i2c_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_I2C_F_ZERO_LENGTH_REQUEST, \
+ "VIRTIO_I2C_F_ZERO_LEGNTH_REQUEST: Zero length requests supported"),
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio/vhost-vsock features mapping */
+qmp_virtio_feature_map_t virtio_vsock_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_VSOCK_F_SEQPACKET, \
+ "VIRTIO_VSOCK_F_SEQPACKET: SOCK_SEQPACKET supported"),
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
+
+/* virtio-balloon features mapping */
+qmp_virtio_feature_map_t virtio_balloon_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_MUST_TELL_HOST, \
+ "VIRTIO_BALLOON_F_MUST_TELL_HOST: Tell host before reclaiming "
+ "pages"),
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_STATS_VQ, \
+ "VIRTIO_BALLOON_F_STATS_VQ: Guest memory stats VQ available"),
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_DEFLATE_ON_OOM, \
+ "VIRTIO_BALLOON_F_DEFLATE_ON_OOM: Deflate balloon when guest OOM"),
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_FREE_PAGE_HINT, \
+ "VIRTIO_BALLOON_F_FREE_PAGE_HINT: VQ reporting free pages enabled"),
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_PAGE_POISON, \
+ "VIRTIO_BALLOON_F_PAGE_POISON: Guest page poisoning enabled"),
+ FEATURE_ENTRY(VIRTIO_BALLOON_F_REPORTING, \
+ "VIRTIO_BALLOON_F_REPORTING: Page reporting VQ enabled"),
+ { -1, "" }
+};
+
+/* virtio-crypto features mapping */
+qmp_virtio_feature_map_t virtio_crypto_feature_map[] = {
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ { -1, "" }
+};
+
+/* virtio-iommu features mapping */
+qmp_virtio_feature_map_t virtio_iommu_feature_map[] = {
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_INPUT_RANGE, \
+ "VIRTIO_IOMMU_F_INPUT_RANGE: Range of available virtual addrs. "
+ "available"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_DOMAIN_RANGE, \
+ "VIRTIO_IOMMU_F_DOMAIN_RANGE: Number of supported domains "
+ "available"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_MAP_UNMAP, \
+ "VIRTIO_IOMMU_F_MAP_UNMAP: Map and unmap requests available"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS, \
+ "VIRTIO_IOMMU_F_BYPASS: Endpoints not attached to domains are in "
+ "bypass mode"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_PROBE, \
+ "VIRTIO_IOMMU_F_PROBE: Probe requests available"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_MMIO, \
+ "VIRTIO_IOMMU_F_MMIO: VIRTIO_IOMMU_MAP_F_MMIO flag available"),
+ FEATURE_ENTRY(VIRTIO_IOMMU_F_BYPASS_CONFIG, \
+ "VIRTIO_IOMMU_F_BYPASS_CONFIG: Bypass field of IOMMU config "
+ "available"),
+ { -1, "" }
+};
+
+/* virtio-mem features mapping */
+qmp_virtio_feature_map_t virtio_mem_feature_map[] = {
+#ifndef CONFIG_ACPI
+ FEATURE_ENTRY(VIRTIO_MEM_F_ACPI_PXM, \
+ "VIRTIO_MEM_F_ACPI_PXM: node_id is an ACPI PXM and is valid"),
+#endif /* !CONFIG_ACPI */
+ FEATURE_ENTRY(VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE, \
+ "VIRTIO_MEM_F_UNPLUGGED_INACCESSIBLE: Unplugged memory cannot be "
+ "accessed"),
+ { -1, "" }
+};
+
+/* virtio-rng features mapping */
+qmp_virtio_feature_map_t virtio_rng_feature_map[] = {
+ FEATURE_ENTRY(VHOST_F_LOG_ALL, \
+ "VHOST_F_LOG_ALL: Logging write descriptors supported"),
+ FEATURE_ENTRY(VHOST_USER_F_PROTOCOL_FEATURES, \
+ "VHOST_USER_F_PROTOCOL_FEATURES: Vhost-user protocol features "
+ "negotiation supported"),
+ { -1, "" }
+};
/*
* The alignment to use between consumer and producer parts of vring.
@@ -391,6 +823,19 @@ static inline void vring_used_write(VirtQueue *vq, VRingUsedElem *uelem,
address_space_cache_invalidate(&caches->used, pa, sizeof(VRingUsedElem));
}
+/* Called within rcu_read_lock(). */
+static inline uint16_t vring_used_flags(VirtQueue *vq)
+{
+ VRingMemoryRegionCaches *caches = vring_get_region_caches(vq);
+ hwaddr pa = offsetof(VRingUsed, flags);
+
+ if (!caches) {
+ return 0;
+ }
+
+ return virtio_lduw_phys_cached(vq->vdev, &caches->used, pa);
+}
+
/* Called within rcu_read_lock(). */
static uint16_t vring_used_idx(VirtQueue *vq)
{
@@ -2980,6 +3425,13 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
return -EINVAL;
}
+
+ if (val & (1ull << VIRTIO_F_BAD_FEATURE)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "%s: guest driver for %s has enabled UNUSED(30) feature bit!\n",
+ __func__, vdev->name);
+ }
+
ret = virtio_set_features_nocheck(vdev, val);
if (virtio_vdev_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX)) {
/* VIRTIO_RING_F_EVENT_IDX changes the size of the caches. */
@@ -2999,11 +3451,12 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return ret;
}
-size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
- uint64_t host_features)
+size_t virtio_get_config_size(const VirtIOConfigSizeParams *params,
+ uint64_t host_features)
{
- size_t config_size = 0;
- int i;
+ size_t config_size = params->min_size;
+ const VirtIOFeature *feature_sizes = params->feature_sizes;
+ size_t i;
for (i = 0; feature_sizes[i].flags != 0; i++) {
if (host_features & feature_sizes[i].flags) {
@@ -3011,6 +3464,7 @@ size_t virtio_feature_get_config_size(const VirtIOFeature *feature_sizes,
}
}
+ assert(config_size <= params->max_size);
return config_size;
}
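
[Editor's note] The two hunks above turn virtio_feature_get_config_size() into virtio_get_config_size(), which takes a VirtIOConfigSizeParams bundling a minimum size, a maximum size and the feature-dependent size table, and asserts that the computed config size never exceeds the declared maximum. A hypothetical caller-side sketch follows; virtio_get_config_size(), VirtIOConfigSizeParams and VirtIOFeature come from this patch, while SOME_FEATURE_BIT, struct some_dev_config and its fields are made up for illustration (endof() is QEMU's existing offset helper):

static const VirtIOFeature some_dev_feature_sizes[] = {
    /* when SOME_FEATURE_BIT is negotiated, config space extends to cover opt_field */
    { .flags = 1ULL << SOME_FEATURE_BIT,
      .end = endof(struct some_dev_config, opt_field) },
    {}
};

static const VirtIOConfigSizeParams some_dev_cfg_size_params = {
    .min_size = offsetof(struct some_dev_config, opt_field),
    .max_size = sizeof(struct some_dev_config),
    .feature_sizes = some_dev_feature_sizes,
};

/* in the device's realize: */
size_t config_size = virtio_get_config_size(&some_dev_cfg_size_params,
                                            host_features);
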
@@ -3698,6 +4152,7 @@ static void virtio_device_realize(DeviceState *dev, Error **errp)
vdev->listener.commit = virtio_memory_listener_commit;
vdev->listener.name = "virtio";
memory_listener_register(&vdev->listener, vdev->dma_as);
+ QTAILQ_INSERT_TAIL(&virtio_list, vdev, next);
}
static void virtio_device_unrealize(DeviceState *dev)
@@ -3712,6 +4167,7 @@ static void virtio_device_unrealize(DeviceState *dev)
vdc->unrealize(dev);
}
+ QTAILQ_REMOVE(&virtio_list, vdev, next);
g_free(vdev->bus_name);
vdev->bus_name = NULL;
}
@@ -3885,6 +4341,8 @@ static void virtio_device_class_init(ObjectClass *klass, void *data)
vdc->stop_ioeventfd = virtio_device_stop_ioeventfd_impl;
vdc->legacy_features |= VIRTIO_LEGACY_FEATURES;
+
+ QTAILQ_INIT(&virtio_list);
}
bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
@@ -3895,6 +4353,589 @@ bool virtio_device_ioeventfd_enabled(VirtIODevice *vdev)
return virtio_bus_ioeventfd_enabled(vbus);
}
+VirtioInfoList *qmp_x_query_virtio(Error **errp)
+{
+ VirtioInfoList *list = NULL;
+ VirtioInfoList *node;
+ VirtIODevice *vdev;
+
+ QTAILQ_FOREACH(vdev, &virtio_list, next) {
+ DeviceState *dev = DEVICE(vdev);
+ Error *err = NULL;
+ QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err);
+
+ if (err == NULL) {
+ GString *is_realized = qobject_to_json_pretty(obj, true);
+ /* virtio device is NOT realized, remove it from list */
+ if (!strncmp(is_realized->str, "false", 4)) {
+ QTAILQ_REMOVE(&virtio_list, vdev, next);
+ } else {
+ node = g_new0(VirtioInfoList, 1);
+ node->value = g_new(VirtioInfo, 1);
+ node->value->path = g_strdup(dev->canonical_path);
+ node->value->name = g_strdup(vdev->name);
+ QAPI_LIST_PREPEND(list, node->value);
+ }
+ g_string_free(is_realized, true);
+ }
+ qobject_unref(obj);
+ }
+
+ return list;
+}
+
+static VirtIODevice *virtio_device_find(const char *path)
+{
+ VirtIODevice *vdev;
+
+ QTAILQ_FOREACH(vdev, &virtio_list, next) {
+ DeviceState *dev = DEVICE(vdev);
+
+ if (strcmp(dev->canonical_path, path) != 0) {
+ continue;
+ }
+
+ Error *err = NULL;
+ QObject *obj = qmp_qom_get(dev->canonical_path, "realized", &err);
+ if (err == NULL) {
+ GString *is_realized = qobject_to_json_pretty(obj, true);
+ /* virtio device is NOT realized, remove it from list */
+ if (!strncmp(is_realized->str, "false", 4)) {
+ g_string_free(is_realized, true);
+ qobject_unref(obj);
+ QTAILQ_REMOVE(&virtio_list, vdev, next);
+ return NULL;
+ }
+ g_string_free(is_realized, true);
+ } else {
+ /* virtio device doesn't exist in QOM tree */
+ QTAILQ_REMOVE(&virtio_list, vdev, next);
+ qobject_unref(obj);
+ return NULL;
+ }
+ /* device exists in QOM tree & is realized */
+ qobject_unref(obj);
+ return vdev;
+ }
+ return NULL;
+}
+
+#define CONVERT_FEATURES(type, map, is_status, bitmap) \
+ ({ \
+ type *list = NULL; \
+ type *node; \
+ for (i = 0; map[i].virtio_bit != -1; i++) { \
+ if (is_status) { \
+ bit = map[i].virtio_bit; \
+ } \
+ else { \
+ bit = 1ULL << map[i].virtio_bit; \
+ } \
+ if ((bitmap & bit) == 0) { \
+ continue; \
+ } \
+ node = g_new0(type, 1); \
+ node->value = g_strdup(map[i].feature_desc); \
+ node->next = list; \
+ list = node; \
+ bitmap ^= bit; \
+ } \
+ list; \
+ })
+
+static VirtioDeviceStatus *qmp_decode_status(uint8_t bitmap)
+{
+ VirtioDeviceStatus *status;
+ uint8_t bit;
+ int i;
+
+ status = g_new0(VirtioDeviceStatus, 1);
+ status->statuses = CONVERT_FEATURES(strList, virtio_config_status_map,
+ 1, bitmap);
+ status->has_unknown_statuses = bitmap != 0;
+ if (status->has_unknown_statuses) {
+ status->unknown_statuses = bitmap;
+ }
+
+ return status;
+}
+
+static VhostDeviceProtocols *qmp_decode_protocols(uint64_t bitmap)
+{
+ VhostDeviceProtocols *vhu_protocols;
+ uint64_t bit;
+ int i;
+
+ vhu_protocols = g_new0(VhostDeviceProtocols, 1);
+ vhu_protocols->protocols =
+ CONVERT_FEATURES(strList,
+ vhost_user_protocol_map, 0, bitmap);
+ vhu_protocols->has_unknown_protocols = bitmap != 0;
+ if (vhu_protocols->has_unknown_protocols) {
+ vhu_protocols->unknown_protocols = bitmap;
+ }
+
+ return vhu_protocols;
+}
+
+static VirtioDeviceFeatures *qmp_decode_features(uint16_t device_id,
+ uint64_t bitmap)
+{
+ VirtioDeviceFeatures *features;
+ uint64_t bit;
+ int i;
+
+ features = g_new0(VirtioDeviceFeatures, 1);
+ features->has_dev_features = true;
+
+ /* transport features */
+ features->transports = CONVERT_FEATURES(strList, virtio_transport_map, 0,
+ bitmap);
+
+ /* device features */
+ switch (device_id) {
+#ifdef CONFIG_VIRTIO_SERIAL
+ case VIRTIO_ID_CONSOLE:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_serial_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_BLK
+ case VIRTIO_ID_BLOCK:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_blk_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_GPU
+ case VIRTIO_ID_GPU:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_gpu_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_NET
+ case VIRTIO_ID_NET:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_net_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_SCSI
+ case VIRTIO_ID_SCSI:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_scsi_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_BALLOON
+ case VIRTIO_ID_BALLOON:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_balloon_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_IOMMU
+ case VIRTIO_ID_IOMMU:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_iommu_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_INPUT
+ case VIRTIO_ID_INPUT:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_input_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VHOST_USER_FS
+ case VIRTIO_ID_FS:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_fs_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VHOST_VSOCK
+ case VIRTIO_ID_VSOCK:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_vsock_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_CRYPTO
+ case VIRTIO_ID_CRYPTO:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_crypto_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_MEM
+ case VIRTIO_ID_MEM:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_mem_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_I2C_ADAPTER
+ case VIRTIO_ID_I2C_ADAPTER:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_i2c_feature_map, 0, bitmap);
+ break;
+#endif
+#ifdef CONFIG_VIRTIO_RNG
+ case VIRTIO_ID_RNG:
+ features->dev_features =
+ CONVERT_FEATURES(strList, virtio_rng_feature_map, 0, bitmap);
+ break;
+#endif
+ /* No features */
+ case VIRTIO_ID_9P:
+ case VIRTIO_ID_PMEM:
+ case VIRTIO_ID_IOMEM:
+ case VIRTIO_ID_RPMSG:
+ case VIRTIO_ID_CLOCK:
+ case VIRTIO_ID_MAC80211_WLAN:
+ case VIRTIO_ID_MAC80211_HWSIM:
+ case VIRTIO_ID_RPROC_SERIAL:
+ case VIRTIO_ID_MEMORY_BALLOON:
+ case VIRTIO_ID_CAIF:
+ case VIRTIO_ID_SIGNAL_DIST:
+ case VIRTIO_ID_PSTORE:
+ case VIRTIO_ID_SOUND:
+ case VIRTIO_ID_BT:
+ case VIRTIO_ID_RPMB:
+ case VIRTIO_ID_VIDEO_ENCODER:
+ case VIRTIO_ID_VIDEO_DECODER:
+ case VIRTIO_ID_SCMI:
+ case VIRTIO_ID_NITRO_SEC_MOD:
+ case VIRTIO_ID_WATCHDOG:
+ case VIRTIO_ID_CAN:
+ case VIRTIO_ID_DMABUF:
+ case VIRTIO_ID_PARAM_SERV:
+ case VIRTIO_ID_AUDIO_POLICY:
+ case VIRTIO_ID_GPIO:
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ features->has_unknown_dev_features = bitmap != 0;
+ if (features->has_unknown_dev_features) {
+ features->unknown_dev_features = bitmap;
+ }
+
+ return features;
+}
+
+VirtioStatus *qmp_x_query_virtio_status(const char *path, Error **errp)
+{
+ VirtIODevice *vdev;
+ VirtioStatus *status;
+
+ vdev = virtio_device_find(path);
+ if (vdev == NULL) {
+ error_setg(errp, "Path %s is not a VirtIODevice", path);
+ return NULL;
+ }
+
+ status = g_new0(VirtioStatus, 1);
+ status->name = g_strdup(vdev->name);
+ status->device_id = vdev->device_id;
+ status->vhost_started = vdev->vhost_started;
+ status->guest_features = qmp_decode_features(vdev->device_id,
+ vdev->guest_features);
+ status->host_features = qmp_decode_features(vdev->device_id,
+ vdev->host_features);
+ status->backend_features = qmp_decode_features(vdev->device_id,
+ vdev->backend_features);
+
+ switch (vdev->device_endian) {
+ case VIRTIO_DEVICE_ENDIAN_LITTLE:
+ status->device_endian = g_strdup("little");
+ break;
+ case VIRTIO_DEVICE_ENDIAN_BIG:
+ status->device_endian = g_strdup("big");
+ break;
+ default:
+ status->device_endian = g_strdup("unknown");
+ break;
+ }
+
+ status->num_vqs = virtio_get_num_queues(vdev);
+ status->status = qmp_decode_status(vdev->status);
+ status->isr = vdev->isr;
+ status->queue_sel = vdev->queue_sel;
+ status->vm_running = vdev->vm_running;
+ status->broken = vdev->broken;
+ status->disabled = vdev->disabled;
+ status->use_started = vdev->use_started;
+ status->started = vdev->started;
+ status->start_on_kick = vdev->start_on_kick;
+ status->disable_legacy_check = vdev->disable_legacy_check;
+ status->bus_name = g_strdup(vdev->bus_name);
+ status->use_guest_notifier_mask = vdev->use_guest_notifier_mask;
+ status->has_vhost_dev = vdev->vhost_started;
+
+ if (vdev->vhost_started) {
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ struct vhost_dev *hdev = vdc->get_vhost(vdev);
+
+ status->vhost_dev = g_new0(VhostStatus, 1);
+ status->vhost_dev->n_mem_sections = hdev->n_mem_sections;
+ status->vhost_dev->n_tmp_sections = hdev->n_tmp_sections;
+ status->vhost_dev->nvqs = hdev->nvqs;
+ status->vhost_dev->vq_index = hdev->vq_index;
+ status->vhost_dev->features =
+ qmp_decode_features(vdev->device_id, hdev->features);
+ status->vhost_dev->acked_features =
+ qmp_decode_features(vdev->device_id, hdev->acked_features);
+ status->vhost_dev->backend_features =
+ qmp_decode_features(vdev->device_id, hdev->backend_features);
+ status->vhost_dev->protocol_features =
+ qmp_decode_protocols(hdev->protocol_features);
+ status->vhost_dev->max_queues = hdev->max_queues;
+ status->vhost_dev->backend_cap = hdev->backend_cap;
+ status->vhost_dev->log_enabled = hdev->log_enabled;
+ status->vhost_dev->log_size = hdev->log_size;
+ }
+
+ return status;
+}
+
+VirtVhostQueueStatus *qmp_x_query_virtio_vhost_queue_status(const char *path,
+ uint16_t queue,
+ Error **errp)
+{
+ VirtIODevice *vdev;
+ VirtVhostQueueStatus *status;
+
+ vdev = virtio_device_find(path);
+ if (vdev == NULL) {
+ error_setg(errp, "Path %s is not a VirtIODevice", path);
+ return NULL;
+ }
+
+ if (!vdev->vhost_started) {
+ error_setg(errp, "Error: vhost device has not started yet");
+ return NULL;
+ }
+
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ struct vhost_dev *hdev = vdc->get_vhost(vdev);
+
+ if (queue < hdev->vq_index || queue >= hdev->vq_index + hdev->nvqs) {
+ error_setg(errp, "Invalid vhost virtqueue number %d", queue);
+ return NULL;
+ }
+
+ status = g_new0(VirtVhostQueueStatus, 1);
+ status->name = g_strdup(vdev->name);
+ status->kick = hdev->vqs[queue].kick;
+ status->call = hdev->vqs[queue].call;
+ status->desc = (uintptr_t)hdev->vqs[queue].desc;
+ status->avail = (uintptr_t)hdev->vqs[queue].avail;
+ status->used = (uintptr_t)hdev->vqs[queue].used;
+ status->num = hdev->vqs[queue].num;
+ status->desc_phys = hdev->vqs[queue].desc_phys;
+ status->desc_size = hdev->vqs[queue].desc_size;
+ status->avail_phys = hdev->vqs[queue].avail_phys;
+ status->avail_size = hdev->vqs[queue].avail_size;
+ status->used_phys = hdev->vqs[queue].used_phys;
+ status->used_size = hdev->vqs[queue].used_size;
+
+ return status;
+}
+
+VirtQueueStatus *qmp_x_query_virtio_queue_status(const char *path,
+ uint16_t queue,
+ Error **errp)
+{
+ VirtIODevice *vdev;
+ VirtQueueStatus *status;
+
+ vdev = virtio_device_find(path);
+ if (vdev == NULL) {
+ error_setg(errp, "Path %s is not a VirtIODevice", path);
+ return NULL;
+ }
+
+ if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
+ error_setg(errp, "Invalid virtqueue number %d", queue);
+ return NULL;
+ }
+
+ status = g_new0(VirtQueueStatus, 1);
+ status->name = g_strdup(vdev->name);
+ status->queue_index = vdev->vq[queue].queue_index;
+ status->inuse = vdev->vq[queue].inuse;
+ status->vring_num = vdev->vq[queue].vring.num;
+ status->vring_num_default = vdev->vq[queue].vring.num_default;
+ status->vring_align = vdev->vq[queue].vring.align;
+ status->vring_desc = vdev->vq[queue].vring.desc;
+ status->vring_avail = vdev->vq[queue].vring.avail;
+ status->vring_used = vdev->vq[queue].vring.used;
+ status->used_idx = vdev->vq[queue].used_idx;
+ status->signalled_used = vdev->vq[queue].signalled_used;
+ status->signalled_used_valid = vdev->vq[queue].signalled_used_valid;
+
+ if (vdev->vhost_started) {
+ VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
+ struct vhost_dev *hdev = vdc->get_vhost(vdev);
+
+ /* check if vq index exists for vhost as well */
+ if (queue >= hdev->vq_index && queue < hdev->vq_index + hdev->nvqs) {
+ status->has_last_avail_idx = true;
+
+ int vhost_vq_index =
+ hdev->vhost_ops->vhost_get_vq_index(hdev, queue);
+ struct vhost_vring_state state = {
+ .index = vhost_vq_index,
+ };
+
+ status->last_avail_idx =
+ hdev->vhost_ops->vhost_get_vring_base(hdev, &state);
+ }
+ } else {
+ status->has_shadow_avail_idx = true;
+ status->has_last_avail_idx = true;
+ status->last_avail_idx = vdev->vq[queue].last_avail_idx;
+ status->shadow_avail_idx = vdev->vq[queue].shadow_avail_idx;
+ }
+
+ return status;
+}
+
+static strList *qmp_decode_vring_desc_flags(uint16_t flags)
+{
+ strList *list = NULL;
+ strList *node;
+ int i;
+
+ struct {
+ uint16_t flag;
+ const char *value;
+ } map[] = {
+ { VRING_DESC_F_NEXT, "next" },
+ { VRING_DESC_F_WRITE, "write" },
+ { VRING_DESC_F_INDIRECT, "indirect" },
+ { 1 << VRING_PACKED_DESC_F_AVAIL, "avail" },
+ { 1 << VRING_PACKED_DESC_F_USED, "used" },
+ { 0, "" }
+ };
+
+ for (i = 0; map[i].flag; i++) {
+ if ((map[i].flag & flags) == 0) {
+ continue;
+ }
+ node = g_malloc0(sizeof(strList));
+ node->value = g_strdup(map[i].value);
+ node->next = list;
+ list = node;
+ }
+
+ return list;
+}
+
+VirtioQueueElement *qmp_x_query_virtio_queue_element(const char *path,
+ uint16_t queue,
+ bool has_index,
+ uint16_t index,
+ Error **errp)
+{
+ VirtIODevice *vdev;
+ VirtQueue *vq;
+ VirtioQueueElement *element = NULL;
+
+ vdev = virtio_device_find(path);
+ if (vdev == NULL) {
+ error_setg(errp, "Path %s is not a VirtIO device", path);
+ return NULL;
+ }
+
+ if (queue >= VIRTIO_QUEUE_MAX || !virtio_queue_get_num(vdev, queue)) {
+ error_setg(errp, "Invalid virtqueue number %d", queue);
+ return NULL;
+ }
+ vq = &vdev->vq[queue];
+
+ if (virtio_vdev_has_feature(vdev, VIRTIO_F_RING_PACKED)) {
+ error_setg(errp, "Packed ring not supported");
+ return NULL;
+ } else {
+ unsigned int head, i, max;
+ VRingMemoryRegionCaches *caches;
+ MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID;
+ MemoryRegionCache *desc_cache;
+ VRingDesc desc;
+ VirtioRingDescList *list = NULL;
+ VirtioRingDescList *node;
+ int rc; int ndescs;
+
+ RCU_READ_LOCK_GUARD();
+
+ max = vq->vring.num;
+
+ if (!has_index) {
+ head = vring_avail_ring(vq, vq->last_avail_idx % vq->vring.num);
+ } else {
+ head = vring_avail_ring(vq, index % vq->vring.num);
+ }
+ i = head;
+
+ caches = vring_get_region_caches(vq);
+ if (!caches) {
+ error_setg(errp, "Region caches not initialized");
+ return NULL;
+ }
+ if (caches->desc.len < max * sizeof(VRingDesc)) {
+ error_setg(errp, "Cannot map descriptor ring");
+ return NULL;
+ }
+
+ desc_cache = &caches->desc;
+ vring_split_desc_read(vdev, &desc, desc_cache, i);
+ if (desc.flags & VRING_DESC_F_INDIRECT) {
+ int64_t len;
+ len = address_space_cache_init(&indirect_desc_cache, vdev->dma_as,
+ desc.addr, desc.len, false);
+ desc_cache = &indirect_desc_cache;
+ if (len < desc.len) {
+ error_setg(errp, "Cannot map indirect buffer");
+ goto done;
+ }
+
+ max = desc.len / sizeof(VRingDesc);
+ i = 0;
+ vring_split_desc_read(vdev, &desc, desc_cache, i);
+ }
+
+ element = g_new0(VirtioQueueElement, 1);
+ element->avail = g_new0(VirtioRingAvail, 1);
+ element->used = g_new0(VirtioRingUsed, 1);
+ element->name = g_strdup(vdev->name);
+ element->index = head;
+ element->avail->flags = vring_avail_flags(vq);
+ element->avail->idx = vring_avail_idx(vq);
+ element->avail->ring = head;
+ element->used->flags = vring_used_flags(vq);
+ element->used->idx = vring_used_idx(vq);
+ ndescs = 0;
+
+ do {
+ /* A buggy driver may produce an infinite loop */
+ if (ndescs >= max) {
+ break;
+ }
+ node = g_new0(VirtioRingDescList, 1);
+ node->value = g_new0(VirtioRingDesc, 1);
+ node->value->addr = desc.addr;
+ node->value->len = desc.len;
+ node->value->flags = qmp_decode_vring_desc_flags(desc.flags);
+ node->next = list;
+ list = node;
+
+ ndescs++;
+ rc = virtqueue_split_read_next_desc(vdev, &desc, desc_cache,
+ max, &i);
+ } while (rc == VIRTQUEUE_READ_DESC_MORE);
+ element->descs = list;
+done:
+ address_space_cache_destroy(&indirect_desc_cache);
+ }
+
+ return element;
+}
+
static const TypeInfo virtio_device_info = {
.name = TYPE_VIRTIO_DEVICE,
.parent = TYPE_DEVICE,