Diffstat (limited to 'hw/virtio')
-rw-r--r--  hw/virtio/dataplane/vring.c |  47
-rw-r--r--  hw/virtio/vhost.c           |  18
-rw-r--r--  hw/virtio/virtio-balloon.c  |   2
-rw-r--r--  hw/virtio/virtio-mmio.c     |   3
-rw-r--r--  hw/virtio/virtio-pci.c      | 612
-rw-r--r--  hw/virtio/virtio-pci.h      |  59
-rw-r--r--  hw/virtio/virtio-rng.c      |   8
-rw-r--r--  hw/virtio/virtio.c          | 198
8 files changed, 870 insertions, 77 deletions
diff --git a/hw/virtio/dataplane/vring.c b/hw/virtio/dataplane/vring.c
index e37873384f..35891856ee 100644
--- a/hw/virtio/dataplane/vring.c
+++ b/hw/virtio/dataplane/vring.c
@@ -157,15 +157,18 @@ bool vring_should_notify(VirtIODevice *vdev, Vring *vring)
}
-static int get_desc(Vring *vring, VirtQueueElement *elem,
+static int get_desc(VirtIODevice *vdev, Vring *vring, VirtQueueElement *elem,
struct vring_desc *desc)
{
unsigned *num;
struct iovec *iov;
hwaddr *addr;
MemoryRegion *mr;
+ int is_write = virtio_tswap16(vdev, desc->flags) & VRING_DESC_F_WRITE;
+ uint32_t len = virtio_tswap32(vdev, desc->len);
+ uint64_t desc_addr = virtio_tswap64(vdev, desc->addr);
- if (desc->flags & VRING_DESC_F_WRITE) {
+ if (is_write) {
num = &elem->in_num;
iov = &elem->in_sg[*num];
addr = &elem->in_addr[*num];
@@ -189,18 +192,17 @@ static int get_desc(Vring *vring, VirtQueueElement *elem,
}
/* TODO handle non-contiguous memory across region boundaries */
- iov->iov_base = vring_map(&mr, desc->addr, desc->len,
- desc->flags & VRING_DESC_F_WRITE);
+ iov->iov_base = vring_map(&mr, desc_addr, len, is_write);
if (!iov->iov_base) {
error_report("Failed to map descriptor addr %#" PRIx64 " len %u",
- (uint64_t)desc->addr, desc->len);
+ (uint64_t)desc_addr, len);
return -EFAULT;
}
/* The MemoryRegion is looked up again and unref'ed later, leave the
* ref in place. */
- iov->iov_len = desc->len;
- *addr = desc->addr;
+ iov->iov_len = len;
+ *addr = desc_addr;
*num += 1;
return 0;
}
@@ -222,21 +224,23 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
struct vring_desc desc;
unsigned int i = 0, count, found = 0;
int ret;
+ uint32_t len = virtio_tswap32(vdev, indirect->len);
+ uint64_t addr = virtio_tswap64(vdev, indirect->addr);
/* Sanity check */
- if (unlikely(indirect->len % sizeof(desc))) {
+ if (unlikely(len % sizeof(desc))) {
error_report("Invalid length in indirect descriptor: "
"len %#x not multiple of %#zx",
- indirect->len, sizeof(desc));
+ len, sizeof(desc));
vring->broken = true;
return -EFAULT;
}
- count = indirect->len / sizeof(desc);
+ count = len / sizeof(desc);
/* Buffers are chained via a 16 bit next field, so
* we can have at most 2^16 of these. */
if (unlikely(count > USHRT_MAX + 1)) {
- error_report("Indirect buffer length too big: %d", indirect->len);
+ error_report("Indirect buffer length too big: %d", len);
vring->broken = true;
return -EFAULT;
}
@@ -247,12 +251,12 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
/* Translate indirect descriptor */
desc_ptr = vring_map(&mr,
- indirect->addr + found * sizeof(desc),
+ addr + found * sizeof(desc),
sizeof(desc), false);
if (!desc_ptr) {
error_report("Failed to map indirect descriptor "
"addr %#" PRIx64 " len %zu",
- (uint64_t)indirect->addr + found * sizeof(desc),
+ (uint64_t)addr + found * sizeof(desc),
sizeof(desc));
vring->broken = true;
return -EFAULT;
@@ -270,19 +274,20 @@ static int get_indirect(VirtIODevice *vdev, Vring *vring,
return -EFAULT;
}
- if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
+ if (unlikely(virtio_tswap16(vdev, desc.flags)
+ & VRING_DESC_F_INDIRECT)) {
error_report("Nested indirect descriptor");
vring->broken = true;
return -EFAULT;
}
- ret = get_desc(vring, elem, &desc);
+ ret = get_desc(vdev, vring, elem, &desc);
if (ret < 0) {
vring->broken |= (ret == -EFAULT);
return ret;
}
- i = desc.next;
- } while (desc.flags & VRING_DESC_F_NEXT);
+ i = virtio_tswap16(vdev, desc.next);
+ } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
return 0;
}
@@ -383,7 +388,7 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
/* Ensure descriptor is loaded before accessing fields */
barrier();
- if (desc.flags & VRING_DESC_F_INDIRECT) {
+ if (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_INDIRECT) {
ret = get_indirect(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
@@ -391,13 +396,13 @@ int vring_pop(VirtIODevice *vdev, Vring *vring,
continue;
}
- ret = get_desc(vring, elem, &desc);
+ ret = get_desc(vdev, vring, elem, &desc);
if (ret < 0) {
goto out;
}
- i = desc.next;
- } while (desc.flags & VRING_DESC_F_NEXT);
+ i = virtio_tswap16(vdev, desc.next);
+ } while (virtio_tswap16(vdev, desc.flags) & VRING_DESC_F_NEXT);
/* On success, increment avail index. */
vring->last_avail_idx++;
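The vring.c hunks above route every descriptor field through virtio_tswap16/32/64 so that a virtio-1 device, whose rings are always little-endian, keeps working when device and host byte order differ. A minimal self-contained sketch of that idea, using hypothetical demo_* names rather than the QEMU helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical device handle: 'is_le' is true once VIRTIO_F_VERSION_1 is
 * negotiated, since virtio-1 rings are always little-endian. */
struct demo_vdev { bool is_le; };

static uint16_t demo_bswap16(uint16_t x)
{
    return (uint16_t)((x >> 8) | (x << 8));
}

static bool host_is_le(void)
{
    const uint16_t probe = 1;
    return *(const uint8_t *)&probe == 1;
}

/* Swap only when the device's memory format and the host byte order
 * disagree; this is the idea behind QEMU's virtio_tswap16/32/64. */
static uint16_t demo_tswap16(const struct demo_vdev *vdev, uint16_t v)
{
    return (vdev->is_le == host_is_le()) ? v : demo_bswap16(v);
}

int main(void)
{
    struct demo_vdev modern = { .is_le = true };
    uint16_t flags = demo_tswap16(&modern, 0x0001 /* VRING_DESC_F_NEXT */);
    printf("flags=%#x next=%u\n", (unsigned)flags, flags & 0x1u);
    return 0;
}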
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index a7fe3c5104..2d6c27af8d 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -591,7 +591,7 @@ static int vhost_dev_set_features(struct vhost_dev *dev, bool enable_log)
uint64_t features = dev->acked_features;
int r;
if (enable_log) {
- features |= 0x1 << VHOST_F_LOG_ALL;
+ features |= 0x1ULL << VHOST_F_LOG_ALL;
}
r = dev->vhost_ops->vhost_call(dev, VHOST_SET_FEATURES, &features);
return r < 0 ? -errno : 0;
@@ -902,7 +902,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
.priority = 10
};
hdev->migration_blocker = NULL;
- if (!(hdev->features & (0x1 << VHOST_F_LOG_ALL))) {
+ if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
migrate_add_blocker(hdev->migration_blocker);
@@ -1045,12 +1045,12 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n,
assert(r >= 0);
}
-unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
- unsigned features)
+uint64_t vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
+ uint64_t features)
{
const int *bit = feature_bits;
while (*bit != VHOST_INVALID_FEATURE_BIT) {
- unsigned bit_mask = (1 << *bit);
+ uint64_t bit_mask = (1ULL << *bit);
if (!(hdev->features & bit_mask)) {
features &= ~bit_mask;
}
@@ -1060,11 +1060,11 @@ unsigned vhost_get_features(struct vhost_dev *hdev, const int *feature_bits,
}
void vhost_ack_features(struct vhost_dev *hdev, const int *feature_bits,
- unsigned features)
+ uint64_t features)
{
const int *bit = feature_bits;
while (*bit != VHOST_INVALID_FEATURE_BIT) {
- unsigned bit_mask = (1 << *bit);
+ uint64_t bit_mask = (1ULL << *bit);
if (features & bit_mask) {
hdev->acked_features |= bit_mask;
}
@@ -1114,9 +1114,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
return 0;
fail_log:
- if (hdev->log_size) {
- vhost_log_put(hdev, false);
- }
+ vhost_log_put(hdev, false);
fail_vq:
while (--i >= 0) {
vhost_virtqueue_stop(hdev,
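The vhost.c changes widen the feature masks because virtio-1 moves feature bits past bit 31 (VIRTIO_F_VERSION_1 is bit 32), and shifting a plain int that far is undefined behaviour. A small standalone illustration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int bit = 32;               /* e.g. VIRTIO_F_VERSION_1 */
    uint64_t wide = 1ULL << bit;      /* well-defined: 0x100000000 */
    /* uint64_t narrow = 1 << bit; */ /* undefined: a 32-bit int shifted by 32 */
    printf("1ULL << %d = %#llx\n", bit, (unsigned long long)wide);
    return 0;
}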
diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c
index f915c7bd73..78bc14fc85 100644
--- a/hw/virtio/virtio-balloon.c
+++ b/hw/virtio/virtio-balloon.c
@@ -312,7 +312,7 @@ static void virtio_balloon_set_config(VirtIODevice *vdev,
static uint64_t virtio_balloon_get_features(VirtIODevice *vdev, uint64_t f)
{
- f |= (1 << VIRTIO_BALLOON_F_STATS_VQ);
+ virtio_add_feature(&f, VIRTIO_BALLOON_F_STATS_VQ);
return f;
}
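The balloon hunk replaces a raw shift with a feature helper. A sketch of helpers in that spirit (illustrative names, not the QEMU definitions):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit-safe feature helpers; the assert catches out-of-range bits. */
static inline void add_feature(uint64_t *features, unsigned int fbit)
{
    assert(fbit < 64);
    *features |= 1ULL << fbit;
}

static inline bool has_feature(uint64_t features, unsigned int fbit)
{
    assert(fbit < 64);
    return (features & (1ULL << fbit)) != 0;
}

int main(void)
{
    uint64_t f = 0;
    add_feature(&f, 1 /* VIRTIO_BALLOON_F_STATS_VQ */);
    printf("features=%#llx stats_vq=%d\n",
           (unsigned long long)f, (int)has_feature(f, 1));
    return 0;
}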
diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c
index c8f72947d4..18660b07b1 100644
--- a/hw/virtio/virtio-mmio.c
+++ b/hw/virtio/virtio-mmio.c
@@ -333,8 +333,11 @@ static void virtio_mmio_write(void *opaque, hwaddr offset, uint64_t value,
case VIRTIO_MMIO_QUEUENUM:
DPRINTF("mmio_queue write %d max %d\n", (int)value, VIRTQUEUE_MAX_SIZE);
virtio_queue_set_num(vdev, vdev->queue_sel, value);
+ /* Note: only call this function for legacy devices */
+ virtio_queue_update_rings(vdev, vdev->queue_sel);
break;
case VIRTIO_MMIO_QUEUEALIGN:
+ /* Note: this is only valid for legacy devices */
virtio_queue_set_align(vdev, vdev->queue_sel, value);
break;
case VIRTIO_MMIO_QUEUEPFN:
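virtio_queue_update_rings(), called above for legacy devices only, recomputes the split-ring layout from the descriptor base: the avail ring follows the descriptor table and the used ring starts at the next alignment boundary. A standalone sketch of that arithmetic with illustrative values:

#include <stdint.h>
#include <stdio.h>

#define DESC_SIZE  16u  /* bytes per descriptor (struct vring_desc) */
#define AVAIL_HDR   4u  /* avail ring header: flags + idx */
#define RING_SLOT   2u  /* one 16-bit entry in the avail ring */

static uint64_t align_up(uint64_t x, uint64_t a)
{
    return (x + a - 1) & ~(a - 1);
}

int main(void)
{
    uint64_t desc = 0x10000;          /* guest-physical ring base (illustrative) */
    uint64_t num = 256, align = 4096; /* queue size and legacy alignment */

    uint64_t avail = desc + num * DESC_SIZE;
    uint64_t used  = align_up(avail + AVAIL_HDR + num * RING_SLOT, align);

    printf("desc=%#llx avail=%#llx used=%#llx\n",
           (unsigned long long)desc, (unsigned long long)avail,
           (unsigned long long)used);
    return 0;
}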
diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c
index 6d4f64e282..d7cf34cee9 100644
--- a/hw/virtio/virtio-pci.c
+++ b/hw/virtio/virtio-pci.c
@@ -24,6 +24,7 @@
#include "hw/virtio/virtio-serial.h"
#include "hw/virtio/virtio-scsi.h"
#include "hw/virtio/virtio-balloon.h"
+#include "hw/virtio/virtio-input.h"
#include "hw/pci/pci.h"
#include "qemu/error-report.h"
#include "hw/pci/msi.h"
@@ -38,6 +39,8 @@
#define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev))
+#undef VIRTIO_PCI_CONFIG
+
/* The remaining space is defined by each driver as the per-driver
* configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))
@@ -133,12 +136,21 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
return 0;
}
+#define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000
+
static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
int n, bool assign, bool set_handler)
{
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
VirtQueue *vq = virtio_get_queue(vdev, n);
EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
+ bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
+ bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
+ MemoryRegion *modern_mr = &proxy->notify.mr;
+ MemoryRegion *legacy_mr = &proxy->bar;
+ hwaddr modern_addr = QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
+ virtio_get_queue_index(vq);
+ hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;
int r = 0;
if (assign) {
@@ -149,11 +161,23 @@ static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
return r;
}
virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
- memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
- true, n, notifier);
+ if (modern) {
+ memory_region_add_eventfd(modern_mr, modern_addr, 2,
+ true, n, notifier);
+ }
+ if (legacy) {
+ memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
+ true, n, notifier);
+ }
} else {
- memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
- true, n, notifier);
+ if (modern) {
+ memory_region_del_eventfd(modern_mr, modern_addr, 2,
+ true, n, notifier);
+ }
+ if (legacy) {
+ memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
+ true, n, notifier);
+ }
virtio_queue_set_host_notifier_fd_handler(vq, false, false);
event_notifier_cleanup(notifier);
}
@@ -918,11 +942,359 @@ static int virtio_pci_query_nvectors(DeviceState *d)
return proxy->nvectors;
}
+static void virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
+ struct virtio_pci_cap *cap)
+{
+ PCIDevice *dev = &proxy->pci_dev;
+ int offset;
+
+ offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, cap->cap_len);
+ assert(offset > 0);
+
+ assert(cap->cap_len >= sizeof *cap);
+ memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
+ cap->cap_len - PCI_CAP_FLAGS);
+}
+
+static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint32_t val = 0;
+ int i;
+
+ switch (addr) {
+ case VIRTIO_PCI_COMMON_DFSELECT:
+ val = proxy->dfselect;
+ break;
+ case VIRTIO_PCI_COMMON_DF:
+ if (proxy->dfselect <= 1) {
+ val = vdev->host_features >> (32 * proxy->dfselect);
+ }
+ break;
+ case VIRTIO_PCI_COMMON_GFSELECT:
+ val = proxy->gfselect;
+ break;
+ case VIRTIO_PCI_COMMON_GF:
+ if (proxy->gfselect <= ARRAY_SIZE(proxy->guest_features)) {
+ val = proxy->guest_features[proxy->gfselect];
+ }
+ break;
+ case VIRTIO_PCI_COMMON_MSIX:
+ val = vdev->config_vector;
+ break;
+ case VIRTIO_PCI_COMMON_NUMQ:
+ for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
+ if (virtio_queue_get_num(vdev, i)) {
+ val = i + 1;
+ }
+ }
+ break;
+ case VIRTIO_PCI_COMMON_STATUS:
+ val = vdev->status;
+ break;
+ case VIRTIO_PCI_COMMON_CFGGENERATION:
+ val = vdev->generation;
+ break;
+ case VIRTIO_PCI_COMMON_Q_SELECT:
+ val = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_COMMON_Q_SIZE:
+ val = virtio_queue_get_num(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_PCI_COMMON_Q_MSIX:
+ val = virtio_queue_vector(vdev, vdev->queue_sel);
+ break;
+ case VIRTIO_PCI_COMMON_Q_ENABLE:
+ val = proxy->vqs[vdev->queue_sel].enabled;
+ break;
+ case VIRTIO_PCI_COMMON_Q_NOFF:
+ /* Simply map queues in order */
+ val = vdev->queue_sel;
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCLO:
+ val = proxy->vqs[vdev->queue_sel].desc[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCHI:
+ val = proxy->vqs[vdev->queue_sel].desc[1];
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILLO:
+ val = proxy->vqs[vdev->queue_sel].avail[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILHI:
+ val = proxy->vqs[vdev->queue_sel].avail[1];
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDLO:
+ val = proxy->vqs[vdev->queue_sel].used[0];
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDHI:
+ val = proxy->vqs[vdev->queue_sel].used[1];
+ break;
+ default:
+ val = 0;
+ }
+
+ return val;
+}
+
+static void virtio_pci_common_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+
+ switch (addr) {
+ case VIRTIO_PCI_COMMON_DFSELECT:
+ proxy->dfselect = val;
+ break;
+ case VIRTIO_PCI_COMMON_GFSELECT:
+ proxy->gfselect = val;
+ break;
+ case VIRTIO_PCI_COMMON_GF:
+ if (proxy->gfselect <= ARRAY_SIZE(proxy->guest_features)) {
+ proxy->guest_features[proxy->gfselect] = val;
+ virtio_set_features(vdev,
+ (((uint64_t)proxy->guest_features[1]) << 32) |
+ proxy->guest_features[0]);
+ }
+ break;
+ case VIRTIO_PCI_COMMON_MSIX:
+ msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
+ /* Make it possible for guest to discover an error took place. */
+ if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+ val = VIRTIO_NO_VECTOR;
+ }
+ vdev->config_vector = val;
+ break;
+ case VIRTIO_PCI_COMMON_STATUS:
+ if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
+ virtio_pci_stop_ioeventfd(proxy);
+ }
+
+ virtio_set_status(vdev, val & 0xFF);
+
+ if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
+ virtio_pci_start_ioeventfd(proxy);
+ }
+
+ if (vdev->status == 0) {
+ virtio_reset(vdev);
+ msix_unuse_all_vectors(&proxy->pci_dev);
+ }
+
+ break;
+ case VIRTIO_PCI_COMMON_Q_SELECT:
+ if (val < VIRTIO_QUEUE_MAX) {
+ vdev->queue_sel = val;
+ }
+ break;
+ case VIRTIO_PCI_COMMON_Q_SIZE:
+ proxy->vqs[vdev->queue_sel].num = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_MSIX:
+ msix_vector_unuse(&proxy->pci_dev,
+ virtio_queue_vector(vdev, vdev->queue_sel));
+ /* Make it possible for guest to discover an error took place. */
+ if (msix_vector_use(&proxy->pci_dev, val) < 0) {
+ val = VIRTIO_NO_VECTOR;
+ }
+ virtio_queue_set_vector(vdev, vdev->queue_sel, val);
+ break;
+ case VIRTIO_PCI_COMMON_Q_ENABLE:
+ /* TODO: need a way to put num back on reset. */
+ virtio_queue_set_num(vdev, vdev->queue_sel,
+ proxy->vqs[vdev->queue_sel].num);
+ virtio_queue_set_rings(vdev, vdev->queue_sel,
+ ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].desc[0],
+ ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].avail[0],
+ ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
+ proxy->vqs[vdev->queue_sel].used[0]);
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCLO:
+ proxy->vqs[vdev->queue_sel].desc[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_DESCHI:
+ proxy->vqs[vdev->queue_sel].desc[1] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILLO:
+ proxy->vqs[vdev->queue_sel].avail[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_AVAILHI:
+ proxy->vqs[vdev->queue_sel].avail[1] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDLO:
+ proxy->vqs[vdev->queue_sel].used[0] = val;
+ break;
+ case VIRTIO_PCI_COMMON_Q_USEDHI:
+ proxy->vqs[vdev->queue_sel].used[1] = val;
+ break;
+ default:
+ break;
+ }
+}
+
+
+static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ return 0;
+}
+
+static void virtio_pci_notify_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ unsigned queue = addr / QEMU_VIRTIO_PCI_QUEUE_MEM_MULT;
+
+ if (queue < VIRTIO_QUEUE_MAX) {
+ virtio_queue_notify(vdev, queue);
+ }
+}
+
+static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIOPCIProxy *proxy = opaque;
+ VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
+ uint64_t val = vdev->isr;
+
+ vdev->isr = 0;
+ pci_irq_deassert(&proxy->pci_dev);
+
+ return val;
+}
+
+static void virtio_pci_isr_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+}
+
+static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
+ unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ uint64_t val = 0;
+
+ switch (size) {
+ case 1:
+ val = virtio_config_modern_readb(vdev, addr);
+ break;
+ case 2:
+ val = virtio_config_modern_readw(vdev, addr);
+ break;
+ case 4:
+ val = virtio_config_modern_readl(vdev, addr);
+ break;
+ }
+ return val;
+}
+
+static void virtio_pci_device_write(void *opaque, hwaddr addr,
+ uint64_t val, unsigned size)
+{
+ VirtIODevice *vdev = opaque;
+ switch (size) {
+ case 1:
+ virtio_config_modern_writeb(vdev, addr, val);
+ break;
+ case 2:
+ virtio_config_modern_writew(vdev, addr, val);
+ break;
+ case 4:
+ virtio_config_modern_writel(vdev, addr, val);
+ break;
+ }
+}
+
+static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy)
+{
+ static const MemoryRegionOps common_ops = {
+ .read = virtio_pci_common_read,
+ .write = virtio_pci_common_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps isr_ops = {
+ .read = virtio_pci_isr_read,
+ .write = virtio_pci_isr_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps device_ops = {
+ .read = virtio_pci_device_read,
+ .write = virtio_pci_device_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+ static const MemoryRegionOps notify_ops = {
+ .read = virtio_pci_notify_read,
+ .write = virtio_pci_notify_write,
+ .impl = {
+ .min_access_size = 1,
+ .max_access_size = 4,
+ },
+ .endianness = DEVICE_LITTLE_ENDIAN,
+ };
+
+ memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
+ &common_ops,
+ proxy,
+ "virtio-pci-common",
+ proxy->common.size);
+
+ memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
+ &isr_ops,
+ proxy,
+ "virtio-pci-isr",
+ proxy->isr.size);
+
+ memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
+ &device_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-device",
+ proxy->device.size);
+
+ memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
+ &notify_ops,
+ virtio_bus_get_device(&proxy->bus),
+ "virtio-pci-notify",
+ proxy->notify.size);
+}
+
+static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
+ VirtIOPCIRegion *region,
+ struct virtio_pci_cap *cap)
+{
+ memory_region_add_subregion(&proxy->modern_bar,
+ region->offset,
+ &region->mr);
+
+ cap->cfg_type = region->type;
+ cap->bar = proxy->modern_mem_bar;
+ cap->offset = cpu_to_le32(region->offset);
+ cap->length = cpu_to_le32(region->size);
+ virtio_pci_add_mem_cap(proxy, cap);
+}
+
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
VirtioBusState *bus = &proxy->bus;
+ bool legacy = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_LEGACY);
+ bool modern = !(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_MODERN);
uint8_t *config;
uint32_t size;
VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
@@ -931,13 +1303,51 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
if (proxy->class_code) {
pci_config_set_class(config, proxy->class_code);
}
- pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
- pci_get_word(config + PCI_VENDOR_ID));
- pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
+
+ if (legacy) {
+ /* legacy and transitional */
+ pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
+ pci_get_word(config + PCI_VENDOR_ID));
+ pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
+ } else {
+ /* pure virtio-1.0 */
+ pci_set_word(config + PCI_VENDOR_ID,
+ PCI_VENDOR_ID_REDHAT_QUMRANET);
+ pci_set_word(config + PCI_DEVICE_ID,
+ 0x1040 + virtio_bus_get_vdev_id(bus));
+ pci_config_set_revision(config, 1);
+ }
config[PCI_INTERRUPT_PIN] = 1;
+
+ if (modern) {
+ struct virtio_pci_cap cap = {
+ .cap_len = sizeof cap,
+ };
+ struct virtio_pci_notify_cap notify = {
+ .cap.cap_len = sizeof notify,
+ .notify_off_multiplier =
+ cpu_to_le32(QEMU_VIRTIO_PCI_QUEUE_MEM_MULT),
+ };
+
+ /* TODO: add io access for speed */
+
+ virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
+ virtio_pci_modern_regions_init(proxy);
+ virtio_pci_modern_region_map(proxy, &proxy->common, &cap);
+ virtio_pci_modern_region_map(proxy, &proxy->isr, &cap);
+ virtio_pci_modern_region_map(proxy, &proxy->device, &cap);
+ virtio_pci_modern_region_map(proxy, &proxy->notify, &notify.cap);
+ pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar,
+ PCI_BASE_ADDRESS_SPACE_MEMORY |
+ PCI_BASE_ADDRESS_MEM_PREFETCH |
+ PCI_BASE_ADDRESS_MEM_TYPE_64,
+ &proxy->modern_bar);
+ }
+
if (proxy->nvectors &&
- msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
+ msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
+ proxy->msix_bar)) {
error_report("unable to init msix vectors to %" PRIu32,
proxy->nvectors);
proxy->nvectors = 0;
@@ -945,16 +1355,20 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
proxy->pci_dev.config_write = virtio_write_config;
- size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
- + virtio_bus_get_vdev_config_len(bus);
- if (size & (size - 1)) {
- size = 1 << qemu_fls(size);
- }
+ if (legacy) {
+ size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
+ + virtio_bus_get_vdev_config_len(bus);
+ if (size & (size - 1)) {
+ size = 1 << qemu_fls(size);
+ }
- memory_region_init_io(&proxy->bar, OBJECT(proxy), &virtio_pci_config_ops,
- proxy, "virtio-pci", size);
- pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
- &proxy->bar);
+ memory_region_init_io(&proxy->bar, OBJECT(proxy),
+ &virtio_pci_config_ops,
+ proxy, "virtio-pci", size);
+
+ pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar,
+ PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
+ }
if (!kvm_has_many_ioeventfds()) {
proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
@@ -972,12 +1386,47 @@ static void virtio_pci_device_unplugged(DeviceState *d)
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
- VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
+ VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
- virtio_pci_bus_new(&dev->bus, sizeof(dev->bus), dev);
+ /*
+ * virtio pci bar layout used by default.
+ * subclasses can re-arrange things if needed.
+ *
+ * region 0 -- virtio legacy io bar
+ * region 1 -- msi-x bar
+ * region 4+5 -- virtio modern memory (64bit) bar
+ *
+ */
+ proxy->legacy_io_bar = 0;
+ proxy->msix_bar = 1;
+ proxy->modern_mem_bar = 4;
+
+ proxy->common.offset = 0x0;
+ proxy->common.size = 0x1000;
+ proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;
+
+ proxy->isr.offset = 0x1000;
+ proxy->isr.size = 0x1000;
+ proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;
+
+ proxy->device.offset = 0x2000;
+ proxy->device.size = 0x1000;
+ proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;
+
+ proxy->notify.offset = 0x3000;
+ proxy->notify.size =
+ QEMU_VIRTIO_PCI_QUEUE_MEM_MULT * VIRTIO_QUEUE_MAX;
+ proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;
+
+ /* subclasses can enforce modern, so do this unconditionally */
+ memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
+ 2 * QEMU_VIRTIO_PCI_QUEUE_MEM_MULT *
+ VIRTIO_QUEUE_MAX);
+
+ virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
if (k->realize) {
- k->realize(dev, errp);
+ k->realize(proxy, errp);
}
}
@@ -998,6 +1447,10 @@ static void virtio_pci_reset(DeviceState *qdev)
static Property virtio_pci_properties[] = {
DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
+ DEFINE_PROP_BIT("disable-legacy", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT, false),
+ DEFINE_PROP_BIT("disable-modern", VirtIOPCIProxy, flags,
+ VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT, true),
DEFINE_PROP_END_OF_LIST(),
};
@@ -1445,6 +1898,120 @@ static const TypeInfo virtio_rng_pci_info = {
.class_init = virtio_rng_pci_class_init,
};
+/* virtio-input-pci */
+
+static Property virtio_input_hid_pci_properties[] = {
+ DEFINE_VIRTIO_INPUT_PROPERTIES(VirtIOInputPCI, vdev.input),
+ DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
+ DEFINE_PROP_END_OF_LIST(),
+};
+
+static void virtio_input_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
+{
+ VirtIOInputPCI *vinput = VIRTIO_INPUT_PCI(vpci_dev);
+ DeviceState *vdev = DEVICE(&vinput->vdev);
+
+ qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
+ /* force virtio-1.0 */
+ vpci_dev->flags &= ~VIRTIO_PCI_FLAG_DISABLE_MODERN;
+ vpci_dev->flags |= VIRTIO_PCI_FLAG_DISABLE_LEGACY;
+ object_property_set_bool(OBJECT(vdev), true, "realized", errp);
+}
+
+static void virtio_input_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+ VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ k->realize = virtio_input_pci_realize;
+ set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_OTHER;
+}
+
+static void virtio_input_hid_pci_class_init(ObjectClass *klass, void *data)
+{
+ DeviceClass *dc = DEVICE_CLASS(klass);
+
+ dc->props = virtio_input_hid_pci_properties;
+}
+
+static void virtio_input_hid_kbd_pci_class_init(ObjectClass *klass, void *data)
+{
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_KEYBOARD;
+}
+
+static void virtio_input_hid_mouse_pci_class_init(ObjectClass *klass,
+ void *data)
+{
+ PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
+
+ pcidev_k->class_id = PCI_CLASS_INPUT_MOUSE;
+}
+
+static void virtio_keyboard_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+ object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_KEYBOARD);
+ object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static void virtio_mouse_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+ object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_MOUSE);
+ object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static void virtio_tablet_initfn(Object *obj)
+{
+ VirtIOInputHIDPCI *dev = VIRTIO_INPUT_HID_PCI(obj);
+ object_initialize(&dev->vdev, sizeof(dev->vdev), TYPE_VIRTIO_TABLET);
+ object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
+}
+
+static const TypeInfo virtio_input_pci_info = {
+ .name = TYPE_VIRTIO_INPUT_PCI,
+ .parent = TYPE_VIRTIO_PCI,
+ .instance_size = sizeof(VirtIOInputPCI),
+ .class_init = virtio_input_pci_class_init,
+ .abstract = true,
+};
+
+static const TypeInfo virtio_input_hid_pci_info = {
+ .name = TYPE_VIRTIO_INPUT_HID_PCI,
+ .parent = TYPE_VIRTIO_INPUT_PCI,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .class_init = virtio_input_hid_pci_class_init,
+ .abstract = true,
+};
+
+static const TypeInfo virtio_keyboard_pci_info = {
+ .name = TYPE_VIRTIO_KEYBOARD_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .class_init = virtio_input_hid_kbd_pci_class_init,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_keyboard_initfn,
+};
+
+static const TypeInfo virtio_mouse_pci_info = {
+ .name = TYPE_VIRTIO_MOUSE_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .class_init = virtio_input_hid_mouse_pci_class_init,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_mouse_initfn,
+};
+
+static const TypeInfo virtio_tablet_pci_info = {
+ .name = TYPE_VIRTIO_TABLET_PCI,
+ .parent = TYPE_VIRTIO_INPUT_HID_PCI,
+ .instance_size = sizeof(VirtIOInputHIDPCI),
+ .instance_init = virtio_tablet_initfn,
+};
+
/* virtio-pci-bus */
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
@@ -1486,6 +2053,11 @@ static const TypeInfo virtio_pci_bus_info = {
static void virtio_pci_register_types(void)
{
type_register_static(&virtio_rng_pci_info);
+ type_register_static(&virtio_input_pci_info);
+ type_register_static(&virtio_input_hid_pci_info);
+ type_register_static(&virtio_keyboard_pci_info);
+ type_register_static(&virtio_mouse_pci_info);
+ type_register_static(&virtio_tablet_pci_info);
type_register_static(&virtio_pci_bus_info);
type_register_static(&virtio_pci_info);
#ifdef CONFIG_VIRTFS
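The modern transport added above advertises each register block through a vendor-specific PCI capability naming a BAR, an offset and a length; the notify capability also carries notify_off_multiplier. A driver-side sketch of locating a queue doorbell from those fields (layout follows the VIRTIO 1.0 spec; the addresses and slot count are made up):

#include <stdint.h>
#include <stdio.h>

/* Demo types mirroring struct virtio_pci_cap / virtio_pci_notify_cap from
 * the VIRTIO 1.0 spec; offset/length are little-endian on the wire. */
struct demo_pci_cap {
    uint8_t  cap_vndr, cap_next, cap_len, cfg_type;
    uint8_t  bar, padding[3];
    uint32_t offset, length;
};

struct demo_notify_cap {
    struct demo_pci_cap cap;
    uint32_t notify_off_multiplier;
};

int main(void)
{
    /* Values mirroring virtio_pci_realize() above: notify region at offset
     * 0x3000 of BAR 4, one 0x1000-byte slot per queue
     * (QEMU_VIRTIO_PCI_QUEUE_MEM_MULT); the slot count is illustrative. */
    struct demo_notify_cap notify = {
        .cap = {
            .cfg_type = 2,          /* VIRTIO_PCI_CAP_NOTIFY_CFG */
            .bar      = 4,
            .offset   = 0x3000,
            .length   = 64 * 0x1000,
        },
        .notify_off_multiplier = 0x1000,
    };

    uint64_t bar_base = 0xfe000000;   /* hypothetical BAR 4 mapping */
    unsigned queue_index = 5;         /* Q_NOFF above maps queues in order,
                                       * so notify_off == queue index */
    uint64_t doorbell = bar_base + notify.cap.offset +
                        (uint64_t)queue_index * notify.notify_off_multiplier;

    printf("queue %u doorbell at %#llx\n",
           queue_index, (unsigned long long)doorbell);
    return 0;
}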
diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h
index de394687ef..d962125351 100644
--- a/hw/virtio/virtio-pci.h
+++ b/hw/virtio/virtio-pci.h
@@ -24,6 +24,7 @@
#include "hw/virtio/virtio-balloon.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-9p.h"
+#include "hw/virtio/virtio-input.h"
#ifdef CONFIG_VIRTFS
#include "hw/9pfs/virtio-9p.h"
#endif
@@ -39,6 +40,8 @@ typedef struct VirtIOSerialPCI VirtIOSerialPCI;
typedef struct VirtIONetPCI VirtIONetPCI;
typedef struct VHostSCSIPCI VHostSCSIPCI;
typedef struct VirtIORngPCI VirtIORngPCI;
+typedef struct VirtIOInputPCI VirtIOInputPCI;
+typedef struct VirtIOInputHIDPCI VirtIOInputHIDPCI;
/* virtio-pci-bus */
@@ -63,6 +66,12 @@ typedef struct VirtioBusClass VirtioPCIBusClass;
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT 1
#define VIRTIO_PCI_FLAG_USE_IOEVENTFD (1 << VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT)
+/* virtio version flags */
+#define VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT 2
+#define VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT 3
+#define VIRTIO_PCI_FLAG_DISABLE_LEGACY (1 << VIRTIO_PCI_FLAG_DISABLE_LEGACY_BIT)
+#define VIRTIO_PCI_FLAG_DISABLE_MODERN (1 << VIRTIO_PCI_FLAG_DISABLE_MODERN_BIT)
+
typedef struct {
MSIMessage msg;
int virq;
@@ -85,12 +94,38 @@ typedef struct VirtioPCIClass {
void (*realize)(VirtIOPCIProxy *vpci_dev, Error **errp);
} VirtioPCIClass;
+typedef struct VirtIOPCIRegion {
+ MemoryRegion mr;
+ uint32_t offset;
+ uint32_t size;
+ uint32_t type;
+} VirtIOPCIRegion;
+
struct VirtIOPCIProxy {
PCIDevice pci_dev;
MemoryRegion bar;
+ VirtIOPCIRegion common;
+ VirtIOPCIRegion isr;
+ VirtIOPCIRegion device;
+ VirtIOPCIRegion notify;
+ MemoryRegion modern_bar;
+ uint32_t legacy_io_bar;
+ uint32_t msix_bar;
+ uint32_t modern_mem_bar;
uint32_t flags;
uint32_t class_code;
uint32_t nvectors;
+ uint32_t dfselect;
+ uint32_t gfselect;
+ uint32_t guest_features[2];
+ struct {
+ uint16_t num;
+ bool enabled;
+ uint32_t desc[2];
+ uint32_t avail[2];
+ uint32_t used[2];
+ } vqs[VIRTIO_QUEUE_MAX];
+
bool ioeventfd_disabled;
bool ioeventfd_started;
VirtIOIRQFD *vector_irqfd;
@@ -202,6 +237,30 @@ struct VirtIORngPCI {
VirtIORNG vdev;
};
+/*
+ * virtio-input-pci: This extends VirtioPCIProxy.
+ */
+#define TYPE_VIRTIO_INPUT_PCI "virtio-input-pci"
+#define VIRTIO_INPUT_PCI(obj) \
+ OBJECT_CHECK(VirtIOInputPCI, (obj), TYPE_VIRTIO_INPUT_PCI)
+
+struct VirtIOInputPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOInput vdev;
+};
+
+#define TYPE_VIRTIO_INPUT_HID_PCI "virtio-input-hid-pci"
+#define TYPE_VIRTIO_KEYBOARD_PCI "virtio-keyboard-pci"
+#define TYPE_VIRTIO_MOUSE_PCI "virtio-mouse-pci"
+#define TYPE_VIRTIO_TABLET_PCI "virtio-tablet-pci"
+#define VIRTIO_INPUT_HID_PCI(obj) \
+ OBJECT_CHECK(VirtIOInputHIDPCI, (obj), TYPE_VIRTIO_INPUT_HID_PCI)
+
+struct VirtIOInputHIDPCI {
+ VirtIOPCIProxy parent_obj;
+ VirtIOInputHID vdev;
+};
+
/* Virtio ABI version, if we increment this, we break the guest driver. */
#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c
index 420c39fb50..22b1d87d0e 100644
--- a/hw/virtio/virtio-rng.c
+++ b/hw/virtio/virtio-rng.c
@@ -219,7 +219,13 @@ static void virtio_rng_device_unrealize(DeviceState *dev, Error **errp)
}
static Property virtio_rng_properties[] = {
- DEFINE_VIRTIO_RNG_PROPERTIES(VirtIORNG, conf),
+ /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
+ * you have an entropy source capable of generating more entropy than this
+ * and you can pass it through via virtio-rng, then hats off to you. Until
+ * then, this is unlimited for all practical purposes.
+ */
+ DEFINE_PROP_UINT64("max-bytes", VirtIORNG, conf.max_bytes, INT64_MAX),
+ DEFINE_PROP_UINT32("period", VirtIORNG, conf.period_ms, 1 << 16),
DEFINE_PROP_END_OF_LIST(),
};
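The quota arithmetic behind the comment above: a limit of max-bytes per period milliseconds caps the rate at max_bytes * 1000 / period_ms bytes per second. Worked through with the 2^47-per-minute figure quoted there (a sketch, not QEMU code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t max_bytes = 1ULL << 47;   /* quota per period */
    uint64_t period_ms = 60 * 1000;    /* one minute */
    double rate = (double)max_bytes * 1000.0 / (double)period_ms;
    printf("%.2f TB/s\n", rate / 1e12); /* about 2.3 TB/s */
    return 0;
}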
diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c
index 8ac6156861..fb49ffcb2d 100644
--- a/hw/virtio/virtio.c
+++ b/hw/virtio/virtio.c
@@ -69,7 +69,6 @@ typedef struct VRing
struct VirtQueue
{
VRing vring;
- hwaddr pa;
uint16_t last_avail_idx;
/* Last used index value we have signalled on */
uint16_t signalled_used;
@@ -93,15 +92,18 @@ struct VirtQueue
};
/* virt queue functions */
-static void virtqueue_init(VirtQueue *vq)
+void virtio_queue_update_rings(VirtIODevice *vdev, int n)
{
- hwaddr pa = vq->pa;
+ VRing *vring = &vdev->vq[n].vring;
- vq->vring.desc = pa;
- vq->vring.avail = pa + vq->vring.num * sizeof(VRingDesc);
- vq->vring.used = vring_align(vq->vring.avail +
- offsetof(VRingAvail, ring[vq->vring.num]),
- vq->vring.align);
+ if (!vring->desc) {
+ /* not yet setup -> nothing to do */
+ return;
+ }
+ vring->avail = vring->desc + vring->num * sizeof(VRingDesc);
+ vring->used = vring_align(vring->avail +
+ offsetof(VRingAvail, ring[vring->num]),
+ vring->align);
}
static inline uint64_t vring_desc_addr(VirtIODevice *vdev, hwaddr desc_pa,
@@ -542,15 +544,37 @@ void virtio_update_irq(VirtIODevice *vdev)
virtio_notify_vector(vdev, VIRTIO_NO_VECTOR);
}
-void virtio_set_status(VirtIODevice *vdev, uint8_t val)
+static int virtio_validate_features(VirtIODevice *vdev)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+
+ if (k->validate_features) {
+ return k->validate_features(vdev);
+ } else {
+ return 0;
+ }
+}
+
+int virtio_set_status(VirtIODevice *vdev, uint8_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
trace_virtio_set_status(vdev, val);
+ if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ if (!(vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) &&
+ val & VIRTIO_CONFIG_S_FEATURES_OK) {
+ int ret = virtio_validate_features(vdev);
+
+ if (ret) {
+ return ret;
+ }
+ }
+ }
if (k->set_status) {
k->set_status(vdev, val);
}
vdev->status = val;
+ return 0;
}
bool target_words_bigendian(void);
@@ -605,7 +629,6 @@ void virtio_reset(void *opaque)
vdev->vq[i].vring.avail = 0;
vdev->vq[i].vring.used = 0;
vdev->vq[i].last_avail_idx = 0;
- vdev->vq[i].pa = 0;
virtio_queue_set_vector(vdev, i, VIRTIO_NO_VECTOR);
vdev->vq[i].signalled_used = 0;
vdev->vq[i].signalled_used_valid = false;
@@ -706,15 +729,119 @@ void virtio_config_writel(VirtIODevice *vdev, uint32_t addr, uint32_t data)
}
}
+uint32_t virtio_config_modern_readb(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldub_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_modern_readw(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = lduw_le_p(vdev->config + addr);
+ return val;
+}
+
+uint32_t virtio_config_modern_readl(VirtIODevice *vdev, uint32_t addr)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return (uint32_t)-1;
+ }
+
+ k->get_config(vdev, vdev->config);
+
+ val = ldl_le_p(vdev->config + addr);
+ return val;
+}
+
+void virtio_config_modern_writeb(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint8_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stb_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_modern_writew(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint16_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stw_le_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
+void virtio_config_modern_writel(VirtIODevice *vdev,
+ uint32_t addr, uint32_t data)
+{
+ VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
+ uint32_t val = data;
+
+ if (addr + sizeof(val) > vdev->config_len) {
+ return;
+ }
+
+ stl_le_p(vdev->config + addr, val);
+
+ if (k->set_config) {
+ k->set_config(vdev, vdev->config);
+ }
+}
+
void virtio_queue_set_addr(VirtIODevice *vdev, int n, hwaddr addr)
{
- vdev->vq[n].pa = addr;
- virtqueue_init(&vdev->vq[n]);
+ vdev->vq[n].vring.desc = addr;
+ virtio_queue_update_rings(vdev, n);
}
hwaddr virtio_queue_get_addr(VirtIODevice *vdev, int n)
{
- return vdev->vq[n].pa;
+ return vdev->vq[n].vring.desc;
+}
+
+void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc,
+ hwaddr avail, hwaddr used)
+{
+ vdev->vq[n].vring.desc = desc;
+ vdev->vq[n].vring.avail = avail;
+ vdev->vq[n].vring.used = used;
}
void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
@@ -728,7 +855,6 @@ void virtio_queue_set_num(VirtIODevice *vdev, int n, int num)
return;
}
vdev->vq[n].vring.num = num;
- virtqueue_init(&vdev->vq[n]);
}
VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector)
@@ -771,6 +897,11 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
BusState *qbus = qdev_get_parent_bus(DEVICE(vdev));
VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus);
+ /* virtio-1 compliant devices cannot change the alignment */
+ if (virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ error_report("tried to modify queue alignment for virtio-1 device");
+ return;
+ }
/* Check that the transport told us it was going to do this
* (so a buggy transport will immediately assert rather than
* silently failing to migrate this state)
@@ -778,7 +909,7 @@ void virtio_queue_set_align(VirtIODevice *vdev, int n, int align)
assert(k->has_variable_vring_alignment);
vdev->vq[n].vring.align = align;
- virtqueue_init(&vdev->vq[n]);
+ virtio_queue_update_rings(vdev, n);
}
void virtio_queue_notify_vq(VirtQueue *vq)
@@ -895,6 +1026,7 @@ void virtio_notify_config(VirtIODevice *vdev)
return;
vdev->isr |= 0x03;
+ vdev->generation++;
virtio_notify_vector(vdev, vdev->config_vector);
}
@@ -903,7 +1035,11 @@ static bool virtio_device_endian_needed(void *opaque)
VirtIODevice *vdev = opaque;
assert(vdev->device_endian != VIRTIO_DEVICE_ENDIAN_UNKNOWN);
- return vdev->device_endian != virtio_default_endian();
+ if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) {
+ return vdev->device_endian != virtio_default_endian();
+ }
+ /* Devices conforming to VIRTIO 1.0 or later are always LE. */
+ return vdev->device_endian != VIRTIO_DEVICE_ENDIAN_LITTLE;
}
static bool virtio_64bit_features_needed(void *opaque)
@@ -988,7 +1124,8 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
if (k->has_variable_vring_alignment) {
qemu_put_be32(f, vdev->vq[i].vring.align);
}
- qemu_put_be64(f, vdev->vq[i].pa);
+ /* XXX virtio-1 devices */
+ qemu_put_be64(f, vdev->vq[i].vring.desc);
qemu_put_be16s(f, &vdev->vq[i].last_avail_idx);
if (k->save_queue) {
k->save_queue(qbus->parent, i, f);
@@ -1003,7 +1140,7 @@ void virtio_save(VirtIODevice *vdev, QEMUFile *f)
vmstate_save_state(f, &vmstate_virtio, vdev, NULL);
}
-int virtio_set_features(VirtIODevice *vdev, uint64_t val)
+static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val)
{
VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
bool bad = (val & ~(vdev->host_features)) != 0;
@@ -1016,6 +1153,18 @@ int virtio_set_features(VirtIODevice *vdev, uint64_t val)
return bad ? -1 : 0;
}
+int virtio_set_features(VirtIODevice *vdev, uint64_t val)
+{
+ /*
+ * The driver must not attempt to set features after feature negotiation
+ * has finished.
+ */
+ if (vdev->status & VIRTIO_CONFIG_S_FEATURES_OK) {
+ return -EINVAL;
+ }
+ return virtio_set_features_nocheck(vdev, val);
+}
+
int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
{
int i, ret;
@@ -1072,13 +1221,14 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
if (k->has_variable_vring_alignment) {
vdev->vq[i].vring.align = qemu_get_be32(f);
}
- vdev->vq[i].pa = qemu_get_be64(f);
+ vdev->vq[i].vring.desc = qemu_get_be64(f);
qemu_get_be16s(f, &vdev->vq[i].last_avail_idx);
vdev->vq[i].signalled_used_valid = false;
vdev->vq[i].notification = true;
- if (vdev->vq[i].pa) {
- virtqueue_init(&vdev->vq[i]);
+ if (vdev->vq[i].vring.desc) {
+ /* XXX virtio-1 devices */
+ virtio_queue_update_rings(vdev, i);
} else if (vdev->vq[i].last_avail_idx) {
error_report("VQ %d address 0x0 "
"inconsistent with Host index 0x%x",
@@ -1118,14 +1268,14 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
* host_features.
*/
uint64_t features64 = vdev->guest_features;
- if (virtio_set_features(vdev, features64) < 0) {
+ if (virtio_set_features_nocheck(vdev, features64) < 0) {
error_report("Features 0x%" PRIx64 " unsupported. "
"Allowed features: 0x%" PRIx64,
features64, vdev->host_features);
return -1;
}
} else {
- if (virtio_set_features(vdev, features) < 0) {
+ if (virtio_set_features_nocheck(vdev, features) < 0) {
error_report("Features 0x%x unsupported. "
"Allowed features: 0x%" PRIx64,
features, vdev->host_features);
@@ -1134,7 +1284,7 @@ int virtio_load(VirtIODevice *vdev, QEMUFile *f, int version_id)
}
for (i = 0; i < num; i++) {
- if (vdev->vq[i].pa) {
+ if (vdev->vq[i].vring.desc) {
uint16_t nheads;
nheads = vring_avail_idx(&vdev->vq[i]) - vdev->vq[i].last_avail_idx;
/* Check it isn't doing strange things with descriptor numbers. */
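The FEATURES_OK handling added to virtio_set_status() and virtio_set_features() enforces the virtio-1 negotiation order: the driver writes features only before setting FEATURES_OK, and the device may refuse by leaving the bit clear. A driver-side sketch of that sequence against a stubbed device (the dev_* helpers are hypothetical stand-ins for transport accesses such as the common config region above):

#include <stdint.h>
#include <stdio.h>

#define STATUS_ACKNOWLEDGE 1
#define STATUS_DRIVER      2
#define STATUS_DRIVER_OK   4
#define STATUS_FEATURES_OK 8

/* Stubbed device so the sketch runs standalone; a real device may clear
 * FEATURES_OK in dev_write_status() to reject the feature set. */
static uint8_t  dev_status;
static uint64_t dev_features;
static uint8_t dev_read_status(void)       { return dev_status; }
static void dev_write_status(uint8_t s)    { dev_status = s; }
static void dev_write_features(uint64_t f) { dev_features = f; }

int main(void)
{
    dev_write_status(STATUS_ACKNOWLEDGE | STATUS_DRIVER);
    dev_write_features(1ULL << 32 /* VIRTIO_F_VERSION_1 */);

    /* After this write, further feature writes are rejected
     * (virtio_set_features() above returns -EINVAL). */
    dev_write_status(dev_read_status() | STATUS_FEATURES_OK);

    if (!(dev_read_status() & STATUS_FEATURES_OK)) {
        fprintf(stderr, "device refused the feature set\n");
        return 1;
    }
    dev_write_status(dev_read_status() | STATUS_DRIVER_OK);
    printf("negotiated features %#llx\n", (unsigned long long)dev_features);
    return 0;
}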