author     Cindy Lu <lulu@redhat.com>            2020-07-01 22:55:37 +0800
committer  Michael S. Tsirkin <mst@redhat.com>   2020-07-07 07:59:51 -0400
commit     108a64818e69be0a97cde3838d768f2d9910c08b
tree       a60b587a2ff2b704dac731d9886bc8c30c7d0d0c /hw/virtio
parent     38140cc4d9713dc9af78090503105bf9c82b6bff

vhost-vdpa: introduce vhost-vdpa backend
Currently we have two types of vhost backends in QEMU: vhost kernel and
vhost-user. The vDPA kernel framework provides a generic device that
exposes a vendor-independent configuration interface to user space for
setting up a vhost HW accelerator. This patch introduces a third vhost
backend, vhost-vdpa, built on top of that vDPA interface.

Vhost-vdpa usage:

  qemu-system-x86_64 -cpu host -enable-kvm \
    ......
    -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-id,id=vhost-vdpa0 \
    -device virtio-net-pci,netdev=vhost-vdpa0,page-per-vq=on \

Signed-off-by: Lingshan Zhu <lingshan.zhu@intel.com>
Signed-off-by: Tiwei Bie <tiwei.bie@intel.com>
Signed-off-by: Cindy Lu <lulu@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
Message-Id: <20200701145538.22333-14-lulu@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Acked-by: Jason Wang <jasowang@redhat.com>
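
For orientation, below is a minimal user-space sketch (not part of this
patch) of the protocol the new backend speaks: configuration requests
travel as ioctls on the vhost-vdpa character device, while IOTLB
updates are written to the same fd as struct vhost_msg_v2 records. The
device node name and the mapped buffer are illustrative assumptions,
and a real caller must respect page alignment and device setup order:

  /* Illustrative sketch only; needs kernel headers with vDPA support
   * (>= 5.8). */
  #include <fcntl.h>
  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <unistd.h>
  #include <linux/vhost.h>

  static char buf[4096];                   /* stand-in for guest memory */

  int main(void)
  {
      struct vhost_msg_v2 msg;
      uint32_t device_id;
      int fd = open("/dev/vhost-vdpa-0", O_RDWR);   /* assumed node name */

      if (fd < 0) {
          perror("open");
          return 1;
      }
      /* Configuration path: ordinary ioctls on the device fd. */
      if (ioctl(fd, VHOST_VDPA_GET_DEVICE_ID, &device_id) == 0) {
          printf("virtio device id: %u\n", device_id);
      }
      /* Mapping path: an IOTLB update written to the same fd. */
      memset(&msg, 0, sizeof(msg));
      msg.type = VHOST_IOTLB_MSG_V2;
      msg.iotlb.iova = 0x100000;           /* illustrative IOVA */
      msg.iotlb.uaddr = (uint64_t)(uintptr_t)buf;
      msg.iotlb.size = sizeof(buf);
      msg.iotlb.perm = VHOST_ACCESS_RW;
      msg.iotlb.type = VHOST_IOTLB_UPDATE;
      if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
          perror("VHOST_IOTLB_UPDATE");
      }
      close(fd);
      return 0;
  }
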
Diffstat (limited to 'hw/virtio')
 -rw-r--r--  hw/virtio/Makefile.objs   |   1
 -rw-r--r--  hw/virtio/vhost-backend.c |   6
 -rw-r--r--  hw/virtio/vhost-vdpa.c    | 475
 3 files changed, 482 insertions, 0 deletions
diff --git a/hw/virtio/Makefile.objs b/hw/virtio/Makefile.objs
index a986708186..fc91719b4a 100644
--- a/hw/virtio/Makefile.objs
+++ b/hw/virtio/Makefile.objs
@@ -5,6 +5,7 @@ obj-y += virtio.o
 obj-$(CONFIG_VHOST) += vhost.o vhost-backend.o
 common-obj-$(call lnot,$(CONFIG_VHOST)) += vhost-stub.o
 obj-$(CONFIG_VHOST_USER) += vhost-user.o
+obj-$(CONFIG_VHOST_VDPA) += vhost-vdpa.o
 common-obj-$(CONFIG_VIRTIO_RNG) += virtio-rng.o
 common-obj-$(CONFIG_VIRTIO_PCI) += virtio-pci.o
diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c
index 48905383f8..782b1d67d9 100644
--- a/hw/virtio/vhost-backend.c
+++ b/hw/virtio/vhost-backend.c
@@ -15,6 +15,7 @@
#include "qemu/main-loop.h"
#include "standard-headers/linux/vhost_types.h"
+#include "hw/virtio/vhost-vdpa.h"
#ifdef CONFIG_VHOST_KERNEL
#include <linux/vhost.h>
#include <sys/ioctl.h>
@@ -286,6 +287,11 @@ int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type)
         dev->vhost_ops = &user_ops;
         break;
 #endif
+#ifdef CONFIG_VHOST_VDPA
+    case VHOST_BACKEND_TYPE_VDPA:
+        dev->vhost_ops = &vdpa_ops;
+        break;
+#endif
     default:
         error_report("Unknown vhost backend type");
         r = -1;
diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c
new file mode 100644
index 0000000000..a3d17fe0f9
--- /dev/null
+++ b/hw/virtio/vhost-vdpa.c
@@ -0,0 +1,475 @@
+/*
+ * vhost-vdpa
+ *
+ * Copyright(c) 2017-2018 Intel Corporation.
+ * Copyright(c) 2020 Red Hat, Inc.
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <linux/vhost.h>
+#include <linux/vfio.h>
+#include <sys/eventfd.h>
+#include <sys/ioctl.h>
+#include "hw/virtio/vhost.h"
+#include "hw/virtio/vhost-backend.h"
+#include "hw/virtio/virtio-net.h"
+#include "hw/virtio/vhost-vdpa.h"
+#include "qemu/main-loop.h"
+#include <linux/kvm.h>
+#include "sysemu/kvm.h"
+
+static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section)
+{
+    return (!memory_region_is_ram(section->mr) &&
+            !memory_region_is_iommu(section->mr)) ||
+           /*
+            * Sizing an enabled 64-bit BAR can cause spurious mappings to
+            * addresses in the upper part of the 64-bit address space. These
+            * are never accessed by the CPU and beyond the address width of
+            * some IOMMU hardware. TODO: VDPA should tell us the IOMMU width.
+            */
+           section->offset_within_address_space & (1ULL << 63);
+}
+
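+/*
+ * Install one IOVA->HVA translation in the device IOTLB. Unlike the
+ * configuration path, IOTLB messages are written to the device fd
+ * rather than issued as ioctls.
+ */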
+static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
+                              void *vaddr, bool readonly)
+{
+    struct vhost_msg_v2 msg;
+    int fd = v->device_fd;
+    int ret = 0;
+
+    msg.type = v->msg_type;
+    msg.iotlb.iova = iova;
+    msg.iotlb.size = size;
+    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
+    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
+    msg.iotlb.type = VHOST_IOTLB_UPDATE;
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+        return -EIO;
+    }
+
+    return ret;
+}
+
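+/* Drop a translation by writing a VHOST_IOTLB_INVALIDATE message. */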
+static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
+                                hwaddr size)
+{
+    struct vhost_msg_v2 msg;
+    int fd = v->device_fd;
+    int ret = 0;
+
+    msg.type = v->msg_type;
+    msg.iotlb.iova = iova;
+    msg.iotlb.size = size;
+    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
+
+    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
+        error_report("failed to write, fd=%d, errno=%d (%s)",
+                     fd, errno, strerror(errno));
+        return -EIO;
+    }
+
+    return ret;
+}
+
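+/*
+ * Memory listener hook: map each added RAM section into the device,
+ * using its guest physical address as the IOVA.
+ */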
+static void vhost_vdpa_listener_region_add(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    hwaddr iova;
+    Int128 llend, llsize;
+    void *vaddr;
+    int ret;
+
+    if (vhost_vdpa_listener_skipped_section(section)) {
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    llend = int128_make64(section->offset_within_address_space);
+    llend = int128_add(llend, section->size);
+    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+
+    if (int128_ge(int128_make64(iova), llend)) {
+        return;
+    }
+
+    memory_region_ref(section->mr);
+
+    /* Here we assume that memory_region_is_ram(section->mr) == true */
+
+    vaddr = memory_region_get_ram_ptr(section->mr) +
+            section->offset_within_region +
+            (iova - section->offset_within_address_space);
+
+    llsize = int128_sub(llend, int128_make64(iova));
+
+    ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
+                             vaddr, section->readonly);
+    if (ret) {
+        if (memory_region_is_ram_device(section->mr)) {
+            /* Allow unexpected mappings not to be fatal for RAM devices */
+            error_report("vhost-vdpa: failed to map RAM device region; "
+                         "PCI p2p may not work");
+            return;
+        }
+        goto fail;
+    }
+
+    return;
+
+fail:
+    /*
+     * At runtime there is not much we can do other than report a
+     * hardware error, since the device cannot reach this memory.
+     */
+    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
+}
+
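+/* Memory listener hook: tear down the mapping of a removed section. */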
+static void vhost_vdpa_listener_region_del(MemoryListener *listener,
+                                           MemoryRegionSection *section)
+{
+    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
+    hwaddr iova;
+    Int128 llend, llsize;
+    int ret;
+
+    if (vhost_vdpa_listener_skipped_section(section)) {
+        return;
+    }
+
+    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
+                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
+        error_report("%s received unaligned region", __func__);
+        return;
+    }
+
+    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
+    llend = int128_make64(section->offset_within_address_space);
+    llend = int128_add(llend, section->size);
+    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
+
+    if (int128_ge(int128_make64(iova), llend)) {
+        return;
+    }
+
+    llsize = int128_sub(llend, int128_make64(iova));
+
+    ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
+    if (ret) {
+        error_report("vhost-vdpa: DMA unmap failed");
+    }
+
+    memory_region_unref(section->mr);
+}
+
+/*
+ * The IOTLB API is used by vhost-vdpa, which requires incremental
+ * updates of the mapping. So we cannot use the generic vhost memory
+ * listener, which depends on addnop().
+ */
+static const MemoryListener vhost_vdpa_memory_listener = {
+    .region_add = vhost_vdpa_listener_region_add,
+    .region_del = vhost_vdpa_listener_region_del,
+};
+
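+/* Forward a vhost request as an ioctl on the vhost-vdpa device fd. */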
+static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
+                           void *arg)
+{
+    struct vhost_vdpa *v = dev->opaque;
+    int fd = v->device_fd;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+
+    return ioctl(fd, request, arg);
+}
+
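+/* Read-modify-write the device status byte, OR-ing in the given bits. */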
+static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
+{
+    uint8_t s;
+
+    if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) {
+        return;
+    }
+
+    s |= status;
+
+    vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
+}
+
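+/*
+ * Backend init: cache the backend features, prepare the memory listener
+ * and IOTLB message type, and move the device to ACKNOWLEDGE | DRIVER.
+ */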
+static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque)
+{
+    struct vhost_vdpa *v;
+    uint64_t features;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+
+    v = opaque;
+    dev->opaque = opaque;
+    vhost_vdpa_call(dev, VHOST_GET_FEATURES, &features);
+    dev->backend_features = features;
+    v->listener = vhost_vdpa_memory_listener;
+    v->msg_type = VHOST_IOTLB_MSG_V2;
+
+    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+                               VIRTIO_CONFIG_S_DRIVER);
+
+    return 0;
+}
+
+static int vhost_vdpa_cleanup(struct vhost_dev *dev)
+{
+    struct vhost_vdpa *v;
+
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    v = dev->opaque;
+    memory_listener_unregister(&v->listener);
+
+    dev->opaque = NULL;
+    return 0;
+}
+
+static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
+{
+    return INT_MAX;
+}
+
+static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
+                                    struct vhost_memory *mem)
+{
+    /*
+     * The memory table is not forwarded to the device: mappings are
+     * installed incrementally by the memory listener. Only validate it.
+     */
+    if (mem->padding) {
+        return -1;
+    }
+
+    return 0;
+}
+
+static int vhost_vdpa_set_features(struct vhost_dev *dev,
+                                   uint64_t features)
+{
+    uint8_t status = 0;
+    int ret;
+
+    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
+    if (ret) {
+        return ret;
+    }
+    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
+    vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
+
+    return !(status & VIRTIO_CONFIG_S_FEATURES_OK);
+}
+
+int vhost_vdpa_get_device_id(struct vhost_dev *dev,
+                             uint32_t *device_id)
+{
+    return vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
+}
+
+static int vhost_vdpa_reset_device(struct vhost_dev *dev)
+{
+    uint8_t status = 0;
+
+    return vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
+}
+
+static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
+{
+    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
+
+    return idx - dev->vq_index;
+}
+
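+/* Enable every virtqueue belonging to this vhost device. */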
+static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
+{
+    int i;
+
+    for (i = 0; i < dev->nvqs; ++i) {
+        struct vhost_vring_state state = {
+            .index = dev->vq_index + i,
+            .num = 1,
+        };
+        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
+    }
+    return 0;
+}
+
+static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
+                                 uint32_t offset, uint32_t size,
+                                 uint32_t flags)
+{
+    struct vhost_vdpa_config *config;
+    int ret;
+    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+
+    /* g_malloc() aborts on allocation failure, so no NULL check is needed. */
+    config = g_malloc(size + config_size);
+    config->off = offset;
+    config->len = size;
+    memcpy(config->buf, data, size);
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
+    g_free(config);
+    return ret;
+}
+
+static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
+                                 uint32_t config_len)
+{
+    struct vhost_vdpa_config *v_config;
+    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+    int ret;
+
+    v_config = g_malloc(config_len + config_size);
+    v_config->len = config_len;
+    v_config->off = 0;
+    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
+    memcpy(config, v_config->buf, config_len);
+    g_free(v_config);
+    return ret;
+}
+
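+/*
+ * Start/stop: on start, register the memory listener (which populates
+ * the IOTLB) and set DRIVER_OK; on stop, reset the device and return it
+ * to the ACKNOWLEDGE | DRIVER state.
+ */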
+static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
+{
+    struct vhost_vdpa *v = dev->opaque;
+
+    if (started) {
+        uint8_t status = 0;
+
+        memory_listener_register(&v->listener, &address_space_memory);
+        vhost_vdpa_set_vring_ready(dev);
+        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
+        vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status);
+
+        return !(status & VIRTIO_CONFIG_S_DRIVER_OK);
+    } else {
+        vhost_vdpa_reset_device(dev);
+        vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
+                                   VIRTIO_CONFIG_S_DRIVER);
+        memory_listener_unregister(&v->listener);
+
+        return 0;
+    }
+}
+
+static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
+                                   struct vhost_log *log)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
+}
+
+static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
+                                     struct vhost_vring_addr *addr)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
+}
+
+static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
+                                    struct vhost_vring_state *ring)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
+}
+
+static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
+                                     struct vhost_vring_state *ring)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
+}
+
+static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
+                                     struct vhost_vring_state *ring)
+{
+    return vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
+}
+
+static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
+                                     struct vhost_vring_file *file)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
+}
+
+static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
+                                     struct vhost_vring_file *file)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
+}
+
+static int vhost_vdpa_get_features(struct vhost_dev *dev,
+                                   uint64_t *features)
+{
+    return vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
+}
+
+static int vhost_vdpa_set_owner(struct vhost_dev *dev)
+{
+    return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
+}
+
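+/*
+ * Unlike vhost-kernel, the rings are addressed by guest physical
+ * address: the device resolves them through the IOTLB mappings
+ * installed by the memory listener.
+ */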
+static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
+                                  struct vhost_vring_addr *addr,
+                                  struct vhost_virtqueue *vq)
+{
+    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
+    addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
+    addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
+    addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
+    return 0;
+}
+
+static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
+{
+    return true;
+}
+
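+/* Backend ops table installed by vhost_set_backend_type(). */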
+const VhostOps vdpa_ops = {
+    .backend_type = VHOST_BACKEND_TYPE_VDPA,
+    .vhost_backend_init = vhost_vdpa_init,
+    .vhost_backend_cleanup = vhost_vdpa_cleanup,
+    .vhost_set_log_base = vhost_vdpa_set_log_base,
+    .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
+    .vhost_set_vring_num = vhost_vdpa_set_vring_num,
+    .vhost_set_vring_base = vhost_vdpa_set_vring_base,
+    .vhost_get_vring_base = vhost_vdpa_get_vring_base,
+    .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
+    .vhost_set_vring_call = vhost_vdpa_set_vring_call,
+    .vhost_get_features = vhost_vdpa_get_features,
+    .vhost_set_owner = vhost_vdpa_set_owner,
+    .vhost_set_vring_endian = NULL,
+    .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
+    .vhost_set_mem_table = vhost_vdpa_set_mem_table,
+    .vhost_set_features = vhost_vdpa_set_features,
+    .vhost_reset_device = vhost_vdpa_reset_device,
+    .vhost_get_vq_index = vhost_vdpa_get_vq_index,
+    .vhost_get_config = vhost_vdpa_get_config,
+    .vhost_set_config = vhost_vdpa_set_config,
+    .vhost_requires_shm_log = NULL,
+    .vhost_migration_done = NULL,
+    .vhost_backend_can_merge = NULL,
+    .vhost_net_set_mtu = NULL,
+    .vhost_set_iotlb_callback = NULL,
+    .vhost_send_device_iotlb_msg = NULL,
+    .vhost_dev_start = vhost_vdpa_dev_start,
+    .vhost_get_device_id = vhost_vdpa_get_device_id,
+    .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
+    .vhost_force_iommu = vhost_vdpa_force_iommu,
+};