Diffstat (limited to 'hw/vfio/common.c')
 -rw-r--r--  hw/vfio/common.c | 1847
 1 file changed, 119 insertions(+), 1728 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 134649226d..5ff5acf1d8 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -46,8 +46,8 @@
#include "migration/qemu-file.h"
#include "sysemu/tpm.h"
-VFIOGroupList vfio_group_list =
- QLIST_HEAD_INITIALIZER(vfio_group_list);
+VFIODeviceList vfio_device_list =
+ QLIST_HEAD_INITIALIZER(vfio_device_list);
static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
QLIST_HEAD_INITIALIZER(vfio_address_spaces);
@@ -59,304 +59,24 @@ static QLIST_HEAD(, VFIOAddressSpace) vfio_address_spaces =
* initialized, this file descriptor is only released on QEMU exit and
* we'll re-use it should another vfio device be attached before then.
*/
-static int vfio_kvm_device_fd = -1;
+int vfio_kvm_device_fd = -1;
#endif
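
The lazy-creation pattern this comment refers to appears further down in this patch; a minimal sketch, assuming CONFIG_KVM and an initialized kvm_state:

    /* Hypothetical helper: create the KVM VFIO device on first use. */
    static int vfio_get_kvm_device_fd(void)
    {
        if (vfio_kvm_device_fd < 0) {
            struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };

            if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd) == 0) {
                vfio_kvm_device_fd = cd.fd; /* held open until QEMU exits */
            }
        }
        return vfio_kvm_device_fd;
    }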
/*
- * Common VFIO interrupt disable
- */
-void vfio_disable_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER,
- .index = index,
- .start = 0,
- .count = 0,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-void vfio_unmask_single_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_UNMASK,
- .index = index,
- .start = 0,
- .count = 1,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-void vfio_mask_single_irqindex(VFIODevice *vbasedev, int index)
-{
- struct vfio_irq_set irq_set = {
- .argsz = sizeof(irq_set),
- .flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_MASK,
- .index = index,
- .start = 0,
- .count = 1,
- };
-
- ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, &irq_set);
-}
-
-static inline const char *action_to_str(int action)
-{
- switch (action) {
- case VFIO_IRQ_SET_ACTION_MASK:
- return "MASK";
- case VFIO_IRQ_SET_ACTION_UNMASK:
- return "UNMASK";
- case VFIO_IRQ_SET_ACTION_TRIGGER:
- return "TRIGGER";
- default:
- return "UNKNOWN ACTION";
- }
-}
-
-static const char *index_to_str(VFIODevice *vbasedev, int index)
-{
- if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
- return NULL;
- }
-
- switch (index) {
- case VFIO_PCI_INTX_IRQ_INDEX:
- return "INTX";
- case VFIO_PCI_MSI_IRQ_INDEX:
- return "MSI";
- case VFIO_PCI_MSIX_IRQ_INDEX:
- return "MSIX";
- case VFIO_PCI_ERR_IRQ_INDEX:
- return "ERR";
- case VFIO_PCI_REQ_IRQ_INDEX:
- return "REQ";
- default:
- return NULL;
- }
-}
-
-static int vfio_ram_block_discard_disable(VFIOContainer *container, bool state)
-{
- switch (container->iommu_type) {
- case VFIO_TYPE1v2_IOMMU:
- case VFIO_TYPE1_IOMMU:
- /*
- * We support coordinated discarding of RAM via the RamDiscardManager.
- */
- return ram_block_uncoordinated_discard_disable(state);
- default:
- /*
- * VFIO_SPAPR_TCE_IOMMU most probably works just fine with
- * RamDiscardManager, however, it is completely untested.
- *
- * VFIO_SPAPR_TCE_v2_IOMMU with "DMA memory preregistering" does
- * completely the opposite of managing mapping/pinning dynamically as
- * required by RamDiscardManager. We would have to special-case sections
- * with a RamDiscardManager.
- */
- return ram_block_discard_disable(state);
- }
-}
-
-int vfio_set_irq_signaling(VFIODevice *vbasedev, int index, int subindex,
- int action, int fd, Error **errp)
-{
- struct vfio_irq_set *irq_set;
- int argsz, ret = 0;
- const char *name;
- int32_t *pfd;
-
- argsz = sizeof(*irq_set) + sizeof(*pfd);
-
- irq_set = g_malloc0(argsz);
- irq_set->argsz = argsz;
- irq_set->flags = VFIO_IRQ_SET_DATA_EVENTFD | action;
- irq_set->index = index;
- irq_set->start = subindex;
- irq_set->count = 1;
- pfd = (int32_t *)&irq_set->data;
- *pfd = fd;
-
- if (ioctl(vbasedev->fd, VFIO_DEVICE_SET_IRQS, irq_set)) {
- ret = -errno;
- }
- g_free(irq_set);
-
- if (!ret) {
- return 0;
- }
-
- error_setg_errno(errp, -ret, "VFIO_DEVICE_SET_IRQS failure");
-
- name = index_to_str(vbasedev, index);
- if (name) {
- error_prepend(errp, "%s-%d: ", name, subindex);
- } else {
- error_prepend(errp, "index %d-%d: ", index, subindex);
- }
- error_prepend(errp,
- "Failed to %s %s eventfd signaling for interrupt ",
- fd < 0 ? "tear down" : "set up", action_to_str(action));
- return ret;
-}
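
For illustration, a hedged sketch of a typical caller, wiring an eventfd as the trigger for MSI vector 0 (constants from linux/vfio.h, eventfd() from sys/eventfd.h; error handling abbreviated):

    int fd = eventfd(0, EFD_CLOEXEC);
    Error *err = NULL;

    if (vfio_set_irq_signaling(vbasedev, VFIO_PCI_MSI_IRQ_INDEX, 0,
                               VFIO_IRQ_SET_ACTION_TRIGGER, fd, &err)) {
        error_reportf_err(err, "vfio: ");
    }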
-
-/*
- * IO Port/MMIO - Beware of the endians, VFIO is always little endian
- */
-void vfio_region_write(void *opaque, hwaddr addr,
- uint64_t data, unsigned size)
-{
- VFIORegion *region = opaque;
- VFIODevice *vbasedev = region->vbasedev;
- union {
- uint8_t byte;
- uint16_t word;
- uint32_t dword;
- uint64_t qword;
- } buf;
-
- switch (size) {
- case 1:
- buf.byte = data;
- break;
- case 2:
- buf.word = cpu_to_le16(data);
- break;
- case 4:
- buf.dword = cpu_to_le32(data);
- break;
- case 8:
- buf.qword = cpu_to_le64(data);
- break;
- default:
- hw_error("vfio: unsupported write size, %u bytes", size);
- break;
- }
-
- if (pwrite(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
- error_report("%s(%s:region%d+0x%"HWADDR_PRIx", 0x%"PRIx64
- ",%d) failed: %m",
- __func__, vbasedev->name, region->nr,
- addr, data, size);
- }
-
- trace_vfio_region_write(vbasedev->name, region->nr, addr, data, size);
-
- /*
- * A read or write to a BAR always signals an INTx EOI. This will
- * do nothing if not pending (including not in INTx mode). We assume
- * that a BAR access is in response to an interrupt and that BAR
- * accesses will service the interrupt. Unfortunately, we don't know
- * which access will service the interrupt, so we're potentially
- * getting quite a few host interrupts per guest interrupt.
- */
- vbasedev->ops->vfio_eoi(vbasedev);
-}
-
-uint64_t vfio_region_read(void *opaque,
- hwaddr addr, unsigned size)
-{
- VFIORegion *region = opaque;
- VFIODevice *vbasedev = region->vbasedev;
- union {
- uint8_t byte;
- uint16_t word;
- uint32_t dword;
- uint64_t qword;
- } buf;
- uint64_t data = 0;
-
- if (pread(vbasedev->fd, &buf, size, region->fd_offset + addr) != size) {
- error_report("%s(%s:region%d+0x%"HWADDR_PRIx", %d) failed: %m",
- __func__, vbasedev->name, region->nr,
- addr, size);
- return (uint64_t)-1;
- }
- switch (size) {
- case 1:
- data = buf.byte;
- break;
- case 2:
- data = le16_to_cpu(buf.word);
- break;
- case 4:
- data = le32_to_cpu(buf.dword);
- break;
- case 8:
- data = le64_to_cpu(buf.qword);
- break;
- default:
- hw_error("vfio: unsupported read size, %u bytes", size);
- break;
- }
-
- trace_vfio_region_read(vbasedev->name, region->nr, addr, size, data);
-
- /* Same as write above */
- vbasedev->ops->vfio_eoi(vbasedev);
-
- return data;
-}
-
-const MemoryRegionOps vfio_region_ops = {
- .read = vfio_region_read,
- .write = vfio_region_write,
- .endianness = DEVICE_LITTLE_ENDIAN,
- .valid = {
- .min_access_size = 1,
- .max_access_size = 8,
- },
- .impl = {
- .min_access_size = 1,
- .max_access_size = 8,
- },
-};
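
A concrete endianness example: a 4-byte guest write of 0x12345678 passes through cpu_to_le32(), so the buffer handed to pwrite() contains the bytes 78 56 34 12 on any host, matching the always-little-endian VFIO region layout.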
-
-/*
* Device state interfaces
*/
-typedef struct {
- unsigned long *bitmap;
- hwaddr size;
- hwaddr pages;
-} VFIOBitmap;
-
-static int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size)
-{
- vbmap->pages = REAL_HOST_PAGE_ALIGN(size) / qemu_real_host_page_size();
- vbmap->size = ROUND_UP(vbmap->pages, sizeof(__u64) * BITS_PER_BYTE) /
- BITS_PER_BYTE;
- vbmap->bitmap = g_try_malloc0(vbmap->size);
- if (!vbmap->bitmap) {
- return -ENOMEM;
- }
-
- return 0;
-}
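
Worked example of the sizing arithmetic above: with a 4 KiB host page size, a 1 GiB range yields vbmap->pages = 262144; rounding up to a multiple of 64 bits changes nothing here, so vbmap->size = 262144 / 8 = 32768 bytes.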
-
-static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr);
-
bool vfio_mig_active(void)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
- if (QLIST_EMPTY(&vfio_group_list)) {
+ if (QLIST_EMPTY(&vfio_device_list)) {
return false;
}
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->migration_blocker) {
- return false;
- }
+ QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ if (vbasedev->migration_blocker) {
+ return false;
}
}
return true;
@@ -371,19 +91,16 @@ static Error *multiple_devices_migration_blocker;
*/
static bool vfio_multiple_devices_migration_is_supported(void)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
unsigned int device_num = 0;
bool all_support_p2p = true;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->migration) {
- device_num++;
+ QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ if (vbasedev->migration) {
+ device_num++;
- if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
- all_support_p2p = false;
- }
+ if (!(vbasedev->migration->mig_flags & VFIO_MIGRATION_P2P)) {
+ all_support_p2p = false;
}
}
}
@@ -435,7 +152,7 @@ void vfio_unblock_multiple_devices_migration(void)
bool vfio_viommu_preset(VFIODevice *vbasedev)
{
- return vbasedev->group->container->space->as != &address_space_memory;
+ return vbasedev->container->space->as != &address_space_memory;
}
static void vfio_set_migration_error(int err)
@@ -469,7 +186,6 @@ bool vfio_device_state_is_precopy(VFIODevice *vbasedev)
static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
MigrationState *ms = migrate_get_current();
@@ -478,34 +194,29 @@ static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
return false;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- VFIOMigration *migration = vbasedev->migration;
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ VFIOMigration *migration = vbasedev->migration;
- if (!migration) {
- return false;
- }
+ if (!migration) {
+ return false;
+ }
- if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
- (vfio_device_state_is_running(vbasedev) ||
- vfio_device_state_is_precopy(vbasedev))) {
- return false;
- }
+ if (vbasedev->pre_copy_dirty_page_tracking == ON_OFF_AUTO_OFF &&
+ (vfio_device_state_is_running(vbasedev) ||
+ vfio_device_state_is_precopy(vbasedev))) {
+ return false;
}
}
return true;
}
-static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
+bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (!vbasedev->dirty_pages_supported) {
- return false;
- }
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ if (!vbasedev->dirty_pages_supported) {
+ return false;
}
}
@@ -516,178 +227,33 @@ static bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
* Check if all VFIO devices are running and migration is active, which is
* essentially equivalent to the migration being in pre-copy phase.
*/
-static bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
+bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
if (!migration_is_active(migrate_get_current())) {
return false;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- VFIOMigration *migration = vbasedev->migration;
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ VFIOMigration *migration = vbasedev->migration;
- if (!migration) {
- return false;
- }
-
- if (vfio_device_state_is_running(vbasedev) ||
- vfio_device_state_is_precopy(vbasedev)) {
- continue;
- } else {
- return false;
- }
+ if (!migration) {
+ return false;
}
- }
- return true;
-}
-
-static int vfio_dma_unmap_bitmap(VFIOContainer *container,
- hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb)
-{
- struct vfio_iommu_type1_dma_unmap *unmap;
- struct vfio_bitmap *bitmap;
- VFIOBitmap vbmap;
- int ret;
-
- ret = vfio_bitmap_alloc(&vbmap, size);
- if (ret) {
- return ret;
- }
-
- unmap = g_malloc0(sizeof(*unmap) + sizeof(*bitmap));
-
- unmap->argsz = sizeof(*unmap) + sizeof(*bitmap);
- unmap->iova = iova;
- unmap->size = size;
- unmap->flags |= VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
- bitmap = (struct vfio_bitmap *)&unmap->data;
-
- /*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
- * qemu_real_host_page_size to mark those dirty. Hence set bitmap_pgsize
- * to qemu_real_host_page_size.
- */
- bitmap->pgsize = qemu_real_host_page_size();
- bitmap->size = vbmap.size;
- bitmap->data = (__u64 *)vbmap.bitmap;
-
- if (vbmap.size > container->max_dirty_bitmap_size) {
- error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
- ret = -E2BIG;
- goto unmap_exit;
- }
-
- ret = ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, unmap);
- if (!ret) {
- cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap,
- iotlb->translated_addr, vbmap.pages);
- } else {
- error_report("VFIO_UNMAP_DMA with DIRTY_BITMAP : %m");
- }
-
-unmap_exit:
- g_free(unmap);
- g_free(vbmap.bitmap);
-
- return ret;
-}
-/*
- * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
- */
-static int vfio_dma_unmap(VFIOContainer *container,
- hwaddr iova, ram_addr_t size,
- IOMMUTLBEntry *iotlb)
-{
- struct vfio_iommu_type1_dma_unmap unmap = {
- .argsz = sizeof(unmap),
- .flags = 0,
- .iova = iova,
- .size = size,
- };
- bool need_dirty_sync = false;
- int ret;
-
- if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
- if (!vfio_devices_all_device_dirty_tracking(container) &&
- container->dirty_pages_supported) {
- return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
- }
-
- need_dirty_sync = true;
- }
-
- while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) {
- /*
- * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c
- * v4.15) where an overflow in its wrap-around check prevents us from
- * unmapping the last page of the address space. Test for the error
- * condition and re-try the unmap excluding the last page. The
- * expectation is that we've never mapped the last page anyway and this
- * unmap request comes via vIOMMU support which also makes it unlikely
- * that this page is used. This bug was introduced well after type1 v2
- * support was introduced, so we shouldn't need to test for v1. A fix
- * is queued for kernel v5.0 so this workaround can be removed once
- * affected kernels are sufficiently deprecated.
- */
- if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
- container->iommu_type == VFIO_TYPE1v2_IOMMU) {
- trace_vfio_dma_unmap_overflow_workaround();
- unmap.size -= 1ULL << ctz64(container->pgsizes);
+ if (vfio_device_state_is_running(vbasedev) ||
+ vfio_device_state_is_precopy(vbasedev)) {
continue;
- }
- error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
- return -errno;
- }
-
- if (need_dirty_sync) {
- ret = vfio_get_dirty_bitmap(container, iova, size,
- iotlb->translated_addr);
- if (ret) {
- return ret;
+ } else {
+ return false;
}
}
-
- return 0;
-}
-
-static int vfio_dma_map(VFIOContainer *container, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly)
-{
- struct vfio_iommu_type1_dma_map map = {
- .argsz = sizeof(map),
- .flags = VFIO_DMA_MAP_FLAG_READ,
- .vaddr = (__u64)(uintptr_t)vaddr,
- .iova = iova,
- .size = size,
- };
-
- if (!readonly) {
- map.flags |= VFIO_DMA_MAP_FLAG_WRITE;
- }
-
- /*
- * Try the mapping, if it fails with EBUSY, unmap the region and try
- * again. This shouldn't be necessary, but we sometimes see it in
- * the VGA ROM space.
- */
- if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
- (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
- ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
- return 0;
- }
-
- error_report("VFIO_MAP_DMA failed: %s", strerror(errno));
- return -errno;
+ return true;
}
-static void vfio_host_win_add(VFIOContainer *container,
- hwaddr min_iova, hwaddr max_iova,
- uint64_t iova_pgsizes)
+void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova,
+ hwaddr max_iova, uint64_t iova_pgsizes)
{
VFIOHostDMAWindow *hostwin;
@@ -708,8 +274,8 @@ static void vfio_host_win_add(VFIOContainer *container,
QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
}
-static int vfio_host_win_del(VFIOContainer *container, hwaddr min_iova,
- hwaddr max_iova)
+int vfio_host_win_del(VFIOContainer *container,
+ hwaddr min_iova, hwaddr max_iova)
{
VFIOHostDMAWindow *hostwin;
@@ -1084,62 +650,8 @@ static void vfio_listener_region_add(MemoryListener *listener,
return;
}
- if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
- hwaddr pgsize = 0;
-
- /* For now intersections are not allowed, we may relax this later */
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
- if (ranges_overlap(hostwin->min_iova,
- hostwin->max_iova - hostwin->min_iova + 1,
- section->offset_within_address_space,
- int128_get64(section->size))) {
- error_setg(&err,
- "region [0x%"PRIx64",0x%"PRIx64"] overlaps with existing"
- "host DMA window [0x%"PRIx64",0x%"PRIx64"]",
- section->offset_within_address_space,
- section->offset_within_address_space +
- int128_get64(section->size) - 1,
- hostwin->min_iova, hostwin->max_iova);
- goto fail;
- }
- }
-
- ret = vfio_spapr_create_window(container, section, &pgsize);
- if (ret) {
- error_setg_errno(&err, -ret, "Failed to create SPAPR window");
- goto fail;
- }
-
- vfio_host_win_add(container, section->offset_within_address_space,
- section->offset_within_address_space +
- int128_get64(section->size) - 1, pgsize);
-#ifdef CONFIG_KVM
- if (kvm_enabled()) {
- VFIOGroup *group;
- IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
- struct kvm_vfio_spapr_tce param;
- struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_GROUP,
- .attr = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
- .addr = (uint64_t)(unsigned long)&param,
- };
-
- if (!memory_region_iommu_get_attr(iommu_mr, IOMMU_ATTR_SPAPR_TCE_FD,
- &param.tablefd)) {
- QLIST_FOREACH(group, &container->group_list, container_next) {
- param.groupfd = group->fd;
- if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_report("vfio: failed to setup fd %d "
- "for a group with fd %d: %s",
- param.tablefd, param.groupfd,
- strerror(errno));
- return;
- }
- trace_vfio_spapr_group_attach(param.groupfd, param.tablefd);
- }
- }
- }
-#endif
+ if (vfio_container_add_section_window(container, section, &err)) {
+ goto fail;
}
hostwin = vfio_find_hostwin(container, iova, end);
@@ -1251,7 +763,7 @@ static void vfio_listener_region_add(MemoryListener *listener,
fail:
if (memory_region_is_ram_device(section->mr)) {
- error_report("failed to vfio_dma_map. pci p2p may not work");
+ error_reportf_err(err, "PCI p2p may not work: ");
return;
}
/*
@@ -1356,44 +868,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
memory_region_unref(section->mr);
- if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
- vfio_spapr_remove_window(container,
- section->offset_within_address_space);
- if (vfio_host_win_del(container,
- section->offset_within_address_space,
- section->offset_within_address_space +
- int128_get64(section->size) - 1) < 0) {
- hw_error("%s: Cannot delete missing window at %"HWADDR_PRIx,
- __func__, section->offset_within_address_space);
- }
- }
-}
-
-static int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
-{
- int ret;
- struct vfio_iommu_type1_dirty_bitmap dirty = {
- .argsz = sizeof(dirty),
- };
-
- if (!container->dirty_pages_supported) {
- return 0;
- }
-
- if (start) {
- dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
- } else {
- dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP;
- }
-
- ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, &dirty);
- if (ret) {
- ret = -errno;
- error_report("Failed to set dirty tracking flag 0x%x errno: %d",
- dirty.flags, errno);
- }
-
- return ret;
+ vfio_container_del_section_window(container, section);
}
typedef struct VFIODirtyRanges {
@@ -1416,20 +891,17 @@ static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
{
VFIOPCIDevice *pcidev;
VFIODevice *vbasedev;
- VFIOGroup *group;
Object *owner;
owner = memory_region_owner(section->mr);
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
- if (OBJECT(pcidev) == owner) {
- return true;
- }
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ pcidev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ if (OBJECT(pcidev) == owner) {
+ return true;
}
}
@@ -1525,24 +997,21 @@ static void vfio_devices_dma_logging_stop(VFIOContainer *container)
sizeof(uint64_t))] = {};
struct vfio_device_feature *feature = (struct vfio_device_feature *)buf;
VFIODevice *vbasedev;
- VFIOGroup *group;
feature->argsz = sizeof(buf);
feature->flags = VFIO_DEVICE_FEATURE_SET |
VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (!vbasedev->dirty_tracking) {
- continue;
- }
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ if (!vbasedev->dirty_tracking) {
+ continue;
+ }
- if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
- warn_report("%s: Failed to stop DMA logging, err %d (%s)",
- vbasedev->name, -errno, strerror(errno));
- }
- vbasedev->dirty_tracking = false;
+ if (ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature)) {
+ warn_report("%s: Failed to stop DMA logging, err %d (%s)",
+ vbasedev->name, -errno, strerror(errno));
}
+ vbasedev->dirty_tracking = false;
}
}
@@ -1625,7 +1094,6 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
struct vfio_device_feature *feature;
VFIODirtyRanges ranges;
VFIODevice *vbasedev;
- VFIOGroup *group;
int ret = 0;
vfio_dirty_tracking_init(container, &ranges);
@@ -1635,21 +1103,19 @@ static int vfio_devices_dma_logging_start(VFIOContainer *container)
return -errno;
}
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dirty_tracking) {
- continue;
- }
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ if (vbasedev->dirty_tracking) {
+ continue;
+ }
- ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
- if (ret) {
- ret = -errno;
- error_report("%s: Failed to start DMA logging, err %d (%s)",
- vbasedev->name, ret, strerror(errno));
- goto out;
- }
- vbasedev->dirty_tracking = true;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_FEATURE, feature);
+ if (ret) {
+ ret = -errno;
+ error_report("%s: Failed to start DMA logging, err %d (%s)",
+ vbasedev->name, ret, strerror(errno));
+ goto out;
}
+ vbasedev->dirty_tracking = true;
}
out:
@@ -1724,71 +1190,31 @@ static int vfio_device_dma_logging_report(VFIODevice *vbasedev, hwaddr iova,
return 0;
}
-static int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
- VFIOBitmap *vbmap, hwaddr iova,
- hwaddr size)
+int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
+ VFIOBitmap *vbmap, hwaddr iova,
+ hwaddr size)
{
VFIODevice *vbasedev;
- VFIOGroup *group;
int ret;
- QLIST_FOREACH(group, &container->group_list, container_next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- ret = vfio_device_dma_logging_report(vbasedev, iova, size,
- vbmap->bitmap);
- if (ret) {
- error_report("%s: Failed to get DMA logging report, iova: "
- "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
- ", err: %d (%s)",
- vbasedev->name, iova, size, ret, strerror(-ret));
+ QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ ret = vfio_device_dma_logging_report(vbasedev, iova, size,
+ vbmap->bitmap);
+ if (ret) {
+ error_report("%s: Failed to get DMA logging report, iova: "
+ "0x%" HWADDR_PRIx ", size: 0x%" HWADDR_PRIx
+ ", err: %d (%s)",
+ vbasedev->name, iova, size, ret, strerror(-ret));
- return ret;
- }
+ return ret;
}
}
return 0;
}
-static int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
- hwaddr iova, hwaddr size)
-{
- struct vfio_iommu_type1_dirty_bitmap *dbitmap;
- struct vfio_iommu_type1_dirty_bitmap_get *range;
- int ret;
-
- dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
-
- dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
- dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
- range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data;
- range->iova = iova;
- range->size = size;
-
- /*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
- * qemu_real_host_page_size to mark those dirty. Hence set bitmap's pgsize
- * to qemu_real_host_page_size.
- */
- range->bitmap.pgsize = qemu_real_host_page_size();
- range->bitmap.size = vbmap->size;
- range->bitmap.data = (__u64 *)vbmap->bitmap;
-
- ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
- if (ret) {
- ret = -errno;
- error_report("Failed to get dirty bitmap for iova: 0x%"PRIx64
- " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova,
- (uint64_t)range->size, errno);
- }
-
- g_free(dbitmap);
-
- return ret;
-}
-
-static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr)
+int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr)
{
bool all_device_dirty_tracking =
vfio_devices_all_device_dirty_tracking(container);
@@ -1977,7 +1403,7 @@ static void vfio_listener_log_sync(MemoryListener *listener,
}
}
-static const MemoryListener vfio_memory_listener = {
+const MemoryListener vfio_memory_listener = {
.name = "vfio",
.region_add = vfio_listener_region_add,
.region_del = vfio_listener_region_del,
@@ -1986,338 +1412,34 @@ static const MemoryListener vfio_memory_listener = {
.log_sync = vfio_listener_log_sync,
};
-static void vfio_listener_release(VFIOContainer *container)
-{
- memory_listener_unregister(&container->listener);
- if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
- memory_listener_unregister(&container->prereg_listener);
- }
-}
-
-static struct vfio_info_cap_header *
-vfio_get_cap(void *ptr, uint32_t cap_offset, uint16_t id)
-{
- struct vfio_info_cap_header *hdr;
-
- for (hdr = ptr + cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
- if (hdr->id == id) {
- return hdr;
- }
- }
-
- return NULL;
-}
-
-struct vfio_info_cap_header *
-vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
-{
- if (!(info->flags & VFIO_REGION_INFO_FLAG_CAPS)) {
- return NULL;
- }
-
- return vfio_get_cap((void *)info, info->cap_offset, id);
-}
-
-static struct vfio_info_cap_header *
-vfio_get_iommu_type1_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
-{
- if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
- return NULL;
- }
-
- return vfio_get_cap((void *)info, info->cap_offset, id);
-}
-
-struct vfio_info_cap_header *
-vfio_get_device_info_cap(struct vfio_device_info *info, uint16_t id)
-{
- if (!(info->flags & VFIO_DEVICE_FLAGS_CAPS)) {
- return NULL;
- }
-
- return vfio_get_cap((void *)info, info->cap_offset, id);
-}
-
-bool vfio_get_info_dma_avail(struct vfio_iommu_type1_info *info,
- unsigned int *avail)
-{
- struct vfio_info_cap_header *hdr;
- struct vfio_iommu_type1_info_dma_avail *cap;
-
- /* If the capability cannot be found, assume no DMA limiting */
- hdr = vfio_get_iommu_type1_info_cap(info,
- VFIO_IOMMU_TYPE1_INFO_DMA_AVAIL);
- if (hdr == NULL) {
- return false;
- }
-
- if (avail != NULL) {
- cap = (void *) hdr;
- *avail = cap->avail;
- }
-
- return true;
-}
-
-static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
- struct vfio_region_info *info)
-{
- struct vfio_info_cap_header *hdr;
- struct vfio_region_info_cap_sparse_mmap *sparse;
- int i, j;
-
- hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
- if (!hdr) {
- return -ENODEV;
- }
-
- sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
-
- trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
- region->nr, sparse->nr_areas);
-
- region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
-
- for (i = 0, j = 0; i < sparse->nr_areas; i++) {
- if (sparse->areas[i].size) {
- trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
- sparse->areas[i].offset +
- sparse->areas[i].size - 1);
- region->mmaps[j].offset = sparse->areas[i].offset;
- region->mmaps[j].size = sparse->areas[i].size;
- j++;
- }
- }
-
- region->nr_mmaps = j;
- region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
-
- return 0;
-}
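
For a sense of when sparse areas show up: with vfio-pci the kernel typically carves the MSI-X table out of an otherwise mmap-able BAR, so e.g. a 16 KiB BAR with the table at offset 0x3000 is reported as an area [0x0, 0x3000) plus whatever lies beyond the table, and only those windows get mapped by vfio_region_mmap() below.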
-
-int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
- int index, const char *name)
-{
- struct vfio_region_info *info;
- int ret;
-
- ret = vfio_get_region_info(vbasedev, index, &info);
- if (ret) {
- return ret;
- }
-
- region->vbasedev = vbasedev;
- region->flags = info->flags;
- region->size = info->size;
- region->fd_offset = info->offset;
- region->nr = index;
-
- if (region->size) {
- region->mem = g_new0(MemoryRegion, 1);
- memory_region_init_io(region->mem, obj, &vfio_region_ops,
- region, name, region->size);
-
- if (!vbasedev->no_mmap &&
- region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
-
- ret = vfio_setup_region_sparse_mmaps(region, info);
-
- if (ret) {
- region->nr_mmaps = 1;
- region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
- region->mmaps[0].offset = 0;
- region->mmaps[0].size = region->size;
- }
- }
- }
-
- g_free(info);
-
- trace_vfio_region_setup(vbasedev->name, index, name,
- region->flags, region->fd_offset, region->size);
- return 0;
-}
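
A hedged sketch of the lifecycle these region helpers form (identifiers illustrative, error handling elided):

    /* realize: describe the region, then map what the kernel allows */
    if (!vfio_region_setup(OBJECT(dev), vbasedev, region, nr, "region")) {
        vfio_region_mmap(region);
    }

    /* unrealize: tear down subregions, then free everything */
    vfio_region_exit(region);
    vfio_region_finalize(region);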
-
-static void vfio_subregion_unmap(VFIORegion *region, int index)
-{
- trace_vfio_region_unmap(memory_region_name(&region->mmaps[index].mem),
- region->mmaps[index].offset,
- region->mmaps[index].offset +
- region->mmaps[index].size - 1);
- memory_region_del_subregion(region->mem, &region->mmaps[index].mem);
- munmap(region->mmaps[index].mmap, region->mmaps[index].size);
- object_unparent(OBJECT(&region->mmaps[index].mem));
- region->mmaps[index].mmap = NULL;
-}
-
-int vfio_region_mmap(VFIORegion *region)
-{
- int i, prot = 0;
- char *name;
-
- if (!region->mem) {
- return 0;
- }
-
- prot |= region->flags & VFIO_REGION_INFO_FLAG_READ ? PROT_READ : 0;
- prot |= region->flags & VFIO_REGION_INFO_FLAG_WRITE ? PROT_WRITE : 0;
-
- for (i = 0; i < region->nr_mmaps; i++) {
- region->mmaps[i].mmap = mmap(NULL, region->mmaps[i].size, prot,
- MAP_SHARED, region->vbasedev->fd,
- region->fd_offset +
- region->mmaps[i].offset);
- if (region->mmaps[i].mmap == MAP_FAILED) {
- int ret = -errno;
-
- trace_vfio_region_mmap_fault(memory_region_name(region->mem), i,
- region->fd_offset +
- region->mmaps[i].offset,
- region->fd_offset +
- region->mmaps[i].offset +
- region->mmaps[i].size - 1, ret);
-
- region->mmaps[i].mmap = NULL;
-
- for (i--; i >= 0; i--) {
- vfio_subregion_unmap(region, i);
- }
-
- return ret;
- }
-
- name = g_strdup_printf("%s mmaps[%d]",
- memory_region_name(region->mem), i);
- memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
- memory_region_owner(region->mem),
- name, region->mmaps[i].size,
- region->mmaps[i].mmap);
- g_free(name);
- memory_region_add_subregion(region->mem, region->mmaps[i].offset,
- &region->mmaps[i].mem);
-
- trace_vfio_region_mmap(memory_region_name(&region->mmaps[i].mem),
- region->mmaps[i].offset,
- region->mmaps[i].offset +
- region->mmaps[i].size - 1);
- }
-
- return 0;
-}
-
-void vfio_region_unmap(VFIORegion *region)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- vfio_subregion_unmap(region, i);
- }
- }
-}
-
-void vfio_region_exit(VFIORegion *region)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- memory_region_del_subregion(region->mem, &region->mmaps[i].mem);
- }
- }
-
- trace_vfio_region_exit(region->vbasedev->name, region->nr);
-}
-
-void vfio_region_finalize(VFIORegion *region)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- munmap(region->mmaps[i].mmap, region->mmaps[i].size);
- object_unparent(OBJECT(&region->mmaps[i].mem));
- }
- }
-
- object_unparent(OBJECT(region->mem));
-
- g_free(region->mem);
- g_free(region->mmaps);
-
- trace_vfio_region_finalize(region->vbasedev->name, region->nr);
-
- region->mem = NULL;
- region->mmaps = NULL;
- region->nr_mmaps = 0;
- region->size = 0;
- region->flags = 0;
- region->nr = 0;
-}
-
-void vfio_region_mmaps_set_enabled(VFIORegion *region, bool enabled)
-{
- int i;
-
- if (!region->mem) {
- return;
- }
-
- for (i = 0; i < region->nr_mmaps; i++) {
- if (region->mmaps[i].mmap) {
- memory_region_set_enabled(&region->mmaps[i].mem, enabled);
- }
- }
-
- trace_vfio_region_mmaps_set_enabled(memory_region_name(region->mem),
- enabled);
-}
-
void vfio_reset_handler(void *opaque)
{
- VFIOGroup *group;
VFIODevice *vbasedev;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dev->realized) {
- vbasedev->ops->vfio_compute_needs_reset(vbasedev);
- }
+ QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ if (vbasedev->dev->realized) {
+ vbasedev->ops->vfio_compute_needs_reset(vbasedev);
}
}
- QLIST_FOREACH(group, &vfio_group_list, next) {
- QLIST_FOREACH(vbasedev, &group->device_list, next) {
- if (vbasedev->dev->realized && vbasedev->needs_reset) {
- vbasedev->ops->vfio_hot_reset_multi(vbasedev);
- }
+ QLIST_FOREACH(vbasedev, &vfio_device_list, next) {
+ if (vbasedev->dev->realized && vbasedev->needs_reset) {
+ vbasedev->ops->vfio_hot_reset_multi(vbasedev);
}
}
}
-static void vfio_kvm_device_add_group(VFIOGroup *group)
+int vfio_kvm_device_add_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_GROUP,
- .attr = KVM_DEV_VFIO_GROUP_ADD,
- .addr = (uint64_t)(unsigned long)&group->fd,
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_ADD,
+ .addr = (uint64_t)(unsigned long)&fd,
};
if (!kvm_enabled()) {
- return;
+ return 0;
}
if (vfio_kvm_device_fd < 0) {
@@ -2326,41 +1448,46 @@ static void vfio_kvm_device_add_group(VFIOGroup *group)
};
if (kvm_vm_ioctl(kvm_state, KVM_CREATE_DEVICE, &cd)) {
- error_report("Failed to create KVM VFIO device: %m");
- return;
+ error_setg_errno(errp, errno, "Failed to create KVM VFIO device");
+ return -errno;
}
vfio_kvm_device_fd = cd.fd;
}
if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_report("Failed to add group %d to KVM VFIO device: %m",
- group->groupid);
+ error_setg_errno(errp, errno, "Failed to add fd %d to KVM VFIO device",
+ fd);
+ return -errno;
}
#endif
+ return 0;
}
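
Note that in current kernel headers the KVM_DEV_VFIO_FILE / KVM_DEV_VFIO_FILE_ADD values are shared with the older KVM_DEV_VFIO_GROUP / KVM_DEV_VFIO_GROUP_ADD names (kept as uapi aliases), which is what lets this single helper register either a legacy group fd or an iommufd cdev fd. A caller sketch, with hypothetical context:

    Error *err = NULL;

    if (vfio_kvm_device_add_fd(group->fd, &err)) {
        error_reportf_err(err, "vfio: ");
    }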
-static void vfio_kvm_device_del_group(VFIOGroup *group)
+int vfio_kvm_device_del_fd(int fd, Error **errp)
{
#ifdef CONFIG_KVM
struct kvm_device_attr attr = {
- .group = KVM_DEV_VFIO_GROUP,
- .attr = KVM_DEV_VFIO_GROUP_DEL,
- .addr = (uint64_t)(unsigned long)&group->fd,
+ .group = KVM_DEV_VFIO_FILE,
+ .attr = KVM_DEV_VFIO_FILE_DEL,
+ .addr = (uint64_t)(unsigned long)&fd,
};
if (vfio_kvm_device_fd < 0) {
- return;
+ error_setg(errp, "KVM VFIO device isn't created yet");
+ return -EINVAL;
}
if (ioctl(vfio_kvm_device_fd, KVM_SET_DEVICE_ATTR, &attr)) {
- error_report("Failed to remove group %d from KVM VFIO device: %m",
- group->groupid);
+ error_setg_errno(errp, errno,
+ "Failed to remove fd %d from KVM VFIO device", fd);
+ return -errno;
}
#endif
+ return 0;
}
-static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
+VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
{
VFIOAddressSpace *space;
@@ -2375,516 +1502,22 @@ static VFIOAddressSpace *vfio_get_address_space(AddressSpace *as)
space->as = as;
QLIST_INIT(&space->containers);
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
+ qemu_register_reset(vfio_reset_handler, NULL);
+ }
+
QLIST_INSERT_HEAD(&vfio_address_spaces, space, list);
return space;
}
-static void vfio_put_address_space(VFIOAddressSpace *space)
+void vfio_put_address_space(VFIOAddressSpace *space)
{
if (QLIST_EMPTY(&space->containers)) {
QLIST_REMOVE(space, list);
g_free(space);
}
-}
-
-/*
- * vfio_get_iommu_type - selects the richest iommu_type (v2 first)
- */
-static int vfio_get_iommu_type(VFIOContainer *container,
- Error **errp)
-{
- int iommu_types[] = { VFIO_TYPE1v2_IOMMU, VFIO_TYPE1_IOMMU,
- VFIO_SPAPR_TCE_v2_IOMMU, VFIO_SPAPR_TCE_IOMMU };
- int i;
-
- for (i = 0; i < ARRAY_SIZE(iommu_types); i++) {
- if (ioctl(container->fd, VFIO_CHECK_EXTENSION, iommu_types[i])) {
- return iommu_types[i];
- }
- }
- error_setg(errp, "No available IOMMU models");
- return -EINVAL;
-}
-
-static int vfio_init_container(VFIOContainer *container, int group_fd,
- Error **errp)
-{
- int iommu_type, ret;
-
- iommu_type = vfio_get_iommu_type(container, errp);
- if (iommu_type < 0) {
- return iommu_type;
- }
-
- ret = ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &container->fd);
- if (ret) {
- error_setg_errno(errp, errno, "Failed to set group container");
- return -errno;
- }
-
- while (ioctl(container->fd, VFIO_SET_IOMMU, iommu_type)) {
- if (iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
- /*
- * On sPAPR, despite the IOMMU subdriver always advertises v1 and
- * v2, the running platform may not support v2 and there is no
- * way to guess it until an IOMMU group gets added to the container.
- * So in case it fails with v2, try v1 as a fallback.
- */
- iommu_type = VFIO_SPAPR_TCE_IOMMU;
- continue;
- }
- error_setg_errno(errp, errno, "Failed to set iommu for container");
- return -errno;
- }
-
- container->iommu_type = iommu_type;
- return 0;
-}
-
-static int vfio_get_iommu_info(VFIOContainer *container,
- struct vfio_iommu_type1_info **info)
-{
-
- size_t argsz = sizeof(struct vfio_iommu_type1_info);
-
- *info = g_new0(struct vfio_iommu_type1_info, 1);
-again:
- (*info)->argsz = argsz;
-
- if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
- g_free(*info);
- *info = NULL;
- return -errno;
- }
-
- if (((*info)->argsz > argsz)) {
- argsz = (*info)->argsz;
- *info = g_realloc(*info, argsz);
- goto again;
- }
-
- return 0;
-}
-
-static struct vfio_info_cap_header *
-vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
-{
- struct vfio_info_cap_header *hdr;
- void *ptr = info;
-
- if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
- return NULL;
- }
-
- for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
- if (hdr->id == id) {
- return hdr;
- }
- }
-
- return NULL;
-}
-
-static void vfio_get_iommu_info_migration(VFIOContainer *container,
- struct vfio_iommu_type1_info *info)
-{
- struct vfio_info_cap_header *hdr;
- struct vfio_iommu_type1_info_cap_migration *cap_mig;
-
- hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
- if (!hdr) {
- return;
- }
-
- cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
- header);
-
- /*
- * cpu_physical_memory_set_dirty_lebitmap() supports pages in bitmap of
- * qemu_real_host_page_size to mark those dirty.
- */
- if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
- container->dirty_pages_supported = true;
- container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
- container->dirty_pgsizes = cap_mig->pgsize_bitmap;
- }
-}
-
-static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
- Error **errp)
-{
- VFIOContainer *container;
- int ret, fd;
- VFIOAddressSpace *space;
-
- space = vfio_get_address_space(as);
-
- /*
- * VFIO is currently incompatible with discarding of RAM insofar as the
- * madvise to purge (zap) the page from QEMU's address space does not
- * interact with the memory API and therefore leaves stale virtual to
- * physical mappings in the IOMMU if the page was previously pinned. We
- * therefore set discarding broken for each group added to a container,
- * whether the container is used individually or shared. This provides
- * us with options to allow devices within a group to opt-in and allow
- * discarding, so long as it is done consistently for a group (for instance
- * if the device is an mdev device where it is known that the host vendor
- * driver will never pin pages outside of the working set of the guest
- * driver, which would thus not be discarding candidates).
- *
- * The first opportunity to induce pinning occurs here where we attempt to
- * attach the group to existing containers within the AddressSpace. If any
- * pages are already zapped from the virtual address space, such as from
- * previous discards, new pinning will cause valid mappings to be
- * re-established. Likewise, when the overall MemoryListener for a new
- * container is registered, a replay of mappings within the AddressSpace
- * will occur, re-establishing any previously zapped pages as well.
- *
- * Especially virtio-balloon is currently only prevented from discarding
- * new memory, it will not yet set ram_block_discard_set_required() and
- * therefore, neither stops us here or deals with the sudden memory
- * consumption of inflated memory.
- *
- * We do support discarding of memory coordinated via the RamDiscardManager
- * with some IOMMU types. vfio_ram_block_discard_disable() handles the
- * details once we know which type of IOMMU we are using.
- */
-
- QLIST_FOREACH(container, &space->containers, next) {
- if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
- ret = vfio_ram_block_discard_disable(container, true);
- if (ret) {
- error_setg_errno(errp, -ret,
- "Cannot set discarding of RAM broken");
- if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER,
- &container->fd)) {
- error_report("vfio: error disconnecting group %d from"
- " container", group->groupid);
- }
- return ret;
- }
- group->container = container;
- QLIST_INSERT_HEAD(&container->group_list, group, container_next);
- vfio_kvm_device_add_group(group);
- return 0;
- }
- }
-
- fd = qemu_open_old("/dev/vfio/vfio", O_RDWR);
- if (fd < 0) {
- error_setg_errno(errp, errno, "failed to open /dev/vfio/vfio");
- ret = -errno;
- goto put_space_exit;
- }
-
- ret = ioctl(fd, VFIO_GET_API_VERSION);
- if (ret != VFIO_API_VERSION) {
- error_setg(errp, "supported vfio version: %d, "
- "reported version: %d", VFIO_API_VERSION, ret);
- ret = -EINVAL;
- goto close_fd_exit;
- }
-
- container = g_malloc0(sizeof(*container));
- container->space = space;
- container->fd = fd;
- container->error = NULL;
- container->dirty_pages_supported = false;
- container->dma_max_mappings = 0;
- QLIST_INIT(&container->giommu_list);
- QLIST_INIT(&container->hostwin_list);
- QLIST_INIT(&container->vrdl_list);
-
- ret = vfio_init_container(container, group->fd, errp);
- if (ret) {
- goto free_container_exit;
- }
-
- ret = vfio_ram_block_discard_disable(container, true);
- if (ret) {
- error_setg_errno(errp, -ret, "Cannot set discarding of RAM broken");
- goto free_container_exit;
- }
-
- switch (container->iommu_type) {
- case VFIO_TYPE1v2_IOMMU:
- case VFIO_TYPE1_IOMMU:
- {
- struct vfio_iommu_type1_info *info;
-
- ret = vfio_get_iommu_info(container, &info);
- if (ret) {
- error_setg_errno(errp, -ret, "Failed to get VFIO IOMMU info");
- goto enable_discards_exit;
- }
-
- if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
- container->pgsizes = info->iova_pgsizes;
- } else {
- container->pgsizes = qemu_real_host_page_size();
- }
-
- if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
- container->dma_max_mappings = 65535;
- }
- vfio_get_iommu_info_migration(container, info);
- g_free(info);
-
- /*
- * FIXME: We should parse VFIO_IOMMU_TYPE1_INFO_CAP_IOVA_RANGE
- * information to get the actual window extent rather than assume
- * a 64-bit IOVA address space.
- */
- vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);
-
- break;
- }
- case VFIO_SPAPR_TCE_v2_IOMMU:
- case VFIO_SPAPR_TCE_IOMMU:
- {
- struct vfio_iommu_spapr_tce_info info;
- bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
-
- /*
- * The host kernel code implementing VFIO_IOMMU_DISABLE is called
- * when container fd is closed so we do not call it explicitly
- * in this file.
- */
- if (!v2) {
- ret = ioctl(fd, VFIO_IOMMU_ENABLE);
- if (ret) {
- error_setg_errno(errp, errno, "failed to enable container");
- ret = -errno;
- goto enable_discards_exit;
- }
- } else {
- container->prereg_listener = vfio_prereg_listener;
-
- memory_listener_register(&container->prereg_listener,
- &address_space_memory);
- if (container->error) {
- memory_listener_unregister(&container->prereg_listener);
- ret = -1;
- error_propagate_prepend(errp, container->error,
- "RAM memory listener initialization failed: ");
- goto enable_discards_exit;
- }
- }
-
- info.argsz = sizeof(info);
- ret = ioctl(fd, VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
- if (ret) {
- error_setg_errno(errp, errno,
- "VFIO_IOMMU_SPAPR_TCE_GET_INFO failed");
- ret = -errno;
- if (v2) {
- memory_listener_unregister(&container->prereg_listener);
- }
- goto enable_discards_exit;
- }
-
- if (v2) {
- container->pgsizes = info.ddw.pgsizes;
- /*
- * There is a default window in just created container.
- * To make region_add/del simpler, we better remove this
- * window now and let those iommu_listener callbacks
- * create/remove them when needed.
- */
- ret = vfio_spapr_remove_window(container, info.dma32_window_start);
- if (ret) {
- error_setg_errno(errp, -ret,
- "failed to remove existing window");
- goto enable_discards_exit;
- }
- } else {
- /* The default table uses 4K pages */
- container->pgsizes = 0x1000;
- vfio_host_win_add(container, info.dma32_window_start,
- info.dma32_window_start +
- info.dma32_window_size - 1,
- 0x1000);
- }
- }
- }
-
- vfio_kvm_device_add_group(group);
-
- QLIST_INIT(&container->group_list);
- QLIST_INSERT_HEAD(&space->containers, container, next);
-
- group->container = container;
- QLIST_INSERT_HEAD(&container->group_list, group, container_next);
-
- container->listener = vfio_memory_listener;
-
- memory_listener_register(&container->listener, container->space->as);
-
- if (container->error) {
- ret = -1;
- error_propagate_prepend(errp, container->error,
- "memory listener initialization failed: ");
- goto listener_release_exit;
- }
-
- container->initialized = true;
-
- return 0;
-listener_release_exit:
- QLIST_REMOVE(group, container_next);
- QLIST_REMOVE(container, next);
- vfio_kvm_device_del_group(group);
- vfio_listener_release(container);
-
-enable_discards_exit:
- vfio_ram_block_discard_disable(container, false);
-
-free_container_exit:
- g_free(container);
-
-close_fd_exit:
- close(fd);
-
-put_space_exit:
- vfio_put_address_space(space);
-
- return ret;
-}
-
-static void vfio_disconnect_container(VFIOGroup *group)
-{
- VFIOContainer *container = group->container;
-
- QLIST_REMOVE(group, container_next);
- group->container = NULL;
-
- /*
- * Explicitly release the listener first before unset container,
- * since unset may destroy the backend container if it's the last
- * group.
- */
- if (QLIST_EMPTY(&container->group_list)) {
- vfio_listener_release(container);
- }
-
- if (ioctl(group->fd, VFIO_GROUP_UNSET_CONTAINER, &container->fd)) {
- error_report("vfio: error disconnecting group %d from container",
- group->groupid);
- }
-
- if (QLIST_EMPTY(&container->group_list)) {
- VFIOAddressSpace *space = container->space;
- VFIOGuestIOMMU *giommu, *tmp;
- VFIOHostDMAWindow *hostwin, *next;
-
- QLIST_REMOVE(container, next);
-
- QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
- memory_region_unregister_iommu_notifier(
- MEMORY_REGION(giommu->iommu_mr), &giommu->n);
- QLIST_REMOVE(giommu, giommu_next);
- g_free(giommu);
- }
-
- QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
- next) {
- QLIST_REMOVE(hostwin, hostwin_next);
- g_free(hostwin);
- }
-
- trace_vfio_disconnect_container(container->fd);
- close(container->fd);
- g_free(container);
-
- vfio_put_address_space(space);
- }
-}
-
-VFIOGroup *vfio_get_group(int groupid, AddressSpace *as, Error **errp)
-{
- VFIOGroup *group;
- char path[32];
- struct vfio_group_status status = { .argsz = sizeof(status) };
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- if (group->groupid == groupid) {
- /* Found it. Now is it already in the right context? */
- if (group->container->space->as == as) {
- return group;
- } else {
- error_setg(errp, "group %d used in multiple address spaces",
- group->groupid);
- return NULL;
- }
- }
- }
-
- group = g_malloc0(sizeof(*group));
-
- snprintf(path, sizeof(path), "/dev/vfio/%d", groupid);
- group->fd = qemu_open_old(path, O_RDWR);
- if (group->fd < 0) {
- error_setg_errno(errp, errno, "failed to open %s", path);
- goto free_group_exit;
- }
-
- if (ioctl(group->fd, VFIO_GROUP_GET_STATUS, &status)) {
- error_setg_errno(errp, errno, "failed to get group %d status", groupid);
- goto close_fd_exit;
- }
-
- if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
- error_setg(errp, "group %d is not viable", groupid);
- error_append_hint(errp,
- "Please ensure all devices within the iommu_group "
- "are bound to their vfio bus driver.\n");
- goto close_fd_exit;
- }
-
- group->groupid = groupid;
- QLIST_INIT(&group->device_list);
-
- if (vfio_connect_container(group, as, errp)) {
- error_prepend(errp, "failed to setup container for group %d: ",
- groupid);
- goto close_fd_exit;
- }
-
- if (QLIST_EMPTY(&vfio_group_list)) {
- qemu_register_reset(vfio_reset_handler, NULL);
- }
-
- QLIST_INSERT_HEAD(&vfio_group_list, group, next);
-
- return group;
-
-close_fd_exit:
- close(group->fd);
-
-free_group_exit:
- g_free(group);
-
- return NULL;
-}
-
-void vfio_put_group(VFIOGroup *group)
-{
- if (!group || !QLIST_EMPTY(&group->device_list)) {
- return;
- }
-
- if (!group->ram_block_discard_allowed) {
- vfio_ram_block_discard_disable(group->container, false);
- }
- vfio_kvm_device_del_group(group);
- vfio_disconnect_container(group);
- QLIST_REMOVE(group, next);
- trace_vfio_put_group(group->fd);
- close(group->fd);
- g_free(group);
-
- if (QLIST_EMPTY(&vfio_group_list)) {
+ if (QLIST_EMPTY(&vfio_address_spaces)) {
qemu_unregister_reset(vfio_reset_handler, NULL);
}
}
@@ -2912,245 +1545,3 @@ retry:
return info;
}
-
-int vfio_get_device(VFIOGroup *group, const char *name,
- VFIODevice *vbasedev, Error **errp)
-{
- g_autofree struct vfio_device_info *info = NULL;
- int fd;
-
- fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, name);
- if (fd < 0) {
- error_setg_errno(errp, errno, "error getting device from group %d",
- group->groupid);
- error_append_hint(errp,
- "Verify all devices in group %d are bound to vfio-<bus> "
- "or pci-stub and not already in use\n", group->groupid);
- return fd;
- }
-
- info = vfio_get_device_info(fd);
- if (!info) {
- error_setg_errno(errp, errno, "error getting device info");
- close(fd);
- return -1;
- }
-
- /*
- * Set discarding of RAM as not broken for this group if the driver knows
- * the device operates compatibly with discarding. Setting must be
- * consistent per group, but since compatibility is really only possible
- * with mdev currently, we expect singleton groups.
- */
- if (vbasedev->ram_block_discard_allowed !=
- group->ram_block_discard_allowed) {
- if (!QLIST_EMPTY(&group->device_list)) {
- error_setg(errp, "Inconsistent setting of support for discarding "
- "RAM (e.g., balloon) within group");
- close(fd);
- return -1;
- }
-
- if (!group->ram_block_discard_allowed) {
- group->ram_block_discard_allowed = true;
- vfio_ram_block_discard_disable(group->container, false);
- }
- }
-
- vbasedev->fd = fd;
- vbasedev->group = group;
- QLIST_INSERT_HEAD(&group->device_list, vbasedev, next);
-
- vbasedev->num_irqs = info->num_irqs;
- vbasedev->num_regions = info->num_regions;
- vbasedev->flags = info->flags;
-
- trace_vfio_get_device(name, info->flags, info->num_regions, info->num_irqs);
-
- vbasedev->reset_works = !!(info->flags & VFIO_DEVICE_FLAGS_RESET);
-
- return 0;
-}
-
-void vfio_put_base_device(VFIODevice *vbasedev)
-{
- if (!vbasedev->group) {
- return;
- }
- QLIST_REMOVE(vbasedev, next);
- vbasedev->group = NULL;
- trace_vfio_put_base_device(vbasedev->fd);
- close(vbasedev->fd);
-}
-
-int vfio_get_region_info(VFIODevice *vbasedev, int index,
- struct vfio_region_info **info)
-{
- size_t argsz = sizeof(struct vfio_region_info);
-
- *info = g_malloc0(argsz);
-
- (*info)->index = index;
-retry:
- (*info)->argsz = argsz;
-
- if (ioctl(vbasedev->fd, VFIO_DEVICE_GET_REGION_INFO, *info)) {
- g_free(*info);
- *info = NULL;
- return -errno;
- }
-
- if ((*info)->argsz > argsz) {
- argsz = (*info)->argsz;
- *info = g_realloc(*info, argsz);
-
- goto retry;
- }
-
- return 0;
-}
-
-int vfio_get_dev_region_info(VFIODevice *vbasedev, uint32_t type,
- uint32_t subtype, struct vfio_region_info **info)
-{
- int i;
-
- for (i = 0; i < vbasedev->num_regions; i++) {
- struct vfio_info_cap_header *hdr;
- struct vfio_region_info_cap_type *cap_type;
-
- if (vfio_get_region_info(vbasedev, i, info)) {
- continue;
- }
-
- hdr = vfio_get_region_info_cap(*info, VFIO_REGION_INFO_CAP_TYPE);
- if (!hdr) {
- g_free(*info);
- continue;
- }
-
- cap_type = container_of(hdr, struct vfio_region_info_cap_type, header);
-
- trace_vfio_get_dev_region(vbasedev->name, i,
- cap_type->type, cap_type->subtype);
-
- if (cap_type->type == type && cap_type->subtype == subtype) {
- return 0;
- }
-
- g_free(*info);
- }
-
- *info = NULL;
- return -ENODEV;
-}
-
-bool vfio_has_region_cap(VFIODevice *vbasedev, int region, uint16_t cap_type)
-{
- struct vfio_region_info *info = NULL;
- bool ret = false;
-
- if (!vfio_get_region_info(vbasedev, region, &info)) {
- if (vfio_get_region_info_cap(info, cap_type)) {
- ret = true;
- }
- g_free(info);
- }
-
- return ret;
-}
-
-/*
- * Interfaces for IBM EEH (Enhanced Error Handling)
- */
-static bool vfio_eeh_container_ok(VFIOContainer *container)
-{
- /*
- * As of 2016-03-04 (linux-4.5) the host kernel EEH/VFIO
- * implementation is broken if there are multiple groups in a
- * container. The hardware works in units of Partitionable
- * Endpoints (== IOMMU groups) and the EEH operations naively
- * iterate across all groups in the container, without any logic
- * to make sure the groups have their state synchronized. For
- * certain operations (ENABLE) that might be ok, until an error
- * occurs, but for others (GET_STATE) it's clearly broken.
- */
-
- /*
- * XXX Once fixed kernels exist, test for them here
- */
-
- if (QLIST_EMPTY(&container->group_list)) {
- return false;
- }
-
- if (QLIST_NEXT(QLIST_FIRST(&container->group_list), container_next)) {
- return false;
- }
-
- return true;
-}
-
-static int vfio_eeh_container_op(VFIOContainer *container, uint32_t op)
-{
- struct vfio_eeh_pe_op pe_op = {
- .argsz = sizeof(pe_op),
- .op = op,
- };
- int ret;
-
- if (!vfio_eeh_container_ok(container)) {
- error_report("vfio/eeh: EEH_PE_OP 0x%x: "
- "kernel requires a container with exactly one group", op);
- return -EPERM;
- }
-
- ret = ioctl(container->fd, VFIO_EEH_PE_OP, &pe_op);
- if (ret < 0) {
- error_report("vfio/eeh: EEH_PE_OP 0x%x failed: %m", op);
- return -errno;
- }
-
- return ret;
-}
-
-static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
-{
- VFIOAddressSpace *space = vfio_get_address_space(as);
- VFIOContainer *container = NULL;
-
- if (QLIST_EMPTY(&space->containers)) {
- /* No containers to act on */
- goto out;
- }
-
- container = QLIST_FIRST(&space->containers);
-
- if (QLIST_NEXT(container, next)) {
- /* We don't yet have logic to synchronize EEH state across
- * multiple containers */
- container = NULL;
- goto out;
- }
-
-out:
- vfio_put_address_space(space);
- return container;
-}
-
-bool vfio_eeh_as_ok(AddressSpace *as)
-{
- VFIOContainer *container = vfio_eeh_as_container(as);
-
- return (container != NULL) && vfio_eeh_container_ok(container);
-}
-
-int vfio_eeh_as_op(AddressSpace *as, uint32_t op)
-{
- VFIOContainer *container = vfio_eeh_as_container(as);
-
- if (!container) {
- return -ENODEV;
- }
- return vfio_eeh_container_op(container, op);
-}