author | Alexey Kardashevskiy <aik@ozlabs.ru> | 2018-03-13 11:17:30 -0600 |
---|---|---|
committer | Alex Williamson <alex.williamson@redhat.com> | 2018-03-13 11:17:30 -0600 |
commit | 567b5b309abe744b1098018a2eb157e7109c9f30 (patch) |
tree | fc07cc15198fb1c7e242867c18e35e97f8b12f63 /hw | |
parent | 8b818e059bf071749f96fa4a10934eb533777f9a (diff) |
vfio/pci: Relax DMA map errors for MMIO regions
At the moment, if vfio_memory_listener is registered in the system memory
address space, it maps/unmaps every RAM memory region for DMA.
It expects memory sections aligned to the system page size so that
vfio_dma_map does not fail, and so far this has been the case. A mapping
failure would be fatal. A side effect of this behavior is that some MMIO
pages are silently left unmapped.
However, we are going to change MSIX BAR handling, so we will end up having
non-aligned sections in vfio_memory_listener (more details are in
the next patch) and vfio_dma_map will exit QEMU.
In order to avoid fatal failures on what previously was not a failure and
was simply ignored silently, this patch checks the section alignment against
the smallest supported IOMMU page size and prints an error if the section is
not aligned; it also prints an error if vfio_dma_map fails despite the page
size check. Neither error is fatal; only MMIO RAM regions
(aka "RAM device" regions) are checked.
If the number of errors printed becomes overwhelming, MSIX relocation
can be used to avoid the excessive error output.
This is unlikely to cause any behavioral change.
Signed-off-by: Alexey Kardashevskiy <aik@ozlabs.ru>
[aw: Fix Int128 bit ops]
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
Diffstat (limited to 'hw')
-rw-r--r-- | hw/vfio/common.c | 55 |
1 file changed, 49 insertions, 6 deletions
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 6a8203a532..07c03d78b6 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -544,18 +544,40 @@ static void vfio_listener_region_add(MemoryListener *listener,
 
     llsize = int128_sub(llend, int128_make64(iova));
 
+    if (memory_region_is_ram_device(section->mr)) {
+        hwaddr pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+
+        if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
+            error_report("Region 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx
+                         " is not aligned to 0x%"HWADDR_PRIx
+                         " and cannot be mapped for DMA",
+                         section->offset_within_region,
+                         int128_getlo(section->size),
+                         pgmask + 1);
+            return;
+        }
+    }
+
     ret = vfio_dma_map(container, iova, int128_get64(llsize),
                        vaddr, section->readonly);
     if (ret) {
         error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
                      "0x%"HWADDR_PRIx", %p) = %d (%m)",
                      container, iova, int128_get64(llsize), vaddr, ret);
+        if (memory_region_is_ram_device(section->mr)) {
+            /* Allow unexpected mappings not to be fatal for RAM devices */
+            return;
+        }
         goto fail;
     }
 
     return;
 
 fail:
+    if (memory_region_is_ram_device(section->mr)) {
+        error_report("failed to vfio_dma_map. pci p2p may not work");
+        return;
+    }
     /*
      * On the initfn path, store the first error in the container so we
      * can gracefully fail. Runtime, there's not much we can do other
@@ -577,6 +599,7 @@ static void vfio_listener_region_del(MemoryListener *listener,
     hwaddr iova, end;
     Int128 llend, llsize;
     int ret;
+    bool try_unmap = true;
 
     if (vfio_listener_skipped_section(section)) {
         trace_vfio_listener_region_del_skip(
@@ -629,14 +652,34 @@ static void vfio_listener_region_del(MemoryListener *listener,
 
     trace_vfio_listener_region_del(iova, end);
 
-    ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
-    memory_region_unref(section->mr);
-    if (ret) {
-        error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
-                     "0x%"HWADDR_PRIx") = %d (%m)",
-                     container, iova, int128_get64(llsize), ret);
+    if (memory_region_is_ram_device(section->mr)) {
+        hwaddr pgmask;
+        VFIOHostDMAWindow *hostwin;
+        bool hostwin_found = false;
+
+        QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+            if (hostwin->min_iova <= iova && end <= hostwin->max_iova) {
+                hostwin_found = true;
+                break;
+            }
+        }
+        assert(hostwin_found); /* or region_add() would have failed */
+
+        pgmask = (1ULL << ctz64(hostwin->iova_pgsizes)) - 1;
+        try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
+    }
+
+    if (try_unmap) {
+        ret = vfio_dma_unmap(container, iova, int128_get64(llsize));
+        if (ret) {
+            error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+                         "0x%"HWADDR_PRIx") = %d (%m)",
+                         container, iova, int128_get64(llsize), ret);
+        }
     }
+
+    memory_region_unref(section->mr);
+
     if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
         vfio_spapr_remove_window(container,
                                  section->offset_within_address_space);