Diffstat (limited to 'hw/vfio/common.c')
-rw-r--r--  hw/vfio/common.c | 66
1 file changed, 5 insertions(+), 61 deletions(-)
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 6b5d8c0bf6..130e5d1dc7 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -578,45 +578,11 @@ static bool vfio_listener_skipped_section(MemoryRegionSection *section)
static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
ram_addr_t *ram_addr, bool *read_only)
{
- MemoryRegion *mr;
- hwaddr xlat;
- hwaddr len = iotlb->addr_mask + 1;
- bool writable = iotlb->perm & IOMMU_WO;
-
- /*
- * The IOMMU TLB entry we have just covers translation through
- * this IOMMU to its immediate target. We need to translate
- * it the rest of the way through to memory.
- */
- mr = address_space_translate(&address_space_memory,
- iotlb->translated_addr,
- &xlat, &len, writable,
- MEMTXATTRS_UNSPECIFIED);
- if (!memory_region_is_ram(mr)) {
- error_report("iommu map to non memory area %"HWADDR_PRIx"",
- xlat);
- return false;
- } else if (memory_region_has_ram_discard_manager(mr)) {
- RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
- MemoryRegionSection tmp = {
- .mr = mr,
- .offset_within_region = xlat,
- .size = int128_make64(len),
- };
-
- /*
- * Malicious VMs can map memory into the IOMMU, which is expected
- * to remain discarded. vfio will pin all pages, populating memory.
- * Disallow that. vmstate priorities make sure any RamDiscardManager
- * were already restored before IOMMUs are restored.
- */
- if (!ram_discard_manager_is_populated(rdm, &tmp)) {
- error_report("iommu map to discarded memory (e.g., unplugged via"
- " virtio-mem): %"HWADDR_PRIx"",
- iotlb->translated_addr);
- return false;
- }
+ bool ret, mr_has_discard_manager;

+ ret = memory_get_xlat_addr(iotlb, vaddr, ram_addr, read_only,
+ &mr_has_discard_manager);
+ if (ret && mr_has_discard_manager) {
/*
* Malicious VMs might trigger discarding of IOMMU-mapped memory. The
* pages will remain pinned inside vfio until unmapped, resulting in a
@@ -635,29 +601,7 @@ static bool vfio_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
" intended via an IOMMU. It's possible to mitigate "
" by setting/adjusting RLIMIT_MEMLOCK.");
}
-
- /*
- * Translation truncates length to the IOMMU page size,
- * check that it did not truncate too much.
- */
- if (len & iotlb->addr_mask) {
- error_report("iommu has granularity incompatible with target AS");
- return false;
- }
-
- if (vaddr) {
- *vaddr = memory_region_get_ram_ptr(mr) + xlat;
- }
-
- if (ram_addr) {
- *ram_addr = memory_region_get_ram_addr(mr) + xlat;
- }
-
- if (read_only) {
- *read_only = !writable || mr->readonly;
- }
-
- return true;
+ return ret;
}

static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
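The replacement above relies on memory_get_xlat_addr(), the shared helper this change moves the open-coded translation into; the memory.c side of the patch is not part of this diff. The sketch below is inferred, not taken from this diff: it assumes the helper lives in softmmu/memory.c, carries over the logic removed above essentially unchanged, and uses the extra out-parameter only to report whether the target MemoryRegion has a RamDiscardManager, leaving the warning policy to the caller.

/*
 * Sketch only (assumed shape of the new helper, mirroring the code removed
 * from vfio_get_xlat_addr() above; not copied from memory.c).
 */
bool memory_get_xlat_addr(IOMMUTLBEntry *iotlb, void **vaddr,
                          ram_addr_t *ram_addr, bool *read_only,
                          bool *mr_has_discard_manager)
{
    MemoryRegion *mr;
    hwaddr xlat;
    hwaddr len = iotlb->addr_mask + 1;
    bool writable = iotlb->perm & IOMMU_WO;

    if (mr_has_discard_manager) {
        *mr_has_discard_manager = false;
    }

    /*
     * The IOMMU TLB entry we have just covers translation through
     * this IOMMU to its immediate target. Translate it the rest of
     * the way through to memory.
     */
    mr = address_space_translate(&address_space_memory,
                                 iotlb->translated_addr,
                                 &xlat, &len, writable,
                                 MEMTXATTRS_UNSPECIFIED);
    if (!memory_region_is_ram(mr)) {
        error_report("iommu map to non memory area %"HWADDR_PRIx"", xlat);
        return false;
    } else if (memory_region_has_ram_discard_manager(mr)) {
        RamDiscardManager *rdm = memory_region_get_ram_discard_manager(mr);
        MemoryRegionSection tmp = {
            .mr = mr,
            .offset_within_region = xlat,
            .size = int128_make64(len),
        };

        if (mr_has_discard_manager) {
            *mr_has_discard_manager = true;
        }

        /* Refuse to map memory that is expected to remain discarded. */
        if (!ram_discard_manager_is_populated(rdm, &tmp)) {
            error_report("iommu map to discarded memory (e.g., unplugged via"
                         " virtio-mem): %"HWADDR_PRIx"",
                         iotlb->translated_addr);
            return false;
        }
    }

    /*
     * Translation truncates length to the IOMMU page size,
     * check that it did not truncate too much.
     */
    if (len & iotlb->addr_mask) {
        error_report("iommu has granularity incompatible with target AS");
        return false;
    }

    if (vaddr) {
        *vaddr = memory_region_get_ram_ptr(mr) + xlat;
    }
    if (ram_addr) {
        *ram_addr = memory_region_get_ram_addr(mr) + xlat;
    }
    if (read_only) {
        *read_only = !writable || mr->readonly;
    }
    return true;
}

With the translation shared this way, hw/vfio/common.c keeps only the vfio-specific piece: the RLIMIT_MEMLOCK warning about pinning discardable RAM, now gated on the mr_has_discard_manager flag returned by the helper rather than on an inline RamDiscardManager lookup.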