| author    | Peter Xu <peterx@redhat.com>                      | 2019-06-24 17:18:11 +0800 |
| committer | Paolo Bonzini <pbonzini@redhat.com>               | 2019-07-05 22:16:46 +0200 |
| commit    | 9a4bb8391fda2312803d44664575a662b9be7189 (patch)  | |
| tree      | 335eee30aea3174e5f3963bf04f625770cde2a24 /hw/i386  | |
| parent    | d6d10793dcfa33504745be40b49bd747ff8752ef (diff)    | |
intel_iommu: Fix unexpected unmaps during global unmap
This is a replacement for Yan Zhao's patch:
https://www.mail-archive.com/qemu-devel@nongnu.org/msg625340.html
vtd_address_space_unmap() aligns the page mask properly so that each IOTLB
notification message carries a correct mask (2^N-1), but the aligned range
can sometimes expand beyond the registered range. That can lead to
unexpected UNMAPs of already-mapped regions in some other notifiers.

Instead of blindly expanding the start address and address mask, split the
range into smaller ones and guarantee that each small range has a correct
mask (2^N-1), while at the same time generating as few IOTLB messages as
possible.
Reported-by: Yan Zhao <yan.y.zhao@intel.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Message-Id: <20190624091811.30412-3-peterx@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
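
For context, the sketch below is illustration only and not part of the patch: the function name naturally_aligned_chunk() and the example range are made up. It reimplements the splitting strategy the commit message describes, following the same logic as the new get_naturally_aligned_size() helper in the diff below: each chunk is the largest power of two that both keeps the start address naturally aligned and fits in the remaining size, capped at 1ULL << gaw.

```c
/* Illustration only (not part of the patch). */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Leading-zero count of a non-zero 64-bit value (clz64() in QEMU). */
static int clz64(uint64_t value)
{
    return __builtin_clzll(value);
}

static uint64_t naturally_aligned_chunk(uint64_t start, uint64_t size, int gaw)
{
    uint64_t max_mask = 1ULL << gaw;
    uint64_t alignment = start ? start & -start : max_mask;

    alignment = MIN(alignment, max_mask);
    size = MIN(size, max_mask);

    if (alignment <= size) {
        /* The natural alignment of start is the limiting factor */
        return alignment;
    } else {
        /* Otherwise take the largest power of two not exceeding size */
        return 1ULL << (63 - clz64(size));
    }
}

int main(void)
{
    /* Hypothetical registered window [0x1000, 0x6fff], 39-bit address width */
    uint64_t start = 0x1000, end = 0x6fff;
    uint64_t remain = end - start + 1;

    while (remain) {
        uint64_t chunk = naturally_aligned_chunk(start, remain, 39);

        /* Each notification covers [start, start + chunk - 1] */
        printf("notify iova=0x%" PRIx64 " addr_mask=0x%" PRIx64 "\n",
               start, chunk - 1);
        start += chunk;
        remain -= chunk;
    }
    return 0;
}
```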
Diffstat (limited to 'hw/i386')
-rw-r--r-- | hw/i386/intel_iommu.c | 67 |
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 719ce19ab3..de86f53b4e 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -3363,11 +3363,28 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
     return vtd_dev_as;
 }
 
+static uint64_t get_naturally_aligned_size(uint64_t start,
+                                           uint64_t size, int gaw)
+{
+    uint64_t max_mask = 1ULL << gaw;
+    uint64_t alignment = start ? start & -start : max_mask;
+
+    alignment = MIN(alignment, max_mask);
+    size = MIN(size, max_mask);
+
+    if (alignment <= size) {
+        /* Increase the alignment of start */
+        return alignment;
+    } else {
+        /* Find the largest page mask from size */
+        return 1ULL << (63 - clz64(size));
+    }
+}
+
 /* Unmap the whole range in the notifier's scope. */
 static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
 {
-    IOMMUTLBEntry entry;
-    hwaddr size;
+    hwaddr size, remain;
     hwaddr start = n->start;
     hwaddr end = n->end;
     IntelIOMMUState *s = as->iommu_state;
@@ -3388,39 +3405,37 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
     }
 
     assert(start <= end);
-    size = end - start;
+    size = remain = end - start + 1;
 
-    if (ctpop64(size) != 1) {
-        /*
-         * This size cannot format a correct mask. Let's enlarge it to
-         * suite the minimum available mask.
-         */
-        int n = 64 - clz64(size);
-        if (n > s->aw_bits) {
-            /* should not happen, but in case it happens, limit it */
-            n = s->aw_bits;
-        }
-        size = 1ULL << n;
+    while (remain >= VTD_PAGE_SIZE) {
+        IOMMUTLBEntry entry;
+        uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
+
+        assert(mask);
+
+        entry.iova = start;
+        entry.addr_mask = mask - 1;
+        entry.target_as = &address_space_memory;
+        entry.perm = IOMMU_NONE;
+        /* This field is meaningless for unmap */
+        entry.translated_addr = 0;
+
+        memory_region_notify_one(n, &entry);
+
+        start += mask;
+        remain -= mask;
     }
 
-    entry.target_as = &address_space_memory;
-    /* Adjust iova for the size */
-    entry.iova = n->start & ~(size - 1);
-    /* This field is meaningless for unmap */
-    entry.translated_addr = 0;
-    entry.perm = IOMMU_NONE;
-    entry.addr_mask = size - 1;
+    assert(!remain);
 
     trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                              VTD_PCI_SLOT(as->devfn),
                              VTD_PCI_FUNC(as->devfn),
-                             entry.iova, size);
+                             n->start, size);
 
-    map.iova = entry.iova;
-    map.size = entry.addr_mask;
+    map.iova = n->start;
+    map.size = size;
     iova_tree_remove(as->iova_tree, &map);
-
-    memory_region_notify_one(n, &entry);
 }
 
 static void vtd_address_space_unmap_all(IntelIOMMUState *s)
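
Running the sketch above on the hypothetical [0x1000, 0x6fff] window yields four notifications that together cover exactly the registered range:

```
notify iova=0x1000 addr_mask=0xfff
notify iova=0x2000 addr_mask=0x1fff
notify iova=0x4000 addr_mask=0x1fff
notify iova=0x6000 addr_mask=0xfff
```

By contrast, the removed code path rounds a non-power-of-two size up to the next power of two and aligns the IOVA down to that size, so a single notification for this window would span [0x0, 0x7fff] and unmap pages outside the registered range, which is exactly the unexpected unmap behaviour the commit message describes.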