From 5039caf3c449c49e625d34e134463260cf8e00e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Eugenio=20P=C3=A9rez?=
Date: Mon, 16 Nov 2020 17:55:03 +0100
Subject: memory: Add IOMMUTLBEvent
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This way we can distinguish between a regular IOMMUTLBEntry (an entry
of the IOMMU hardware) and a notification.

In the notifications, we now set explicitly whether it is a MAP or an
UNMAP, instead of relying on the entry permissions to differentiate
them.

Signed-off-by: Eugenio Pérez
Reviewed-by: Peter Xu
Reviewed-by: Juan Quintela
Acked-by: Jason Wang
Message-Id: <20201116165506.31315-3-eperezma@redhat.com>
Reviewed-by: Michael S. Tsirkin
Signed-off-by: Michael S. Tsirkin
Reviewed-by: Matthew Rosato
Acked-by: David Gibson
---
 hw/i386/intel_iommu.c | 88 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 49 insertions(+), 39 deletions(-)

(limited to 'hw/i386/intel_iommu.c')

diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 067593b9e4..56180b1c43 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -1073,7 +1073,7 @@ static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce,
     }
 }
 
-typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
+typedef int (*vtd_page_walk_hook)(IOMMUTLBEvent *event, void *private);
 
 /**
  * Constant information used during page walking
@@ -1094,11 +1094,12 @@ typedef struct {
     uint16_t domain_id;
 } vtd_page_walk_info;
 
-static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
+static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info)
 {
     VTDAddressSpace *as = info->as;
     vtd_page_walk_hook hook_fn = info->hook_fn;
     void *private = info->private;
+    IOMMUTLBEntry *entry = &event->entry;
     DMAMap target = {
         .iova = entry->iova,
         .size = entry->addr_mask,
@@ -1107,7 +1108,7 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
     };
     DMAMap *mapped = iova_tree_find(as->iova_tree, &target);
 
-    if (entry->perm == IOMMU_NONE && !info->notify_unmap) {
+    if (event->type == IOMMU_NOTIFIER_UNMAP && !info->notify_unmap) {
         trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
         return 0;
     }
@@ -1115,7 +1116,7 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
     assert(hook_fn);
 
     /* Update local IOVA mapped ranges */
-    if (entry->perm) {
+    if (event->type == IOMMU_NOTIFIER_MAP) {
         if (mapped) {
             /* If it's exactly the same translation, skip */
             if (!memcmp(mapped, &target, sizeof(target))) {
@@ -1141,19 +1142,21 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
                 int ret;
 
                 /* Emulate an UNMAP */
+                event->type = IOMMU_NOTIFIER_UNMAP;
                 entry->perm = IOMMU_NONE;
                 trace_vtd_page_walk_one(info->domain_id,
                                         entry->iova,
                                         entry->translated_addr,
                                         entry->addr_mask,
                                         entry->perm);
-                ret = hook_fn(entry, private);
+                ret = hook_fn(event, private);
                 if (ret) {
                     return ret;
                 }
                 /* Drop any existing mapping */
                 iova_tree_remove(as->iova_tree, &target);
-                /* Recover the correct permission */
+                /* Recover the correct type */
+                event->type = IOMMU_NOTIFIER_MAP;
                 entry->perm = cache_perm;
             }
         }
@@ -1170,7 +1173,7 @@ static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
     trace_vtd_page_walk_one(info->domain_id, entry->iova,
                             entry->translated_addr, entry->addr_mask,
                             entry->perm);
-    return hook_fn(entry, private);
+    return hook_fn(event, private);
 }
 
 /**
@@ -1191,7 +1194,7 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
     uint32_t offset;
     uint64_t slpte;
     uint64_t subpage_size, subpage_mask;
-    IOMMUTLBEntry entry;
+    IOMMUTLBEvent event;
     uint64_t iova = start;
     uint64_t iova_next;
     int ret = 0;
@@ -1245,13 +1248,15 @@ static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
              *
              * In either case, we send an IOTLB notification down.
              */
-            entry.target_as = &address_space_memory;
-            entry.iova = iova & subpage_mask;
-            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
-            entry.addr_mask = ~subpage_mask;
+            event.entry.target_as = &address_space_memory;
+            event.entry.iova = iova & subpage_mask;
+            event.entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
+            event.entry.addr_mask = ~subpage_mask;
             /* NOTE: this is only meaningful if entry_valid == true */
-            entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
-            ret = vtd_page_walk_one(&entry, info);
+            event.entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
+            event.type = event.entry.perm ? IOMMU_NOTIFIER_MAP :
+                                            IOMMU_NOTIFIER_UNMAP;
+            ret = vtd_page_walk_one(&event, info);
         }
 
         if (ret < 0) {
@@ -1430,10 +1435,10 @@ static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
     return 0;
 }
 
-static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry,
+static int vtd_sync_shadow_page_hook(IOMMUTLBEvent *event,
                                      void *private)
 {
-    memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry);
+    memory_region_notify_iommu(private, 0, *event);
     return 0;
 }
 
@@ -1993,14 +1998,17 @@ static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
              * page tables. We just deliver the PSI down to
             * invalidate caches.
              */
-            IOMMUTLBEntry entry = {
-                .target_as = &address_space_memory,
-                .iova = addr,
-                .translated_addr = 0,
-                .addr_mask = size - 1,
-                .perm = IOMMU_NONE,
+            IOMMUTLBEvent event = {
+                .type = IOMMU_NOTIFIER_UNMAP,
+                .entry = {
+                    .target_as = &address_space_memory,
+                    .iova = addr,
+                    .translated_addr = 0,
+                    .addr_mask = size - 1,
+                    .perm = IOMMU_NONE,
+                },
             };
-            memory_region_notify_iommu(&vtd_as->iommu, 0, entry);
+            memory_region_notify_iommu(&vtd_as->iommu, 0, event);
         }
     }
 }
@@ -2412,7 +2420,7 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
 {
     VTDAddressSpace *vtd_dev_as;
-    IOMMUTLBEntry entry;
+    IOMMUTLBEvent event;
     struct VTDBus *vtd_bus;
     hwaddr addr;
     uint64_t sz;
@@ -2460,12 +2468,13 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
         sz = VTD_PAGE_SIZE;
     }
 
-    entry.target_as = &vtd_dev_as->as;
-    entry.addr_mask = sz - 1;
-    entry.iova = addr;
-    entry.perm = IOMMU_NONE;
-    entry.translated_addr = 0;
-    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry);
+    event.type = IOMMU_NOTIFIER_UNMAP;
+    event.entry.target_as = &vtd_dev_as->as;
+    event.entry.addr_mask = sz - 1;
+    event.entry.iova = addr;
+    event.entry.perm = IOMMU_NONE;
+    event.entry.translated_addr = 0;
+    memory_region_notify_iommu(&vtd_dev_as->iommu, 0, event);
 
 done:
     return true;
@@ -3485,19 +3494,20 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
     size = remain = end - start + 1;
 
     while (remain >= VTD_PAGE_SIZE) {
-        IOMMUTLBEntry entry;
+        IOMMUTLBEvent event;
         uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
 
         assert(mask);
 
-        entry.iova = start;
-        entry.addr_mask = mask - 1;
-        entry.target_as = &address_space_memory;
-        entry.perm = IOMMU_NONE;
+        event.type = IOMMU_NOTIFIER_UNMAP;
+        event.entry.iova = start;
+        event.entry.addr_mask = mask - 1;
+        event.entry.target_as = &address_space_memory;
+        event.entry.perm = IOMMU_NONE;
         /* This field is meaningless for unmap */
-        entry.translated_addr = 0;
+        event.entry.translated_addr = 0;
 
-        memory_region_notify_iommu_one(n, &entry);
+        memory_region_notify_iommu_one(n, &event);
 
         start += mask;
         remain -= mask;
@@ -3533,9 +3543,9 @@ static void vtd_address_space_refresh_all(IntelIOMMUState *s)
     vtd_switch_address_space_all(s);
 }
 
-static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
+static int vtd_replay_hook(IOMMUTLBEvent *event, void *private)
 {
-    memory_region_notify_iommu_one((IOMMUNotifier *)private, entry);
+    memory_region_notify_iommu_one(private, event);
     return 0;
 }
 
--
cgit v1.2.3
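The pattern the callers are converted to can be sketched in isolation. The snippet below is a minimal, self-contained illustration and not QEMU code: the enum and struct definitions are simplified stand-ins for QEMU's IOMMUAccessFlags, IOMMUNotifierFlag, IOMMUTLBEntry and IOMMUTLBEvent, and notify() is a hypothetical consumer. Only the shape of the change, carrying an explicit MAP/UNMAP type next to the entry instead of inferring it from entry.perm, is taken from the diff above.

/*
 * Standalone sketch (simplified stand-ins, not the QEMU definitions):
 * an IOMMU notification that says explicitly whether it is a MAP or
 * an UNMAP, instead of relying on entry.perm == IOMMU_NONE.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef enum {
    IOMMU_NONE = 0,
    IOMMU_RO   = 1,
    IOMMU_WO   = 2,
    IOMMU_RW   = 3,
} IOMMUAccessFlags;

typedef enum {
    IOMMU_NOTIFIER_UNMAP = 0x1,
    IOMMU_NOTIFIER_MAP   = 0x2,
} IOMMUNotifierFlag;

/* Translation entry as produced by the (emulated) IOMMU hardware. */
typedef struct IOMMUTLBEntry {
    uint64_t         iova;
    uint64_t         translated_addr;
    uint64_t         addr_mask;       /* mask of the covered range */
    IOMMUAccessFlags perm;
} IOMMUTLBEntry;

/* Notification: the kind of event travels next to the entry. */
typedef struct IOMMUTLBEvent {
    IOMMUNotifierFlag type;
    IOMMUTLBEntry     entry;
} IOMMUTLBEvent;

/* Hypothetical consumer: decides on event->type, not on entry.perm. */
static void notify(const IOMMUTLBEvent *event)
{
    if (event->type == IOMMU_NOTIFIER_MAP) {
        printf("MAP   iova=0x%" PRIx64 " -> 0x%" PRIx64 "\n",
               event->entry.iova, event->entry.translated_addr);
    } else {
        printf("UNMAP iova=0x%" PRIx64 " size=0x%" PRIx64 "\n",
               event->entry.iova, event->entry.addr_mask + 1);
    }
}

int main(void)
{
    /* An unmap notification, built the same way the patched callers do. */
    IOMMUTLBEvent unmap = {
        .type  = IOMMU_NOTIFIER_UNMAP,
        .entry = {
            .iova            = 0x100000,
            .translated_addr = 0,       /* meaningless for unmap */
            .addr_mask       = 0xfff,   /* one 4 KiB page */
            .perm            = IOMMU_NONE,
        },
    };

    notify(&unmap);
    return 0;
}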