Diffstat (limited to 'hw/i386')
-rw-r--r--   hw/i386/amd_iommu.c             |   3
-rw-r--r--   hw/i386/intel_iommu.c           | 442
-rw-r--r--   hw/i386/intel_iommu_internal.h  |   1
-rw-r--r--   hw/i386/pc.c                    |   4
-rw-r--r--   hw/i386/trace-events            |  11
5 files changed, 438 insertions, 23 deletions
diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c
index e0732ccaf1..f86a40aa30 100644
--- a/hw/i386/amd_iommu.c
+++ b/hw/i386/amd_iommu.c
@@ -572,8 +572,7 @@ static uint64_t amdvi_mmio_read(void *opaque, hwaddr addr, unsigned size)
     uint64_t val = -1;
 
     if (addr + size > AMDVI_MMIO_SIZE) {
-        trace_amdvi_mmio_read("error: addr outside region: max ",
-                              (uint64_t)AMDVI_MMIO_SIZE, addr, size);
+        trace_amdvi_mmio_read_invalid(AMDVI_MMIO_SIZE, addr, size);
         return (uint64_t)-1;
     }
 
diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c
index 22d8226e43..02f047c8e3 100644
--- a/hw/i386/intel_iommu.c
+++ b/hw/i386/intel_iommu.c
@@ -595,6 +595,22 @@ static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
     return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
 }
 
+static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
+{
+    uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
+    return 1ULL << MIN(ce_agaw, VTD_MGAW);
+}
+
+/* Return true if IOVA passes range check, otherwise false. */
+static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
+{
+    /*
+     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
+     * in CAP_REG and AW in context-entry.
+     */
+    return !(iova & ~(vtd_iova_limit(ce) - 1));
+}
+
 static const uint64_t vtd_paging_entry_rsvd_field[] = {
     [0] = ~0ULL,
     /* For not large page */
@@ -630,13 +646,9 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
     uint32_t level = vtd_get_level_from_context_entry(ce);
     uint32_t offset;
     uint64_t slpte;
-    uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
     uint64_t access_right_check;
 
-    /* Check if @iova is above 2^X-1, where X is the minimum of MGAW
-     * in CAP_REG and AW in context-entry.
-     */
-    if (iova & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
+    if (!vtd_iova_range_check(iova, ce)) {
         VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits", iova);
         return -VTD_FR_ADDR_BEYOND_MGAW;
     }
@@ -684,6 +696,135 @@ static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
     }
 }
 
+typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
+
+/**
+ * vtd_page_walk_level - walk over specific level for IOVA range
+ *
+ * @addr: base GPA addr to start the walk
+ * @start: IOVA range start address
+ * @end: IOVA range end address (start <= addr < end)
+ * @hook_fn: hook func to be called when a page is detected
+ * @private: private data to be passed into hook func
+ * @read: whether parent level has read permission
+ * @write: whether parent level has write permission
+ * @notify_unmap: whether we should notify invalid entries
+ */
+static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
+                               uint64_t end, vtd_page_walk_hook hook_fn,
+                               void *private, uint32_t level,
+                               bool read, bool write, bool notify_unmap)
+{
+    bool read_cur, write_cur, entry_valid;
+    uint32_t offset;
+    uint64_t slpte;
+    uint64_t subpage_size, subpage_mask;
+    IOMMUTLBEntry entry;
+    uint64_t iova = start;
+    uint64_t iova_next;
+    int ret = 0;
+
+    trace_vtd_page_walk_level(addr, level, start, end);
+
+    subpage_size = 1ULL << vtd_slpt_level_shift(level);
+    subpage_mask = vtd_slpt_level_page_mask(level);
+
+    while (iova < end) {
+        iova_next = (iova & subpage_mask) + subpage_size;
+
+        offset = vtd_iova_level_offset(iova, level);
+        slpte = vtd_get_slpte(addr, offset);
+
+        if (slpte == (uint64_t)-1) {
+            trace_vtd_page_walk_skip_read(iova, iova_next);
+            goto next;
+        }
+
+        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
+            trace_vtd_page_walk_skip_reserve(iova, iova_next);
+            goto next;
+        }
+
+        /* Permissions are stacked with parents' */
+        read_cur = read && (slpte & VTD_SL_R);
+        write_cur = write && (slpte & VTD_SL_W);
+
+        /*
+         * As long as we have either read/write permission, this is a
+         * valid entry. The rule works for both page entries and page
+         * table entries.
+         */
+        entry_valid = read_cur | write_cur;
+
+        if (vtd_is_last_slpte(slpte, level)) {
+            entry.target_as = &address_space_memory;
+            entry.iova = iova & subpage_mask;
+            /* NOTE: this is only meaningful if entry_valid == true */
+            entry.translated_addr = vtd_get_slpte_addr(slpte);
+            entry.addr_mask = ~subpage_mask;
+            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
+            if (!entry_valid && !notify_unmap) {
+                trace_vtd_page_walk_skip_perm(iova, iova_next);
+                goto next;
+            }
+            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
+                                    entry.addr_mask, entry.perm);
+            if (hook_fn) {
+                ret = hook_fn(&entry, private);
+                if (ret < 0) {
+                    return ret;
+                }
+            }
+        } else {
+            if (!entry_valid) {
+                trace_vtd_page_walk_skip_perm(iova, iova_next);
+                goto next;
+            }
+            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
+                                      MIN(iova_next, end), hook_fn, private,
+                                      level - 1, read_cur, write_cur,
+                                      notify_unmap);
+            if (ret < 0) {
+                return ret;
+            }
+        }
+
+next:
+        iova = iova_next;
+    }
+
+    return 0;
+}
+
+/**
+ * vtd_page_walk - walk specific IOVA range, and call the hook
+ *
+ * @ce: context entry to walk upon
+ * @start: IOVA address to start the walk
+ * @end: IOVA range end address (start <= addr < end)
+ * @hook_fn: the hook to be called for each detected area
+ * @private: private data for the hook function
+ */
+static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
+                         vtd_page_walk_hook hook_fn, void *private,
+                         bool notify_unmap)
+{
+    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
+    uint32_t level = vtd_get_level_from_context_entry(ce);
+
+    if (!vtd_iova_range_check(start, ce)) {
+        return -VTD_FR_ADDR_BEYOND_MGAW;
+    }
+
+    if (!vtd_iova_range_check(end, ce)) {
+        /* Fix end so that it reaches the maximum */
+        end = vtd_iova_limit(ce);
+    }
+
+    return vtd_page_walk_level(addr, start, end, hook_fn, private,
+                               level, true, true, notify_unmap);
+}
+
 /* Map a device to its corresponding domain (context-entry) */
 static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                     uint8_t devfn, VTDContextEntry *ce)
@@ -898,6 +1039,15 @@ static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
                 s->intr_root, s->intr_size);
 }
 
+static void vtd_iommu_replay_all(IntelIOMMUState *s)
+{
+    IntelIOMMUNotifierNode *node;
+
+    QLIST_FOREACH(node, &s->notifiers_list, next) {
+        memory_region_iommu_replay_all(&node->vtd_as->iommu);
+    }
+}
+
 static void vtd_context_global_invalidate(IntelIOMMUState *s)
 {
     trace_vtd_inv_desc_cc_global();
@@ -905,6 +1055,14 @@ static void vtd_context_global_invalidate(IntelIOMMUState *s)
     if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
         vtd_reset_context_cache(s);
     }
+    /*
+     * From VT-d spec 6.5.2.1, a global context entry invalidation
+     * should be followed by an IOTLB global invalidation, so we
+     * should be safe even without this. However, let's replay the
+     * region as well to be safer, and go back here when we need
+     * finer tuning for the VT-d emulation code.
+     */
+    vtd_iommu_replay_all(s);
 }
 
@@ -971,6 +1129,16 @@ static void vtd_context_device_invalidate(IntelIOMMUState *s,
                 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                              VTD_PCI_FUNC(devfn_it));
                 vtd_as->context_cache_entry.context_cache_gen = 0;
+                /*
+                 * A device is moving out of (or moving into) a
+                 * domain, so a replay() suits here to notify all
+                 * the registered IOMMU_NOTIFIER_MAP notifiers about
+                 * this change. This does no harm even if we have no
+                 * such notifier registered - the IOMMU notification
+                 * framework will skip MAP notifications in that
+                 * case.
+                 */
+                memory_region_iommu_replay_all(&vtd_as->iommu);
             }
         }
     }
@@ -1012,12 +1180,53 @@ static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
 {
     trace_vtd_iotlb_reset("global invalidation recved");
     vtd_reset_iotlb(s);
+    vtd_iommu_replay_all(s);
 }
 
 static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
 {
+    IntelIOMMUNotifierNode *node;
+    VTDContextEntry ce;
+    VTDAddressSpace *vtd_as;
+
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                 &domain_id);
+
+    QLIST_FOREACH(node, &s->notifiers_list, next) {
+        vtd_as = node->vtd_as;
+        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
+                                      vtd_as->devfn, &ce) &&
+            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
+            memory_region_iommu_replay_all(&vtd_as->iommu);
+        }
+    }
+}
+
+static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
+                                           void *private)
+{
+    memory_region_notify_iommu((MemoryRegion *)private, *entry);
+    return 0;
+}
+
+static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
+                                             uint16_t domain_id, hwaddr addr,
+                                             uint8_t am)
+{
+    IntelIOMMUNotifierNode *node;
+    VTDContextEntry ce;
+    int ret;
+
+    QLIST_FOREACH(node, &(s->notifiers_list), next) {
+        VTDAddressSpace *vtd_as = node->vtd_as;
+        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
+                                       vtd_as->devfn, &ce);
+        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
+            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
+                          vtd_page_invalidate_notify_hook,
+                          (void *)&vtd_as->iommu, true);
+        }
+    }
 }
 
 static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
@@ -1030,6 +1239,7 @@ static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
     info.addr = addr;
     info.mask = ~((1 << am) - 1);
     g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
+    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
 }
 
 /* Flush IOTLB
@@ -1151,9 +1361,49 @@ static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
     vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
 }
 
+static void vtd_switch_address_space(VTDAddressSpace *as)
+{
+    assert(as);
+
+    trace_vtd_switch_address_space(pci_bus_num(as->bus),
+                                   VTD_PCI_SLOT(as->devfn),
+                                   VTD_PCI_FUNC(as->devfn),
+                                   as->iommu_state->dmar_enabled);
+
+    /* Turn off first then on the other */
+    if (as->iommu_state->dmar_enabled) {
+        memory_region_set_enabled(&as->sys_alias, false);
+        memory_region_set_enabled(&as->iommu, true);
+    } else {
+        memory_region_set_enabled(&as->iommu, false);
+        memory_region_set_enabled(&as->sys_alias, true);
+    }
+}
+
+static void vtd_switch_address_space_all(IntelIOMMUState *s)
+{
+    GHashTableIter iter;
+    VTDBus *vtd_bus;
+    int i;
+
+    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
+            if (!vtd_bus->dev_as[i]) {
+                continue;
+            }
+            vtd_switch_address_space(vtd_bus->dev_as[i]);
+        }
+    }
+}
+
 /* Handle Translation Enable/Disable */
 static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
 {
+    if (s->dmar_enabled == en) {
+        return;
+    }
+
     VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));
 
     if (en) {
@@ -1168,6 +1418,8 @@ static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
         /* Ok - report back to driver */
         vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
     }
+
+    vtd_switch_address_space_all(s);
 }
 
 /* Handle Interrupt Remap Enable/Disable */
@@ -1457,7 +1709,7 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
     entry.iova = addr;
     entry.perm = IOMMU_NONE;
     entry.translated_addr = 0;
-    memory_region_notify_iommu(entry.target_as->root, entry);
+    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
 
 done:
     return true;
@@ -2005,15 +2257,33 @@ static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                           IOMMUNotifierFlag new)
 {
     VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
+    IntelIOMMUState *s = vtd_as->iommu_state;
+    IntelIOMMUNotifierNode *node = NULL;
+    IntelIOMMUNotifierNode *next_node = NULL;
 
-    if (new & IOMMU_NOTIFIER_MAP) {
-        error_report("Device at bus %s addr %02x.%d requires iommu "
-                     "notifier which is currently not supported by "
-                     "intel-iommu emulation",
-                     vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
-                     PCI_FUNC(vtd_as->devfn));
+    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
+        error_report("We need to set cache_mode=1 for intel-iommu to enable "
+                     "device assignment with IOMMU protection.");
         exit(1);
     }
+
+    if (old == IOMMU_NOTIFIER_NONE) {
+        node = g_malloc0(sizeof(*node));
+        node->vtd_as = vtd_as;
+        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
+        return;
+    }
+
+    /* update notifier node with new flags */
+    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
+        if (node->vtd_as == vtd_as) {
+            if (new == IOMMU_NOTIFIER_NONE) {
+                QLIST_REMOVE(node, next);
+                g_free(node);
+            }
+            return;
+        }
+    }
 }
 
 static const VMStateDescription vtd_vmstate = {
@@ -2389,19 +2659,150 @@ VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
         vtd_dev_as->devfn = (uint8_t)devfn;
         vtd_dev_as->iommu_state = s;
         vtd_dev_as->context_cache_entry.context_cache_gen = 0;
+
+        /*
+         * Memory region relationships look like this (the address
+         * ranges show only the lower 32 bits to keep them short):
+         *
+         * |-----------------+-------------------+----------|
+         * | Name            | Address range     | Priority |
+         * |-----------------+-------------------+----------+
+         * | vtd_root        | 00000000-ffffffff |        0 |
+         * | intel_iommu     | 00000000-ffffffff |        1 |
+         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
+         * | intel_iommu_ir  | fee00000-feefffff |       64 |
+         * |-----------------+-------------------+----------|
+         *
+         * We enable/disable DMAR by switching enablement for the
+         * vtd_sys_alias and intel_iommu regions. The IR region is
+         * always enabled.
+         */
         memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
-                                 &s->iommu_ops, "intel_iommu", UINT64_MAX);
+                                 &s->iommu_ops, "intel_iommu_dmar",
+                                 UINT64_MAX);
+        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
+                                 "vtd_sys_alias", get_system_memory(),
+                                 0, memory_region_size(get_system_memory()));
         memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                               &vtd_mem_ir_ops, s, "intel_iommu_ir",
                               VTD_INTERRUPT_ADDR_SIZE);
-        memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST,
-                                    &vtd_dev_as->iommu_ir);
-        address_space_init(&vtd_dev_as->as,
-                           &vtd_dev_as->iommu, name);
+        memory_region_init(&vtd_dev_as->root, OBJECT(s),
+                           "vtd_root", UINT64_MAX);
+        memory_region_add_subregion_overlap(&vtd_dev_as->root,
+                                            VTD_INTERRUPT_ADDR_FIRST,
+                                            &vtd_dev_as->iommu_ir, 64);
+        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
+        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
+                                            &vtd_dev_as->sys_alias, 1);
+        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
+                                            &vtd_dev_as->iommu, 1);
+        vtd_switch_address_space(vtd_dev_as);
     }
     return vtd_dev_as;
 }
 
+/* Unmap the whole range in the notifier's scope. */
+static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
+{
+    IOMMUTLBEntry entry;
+    hwaddr size;
+    hwaddr start = n->start;
+    hwaddr end = n->end;
+
+    /*
+     * Note: all the code in this function assumes that IOVA bits are
+     * no more than VTD_MGAW bits (which is restricted by the VT-d
+     * spec), otherwise we would need to consider 64-bit overflow.
+     */
+
+    if (end > VTD_ADDRESS_SIZE) {
+        /*
+         * No need to unmap regions that are bigger than the whole
+         * VT-d supported address space size
+         */
+        end = VTD_ADDRESS_SIZE;
+    }
+
+    assert(start <= end);
+    size = end - start;
+
+    if (ctpop64(size) != 1) {
+        /*
+         * This size cannot form a correct mask; enlarge it to suit
+         * the minimum available mask.
+         */
+        int n = 64 - clz64(size);
+        if (n > VTD_MGAW) {
+            /* should not happen, but in case it happens, limit it */
+            n = VTD_MGAW;
+        }
+        size = 1ULL << n;
+    }
+
+    entry.target_as = &address_space_memory;
+    /* Adjust iova for the size */
+    entry.iova = n->start & ~(size - 1);
+    /* This field is meaningless for unmap */
+    entry.translated_addr = 0;
+    entry.perm = IOMMU_NONE;
+    entry.addr_mask = size - 1;
+
+    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
+                             VTD_PCI_SLOT(as->devfn),
+                             VTD_PCI_FUNC(as->devfn),
+                             entry.iova, size);
+
+    memory_region_notify_one(n, &entry);
+}
+
+static void vtd_address_space_unmap_all(IntelIOMMUState *s)
+{
+    IntelIOMMUNotifierNode *node;
+    VTDAddressSpace *vtd_as;
+    IOMMUNotifier *n;
+
+    QLIST_FOREACH(node, &s->notifiers_list, next) {
+        vtd_as = node->vtd_as;
+        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
+            vtd_address_space_unmap(vtd_as, n);
+        }
+    }
+}
+
+static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
+{
+    memory_region_notify_one((IOMMUNotifier *)private, entry);
+    return 0;
+}
+
+static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
+{
+    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
+    IntelIOMMUState *s = vtd_as->iommu_state;
+    uint8_t bus_n = pci_bus_num(vtd_as->bus);
+    VTDContextEntry ce;
+
+    /*
+     * The replay can be triggered by either an invalidation or a
+     * newly created entry. No matter what, we release existing
+     * mappings (that is, flush caches for UNMAP-only notifiers).
+     */
+    vtd_address_space_unmap(vtd_as, n);
+
+    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
+        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
+                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
+                                  ce.hi, ce.lo);
+        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
+    } else {
+        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
+                                    PCI_FUNC(vtd_as->devfn));
+    }
+
+    return;
+}
+
 /* Do the initialization. It will also be called when reset, so pay
  * attention when adding new initialization stuff.
  */
@@ -2416,6 +2817,7 @@ static void vtd_init(IntelIOMMUState *s)
 
     s->iommu_ops.translate = vtd_iommu_translate;
     s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
+    s->iommu_ops.replay = vtd_iommu_replay;
     s->root = 0;
     s->root_extended = false;
     s->dmar_enabled = false;
@@ -2511,6 +2913,11 @@ static void vtd_reset(DeviceState *dev)
 
     VTD_DPRINTF(GENERAL, "");
     vtd_init(s);
+
+    /*
+     * When the device is reset, throw away all mappings and external caches.
+     */
+    vtd_address_space_unmap_all(s);
 }
 
 static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
@@ -2574,6 +2981,7 @@ static void vtd_realize(DeviceState *dev, Error **errp)
         return;
     }
 
+    QLIST_INIT(&s->notifiers_list);
     memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
     memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                           "intel_iommu", DMAR_REG_SIZE);
diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h
index 41041219ba..29d67075f4 100644
--- a/hw/i386/intel_iommu_internal.h
+++ b/hw/i386/intel_iommu_internal.h
@@ -197,6 +197,7 @@
 #define VTD_DOMAIN_ID_MASK          ((1UL << VTD_DOMAIN_ID_SHIFT) - 1)
 #define VTD_CAP_ND                  (((VTD_DOMAIN_ID_SHIFT - 4) / 2) & 7ULL)
 #define VTD_MGAW                    39  /* Maximum Guest Address Width */
+#define VTD_ADDRESS_SIZE            (1ULL << VTD_MGAW)
 #define VTD_CAP_MGAW                (((VTD_MGAW - 1) & 0x3fULL) << 16)
 #define VTD_MAMV                    18ULL
 #define VTD_CAP_MAMV                (VTD_MAMV << 48)
diff --git a/hw/i386/pc.c b/hw/i386/pc.c
index d24388e05f..f3b372a18f 100644
--- a/hw/i386/pc.c
+++ b/hw/i386/pc.c
@@ -1104,9 +1104,7 @@ static void pc_new_cpu(const char *typename, int64_t apic_id, Error **errp)
     object_property_set_bool(cpu, true, "realized", &local_err);
 
     object_unref(cpu);
-    if (local_err) {
-        error_propagate(errp, local_err);
-    }
+    error_propagate(errp, local_err);
 }
 
 void pc_hot_add_cpu(const int64_t id, Error **errp)
diff --git a/hw/i386/trace-events b/hw/i386/trace-events
index 88ad5e4c43..04a6980800 100644
--- a/hw/i386/trace-events
+++ b/hw/i386/trace-events
@@ -4,7 +4,6 @@
 x86_iommu_iec_notify(bool global, uint32_t index, uint32_t mask) "Notify IEC invalidation: global=%d index=%" PRIu32 " mask=%" PRIu32
 
 # hw/i386/intel_iommu.c
-vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
 vtd_inv_desc(const char *type, uint64_t hi, uint64_t lo) "invalidate desc type %s high 0x%"PRIx64" low 0x%"PRIx64
 vtd_inv_desc_invalid(uint64_t hi, uint64_t lo) "invalid inv desc hi 0x%"PRIx64" lo 0x%"PRIx64
 vtd_inv_desc_cc_domain(uint16_t domain) "context invalidate domain 0x%"PRIx16
@@ -30,6 +29,15 @@ vtd_iotlb_cc_hit(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32
 vtd_iotlb_cc_update(uint8_t bus, uint8_t devfn, uint64_t high, uint64_t low, uint32_t gen1, uint32_t gen2) "IOTLB context update bus 0x%"PRIx8" devfn 0x%"PRIx8" high 0x%"PRIx64" low 0x%"PRIx64" gen %"PRIu32" -> gen %"PRIu32
 vtd_iotlb_reset(const char *reason) "IOTLB reset (reason: %s)"
 vtd_fault_disabled(void) "Fault processing disabled for context entry"
+vtd_replay_ce_valid(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t domain, uint64_t hi, uint64_t lo) "replay valid context device %02"PRIx8":%02"PRIx8".%02"PRIx8" domain 0x%"PRIx16" hi 0x%"PRIx64" lo 0x%"PRIx64
+vtd_replay_ce_invalid(uint8_t bus, uint8_t dev, uint8_t fn) "replay invalid context device %02"PRIx8":%02"PRIx8".%02"PRIx8
+vtd_page_walk_level(uint64_t addr, uint32_t level, uint64_t start, uint64_t end) "walk (base=0x%"PRIx64", level=%"PRIu32") iova range 0x%"PRIx64" - 0x%"PRIx64
+vtd_page_walk_one(uint32_t level, uint64_t iova, uint64_t gpa, uint64_t mask, int perm) "detected page level 0x%"PRIx32" iova 0x%"PRIx64" -> gpa 0x%"PRIx64" mask 0x%"PRIx64" perm %d"
+vtd_page_walk_skip_read(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to unable to read"
+vtd_page_walk_skip_perm(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to perm empty"
+vtd_page_walk_skip_reserve(uint64_t iova, uint64_t next) "Page walk skip iova 0x%"PRIx64" - 0x%"PRIx64" due to rsrv set"
+vtd_switch_address_space(uint8_t bus, uint8_t slot, uint8_t fn, bool on) "Device %02x:%02x.%x switching address space (iommu enabled=%d)"
+vtd_as_unmap_whole(uint8_t bus, uint8_t slot, uint8_t fn, uint64_t iova, uint64_t size) "Device %02x:%02x.%x start 0x%"PRIx64" size 0x%"PRIx64
 
 # hw/i386/amd_iommu.c
 amdvi_evntlog_fail(uint64_t addr, uint32_t head) "error: fail to write at addr 0x%"PRIx64" + offset 0x%"PRIx32
@@ -37,6 +45,7 @@ amdvi_cache_update(uint16_t domid, uint8_t bus, uint8_t slot, uint8_t func, uint
 amdvi_completion_wait_fail(uint64_t addr) "error: fail to write at address 0x%"PRIx64
 amdvi_mmio_write(const char *reg, uint64_t addr, unsigned size, uint64_t val, uint64_t offset) "%s write addr 0x%"PRIx64", size %u, val 0x%"PRIx64", offset 0x%"PRIx64
 amdvi_mmio_read(const char *reg, uint64_t addr, unsigned size, uint64_t offset) "%s read addr 0x%"PRIx64", size %u offset 0x%"PRIx64
+amdvi_mmio_read_invalid(int max, uint64_t addr, unsigned size) "error: addr outside region (max 0x%x): read addr 0x%" PRIx64 ", size %u"
 amdvi_command_error(uint64_t status) "error: Executing commands with command buffer disabled 0x%"PRIx64
 amdvi_command_read_fail(uint64_t addr, uint32_t head) "error: fail to access memory at 0x%"PRIx64" + 0x%"PRIx32
 amdvi_command_exec(uint32_t head, uint32_t tail, uint64_t buf) "command buffer head at 0x%"PRIx32" command buffer tail at 0x%"PRIx32" command buffer base at 0x%"PRIx64
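
Note on the page-walk callback pattern used above: vtd_page_walk() traverses the second-level page table for an IOVA range and calls a vtd_page_walk_hook once per entry it decides to report; vtd_replay_hook forwards each entry to a single notifier, and vtd_page_invalidate_notify_hook forwards it to the whole IOMMU memory region. The standalone sketch below mirrors only that shape outside of QEMU; the MapEntry type, walk_table() and print_hook() are invented for illustration and are not part of the patch or the QEMU API.

/* Minimal sketch of a hook-per-mapping walk (illustrative, not QEMU code). */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint64_t iova;   /* guest IO virtual address */
    uint64_t gpa;    /* guest physical address it maps to */
    int      valid;  /* whether the entry is present */
} MapEntry;

/* Hook invoked once per reported entry; mirrors the vtd_page_walk_hook idea. */
typedef int (*walk_hook)(const MapEntry *entry, void *private);

/* Walk a flat "page table", calling the hook for valid entries, or for
 * every entry when notify_invalid is set (like notify_unmap above). */
static int walk_table(const MapEntry *table, size_t n,
                      walk_hook hook, void *private, int notify_invalid)
{
    for (size_t i = 0; i < n; i++) {
        if (!table[i].valid && !notify_invalid) {
            continue;                  /* skip, like the skip_perm case */
        }
        int ret = hook(&table[i], private);
        if (ret < 0) {
            return ret;                /* abort the walk on hook error */
        }
    }
    return 0;
}

/* Example hook: print the mapping and count entries via 'private'. */
static int print_hook(const MapEntry *entry, void *private)
{
    (*(int *)private)++;
    printf("iova 0x%" PRIx64 " -> gpa 0x%" PRIx64 " (%s)\n",
           entry->iova, entry->gpa, entry->valid ? "map" : "unmap");
    return 0;
}

int main(void)
{
    MapEntry table[] = {
        { 0x1000, 0x40000, 1 },
        { 0x2000, 0x0,     0 },
    };
    int count = 0;

    walk_table(table, 2, print_hook, &count, 1);
    printf("%d entries notified\n", count);
    return 0;
}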