-rw-r--r--  exec.c  103
1 file changed, 69 insertions, 34 deletions
diff --git a/exec.c b/exec.c
index eac6085760..f942eb2bd1 100644
--- a/exec.c
+++ b/exec.c
@@ -463,18 +463,20 @@ address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *x
 }
 
 /* Called from RCU critical section */
-IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
-                                            bool is_write)
+static MemoryRegionSection address_space_do_translate(AddressSpace *as,
+                                                      hwaddr addr,
+                                                      hwaddr *xlat,
+                                                      hwaddr *plen,
+                                                      bool is_write,
+                                                      bool is_mmio)
 {
-    IOMMUTLBEntry iotlb = {0};
+    IOMMUTLBEntry iotlb;
     MemoryRegionSection *section;
     MemoryRegion *mr;
 
     for (;;) {
         AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
-        section = address_space_lookup_region(d, addr, false);
-        addr = addr - section->offset_within_address_space
-               + section->offset_within_region;
+        section = address_space_translate_internal(d, addr, &addr, plen, is_mmio);
         mr = section->mr;
 
         if (!mr->iommu_ops) {
@@ -482,55 +484,88 @@ IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
         }
 
         iotlb = mr->iommu_ops->translate(mr, addr, is_write);
+        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
+                | (addr & iotlb.addr_mask));
+        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
         if (!(iotlb.perm & (1 << is_write))) {
-            iotlb.target_as = NULL;
-            break;
+            goto translate_fail;
         }
 
-        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
-                | (addr & iotlb.addr_mask));
         as = iotlb.target_as;
     }
 
-    return iotlb;
+    *xlat = addr;
+
+    return *section;
+
+translate_fail:
+    return (MemoryRegionSection) { .mr = &io_mem_unassigned };
 }
 
 /* Called from RCU critical section */
-MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
-                                      hwaddr *xlat, hwaddr *plen,
-                                      bool is_write)
+IOMMUTLBEntry address_space_get_iotlb_entry(AddressSpace *as, hwaddr addr,
+                                            bool is_write)
 {
-    IOMMUTLBEntry iotlb;
-    MemoryRegionSection *section;
-    MemoryRegion *mr;
+    MemoryRegionSection section;
+    hwaddr xlat, plen;
 
-    for (;;) {
-        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
-        section = address_space_translate_internal(d, addr, &addr, plen, true);
-        mr = section->mr;
+    /* Try to get maximum page mask during translation. */
+    plen = (hwaddr)-1;
 
-        if (!mr->iommu_ops) {
-            break;
-        }
+    /* This can never be MMIO. */
+    section = address_space_do_translate(as, addr, &xlat, &plen,
+                                         is_write, false);
 
-        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
-        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
-                | (addr & iotlb.addr_mask));
-        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
-        if (!(iotlb.perm & (1 << is_write))) {
-            mr = &io_mem_unassigned;
-            break;
-        }
+    /* Illegal translation */
+    if (section.mr == &io_mem_unassigned) {
+        goto iotlb_fail;
+    }
 
-        as = iotlb.target_as;
+    /* Convert memory region offset into address space offset */
+    xlat += section.offset_within_address_space -
+        section.offset_within_region;
+
+    if (plen == (hwaddr)-1) {
+        /*
+         * We use default page size here. Logically it only happens
+         * for identity mappings.
+         */
+        plen = TARGET_PAGE_SIZE;
     }
 
+    /* Convert to address mask */
+    plen -= 1;
+
+    return (IOMMUTLBEntry) {
+        .target_as = section.address_space,
+        .iova = addr & ~plen,
+        .translated_addr = xlat & ~plen,
+        .addr_mask = plen,
+        /* IOTLBs are for DMA, and DMA is only allowed on RAM. */
+        .perm = IOMMU_RW,
+    };
+
+iotlb_fail:
+    return (IOMMUTLBEntry) {0};
+}
+
+/* Called from RCU critical section */
+MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
+                                      hwaddr *xlat, hwaddr *plen,
+                                      bool is_write)
+{
+    MemoryRegion *mr;
+    MemoryRegionSection section;
+
+    /* This can be MMIO, so set up the MMIO bit. */
+    section = address_space_do_translate(as, addr, xlat, plen, is_write, true);
+    mr = section.mr;
+
     if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
         hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
         *plen = MIN(page, *plen);
     }
 
-    *xlat = addr;
     return mr;
 }
 
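
The masking arithmetic that both hunks place around the IOMMU translate callback reads as: keep the offset inside the IOMMU page, substitute the translated page frame, then clamp the remaining length to the end of that page. Below is a standalone worked example with invented numbers (a 4 KiB mapping, addr_mask == 0xfff); only the two expressions are taken from the patch, everything else is illustrative.

/* Worked example of the IOMMU masking done in address_space_do_translate().
 * The values are invented for illustration. */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t addr = 0x12345678;            /* address being translated        */
    uint64_t translated_addr = 0xabcde000; /* page-aligned IOMMU result       */
    uint64_t addr_mask = 0xfff;            /* 4 KiB page: low 12 bits kept    */
    uint64_t plen = UINT64_MAX;            /* "maximum page mask" initializer */

    /* Keep the in-page offset, replace the page frame: 0xabcde678. */
    addr = (translated_addr & ~addr_mask) | (addr & addr_mask);

    /* Clamp the length so the access stays inside the IOMMU page: 0x988. */
    plen = MIN(plen, (addr | addr_mask) - addr + 1);

    printf("addr=0x%" PRIx64 " plen=0x%" PRIx64 "\n", addr, plen);
    return 0;
}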
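
For orientation, a hedged sketch of how a caller inside the QEMU tree might use the two remaining entry points after this refactoring. The function example_translate and its surroundings are hypothetical; only the two calls, the RCU requirement stated in the comments above, and the fact that a failed lookup returns a zero-initialized IOMMUTLBEntry (so target_as is NULL) come from the code in this patch.

/* Hypothetical caller sketch, not part of the patch.  Both entry points
 * must be used inside an RCU critical section, as their comments say. */
#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "exec/memory.h"

static void example_translate(AddressSpace *as, hwaddr gpa, bool is_write)
{
    IOMMUTLBEntry entry;
    MemoryRegion *mr;
    hwaddr xlat, len = 4;

    rcu_read_lock();

    /* Page-granular lookup, e.g. to answer a device IOTLB miss. */
    entry = address_space_get_iotlb_entry(as, gpa, is_write);
    if (entry.target_as) {
        /* [iova, iova + addr_mask] maps to translated_addr with entry.perm. */
    }

    /* Byte-granular translation; the result may be an MMIO region. */
    mr = address_space_translate(as, gpa, &xlat, &len, is_write);
    if (memory_access_is_direct(mr, is_write)) {
        /* RAM-backed: up to 'len' bytes can be accessed at offset 'xlat'. */
    }

    rcu_read_unlock();
}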