-rw-r--r--   hw/vfio/common.c        48
-rw-r--r--   hw/vfio/pci.c           79
-rw-r--r--   hw/vfio/spapr.c          2
-rw-r--r--   include/exec/memory.h   47
-rw-r--r--   memory.c                80
-rw-r--r--   memory_mapping.c         2
-rw-r--r--   trace-events             2
7 files changed, 218 insertions, 42 deletions
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 9505fb3040..801578b4b9 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -610,16 +610,16 @@ vfio_get_region_info_cap(struct vfio_region_info *info, uint16_t id)
     return NULL;
 }
 
-static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
-                                           struct vfio_region_info *info)
+static int vfio_setup_region_sparse_mmaps(VFIORegion *region,
+                                          struct vfio_region_info *info)
 {
     struct vfio_info_cap_header *hdr;
     struct vfio_region_info_cap_sparse_mmap *sparse;
-    int i;
+    int i, j;
 
     hdr = vfio_get_region_info_cap(info, VFIO_REGION_INFO_CAP_SPARSE_MMAP);
     if (!hdr) {
-        return;
+        return -ENODEV;
     }
 
     sparse = container_of(hdr, struct vfio_region_info_cap_sparse_mmap, header);
@@ -627,16 +627,24 @@ static void vfio_setup_region_sparse_mmaps(VFIORegion *region,
     trace_vfio_region_sparse_mmap_header(region->vbasedev->name,
                                          region->nr, sparse->nr_areas);
 
-    region->nr_mmaps = sparse->nr_areas;
-    region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
+    region->mmaps = g_new0(VFIOMmap, sparse->nr_areas);
 
-    for (i = 0; i < region->nr_mmaps; i++) {
-        region->mmaps[i].offset = sparse->areas[i].offset;
-        region->mmaps[i].size = sparse->areas[i].size;
-        trace_vfio_region_sparse_mmap_entry(i, region->mmaps[i].offset,
-                                            region->mmaps[i].offset +
-                                            region->mmaps[i].size);
+    for (i = 0, j = 0; i < sparse->nr_areas; i++) {
+        trace_vfio_region_sparse_mmap_entry(i, sparse->areas[i].offset,
+                                            sparse->areas[i].offset +
+                                            sparse->areas[i].size);
+
+        if (sparse->areas[i].size) {
+            region->mmaps[j].offset = sparse->areas[i].offset;
+            region->mmaps[j].size = sparse->areas[i].size;
+            j++;
+        }
     }
+
+    region->nr_mmaps = j;
+    region->mmaps = g_realloc(region->mmaps, j * sizeof(VFIOMmap));
+
+    return 0;
 }
 
 int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
@@ -662,12 +670,11 @@ int vfio_region_setup(Object *obj, VFIODevice *vbasedev, VFIORegion *region,
                                   region, name, region->size);
 
         if (!vbasedev->no_mmap &&
-            region->flags & VFIO_REGION_INFO_FLAG_MMAP &&
-            !(region->size & ~qemu_real_host_page_mask)) {
+            region->flags & VFIO_REGION_INFO_FLAG_MMAP) {
 
-            vfio_setup_region_sparse_mmaps(region, info);
+            ret = vfio_setup_region_sparse_mmaps(region, info);
 
-            if (!region->nr_mmaps) {
+            if (ret) {
                 region->nr_mmaps = 1;
                 region->mmaps = g_new0(VFIOMmap, region->nr_mmaps);
                 region->mmaps[0].offset = 0;
@@ -724,12 +731,11 @@ int vfio_region_mmap(VFIORegion *region)
 
         name = g_strdup_printf("%s mmaps[%d]",
                                memory_region_name(region->mem), i);
-        memory_region_init_ram_ptr(&region->mmaps[i].mem,
-                                   memory_region_owner(region->mem),
-                                   name, region->mmaps[i].size,
-                                   region->mmaps[i].mmap);
+        memory_region_init_ram_device_ptr(&region->mmaps[i].mem,
+                                          memory_region_owner(region->mem),
+                                          name, region->mmaps[i].size,
+                                          region->mmaps[i].mmap);
         g_free(name);
-        memory_region_set_skip_dump(&region->mmaps[i].mem);
 
         memory_region_add_subregion(region->mem, region->mmaps[i].offset,
                                     &region->mmaps[i].mem);
diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c
index 65d30fdef9..d7dbe0e3e0 100644
--- a/hw/vfio/pci.c
+++ b/hw/vfio/pci.c
@@ -1071,6 +1071,55 @@ static const MemoryRegionOps vfio_vga_ops = {
 };
 
 /*
+ * Expand memory region of sub-page (size < PAGE_SIZE) MMIO BAR to page
+ * size if the BAR is in an exclusive page in host so that we could map
+ * this BAR to guest. But this sub-page BAR may not occupy an exclusive
+ * page in guest. So we should set the priority of the expanded memory
+ * region to zero in case of overlap with BARs which share the same page
+ * with the sub-page BAR in guest. Besides, we should also recover the
+ * size of this sub-page BAR when its base address is changed in guest
+ * and not page aligned any more.
+ */
+static void vfio_sub_page_bar_update_mapping(PCIDevice *pdev, int bar)
+{
+    VFIOPCIDevice *vdev = DO_UPCAST(VFIOPCIDevice, pdev, pdev);
+    VFIORegion *region = &vdev->bars[bar].region;
+    MemoryRegion *mmap_mr, *mr;
+    PCIIORegion *r;
+    pcibus_t bar_addr;
+    uint64_t size = region->size;
+
+    /* Make sure that the whole region is allowed to be mmapped */
+    if (region->nr_mmaps != 1 || !region->mmaps[0].mmap ||
+        region->mmaps[0].size != region->size) {
+        return;
+    }
+
+    r = &pdev->io_regions[bar];
+    bar_addr = r->addr;
+    mr = region->mem;
+    mmap_mr = &region->mmaps[0].mem;
+
+    /* If BAR is mapped and page aligned, update to fill PAGE_SIZE */
+    if (bar_addr != PCI_BAR_UNMAPPED &&
+        !(bar_addr & ~qemu_real_host_page_mask)) {
+        size = qemu_real_host_page_size;
+    }
+
+    memory_region_transaction_begin();
+
+    memory_region_set_size(mr, size);
+    memory_region_set_size(mmap_mr, size);
+    if (size != region->size && memory_region_is_mapped(mr)) {
+        memory_region_del_subregion(r->address_space, mr);
+        memory_region_add_subregion_overlap(r->address_space,
+                                            bar_addr, mr, 0);
+    }
+
+    memory_region_transaction_commit();
+}
+
+/*
  * PCI config space
  */
 uint32_t vfio_pci_read_config(PCIDevice *pdev, uint32_t addr, int len)
@@ -1153,6 +1202,24 @@ void vfio_pci_write_config(PCIDevice *pdev,
         } else if (was_enabled && !is_enabled) {
             vfio_msix_disable(vdev);
         }
+    } else if (ranges_overlap(addr, len, PCI_BASE_ADDRESS_0, 24) ||
+               range_covers_byte(addr, len, PCI_COMMAND)) {
+        pcibus_t old_addr[PCI_NUM_REGIONS - 1];
+        int bar;
+
+        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+            old_addr[bar] = pdev->io_regions[bar].addr;
+        }
+
+        pci_default_write_config(pdev, addr, val, len);
+
+        for (bar = 0; bar < PCI_ROM_SLOT; bar++) {
+            if (old_addr[bar] != pdev->io_regions[bar].addr &&
+                pdev->io_regions[bar].size > 0 &&
+                pdev->io_regions[bar].size < qemu_real_host_page_size) {
+                vfio_sub_page_bar_update_mapping(pdev, bar);
+            }
+        }
     } else {
         /* Write everything to QEMU to keep emulated bits correct */
         pci_default_write_config(pdev, addr, val, len);
@@ -1922,11 +1989,23 @@ static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
 static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
 {
     Error *err = NULL;
+    int nr;
 
     vfio_intx_enable(vdev, &err);
     if (err) {
         error_reportf_err(err, ERR_PREFIX, vdev->vbasedev.name);
     }
+
+    for (nr = 0; nr < PCI_NUM_REGIONS - 1; ++nr) {
+        off_t addr = vdev->config_offset + PCI_BASE_ADDRESS_0 + (4 * nr);
+        uint32_t val = 0;
+        uint32_t len = sizeof(val);
+
+        if (pwrite(vdev->vbasedev.fd, &val, len, addr) != len) {
+            error_report("%s(%s) reset bar %d failed: %m", __func__,
+                         vdev->vbasedev.name, nr);
+        }
+    }
 }
 
 static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
diff --git a/hw/vfio/spapr.c b/hw/vfio/spapr.c
index 7443d348d9..4409bcc0d7 100644
--- a/hw/vfio/spapr.c
+++ b/hw/vfio/spapr.c
@@ -25,7 +25,7 @@ static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
     }
 
     return !memory_region_is_ram(section->mr) ||
-           memory_region_is_skip_dump(section->mr);
+           memory_region_is_ram_device(section->mr);
 }
 
 static void *vfio_prereg_gpa_to_vaddr(MemoryRegionSection *section, hwaddr gpa)
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 79ccaaba1f..9728a2fb1a 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -209,7 +209,7 @@ struct MemoryRegion {
     void (*destructor)(MemoryRegion *mr);
     uint64_t align;
     bool terminates;
-    bool skip_dump;
+    bool ram_device;
    bool enabled;
     bool warning_printed; /* For reservations */
     uint8_t vga_logging_count;
@@ -449,6 +449,30 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
                                 void *ptr);
 
 /**
+ * memory_region_init_ram_device_ptr: Initialize RAM device memory region from
+ *                                    a user-provided pointer.
+ *
+ * A RAM device represents a mapping to a physical device, such as to a PCI
+ * MMIO BAR of a vfio-pci assigned device. The memory region may be mapped
+ * into the VM address space and access to the region will modify memory
+ * directly. However, the memory region should not be included in a memory
+ * dump (device may not be enabled/mapped at the time of the dump), and
+ * operations incompatible with manipulating MMIO should be avoided. Replaces
+ * skip_dump flag.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_device_ptr(MemoryRegion *mr,
+                                       struct Object *owner,
+                                       const char *name,
+                                       uint64_t size,
+                                       void *ptr);
+
+/**
  * memory_region_init_alias: Initialize a memory region that aliases all or a
  *                           part of another memory region.
  *
@@ -574,22 +598,13 @@ static inline bool memory_region_is_ram(MemoryRegion *mr)
 }
 
 /**
- * memory_region_is_skip_dump: check whether a memory region should not be
- *                             dumped
- *
- * Returns %true is a memory region should not be dumped(e.g. VFIO BAR MMAP).
+ * memory_region_is_ram_device: check whether a memory region is a ram device
  *
- * @mr: the memory region being queried
- */
-bool memory_region_is_skip_dump(MemoryRegion *mr);
-
-/**
- * memory_region_set_skip_dump: Set skip_dump flag, dump will ignore this memory
- *                              region
+ * Returns %true if a memory region is a device backed ram region
 *
 * @mr: the memory region being queried
 */
-void memory_region_set_skip_dump(MemoryRegion *mr);
+bool memory_region_is_ram_device(MemoryRegion *mr);
 
 /**
  * memory_region_is_romd: check whether a memory region is in ROMD mode
@@ -1465,9 +1480,11 @@ void *qemu_map_ram_ptr(RAMBlock *ram_block, ram_addr_t addr);
 static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
 {
     if (is_write) {
-        return memory_region_is_ram(mr) && !mr->readonly;
+        return memory_region_is_ram(mr) &&
+               !mr->readonly && !memory_region_is_ram_device(mr);
     } else {
-        return memory_region_is_ram(mr) || memory_region_is_romd(mr);
+        return (memory_region_is_ram(mr) && !memory_region_is_ram_device(mr)) ||
+               memory_region_is_romd(mr);
     }
 }
 
diff --git a/memory.c b/memory.c
--- a/memory.c
+++ b/memory.c
@@ -1128,6 +1128,71 @@ const MemoryRegionOps unassigned_mem_ops = {
     .endianness = DEVICE_NATIVE_ENDIAN,
 };
 
+static uint64_t memory_region_ram_device_read(void *opaque,
+                                              hwaddr addr, unsigned size)
+{
+    MemoryRegion *mr = opaque;
+    uint64_t data = (uint64_t)~0;
+
+    switch (size) {
+    case 1:
+        data = *(uint8_t *)(mr->ram_block->host + addr);
+        break;
+    case 2:
+        data = *(uint16_t *)(mr->ram_block->host + addr);
+        break;
+    case 4:
+        data = *(uint32_t *)(mr->ram_block->host + addr);
+        break;
+    case 8:
+        data = *(uint64_t *)(mr->ram_block->host + addr);
+        break;
+    }
+
+    trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size);
+
+    return data;
+}
+
+static void memory_region_ram_device_write(void *opaque, hwaddr addr,
+                                           uint64_t data, unsigned size)
+{
+    MemoryRegion *mr = opaque;
+
+    trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size);
+
+    switch (size) {
+    case 1:
+        *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data;
+        break;
+    case 2:
+        *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data;
+        break;
+    case 4:
+        *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data;
+        break;
+    case 8:
+        *(uint64_t *)(mr->ram_block->host + addr) = data;
+        break;
+    }
+}
+
+static const MemoryRegionOps ram_device_mem_ops = {
+    .read = memory_region_ram_device_read,
+    .write = memory_region_ram_device_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = true,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+        .unaligned = true,
+    },
+};
+
 bool memory_region_access_valid(MemoryRegion *mr,
                                 hwaddr addr,
                                 unsigned size,
@@ -1355,9 +1420,16 @@ void memory_region_init_ram_ptr(MemoryRegion *mr,
     mr->ram_block = qemu_ram_alloc_from_ptr(size, ptr, mr, &error_fatal);
 }
 
-void memory_region_set_skip_dump(MemoryRegion *mr)
+void memory_region_init_ram_device_ptr(MemoryRegion *mr,
+                                       Object *owner,
+                                       const char *name,
+                                       uint64_t size,
+                                       void *ptr)
 {
-    mr->skip_dump = true;
+    memory_region_init_ram_ptr(mr, owner, name, size, ptr);
+    mr->ram_device = true;
+    mr->ops = &ram_device_mem_ops;
+    mr->opaque = mr;
 }
 
 void memory_region_init_alias(MemoryRegion *mr,
@@ -1491,9 +1563,9 @@ const char *memory_region_name(const MemoryRegion *mr)
     return mr->name;
 }
 
-bool memory_region_is_skip_dump(MemoryRegion *mr)
+bool memory_region_is_ram_device(MemoryRegion *mr)
 {
-    return mr->skip_dump;
+    return mr->ram_device;
 }
 
 uint8_t memory_region_get_dirty_log_mask(MemoryRegion *mr)
diff --git a/memory_mapping.c b/memory_mapping.c
index e3e0d95172..6a39d71da2 100644
--- a/memory_mapping.c
+++ b/memory_mapping.c
@@ -206,7 +206,7 @@ static void guest_phys_blocks_region_add(MemoryListener *listener,
 
     /* we only care about RAM */
     if (!memory_region_is_ram(section->mr) ||
-        memory_region_is_skip_dump(section->mr)) {
+        memory_region_is_ram_device(section->mr)) {
         return;
     }
diff --git a/trace-events b/trace-events
index 8ecded5150..f74e1d3d22 100644
--- a/trace-events
+++ b/trace-events
@@ -121,6 +121,8 @@ memory_region_subpage_read(int cpu_index, void *mr, uint64_t offset, uint64_t va
 memory_region_subpage_write(int cpu_index, void *mr, uint64_t offset, uint64_t value, unsigned size) "cpu %d mr %p offset %#"PRIx64" value %#"PRIx64" size %u"
 memory_region_tb_read(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr %#"PRIx64" value %#"PRIx64" size %u"
 memory_region_tb_write(int cpu_index, uint64_t addr, uint64_t value, unsigned size) "cpu %d addr %#"PRIx64" value %#"PRIx64" size %u"
+memory_region_ram_device_read(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr %#"PRIx64" value %#"PRIx64" size %u"
+memory_region_ram_device_write(int cpu_index, void *mr, uint64_t addr, uint64_t value, unsigned size) "cpu %d mr %p addr %#"PRIx64" value %#"PRIx64" size %u"
 
 ### Guest events, keep at bottom
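
For reference, a minimal caller sketch of the memory API introduced above (not part of the patch): it shows how a device model that mmap()s a device BAR would switch from the removed memory_region_init_ram_ptr() + memory_region_set_skip_dump() pair to the new memory_region_init_ram_device_ptr() call. The MyDeviceState type, its fields, and the "my-bar" region name are invented for illustration; only the memory API calls themselves come from this series.

/* Hypothetical caller sketch; MyDeviceState and its fields are invented. */
#include "qemu/osdep.h"
#include "exec/memory.h"

typedef struct MyDeviceState {
    MemoryRegion bar_mr;   /* memory region backing the mmap'd BAR */
    void *bar_mmap;        /* host pointer returned by mmap(2) */
    uint64_t bar_size;     /* BAR size in bytes */
} MyDeviceState;

static void my_device_map_bar(MyDeviceState *s, Object *owner)
{
    /*
     * Old style (removed by this series):
     *     memory_region_init_ram_ptr(&s->bar_mr, owner, "my-bar",
     *                                s->bar_size, s->bar_mmap);
     *     memory_region_set_skip_dump(&s->bar_mr);
     *
     * New style: one call marks the region as a RAM device, which both
     * keeps it out of memory dumps and routes indirect accesses through
     * ram_device_mem_ops instead of treating it as ordinary RAM.
     */
    memory_region_init_ram_device_ptr(&s->bar_mr, owner, "my-bar",
                                      s->bar_size, s->bar_mmap);
}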