Diffstat (limited to 'hw/virtio/vhost.c')
-rw-r--r--  hw/virtio/vhost.c | 504
1 file changed, 173 insertions(+), 331 deletions(-)
diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c
index 338e4395b7..4a44e6e6bf 100644
--- a/hw/virtio/vhost.c
+++ b/hw/virtio/vhost.c
@@ -156,160 +156,6 @@ static void vhost_log_sync_range(struct vhost_dev *dev,
     }
 }
 
-/* Assign/unassign. Keep an unsorted array of non-overlapping
- * memory regions in dev->mem. */
-static void vhost_dev_unassign_memory(struct vhost_dev *dev,
-                                      uint64_t start_addr,
-                                      uint64_t size)
-{
-    int from, to, n = dev->mem->nregions;
-    /* Track overlapping/split regions for sanity checking. */
-    int overlap_start = 0, overlap_end = 0, overlap_middle = 0, split = 0;
-
-    for (from = 0, to = 0; from < n; ++from, ++to) {
-        struct vhost_memory_region *reg = dev->mem->regions + to;
-        uint64_t reglast;
-        uint64_t memlast;
-        uint64_t change;
-
-        /* clone old region */
-        if (to != from) {
-            memcpy(reg, dev->mem->regions + from, sizeof *reg);
-        }
-
-        /* No overlap is simple */
-        if (!ranges_overlap(reg->guest_phys_addr, reg->memory_size,
-                            start_addr, size)) {
-            continue;
-        }
-
-        /* Split only happens if supplied region
-         * is in the middle of an existing one. Thus it can not
-         * overlap with any other existing region. */
-        assert(!split);
-
-        reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
-        memlast = range_get_last(start_addr, size);
-
-        /* Remove whole region */
-        if (start_addr <= reg->guest_phys_addr && memlast >= reglast) {
-            --dev->mem->nregions;
-            --to;
-            ++overlap_middle;
-            continue;
-        }
-
-        /* Shrink region */
-        if (memlast >= reglast) {
-            reg->memory_size = start_addr - reg->guest_phys_addr;
-            assert(reg->memory_size);
-            assert(!overlap_end);
-            ++overlap_end;
-            continue;
-        }
-
-        /* Shift region */
-        if (start_addr <= reg->guest_phys_addr) {
-            change = memlast + 1 - reg->guest_phys_addr;
-            reg->memory_size -= change;
-            reg->guest_phys_addr += change;
-            reg->userspace_addr += change;
-            assert(reg->memory_size);
-            assert(!overlap_start);
-            ++overlap_start;
-            continue;
-        }
-
-        /* This only happens if supplied region
-         * is in the middle of an existing one. Thus it can not
-         * overlap with any other existing region. */
-        assert(!overlap_start);
-        assert(!overlap_end);
-        assert(!overlap_middle);
-        /* Split region: shrink first part, shift second part. */
-        memcpy(dev->mem->regions + n, reg, sizeof *reg);
-        reg->memory_size = start_addr - reg->guest_phys_addr;
-        assert(reg->memory_size);
-        change = memlast + 1 - reg->guest_phys_addr;
-        reg = dev->mem->regions + n;
-        reg->memory_size -= change;
-        assert(reg->memory_size);
-        reg->guest_phys_addr += change;
-        reg->userspace_addr += change;
-        /* Never add more than 1 region */
-        assert(dev->mem->nregions == n);
-        ++dev->mem->nregions;
-        ++split;
-    }
-}
-
-/* Called after unassign, so no regions overlap the given range. */
-static void vhost_dev_assign_memory(struct vhost_dev *dev,
-                                    uint64_t start_addr,
-                                    uint64_t size,
-                                    uint64_t uaddr)
-{
-    int from, to;
-    struct vhost_memory_region *merged = NULL;
-    for (from = 0, to = 0; from < dev->mem->nregions; ++from, ++to) {
-        struct vhost_memory_region *reg = dev->mem->regions + to;
-        uint64_t prlast, urlast;
-        uint64_t pmlast, umlast;
-        uint64_t s, e, u;
-
-        /* clone old region */
-        if (to != from) {
-            memcpy(reg, dev->mem->regions + from, sizeof *reg);
-        }
-        prlast = range_get_last(reg->guest_phys_addr, reg->memory_size);
-        pmlast = range_get_last(start_addr, size);
-        urlast = range_get_last(reg->userspace_addr, reg->memory_size);
-        umlast = range_get_last(uaddr, size);
-
-        /* check for overlapping regions: should never happen. */
-        assert(prlast < start_addr || pmlast < reg->guest_phys_addr);
-
-        /* Not an adjacent or overlapping region - do not merge. */
-        if ((prlast + 1 != start_addr || urlast + 1 != uaddr) &&
-            (pmlast + 1 != reg->guest_phys_addr ||
-             umlast + 1 != reg->userspace_addr)) {
-            continue;
-        }
-
-        if (dev->vhost_ops->vhost_backend_can_merge &&
-            !dev->vhost_ops->vhost_backend_can_merge(dev, uaddr, size,
-                                                     reg->userspace_addr,
-                                                     reg->memory_size)) {
-            continue;
-        }
-
-        if (merged) {
-            --to;
-            assert(to >= 0);
-        } else {
-            merged = reg;
-        }
-        u = MIN(uaddr, reg->userspace_addr);
-        s = MIN(start_addr, reg->guest_phys_addr);
-        e = MAX(pmlast, prlast);
-        uaddr = merged->userspace_addr = u;
-        start_addr = merged->guest_phys_addr = s;
-        size = merged->memory_size = e - s + 1;
-        assert(merged->memory_size);
-    }
-
-    if (!merged) {
-        struct vhost_memory_region *reg = dev->mem->regions + to;
-        memset(reg, 0, sizeof *reg);
-        reg->memory_size = size;
-        assert(reg->memory_size);
-        reg->guest_phys_addr = start_addr;
-        reg->userspace_addr = uaddr;
-        ++to;
-    }
-    assert(to <= dev->mem->nregions + 1);
-    dev->mem->nregions = to;
-}
-
 static uint64_t vhost_get_log_size(struct vhost_dev *dev)
 {
     uint64_t log_size = 0;
@@ -456,35 +302,37 @@ static void vhost_memory_unmap(struct vhost_dev *dev, void *buffer,
     }
 }
 
-static int vhost_verify_ring_part_mapping(struct vhost_dev *dev,
-                                          void *part,
-                                          uint64_t part_addr,
-                                          uint64_t part_size,
-                                          uint64_t start_addr,
-                                          uint64_t size)
+static int vhost_verify_ring_part_mapping(void *ring_hva,
+                                          uint64_t ring_gpa,
+                                          uint64_t ring_size,
+                                          void *reg_hva,
+                                          uint64_t reg_gpa,
+                                          uint64_t reg_size)
 {
-    hwaddr l;
-    void *p;
-    int r = 0;
+    uint64_t hva_ring_offset;
+    uint64_t ring_last = range_get_last(ring_gpa, ring_size);
+    uint64_t reg_last = range_get_last(reg_gpa, reg_size);
 
-    if (!ranges_overlap(start_addr, size, part_addr, part_size)) {
+    if (ring_last < reg_gpa || ring_gpa > reg_last) {
         return 0;
     }
-    l = part_size;
-    p = vhost_memory_map(dev, part_addr, &l, 1);
-    if (!p || l != part_size) {
-        r = -ENOMEM;
+    /* check that the whole ring is mapped */
+    if (ring_last > reg_last) {
+        return -ENOMEM;
    }
-    if (p != part) {
-        r = -EBUSY;
+    /* check that the ring's MemoryRegion wasn't replaced */
+    hva_ring_offset = ring_gpa - reg_gpa;
+    if (ring_hva != reg_hva + hva_ring_offset) {
+        return -EBUSY;
     }
-    vhost_memory_unmap(dev, p, l, 0, 0);
-    return r;
+
+    return 0;
 }
 
 static int vhost_verify_ring_mappings(struct vhost_dev *dev,
-                                      uint64_t start_addr,
-                                      uint64_t size)
+                                      void *reg_hva,
+                                      uint64_t reg_gpa,
+                                      uint64_t reg_size)
 {
     int i, j;
     int r = 0;
@@ -498,22 +346,25 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
         struct vhost_virtqueue *vq = dev->vqs + i;
 
         j = 0;
-        r = vhost_verify_ring_part_mapping(dev, vq->desc, vq->desc_phys,
-                                           vq->desc_size, start_addr, size);
+        r = vhost_verify_ring_part_mapping(
+                vq->desc, vq->desc_phys, vq->desc_size,
+                reg_hva, reg_gpa, reg_size);
         if (r) {
             break;
         }
 
         j++;
-        r = vhost_verify_ring_part_mapping(dev, vq->avail, vq->avail_phys,
-                                           vq->avail_size, start_addr, size);
+        r = vhost_verify_ring_part_mapping(
+                vq->avail, vq->avail_phys, vq->avail_size,
+                reg_hva, reg_gpa, reg_size);
         if (r) {
             break;
         }
 
         j++;
-        r = vhost_verify_ring_part_mapping(dev, vq->used, vq->used_phys,
-                                           vq->used_size, start_addr, size);
+        r = vhost_verify_ring_part_mapping(
+                vq->used, vq->used_phys, vq->used_size,
+                reg_hva, reg_gpa, reg_size);
         if (r) {
             break;
         }
@@ -527,134 +378,95 @@ static int vhost_verify_ring_mappings(struct vhost_dev *dev,
     return r;
 }
 
-static struct vhost_memory_region *vhost_dev_find_reg(struct vhost_dev *dev,
-                                                      uint64_t start_addr,
-                                                      uint64_t size)
-{
-    int i, n = dev->mem->nregions;
-    for (i = 0; i < n; ++i) {
-        struct vhost_memory_region *reg = dev->mem->regions + i;
-        if (ranges_overlap(reg->guest_phys_addr, reg->memory_size,
-                           start_addr, size)) {
-            return reg;
-        }
-    }
-    return NULL;
-}
-
-static bool vhost_dev_cmp_memory(struct vhost_dev *dev,
-                                 uint64_t start_addr,
-                                 uint64_t size,
-                                 uint64_t uaddr)
-{
-    struct vhost_memory_region *reg = vhost_dev_find_reg(dev, start_addr, size);
-    uint64_t reglast;
-    uint64_t memlast;
-
-    if (!reg) {
-        return true;
-    }
-
-    reglast = range_get_last(reg->guest_phys_addr, reg->memory_size);
-    memlast = range_get_last(start_addr, size);
-
-    /* Need to extend region? */
-    if (start_addr < reg->guest_phys_addr || memlast > reglast) {
-        return true;
-    }
-    /* userspace_addr changed? */
-    return uaddr != reg->userspace_addr + start_addr - reg->guest_phys_addr;
-}
-
-static void vhost_set_memory(MemoryListener *listener,
-                             MemoryRegionSection *section,
-                             bool add)
-{
-    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
-                                         memory_listener);
-    hwaddr start_addr = section->offset_within_address_space;
-    ram_addr_t size = int128_get64(section->size);
-    bool log_dirty =
-        memory_region_get_dirty_log_mask(section->mr) & ~(1 << DIRTY_MEMORY_MIGRATION);
-    int s = offsetof(struct vhost_memory, regions) +
-        (dev->mem->nregions + 1) * sizeof dev->mem->regions[0];
-    void *ram;
-
-    dev->mem = g_realloc(dev->mem, s);
-
-    if (log_dirty) {
-        add = false;
-    }
-
-    assert(size);
-
-    /* Optimize no-change case. At least cirrus_vga does this a lot at this time. */
-    ram = memory_region_get_ram_ptr(section->mr) + section->offset_within_region;
-    if (add) {
-        if (!vhost_dev_cmp_memory(dev, start_addr, size, (uintptr_t)ram)) {
-            /* Region exists with same address. Nothing to do. */
-            return;
-        }
-    } else {
-        if (!vhost_dev_find_reg(dev, start_addr, size)) {
-            /* Removing region that we don't access. Nothing to do. */
-            return;
-        }
-    }
-
-    vhost_dev_unassign_memory(dev, start_addr, size);
-    if (add) {
-        /* Add given mapping, merging adjacent regions if any */
-        vhost_dev_assign_memory(dev, start_addr, size, (uintptr_t)ram);
-    } else {
-        /* Remove old mapping for this memory, if any. */
-        vhost_dev_unassign_memory(dev, start_addr, size);
-    }
-    dev->mem_changed_start_addr = MIN(dev->mem_changed_start_addr, start_addr);
-    dev->mem_changed_end_addr = MAX(dev->mem_changed_end_addr, start_addr + size - 1);
-    dev->memory_changed = true;
-    used_memslots = dev->mem->nregions;
-}
-
 static bool vhost_section(MemoryRegionSection *section)
 {
-    return memory_region_is_ram(section->mr) &&
+    bool result;
+    bool log_dirty = memory_region_get_dirty_log_mask(section->mr) &
+                     ~(1 << DIRTY_MEMORY_MIGRATION);
+    result = memory_region_is_ram(section->mr) &&
         !memory_region_is_rom(section->mr);
+
+    /* Vhost doesn't handle any block which is doing dirty-tracking other
+     * than migration; this typically fires on VGA areas.
+     */
+    result &= !log_dirty;
+
+    trace_vhost_section(section->mr->name, result);
+    return result;
 }
 
 static void vhost_begin(MemoryListener *listener)
 {
     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                          memory_listener);
-    dev->mem_changed_end_addr = 0;
-    dev->mem_changed_start_addr = -1;
+    dev->tmp_sections = NULL;
+    dev->n_tmp_sections = 0;
 }
 
 static void vhost_commit(MemoryListener *listener)
 {
     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                          memory_listener);
-    hwaddr start_addr = 0;
-    ram_addr_t size = 0;
+    MemoryRegionSection *old_sections;
+    int n_old_sections;
     uint64_t log_size;
+    size_t regions_size;
     int r;
+    int i;
+    bool changed = false;
 
-    if (!dev->memory_changed) {
-        return;
+    /* Note we can be called before the device is started, but then
+     * starting the device calls set_mem_table, so we need to have
+     * built the data structures.
+     */
+    old_sections = dev->mem_sections;
+    n_old_sections = dev->n_mem_sections;
+    dev->mem_sections = dev->tmp_sections;
+    dev->n_mem_sections = dev->n_tmp_sections;
+
+    if (dev->n_mem_sections != n_old_sections) {
+        changed = true;
+    } else {
+        /* Same size, let's check the contents */
+        changed = n_old_sections && memcmp(dev->mem_sections, old_sections,
+                         n_old_sections * sizeof(old_sections[0])) != 0;
     }
-    if (!dev->started) {
-        return;
+
+    trace_vhost_commit(dev->started, changed);
+    if (!changed) {
+        goto out;
     }
-    if (dev->mem_changed_start_addr > dev->mem_changed_end_addr) {
-        return;
+
+    /* Rebuild the regions list from the new sections list */
+    regions_size = offsetof(struct vhost_memory, regions) +
+                       dev->n_mem_sections * sizeof dev->mem->regions[0];
+    dev->mem = g_realloc(dev->mem, regions_size);
+    dev->mem->nregions = dev->n_mem_sections;
+    used_memslots = dev->mem->nregions;
+    for (i = 0; i < dev->n_mem_sections; i++) {
+        struct vhost_memory_region *cur_vmr = dev->mem->regions + i;
+        struct MemoryRegionSection *mrs = dev->mem_sections + i;
+
+        cur_vmr->guest_phys_addr = mrs->offset_within_address_space;
+        cur_vmr->memory_size     = int128_get64(mrs->size);
+        cur_vmr->userspace_addr  =
+            (uintptr_t)memory_region_get_ram_ptr(mrs->mr) +
+            mrs->offset_within_region;
+        cur_vmr->flags_padding   = 0;
     }
 
-    if (dev->started) {
-        start_addr = dev->mem_changed_start_addr;
-        size = dev->mem_changed_end_addr - dev->mem_changed_start_addr + 1;
+    if (!dev->started) {
+        goto out;
+    }
 
-        r = vhost_verify_ring_mappings(dev, start_addr, size);
-        assert(r >= 0);
+    for (i = 0; i < dev->mem->nregions; i++) {
+        if (vhost_verify_ring_mappings(dev,
+                       (void *)(uintptr_t)dev->mem->regions[i].userspace_addr,
+                       dev->mem->regions[i].guest_phys_addr,
+                       dev->mem->regions[i].memory_size)) {
+            error_report("Verify ring failure on region %d", i);
+            abort();
+        }
     }
 
     if (!dev->log_enabled) {
@@ -662,8 +474,7 @@ static void vhost_commit(MemoryListener *listener)
         if (r < 0) {
             VHOST_OPS_DEBUG("vhost_set_mem_table failed");
         }
-        dev->memory_changed = false;
-        return;
+        goto out;
     }
     log_size = vhost_get_log_size(dev);
     /* We allocate an extra 4K bytes to log,
@@ -681,51 +492,91 @@ static void vhost_commit(MemoryListener *listener)
     if (dev->log_size > log_size + VHOST_LOG_BUFFER) {
         vhost_dev_log_resize(dev, log_size);
     }
-    dev->memory_changed = false;
+
+out:
+    /* Deref the old list of sections; this must happen _after_ the
+     * vhost_set_mem_table to ensure the client isn't still using the
+     * section we're about to unref.
+     */
+    while (n_old_sections--) {
+        memory_region_unref(old_sections[n_old_sections].mr);
+    }
+    g_free(old_sections);
+    return;
 }
 
-static void vhost_region_add(MemoryListener *listener,
-                             MemoryRegionSection *section)
+/* Adds the section data to the tmp_section structure.
+ * It relies on the listener calling us in memory address order
+ * and for each region (via the _add and _nop methods) to
+ * join neighbours.
+ */
+static void vhost_region_add_section(struct vhost_dev *dev,
+                                     MemoryRegionSection *section)
 {
-    struct vhost_dev *dev = container_of(listener, struct vhost_dev,
-                                         memory_listener);
-
-    if (!vhost_section(section)) {
-        return;
+    bool need_add = true;
+    uint64_t mrs_size = int128_get64(section->size);
+    uint64_t mrs_gpa = section->offset_within_address_space;
+    uintptr_t mrs_host = (uintptr_t)memory_region_get_ram_ptr(section->mr) +
+                         section->offset_within_region;
+
+    trace_vhost_region_add_section(section->mr->name, mrs_gpa, mrs_size,
+                                   mrs_host);
+
+    if (dev->n_tmp_sections) {
+        /* Since we already have at least one section, let's see if
+         * this extends it; since we're scanning in order, we only
+         * have to look at the last one, and the FlatView that calls
+         * us shouldn't have overlaps.
+         */
+        MemoryRegionSection *prev_sec = dev->tmp_sections +
+                                        (dev->n_tmp_sections - 1);
+        uint64_t prev_gpa_start = prev_sec->offset_within_address_space;
+        uint64_t prev_size = int128_get64(prev_sec->size);
+        uint64_t prev_gpa_end   = range_get_last(prev_gpa_start, prev_size);
+        uint64_t prev_host_start =
+            (uintptr_t)memory_region_get_ram_ptr(prev_sec->mr) +
+            prev_sec->offset_within_region;
+        uint64_t prev_host_end   = range_get_last(prev_host_start, prev_size);
+
+        if (prev_gpa_end + 1 == mrs_gpa &&
+            prev_host_end + 1 == mrs_host &&
+            section->mr == prev_sec->mr &&
+            (!dev->vhost_ops->vhost_backend_can_merge ||
+             dev->vhost_ops->vhost_backend_can_merge(dev,
+                mrs_host, mrs_size,
+                prev_host_start, prev_size))) {
+            /* The two sections abut */
+            need_add = false;
+            prev_sec->size = int128_add(prev_sec->size, section->size);
+            trace_vhost_region_add_section_abut(section->mr->name,
+                                                mrs_size + prev_size);
+        }
     }
 
-    trace_vhost_region_add(dev, section->mr->name ?: NULL);
-    ++dev->n_mem_sections;
-    dev->mem_sections = g_renew(MemoryRegionSection, dev->mem_sections,
-                                dev->n_mem_sections);
-    dev->mem_sections[dev->n_mem_sections - 1] = *section;
-    memory_region_ref(section->mr);
-    vhost_set_memory(listener, section, true);
+    if (need_add) {
+        ++dev->n_tmp_sections;
+        dev->tmp_sections = g_renew(MemoryRegionSection, dev->tmp_sections,
+                                    dev->n_tmp_sections);
+        dev->tmp_sections[dev->n_tmp_sections - 1] = *section;
+        /* The flatview isn't stable and we don't use it, making it NULL
+         * means we can memcmp the list.
+         */
+        dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL;
+        memory_region_ref(section->mr);
+    }
 }
 
-static void vhost_region_del(MemoryListener *listener,
-                             MemoryRegionSection *section)
+/* Used for both add and nop callbacks */
+static void vhost_region_addnop(MemoryListener *listener,
+                                MemoryRegionSection *section)
 {
     struct vhost_dev *dev = container_of(listener, struct vhost_dev,
                                          memory_listener);
-    int i;
 
     if (!vhost_section(section)) {
         return;
     }
-
-    trace_vhost_region_del(dev, section->mr->name ?: NULL);
-    vhost_set_memory(listener, section, false);
-    memory_region_unref(section->mr);
-    for (i = 0; i < dev->n_mem_sections; ++i) {
-        if (dev->mem_sections[i].offset_within_address_space
-            == section->offset_within_address_space) {
-            --dev->n_mem_sections;
-            memmove(&dev->mem_sections[i], &dev->mem_sections[i+1],
-                    (dev->n_mem_sections - i) * sizeof(*dev->mem_sections));
-            break;
-        }
-    }
+    vhost_region_add_section(dev, section);
 }
 
 static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
@@ -752,8 +603,6 @@ static void vhost_iommu_region_add(MemoryListener *listener,
         return;
     }
 
-    trace_vhost_iommu_region_add(dev, section->mr->name ?: NULL);
-
     iommu = g_malloc0(sizeof(*iommu));
     end = int128_add(int128_make64(section->offset_within_region),
                      section->size);
@@ -782,8 +631,6 @@ static void vhost_iommu_region_del(MemoryListener *listener,
         return;
     }
 
-    trace_vhost_iommu_region_del(dev, section->mr->name ?: NULL);
-
     QLIST_FOREACH(iommu, &dev->iommu_list, iommu_next) {
         if (iommu->mr == section->mr &&
             iommu->n.start == section->offset_within_region) {
@@ -796,11 +643,6 @@ static void vhost_iommu_region_del(MemoryListener *listener,
     }
 }
 
-static void vhost_region_nop(MemoryListener *listener,
-                             MemoryRegionSection *section)
-{
-}
-
 static int vhost_virtqueue_set_addr(struct vhost_dev *dev,
                                     struct vhost_virtqueue *vq,
                                     unsigned idx, bool enable_log)
@@ -1305,9 +1147,8 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->memory_listener = (MemoryListener) {
         .begin = vhost_begin,
        .commit = vhost_commit,
-        .region_add = vhost_region_add,
-        .region_del = vhost_region_del,
-        .region_nop = vhost_region_nop,
+        .region_add = vhost_region_addnop,
+        .region_nop = vhost_region_addnop,
         .log_start = vhost_log_start,
         .log_stop = vhost_log_stop,
         .log_sync = vhost_log_sync,
@@ -1349,7 +1190,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
     hdev->log_size = 0;
     hdev->log_enabled = false;
    hdev->started = false;
-    hdev->memory_changed = false;
     memory_listener_register(&hdev->memory_listener, &address_space_memory);
     QLIST_INSERT_HEAD(&vhost_devices, hdev, entry);
     return 0;
@@ -1425,6 +1265,7 @@ fail_vq:
             error_report("vhost VQ %d notifier cleanup error: %d", i, -r);
         }
         assert (e >= 0);
+        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
     }
     virtio_device_release_ioeventfd(vdev);
 fail:
@@ -1448,6 +1289,7 @@ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev)
             error_report("vhost VQ %d notifier cleanup failed: %d", i, -r);
         }
         assert (r >= 0);
+        virtio_bus_cleanup_host_notifier(VIRTIO_BUS(qbus), hdev->vq_index + i);
     }
     virtio_device_release_ioeventfd(vdev);
 }
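
The core of the change above is that vhost no longer edits its region list in place: vhost_begin() starts an empty tmp_sections list, vhost_region_addnop() appends the RAM sections the memory listener reports in address order (coalescing a section that starts exactly one byte past the previous one in both guest-physical and host-virtual space), and vhost_commit() rebuilds dev->mem from the result. The following standalone sketch only illustrates that abutment test; the types and helper names (section_t, try_merge, range_last) are invented here and are not QEMU APIs.

/*
 * Hedged sketch (not QEMU code): the abutment test that
 * vhost_region_add_section() uses to coalesce neighbouring sections.
 */
#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

typedef struct {
    uint64_t gpa;   /* guest physical start address */
    uint64_t hva;   /* host virtual start address   */
    uint64_t size;  /* length in bytes              */
} section_t;

/* Last byte covered by [start, start + len) -- mirrors range_get_last(). */
static uint64_t range_last(uint64_t start, uint64_t len)
{
    return start + len - 1;
}

/* Returns true and grows *prev when next starts exactly one byte past
 * prev's end in both guest-physical and host-virtual space. */
static bool try_merge(section_t *prev, const section_t *next)
{
    if (range_last(prev->gpa, prev->size) + 1 == next->gpa &&
        range_last(prev->hva, prev->size) + 1 == next->hva) {
        prev->size += next->size;
        return true;
    }
    return false;
}

int main(void)
{
    section_t prev = { .gpa = 0x100000, .hva = 0x7f0000100000, .size = 0x1000 };
    section_t next = { .gpa = 0x101000, .hva = 0x7f0000101000, .size = 0x2000 };

    if (try_merge(&prev, &next)) {
        printf("merged: gpa=0x%llx size=0x%llx\n",
               (unsigned long long)prev.gpa, (unsigned long long)prev.size);
    } else {
        printf("kept as two regions\n");
    }
    return 0;
}

Because the listener walks a flat view in ascending address order with no overlaps, only the most recently accumulated entry ever needs to be examined when deciding whether to merge.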
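
Once the regions are rebuilt, vhost_commit() re-verifies each virtqueue ring against them. The check is plain address arithmetic: a ring that overlaps a region must be wholly contained in it, and its host address must still equal the region's host base plus the ring's offset into the region; otherwise the backing MemoryRegion was replaced while the ring was in use. Below is a minimal sketch of that arithmetic with invented names and sample values, assuming the same -ENOMEM/-EBUSY convention as the patch.

/*
 * Hedged sketch (not QEMU code): the containment/offset check done by the
 * reworked vhost_verify_ring_part_mapping().
 */
#include <stdint.h>
#include <stdio.h>
#include <errno.h>

static int verify_ring_part(void *ring_hva, uint64_t ring_gpa, uint64_t ring_size,
                            void *reg_hva, uint64_t reg_gpa, uint64_t reg_size)
{
    uint64_t ring_last = ring_gpa + ring_size - 1;
    uint64_t reg_last = reg_gpa + reg_size - 1;

    if (ring_last < reg_gpa || ring_gpa > reg_last) {
        return 0;                 /* ring not in this region: nothing to check */
    }
    if (ring_last > reg_last) {
        return -ENOMEM;           /* ring runs past the end of the region */
    }
    if (ring_hva != (uint8_t *)reg_hva + (ring_gpa - reg_gpa)) {
        return -EBUSY;            /* region was replaced under the ring */
    }
    return 0;
}

int main(void)
{
    uint8_t region[0x4000];       /* stands in for the region's host mapping */

    /* Ring at GPA 0x2000 inside a region that starts at GPA 0x1000. */
    int r = verify_ring_part(region + 0x1000, 0x2000, 0x100,
                             region, 0x1000, sizeof(region));
    printf("verify result: %d\n", r); /* prints 0: still mapped at the same HVA */
    return 0;
}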