| author | Avi Kivity <avi@redhat.com> | 2012-10-02 18:21:54 +0200 |
|---|---|---|
| committer | Avi Kivity <avi@redhat.com> | 2012-10-22 14:50:00 +0200 |
| commit | 95d2994a2f756c9c8684709421d40c45e63e4e04 (patch) | |
| tree | 7b51a5234bb44cece7c7eca14c444a67a9425b43 | |
| parent | bf83601fddb976753b498a879cbdc8f107f59f53 (diff) | |
memory: manage coalesced mmio via a MemoryListener
Instead of calling a global function on coalesced mmio changes, which
routes the call to kvm if enabled, add coalesced mmio hooks to
MemoryListener and make kvm use that instead.
The motivation is support for multiple address spaces (which means we
need to filter the call on the right address space), but the result
is cleaner as well.
Signed-off-by: Avi Kivity <avi@redhat.com>
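
As context for the diff below, here is a minimal sketch (not part of this commit) of what a listener consuming the new hooks looks like. The `MemoryListener` fields and the callback signature come from the memory.h hunk in this patch; the logging callbacks themselves are illustrative, and a real listener at this point in the tree also fills in the remaining callbacks (compare `kvm_memory_listener` in kvm-all.c).

```c
/* Illustrative sketch only -- not from this commit. */
#include <stdio.h>
#include "memory.h"

static void trace_coalesced_add(MemoryListener *listener,
                                MemoryRegionSection *section,
                                target_phys_addr_t addr, target_phys_addr_t len)
{
    /* Invoked for each coalesced MMIO range that becomes visible. */
    fprintf(stderr, "coalesced mmio add: 0x%llx len 0x%llx\n",
            (unsigned long long)addr, (unsigned long long)len);
}

static void trace_coalesced_del(MemoryListener *listener,
                                MemoryRegionSection *section,
                                target_phys_addr_t addr, target_phys_addr_t len)
{
    fprintf(stderr, "coalesced mmio del: 0x%llx len 0x%llx\n",
            (unsigned long long)addr, (unsigned long long)len);
}

static MemoryListener trace_listener = {
    .coalesced_mmio_add = trace_coalesced_add,
    .coalesced_mmio_del = trace_coalesced_del,
    /* Lower = earlier (during add), later (during del); the other
     * MemoryListener callbacks are omitted here for brevity. */
    .priority = 10,
};
```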
| -rw-r--r-- | exec.c | 13 |
| -rw-r--r-- | kvm-all.c | 20 |
| -rw-r--r-- | kvm-stub.c | 10 |
| -rw-r--r-- | kvm.h | 2 |
| -rw-r--r-- | memory.c | 17 |
| -rw-r--r-- | memory.h | 4 |
6 files changed, 27 insertions, 39 deletions
diff --git a/exec.c b/exec.c
--- a/exec.c
+++ b/exec.c
@@ -2305,19 +2305,6 @@ void cpu_register_physical_memory_log(MemoryRegionSection *section,
     }
 }
 
-void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
-    if (kvm_enabled())
-        kvm_coalesce_mmio_region(addr, size);
-}
-
-void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
-{
-    if (kvm_enabled())
-        kvm_uncoalesce_mmio_region(addr, size);
-}
-
 void qemu_flush_coalesced_mmio_buffer(void)
 {
     if (kvm_enabled())
diff --git a/kvm-all.c b/kvm-all.c
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -454,9 +454,10 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
     return ret;
 }
 
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_coalesce_mmio_region(MemoryListener *listener,
+                                     MemoryRegionSection *secion,
+                                     target_phys_addr_t start, target_phys_addr_t size)
 {
-    int ret = -ENOSYS;
     KVMState *s = kvm_state;
 
     if (s->coalesced_mmio) {
@@ -466,15 +467,14 @@ int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
         zone.size = size;
         zone.pad = 0;
 
-        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
+        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
     }
-
-    return ret;
 }
 
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
+static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
+                                       MemoryRegionSection *secion,
+                                       target_phys_addr_t start, target_phys_addr_t size)
 {
-    int ret = -ENOSYS;
     KVMState *s = kvm_state;
 
     if (s->coalesced_mmio) {
@@ -484,10 +484,8 @@ int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
         zone.size = size;
         zone.pad = 0;
 
-        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
+        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
     }
-
-    return ret;
 }
 
 int kvm_check_extension(KVMState *s, unsigned int extension)
@@ -817,6 +815,8 @@ static MemoryListener kvm_memory_listener = {
     .log_global_stop = kvm_log_global_stop,
     .eventfd_add = kvm_mem_ioeventfd_add,
     .eventfd_del = kvm_mem_ioeventfd_del,
+    .coalesced_mmio_add = kvm_coalesce_mmio_region,
+    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
     .priority = 10,
 };
diff --git a/kvm-stub.c b/kvm-stub.c
index 3c52eb5bc6..a3455e2203 100644
--- a/kvm-stub.c
+++ b/kvm-stub.c
@@ -29,16 +29,6 @@ int kvm_init_vcpu(CPUArchState *env)
     return -ENOSYS;
 }
 
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
-    return -ENOSYS;
-}
-
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
-{
-    return -ENOSYS;
-}
-
 int kvm_init(void)
 {
     return -ENOSYS;
diff --git a/kvm.h b/kvm.h
--- a/kvm.h
+++ b/kvm.h
@@ -129,8 +129,6 @@ void *kvm_vmalloc(ram_addr_t size);
 void *kvm_arch_vmalloc(ram_addr_t size);
 void kvm_setup_guest_memory(void *start, size_t size);
 
-int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
-int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size);
 void kvm_flush_coalesced_mmio_buffer(void);
 
 #endif
diff --git a/memory.c b/memory.c
--- a/memory.c
+++ b/memory.c
@@ -1136,11 +1136,19 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
     FlatRange *fr;
     CoalescedMemoryRange *cmr;
     AddrRange tmp;
+    MemoryRegionSection section;
 
     FOR_EACH_FLAT_RANGE(fr, as->current_map) {
         if (fr->mr == mr) {
-            qemu_unregister_coalesced_mmio(int128_get64(fr->addr.start),
-                                           int128_get64(fr->addr.size));
+            section = (MemoryRegionSection) {
+                .address_space = as->root,
+                .offset_within_address_space = int128_get64(fr->addr.start),
+                .size = int128_get64(fr->addr.size),
+            };
+
+            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
+                                 int128_get64(fr->addr.start),
+                                 int128_get64(fr->addr.size));
             QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                 tmp = addrrange_shift(cmr->addr,
                                       int128_sub(fr->addr.start,
@@ -1149,8 +1157,9 @@ static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpa
                 continue;
             }
             tmp = addrrange_intersection(tmp, fr->addr);
-            qemu_register_coalesced_mmio(int128_get64(tmp.start),
-                                         int128_get64(tmp.size));
+            MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
+                                 int128_get64(tmp.start),
+                                 int128_get64(tmp.size));
         }
     }
 }
diff --git a/memory.h b/memory.h
--- a/memory.h
+++ b/memory.h
@@ -217,6 +217,10 @@ struct MemoryListener {
                          bool match_data, uint64_t data, EventNotifier *e);
     void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
                         bool match_data, uint64_t data, EventNotifier *e);
+    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+                               target_phys_addr_t addr, target_phys_addr_t len);
+    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+                               target_phys_addr_t addr, target_phys_addr_t len);
     /* Lower = earlier (during add), later (during del) */
     unsigned priority;
     MemoryRegion *address_space_filter;
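
A note on the stated motivation: the hooks receive a `MemoryRegionSection` precisely so that coalesced-MMIO updates can be restricted to the right address space. The sketch below is a hypothetical illustration of that filtering done inside a listener, using the `address_space_filter` field already visible at the end of the memory.h hunk above; it is not code from this commit, and the dispatch side may equally well perform the check.

```c
/* Hypothetical illustration -- not from this commit.  Shows how the
 * MemoryRegionSection argument lets a listener ignore coalesced-MMIO
 * changes that belong to a different address space. */
static void filtered_coalesced_add(MemoryListener *listener,
                                   MemoryRegionSection *section,
                                   target_phys_addr_t addr,
                                   target_phys_addr_t len)
{
    /* address_space_filter is the existing MemoryListener field shown in
     * the memory.h hunk; section->address_space is the root region filled
     * in by memory_region_update_coalesced_range_as() above. */
    if (listener->address_space_filter &&
        listener->address_space_filter != section->address_space) {
        return;                     /* not our address space */
    }

    /* ... act on the coalesced range [addr, addr + len) ... */
}
```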