author:    Juan Quintela <quintela@redhat.com>  2013-11-05 15:52:54 +0100
committer: Juan Quintela <quintela@redhat.com>  2014-01-13 14:04:55 +0100
commit:    5ff7fb77b3cee8e26648e4fdccb23a77c2a6d3c6 (patch)
tree:      25d52129ce3f7a666a3f4ddf5be726845c3f7513 /kvm-all.c
parent:    c9dd46fc0d64d9f314aa3c220d4aff9d01ab778e (diff)
memory: move bitmap synchronization to its own function
We want to have all the functions that directly handle the dirty
bitmap close together. We will change them later.
Signed-off-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
Diffstat (limited to 'kvm-all.c')
-rw-r--r--  kvm-all.c  27
1 file changed, 2 insertions(+), 25 deletions(-)
@@ -380,33 +380,10 @@ static int kvm_set_migration_log(int enable)
 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                          unsigned long *bitmap)
 {
-    unsigned int i, j;
-    unsigned long page_number, c;
-    hwaddr addr;
     ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
-    ram_addr_t ram_addr;
-    unsigned int pages = int128_get64(section->size) / getpagesize();
-    unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
-    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+    ram_addr_t pages = int128_get64(section->size) / getpagesize();
 
-    /*
-     * bitmap-traveling is faster than memory-traveling (for addr...)
-     * especially when most of the memory is not dirty.
-     */
-    for (i = 0; i < len; i++) {
-        if (bitmap[i] != 0) {
-            c = leul_to_cpu(bitmap[i]);
-            do {
-                j = ffsl(c) - 1;
-                c &= ~(1ul << j);
-                page_number = (i * HOST_LONG_BITS + j) * hpratio;
-                addr = page_number * TARGET_PAGE_SIZE;
-                ram_addr = start + addr;
-                cpu_physical_memory_set_dirty_range(ram_addr,
-                                                    TARGET_PAGE_SIZE * hpratio);
-            } while (c != 0);
-        }
-    }
+    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
 
     return 0;
 }
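
The diff shows only the caller side. Based on the removed loop, the new helper
presumably looks roughly like the sketch below, assuming it takes the
little-endian dirty bitmap, the region's starting ram_addr_t, and the page
count, and keeps the same bitmap-walking strategy; its exact prototype and
header placement are not part of this diff.

    static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
                                                               ram_addr_t start,
                                                               ram_addr_t pages)
    {
        unsigned int i, j;
        unsigned long page_number, c;
        hwaddr addr;
        ram_addr_t ram_addr;
        /* number of host-long-sized bitmap words needed to cover 'pages' bits */
        unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
        /* one bitmap bit covers a host page, which may span several target pages */
        unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;

        /*
         * Walking the bitmap is faster than walking memory address by address,
         * especially when most of the memory is not dirty.
         */
        for (i = 0; i < len; i++) {
            if (bitmap[i] != 0) {
                c = leul_to_cpu(bitmap[i]);
                do {
                    j = ffsl(c) - 1;      /* lowest set bit = next dirty page */
                    c &= ~(1ul << j);
                    page_number = (i * HOST_LONG_BITS + j) * hpratio;
                    addr = page_number * TARGET_PAGE_SIZE;
                    ram_addr = start + addr;
                    cpu_physical_memory_set_dirty_range(ram_addr,
                                                        TARGET_PAGE_SIZE * hpratio);
                } while (c != 0);
            }
        }
    }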