 accel/kvm/kvm-all.c      | 260
 accel/kvm/trace-events   |   1
 exec.c                   |  15
 include/exec/memory.h    |  19
 include/exec/ram_addr.h  |  92
 include/qemu/bitmap.h    |   9
 include/sysemu/kvm_int.h |   4
 memory.c                 |  56
 migration/migration.c    |   4
 migration/migration.h    |  27
 migration/ram.c          | 113
 migration/savevm.c       |   1
 migration/trace-events   |   3
 tests/Makefile.include   |   2
 tests/migration-test.c   |  55
 tests/test-bitmap.c      |  72
 util/bitmap.c            |  85
 util/cutils.c            |   8
 18 files changed, 732 insertions(+), 94 deletions(-)
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index 3d86ae5052..35ea3cb624 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -91,6 +91,7 @@ struct KVMState
int many_ioeventfds;
int intx_set_mask;
bool sync_mmu;
+ bool manual_dirty_log_protect;
/* The man page (and posix) say ioctl numbers are signed int, but
* they're not. Linux, glibc and *BSD all treat ioctl numbers as
* unsigned, and treating them as signed here can break things */
@@ -138,6 +139,9 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_LAST_INFO
};
+#define kvm_slots_lock(kml) qemu_mutex_lock(&(kml)->slots_lock)
+#define kvm_slots_unlock(kml) qemu_mutex_unlock(&(kml)->slots_lock)
+
int kvm_get_max_memslots(void)
{
KVMState *s = KVM_STATE(current_machine->accelerator);
@@ -165,6 +169,7 @@ int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
return 1;
}
+/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
KVMState *s = kvm_state;
@@ -182,10 +187,17 @@ static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
bool kvm_has_free_slot(MachineState *ms)
{
KVMState *s = KVM_STATE(ms->accelerator);
+ bool result;
+ KVMMemoryListener *kml = &s->memory_listener;
+
+ kvm_slots_lock(kml);
+ result = !!kvm_get_free_slot(kml);
+ kvm_slots_unlock(kml);
- return kvm_get_free_slot(&s->memory_listener);
+ return result;
}
+/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
KVMSlot *slot = kvm_get_free_slot(kml);
@@ -244,18 +256,21 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
hwaddr *phys_addr)
{
KVMMemoryListener *kml = &s->memory_listener;
- int i;
+ int i, ret = 0;
+ kvm_slots_lock(kml);
for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &kml->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
*phys_addr = mem->start_addr + (ram - mem->ram);
- return 1;
+ ret = 1;
+ break;
}
}
+ kvm_slots_unlock(kml);
- return 0;
+ return ret;
}
static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
@@ -396,6 +411,7 @@ static int kvm_mem_flags(MemoryRegion *mr)
return flags;
}
+/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
MemoryRegion *mr)
{
@@ -414,19 +430,26 @@ static int kvm_section_update_flags(KVMMemoryListener *kml,
{
hwaddr start_addr, size;
KVMSlot *mem;
+ int ret = 0;
size = kvm_align_section(section, &start_addr);
if (!size) {
return 0;
}
+ kvm_slots_lock(kml);
+
mem = kvm_lookup_matching_slot(kml, start_addr, size);
if (!mem) {
/* We don't have a slot if we want to trap every access. */
- return 0;
+ goto out;
}
- return kvm_slot_update_flags(kml, mem, section->mr);
+ ret = kvm_slot_update_flags(kml, mem, section->mr);
+
+out:
+ kvm_slots_unlock(kml);
+ return ret;
}
static void kvm_log_start(MemoryListener *listener,
@@ -478,13 +501,15 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
/**
- * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
- * This function updates qemu's dirty bitmap using
- * memory_region_set_dirty(). This means all bits are set
- * to dirty.
+ * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
+ *
+ * This function will first try to fetch the dirty bitmap from the
+ * kernel, and then update QEMU's dirty bitmap.
+ *
+ * NOTE: the caller must hold kml->slots_lock.
*
- * @start_add: start of logged region.
- * @end_addr: end of logged region.
+ * @kml: the KVM memory listener object
+ * @section: the memory section to sync the dirty bitmap with
*/
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
MemoryRegionSection *section)
@@ -493,13 +518,14 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
struct kvm_dirty_log d = {};
KVMSlot *mem;
hwaddr start_addr, size;
+ int ret = 0;
size = kvm_align_section(section, &start_addr);
if (size) {
mem = kvm_lookup_matching_slot(kml, start_addr, size);
if (!mem) {
/* We don't have a slot if we want to trap every access. */
- return 0;
+ goto out;
}
/* XXX bad kernel interface alert
@@ -516,20 +542,176 @@ static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
*/
size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
/*HOST_LONG_BITS*/ 64) / 8;
- d.dirty_bitmap = g_malloc0(size);
+ if (!mem->dirty_bmap) {
+ /* Allocate on the first log_sync, once and for all */
+ mem->dirty_bmap = g_malloc0(size);
+ }
+ d.dirty_bitmap = mem->dirty_bmap;
d.slot = mem->slot | (kml->as_id << 16);
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
DPRINTF("ioctl failed %d\n", errno);
- g_free(d.dirty_bitmap);
- return -1;
+ ret = -1;
+ goto out;
}
kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
- g_free(d.dirty_bitmap);
}
+out:
+ return ret;
+}
- return 0;
+/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
+#define KVM_CLEAR_LOG_SHIFT 6
+#define KVM_CLEAR_LOG_ALIGN (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
+#define KVM_CLEAR_LOG_MASK (-KVM_CLEAR_LOG_ALIGN)
+
+/**
+ * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
+ *
+ * NOTE: this will be a no-op if we haven't enabled manual dirty log
+ * protection in the host kernel because in that case this operation
+ * will be done within log_sync().
+ *
+ * @kml: the kvm memory listener
+ * @section: the memory range to clear dirty bitmap
+ */
+static int kvm_physical_log_clear(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
+{
+ KVMState *s = kvm_state;
+ struct kvm_clear_dirty_log d;
+ uint64_t start, end, bmap_start, start_delta, bmap_npages, size;
+ unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
+ KVMSlot *mem = NULL;
+ int ret, i;
+
+ if (!s->manual_dirty_log_protect) {
+ /* No need to do explicit clear */
+ return 0;
+ }
+
+ start = section->offset_within_address_space;
+ size = int128_get64(section->size);
+
+ if (!size) {
+ /* Nothing more we can do... */
+ return 0;
+ }
+
+ kvm_slots_lock(kml);
+
+ /* Find any possible slot that covers the section */
+ for (i = 0; i < s->nr_slots; i++) {
+ mem = &kml->slots[i];
+ if (mem->start_addr <= start &&
+ start + size <= mem->start_addr + mem->memory_size) {
+ break;
+ }
+ }
+
+    /*
+     * We should always find one memslot at this point; otherwise
+     * something is wrong in the upper layer.
+     */
+ assert(mem && i != s->nr_slots);
+
+    /*
+     * We need to extend either the start or the size or both to
+     * satisfy the KVM interface requirement. First, align the start
+     * address to 64 host pages.
+     */
+ bmap_start = (start - mem->start_addr) & KVM_CLEAR_LOG_MASK;
+ start_delta = start - mem->start_addr - bmap_start;
+ bmap_start /= psize;
+
+    /*
+     * The kernel interface has a restriction on the size too: either
+     *
+     * (1) the size is 64 host pages aligned (just like the start), or
+     * (2) the size fills up until the end of the KVM memslot.
+     */
+ bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
+ << KVM_CLEAR_LOG_SHIFT;
+ end = mem->memory_size / psize;
+ if (bmap_npages > end - bmap_start) {
+ bmap_npages = end - bmap_start;
+ }
+ start_delta /= psize;
+
+    /*
+     * Prepare the bitmap to clear dirty bits. Here we must guarantee
+     * that we won't clear any unknown dirty bits, otherwise we might
+     * accidentally clear bits that have not yet been synced from the
+     * kernel into QEMU's bitmap and lose track of the guest
+     * modifications to those pages (which can directly lead to guest
+     * data loss or a panic after migration).
+ *
+ * Layout of the KVMSlot.dirty_bmap:
+ *
+ * |<-------- bmap_npages -----------..>|
+ * [1]
+ * start_delta size
+ * |----------------|-------------|------------------|------------|
+ * ^ ^ ^ ^
+ * | | | |
+ * start bmap_start (start) end
+ * of memslot of memslot
+ *
+ * [1] bmap_npages can be aligned to either 64 pages or the end of slot
+ */
+
+ assert(bmap_start % BITS_PER_LONG == 0);
+ /* We should never do log_clear before log_sync */
+ assert(mem->dirty_bmap);
+ if (start_delta) {
+ /* Slow path - we need to manipulate a temp bitmap */
+ bmap_clear = bitmap_new(bmap_npages);
+ bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
+ bmap_start, start_delta + size / psize);
+        /*
+         * We need to clear the holes at the start because the caller
+         * did not ask for them; we extended the range only to achieve
+         * 64-page alignment.
+         */
+ bitmap_clear(bmap_clear, 0, start_delta);
+ d.dirty_bitmap = bmap_clear;
+ } else {
+ /* Fast path - start address aligns well with BITS_PER_LONG */
+ d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
+ }
+
+ d.first_page = bmap_start;
+    /* It should never overflow; assert if it somehow does */
+ assert(bmap_npages <= UINT32_MAX);
+ d.num_pages = bmap_npages;
+ d.slot = mem->slot | (kml->as_id << 16);
+
+ if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
+ ret = -errno;
+ error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
+ "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
+ __func__, d.slot, (uint64_t)d.first_page,
+ (uint32_t)d.num_pages, ret);
+ } else {
+ ret = 0;
+ trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);
+ }
+
+    /*
+     * After we have updated the remote dirty bitmap, we also update
+     * the cached bitmap for the memslot, so that if another user
+     * clears the same region we know not to clear it again on the
+     * remote, which would otherwise lose data as well.
+     */
+ bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
+ size / psize);
+ /* This handles the NULL case well */
+ g_free(bmap_clear);
+
+ kvm_slots_unlock(kml);
+
+ return ret;
}
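As a worked example of the alignment math above (hypothetical numbers, assuming 4K host pages and a memslot starting at GPA 0): a caller asks to clear [start = 0x3f000, start + size = 0x41000), and KVM_CLEAR_LOG_ALIGN is 4096 << 6 = 0x40000 bytes:

    bmap_start  = (0x3f000 - 0x0) & -0x40000;      /* 0x0  -> page 0       */
    start_delta = 0x3f000 - 0x0 - 0x0;             /* 0x3f000 -> 63 pages  */
    bmap_npages = DIV_ROUND_UP(0x2000 + 0x3f000,
                               0x40000) << 6;      /* 2 << 6 = 128 pages   */

Since start_delta is non-zero, the slow path above builds a temporary bitmap, clears its first 63 bits (the hole we created by aligning down), and the ioctl covers pages [0, 128) of the slot.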
static void kvm_coalesce_mmio_region(MemoryListener *listener,
@@ -791,16 +973,20 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
(start_addr - section->offset_within_address_space);
+ kvm_slots_lock(kml);
+
if (!add) {
mem = kvm_lookup_matching_slot(kml, start_addr, size);
if (!mem) {
- return;
+ goto out;
}
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
kvm_physical_sync_dirty_bitmap(kml, section);
}
/* unregister the slot */
+ g_free(mem->dirty_bmap);
+ mem->dirty_bmap = NULL;
mem->memory_size = 0;
mem->flags = 0;
err = kvm_set_user_memory_region(kml, mem, false);
@@ -809,7 +995,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
__func__, strerror(-err));
abort();
}
- return;
+ goto out;
}
/* register the new slot */
@@ -825,6 +1011,9 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml,
strerror(-err));
abort();
}
+
+out:
+ kvm_slots_unlock(kml);
}
static void kvm_region_add(MemoryListener *listener,
@@ -851,8 +1040,26 @@ static void kvm_log_sync(MemoryListener *listener,
KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
+ kvm_slots_lock(kml);
r = kvm_physical_sync_dirty_bitmap(kml, section);
+ kvm_slots_unlock(kml);
+ if (r < 0) {
+ abort();
+ }
+}
+
+static void kvm_log_clear(MemoryListener *listener,
+ MemoryRegionSection *section)
+{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+ int r;
+
+ r = kvm_physical_log_clear(kml, section);
if (r < 0) {
+ error_report_once("%s: kvm log clear failed: mr=%s "
+ "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
+ section->mr->name, section->offset_within_region,
+ int128_get64(section->size));
abort();
}
}
@@ -935,6 +1142,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
{
int i;
+ qemu_mutex_init(&kml->slots_lock);
kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
kml->as_id = as_id;
@@ -947,6 +1155,7 @@ void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
kml->listener.log_start = kvm_log_start;
kml->listener.log_stop = kvm_log_stop;
kml->listener.log_sync = kvm_log_sync;
+ kml->listener.log_clear = kvm_log_clear;
kml->listener.priority = 10;
memory_listener_register(&kml->listener, as);
@@ -1671,6 +1880,17 @@ static int kvm_init(MachineState *ms)
s->coalesced_pio = s->coalesced_mmio &&
kvm_check_extension(s, KVM_CAP_COALESCED_PIO);
+ s->manual_dirty_log_protect =
+ kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
+ if (s->manual_dirty_log_protect) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0, 1);
+ if (ret) {
+ warn_report("Trying to enable KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 "
+                    "but failed. Falling back to the legacy mode.");
+ s->manual_dirty_log_protect = false;
+ }
+ }
+
#ifdef KVM_CAP_VCPU_EVENTS
s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif
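For context, a minimal sketch of the two-phase kernel interaction that this capability enables (error handling omitted; slot, first and npages are illustrative placeholders for the values computed in the code above):

    /* Phase 1 (log_sync): fetch the dirty info. With the cap enabled,
     * KVM_GET_DIRTY_LOG no longer write-protects the pages, so the
     * guest keeps running undisturbed until we explicitly clear. */
    struct kvm_dirty_log log = { .slot = slot };
    log.dirty_bitmap = mem->dirty_bmap;
    kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &log);

    /* Phase 2 (log_clear): right before re-sending a range, re-protect
     * only that range so further guest writes are tracked again. */
    struct kvm_clear_dirty_log clear = {
        .slot         = slot,
        .first_page   = first,      /* 64-page aligned, see above */
        .num_pages    = npages,
        .dirty_bitmap = mem->dirty_bmap + BIT_WORD(first),
    };
    kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &clear);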
diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events
index 33c5b1b3af..4fb6e59d19 100644
--- a/accel/kvm/trace-events
+++ b/accel/kvm/trace-events
@@ -15,4 +15,5 @@ kvm_irqchip_release_virq(int virq) "virq %d"
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
+kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
diff --git a/exec.c b/exec.c
index 50ea9c5aaa..3e78de3b8f 100644
--- a/exec.c
+++ b/exec.c
@@ -1358,6 +1358,8 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
DirtyMemoryBlocks *blocks;
unsigned long end, page;
bool dirty = false;
+ RAMBlock *ramblock;
+ uint64_t mr_offset, mr_size;
if (length == 0) {
return false;
@@ -1369,6 +1371,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
rcu_read_lock();
blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+ ramblock = qemu_get_ram_block(start);
+ /* Range sanity check on the ramblock */
+ assert(start >= ramblock->offset &&
+ start + length <= ramblock->offset + ramblock->used_length);
while (page < end) {
unsigned long idx = page / DIRTY_MEMORY_BLOCK_SIZE;
@@ -1380,6 +1386,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
page += num;
}
+    /* Note: use the requested start, not the loop cursor, for the range */
+    mr_offset = (ram_addr_t)((start >> TARGET_PAGE_BITS) << TARGET_PAGE_BITS)
+                - ramblock->offset;
+    mr_size = (end - (start >> TARGET_PAGE_BITS)) << TARGET_PAGE_BITS;
+    memory_region_clear_dirty_bitmap(ramblock->mr, mr_offset, mr_size);
+
rcu_read_unlock();
if (dirty && tcg_enabled()) {
@@ -1390,9 +1400,10 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
}
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
- (ram_addr_t start, ram_addr_t length, unsigned client)
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client)
{
DirtyMemoryBlocks *blocks;
+ ram_addr_t start = memory_region_get_ram_addr(mr) + offset;
unsigned long align = 1UL << (TARGET_PAGE_BITS + BITS_PER_LEVEL);
ram_addr_t first = QEMU_ALIGN_DOWN(start, align);
ram_addr_t last = QEMU_ALIGN_UP(start + length, align);
@@ -1434,6 +1445,8 @@ DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
tlb_reset_dirty_range_all(start, length);
}
+ memory_region_clear_dirty_bitmap(mr, offset, length);
+
return snap;
}
diff --git a/include/exec/memory.h b/include/exec/memory.h
index 2c5cdffa31..bb0961ddb9 100644
--- a/include/exec/memory.h
+++ b/include/exec/memory.h
@@ -46,6 +46,8 @@
OBJECT_GET_CLASS(IOMMUMemoryRegionClass, (obj), \
TYPE_IOMMU_MEMORY_REGION)
+extern bool global_dirty_log;
+
typedef struct MemoryRegionOps MemoryRegionOps;
typedef struct MemoryRegionMmio MemoryRegionMmio;
@@ -414,6 +416,7 @@ struct MemoryListener {
void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section,
int old, int new);
void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+ void (*log_clear)(MemoryListener *listener, MemoryRegionSection *section);
void (*log_global_start)(MemoryListener *listener);
void (*log_global_stop)(MemoryListener *listener);
void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
@@ -1268,6 +1271,22 @@ void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
hwaddr size);
/**
+ * memory_region_clear_dirty_bitmap - clear dirty bitmap for memory range
+ *
+ * This function is called when the caller wants to clear the remote
+ * dirty bitmap of a memory range within the memory region. This can
+ * be used by e.g. KVM to manually clear the dirty log when
+ * KVM_CAP_MANUAL_DIRTY_LOG_PROTECT is supported by the host kernel.
+ *
+ * @mr: the memory region to clear the dirty log upon
+ * @start: start address offset within the memory region
+ * @len: length of the range whose dirty bitmap should be cleared
+ */
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len);
+
+/**
* memory_region_snapshot_and_clear_dirty: Get a snapshot of the dirty
* bitmap and clear it.
*
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index f96777bb99..b7b2e60ff6 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -51,8 +51,70 @@ struct RAMBlock {
unsigned long *unsentmap;
/* bitmap of already received pages in postcopy */
unsigned long *receivedmap;
+
+ /*
+     * Bitmap to track pending log-clears. When a bit is set, the
+     * corresponding memory chunk still needs a log-clear. Set this
+     * to non-NULL to enable postponing and splitting the clearing of
+     * the dirty bitmap on the remote node (e.g., KVM). The bitmap
+     * will only be set during a global dirty log sync.
+     *
+     * NOTE: this bitmap differs from the other bitmaps in that one
+     * bit can represent multiple guest pages (as decided by the
+     * `clear_bmap_shift' field below). On the destination side this
+     * should always be NULL, and `clear_bmap_shift' is meaningless.
+ */
+ unsigned long *clear_bmap;
+ uint8_t clear_bmap_shift;
};
+/**
+ * clear_bmap_size: calculate clear bitmap size
+ *
+ * @pages: number of guest pages
+ * @shift: guest page number shift
+ *
+ * Returns: number of bits for the clear bitmap
+ */
+static inline long clear_bmap_size(uint64_t pages, uint8_t shift)
+{
+ return DIV_ROUND_UP(pages, 1UL << shift);
+}
+
+/**
+ * clear_bmap_set: set clear bitmap for the page range
+ *
+ * @rb: the ramblock to operate on
+ * @start: the start page number
+ * @npages: number of pages to set in the bitmap
+ *
+ * Returns: None
+ */
+static inline void clear_bmap_set(RAMBlock *rb, uint64_t start,
+ uint64_t npages)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ bitmap_set_atomic(rb->clear_bmap, start >> shift,
+ clear_bmap_size(npages, shift));
+}
+
+/**
+ * clear_bmap_test_and_clear: test clear bitmap for the page, clear if set
+ *
+ * @rb: the ramblock to operate on
+ * @page: the page number to check
+ *
+ * Returns: true if the bit was set, false otherwise
+ */
+static inline bool clear_bmap_test_and_clear(RAMBlock *rb, uint64_t page)
+{
+ uint8_t shift = rb->clear_bmap_shift;
+
+ return bitmap_test_and_clear_atomic(rb->clear_bmap, page >> shift, 1);
+}
+
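A minimal sketch of how these helpers cooperate on the source side (assuming 4K target pages and the default shift of 18, so one clear_bmap bit covers a 1G chunk; rb, start_page, npages and page are illustrative):

    /* Allocation, done once per block in ram_list_init_bitmaps() below */
    uint64_t pages = rb->max_length >> TARGET_PAGE_BITS;
    rb->clear_bmap_shift = 18;
    rb->clear_bmap = bitmap_new(clear_bmap_size(pages, 18));

    /* A global dirty log sync marks whole chunks as log-clear pending */
    clear_bmap_set(rb, start_page, npages);

    /* The sender then clears each chunk lazily, exactly once, before
     * sending the first page from it (migration_bitmap_clear_dirty()) */
    if (clear_bmap_test_and_clear(rb, page)) {
        hwaddr size  = 1ULL << (TARGET_PAGE_BITS + rb->clear_bmap_shift);
        hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
        memory_region_clear_dirty_bitmap(rb->mr, start, size);
    }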
static inline bool offset_in_ramblock(RAMBlock *b, ram_addr_t offset)
{
return (b && b->host && offset < b->used_length) ? true : false;
@@ -349,8 +411,13 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
if (bitmap[k]) {
unsigned long temp = leul_to_cpu(bitmap[k]);
- atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset], temp);
atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+
+ if (global_dirty_log) {
+ atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+ temp);
+ }
+
if (tcg_enabled()) {
atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
}
@@ -367,6 +434,11 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
} else {
uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
+
+ if (!global_dirty_log) {
+ clients &= ~(1 << DIRTY_MEMORY_MIGRATION);
+ }
+
/*
* bitmap-traveling is faster than memory-traveling (for addr...)
* especially when most of the memory is not dirty.
@@ -394,7 +466,7 @@ bool cpu_physical_memory_test_and_clear_dirty(ram_addr_t start,
unsigned client);
DirtyBitmapSnapshot *cpu_physical_memory_snapshot_and_clear_dirty
- (ram_addr_t start, ram_addr_t length, unsigned client);
+ (MemoryRegion *mr, hwaddr offset, hwaddr length, unsigned client);
bool cpu_physical_memory_snapshot_get_dirty(DirtyBitmapSnapshot *snap,
ram_addr_t start,
@@ -409,6 +481,7 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
}
+/* Called within an RCU critical section */
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
ram_addr_t start,
@@ -432,8 +505,6 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
DIRTY_MEMORY_BLOCK_SIZE);
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
- rcu_read_lock();
-
src = atomic_rcu_read(
&ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION])->blocks;
@@ -454,7 +525,18 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
}
}
- rcu_read_unlock();
+ if (rb->clear_bmap) {
+        /*
+         * Postpone the dirty bitmap clear to the point right before
+         * we really send the pages; we also split the clearing into
+         * smaller chunks.
+         */
+ clear_bmap_set(rb, start >> TARGET_PAGE_BITS,
+ length >> TARGET_PAGE_BITS);
+ } else {
+        /* Slow path - still clear it in one huge chunk */
+ memory_region_clear_dirty_bitmap(rb->mr, start, length);
+ }
} else {
ram_addr_t offset = rb->offset;
diff --git a/include/qemu/bitmap.h b/include/qemu/bitmap.h
index 5c313346b9..82a1d2f41f 100644
--- a/include/qemu/bitmap.h
+++ b/include/qemu/bitmap.h
@@ -41,6 +41,10 @@
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_to_le(dst, src, nbits) Convert bitmap to little endian
* bitmap_from_le(dst, src, nbits) Convert bitmap from little endian
+ * bitmap_copy_with_src_offset(dst, src, offset, nbits)
+ * *dst = *src (with an offset into src)
+ * bitmap_copy_with_dst_offset(dst, src, offset, nbits)
+ * *dst = *src (with an offset into dst)
*/
/*
@@ -271,4 +275,9 @@ void bitmap_to_le(unsigned long *dst, const unsigned long *src,
void bitmap_from_le(unsigned long *dst, const unsigned long *src,
long nbits);
+void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long offset, unsigned long nbits);
+void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long shift, unsigned long nbits);
+
#endif /* BITMAP_H */
diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h
index f838412491..31df465fdc 100644
--- a/include/sysemu/kvm_int.h
+++ b/include/sysemu/kvm_int.h
@@ -21,10 +21,14 @@ typedef struct KVMSlot
int slot;
int flags;
int old_flags;
+ /* Dirty bitmap cache for the slot */
+ unsigned long *dirty_bmap;
} KVMSlot;
typedef struct KVMMemoryListener {
MemoryListener listener;
+ /* Protects the slots and all inside them */
+ QemuMutex slots_lock;
KVMSlot *slots;
int as_id;
} KVMMemoryListener;
diff --git a/memory.c b/memory.c
index 480f3d989b..beac26e173 100644
--- a/memory.c
+++ b/memory.c
@@ -38,7 +38,7 @@
static unsigned memory_region_transaction_depth;
static bool memory_region_update_pending;
static bool ioeventfd_update_pending;
-static bool global_dirty_log = false;
+bool global_dirty_log;
static QTAILQ_HEAD(, MemoryListener) memory_listeners
= QTAILQ_HEAD_INITIALIZER(memory_listeners);
@@ -2064,6 +2064,57 @@ static void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
}
}
+void memory_region_clear_dirty_bitmap(MemoryRegion *mr, hwaddr start,
+ hwaddr len)
+{
+ MemoryRegionSection mrs;
+ MemoryListener *listener;
+ AddressSpace *as;
+ FlatView *view;
+ FlatRange *fr;
+ hwaddr sec_start, sec_end, sec_size;
+
+ QTAILQ_FOREACH(listener, &memory_listeners, link) {
+ if (!listener->log_clear) {
+ continue;
+ }
+ as = listener->address_space;
+ view = address_space_get_flatview(as);
+ FOR_EACH_FLAT_RANGE(fr, view) {
+ if (!fr->dirty_log_mask || fr->mr != mr) {
+ /*
+                 * The clear-dirty-bitmap operation only applies to
+                 * ranges of the target region with dirty logging
+                 * enabled
+ */
+ continue;
+ }
+
+ mrs = section_from_flat_range(fr, view);
+
+ sec_start = MAX(mrs.offset_within_region, start);
+ sec_end = mrs.offset_within_region + int128_get64(mrs.size);
+ sec_end = MIN(sec_end, start + len);
+
+ if (sec_start >= sec_end) {
+ /*
+ * If this memory region section has no intersection
+ * with the requested range, skip.
+ */
+ continue;
+ }
+
+ /* Valid case; shrink the section if needed */
+ mrs.offset_within_address_space +=
+ sec_start - mrs.offset_within_region;
+ mrs.offset_within_region = sec_start;
+ sec_size = sec_end - sec_start;
+ mrs.size = int128_make64(sec_size);
+ listener->log_clear(listener, &mrs);
+ }
+ flatview_unref(view);
+ }
+}
+
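A quick numeric check of the intersection logic (hypothetical values): for a flat range covering region offsets [0x0, 0x100000) and a request of start = 0x80000, len = 0x100000:

    sec_start = MAX(0x0, 0x80000);                       /* = 0x80000  */
    sec_end   = MIN(0x0 + 0x100000, 0x80000 + 0x100000); /* = 0x100000 */
    sec_size  = sec_end - sec_start;                     /* = 0x80000  */

so the shrunk section handed to log_clear() covers only the overlapping half of the region.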
DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
hwaddr addr,
hwaddr size,
@@ -2071,8 +2122,7 @@ DirtyBitmapSnapshot *memory_region_snapshot_and_clear_dirty(MemoryRegion *mr,
{
assert(mr->ram_block);
memory_region_sync_dirty_bitmap(mr);
- return cpu_physical_memory_snapshot_and_clear_dirty(
- memory_region_get_ram_addr(mr) + addr, size, client);
+ return cpu_physical_memory_snapshot_and_clear_dirty(mr, addr, size, client);
}
bool memory_region_snapshot_get_dirty(MemoryRegion *mr, DirtyBitmapSnapshot *snap,
diff --git a/migration/migration.c b/migration/migration.c
index 2865ae3fa9..8a607fe1e2 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3362,6 +3362,8 @@ void migration_global_dump(Monitor *mon)
ms->send_section_footer ? "on" : "off");
monitor_printf(mon, "decompress-error-check: %s\n",
ms->decompress_error_check ? "on" : "off");
+ monitor_printf(mon, "clear-bitmap-shift: %u\n",
+ ms->clear_bitmap_shift);
}
#define DEFINE_PROP_MIG_CAP(name, x) \
@@ -3376,6 +3378,8 @@ static Property migration_properties[] = {
send_section_footer, true),
DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
decompress_error_check, true),
+ DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
+ clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
/* Migration parameters */
DEFINE_PROP_UINT8("x-compress-level", MigrationState,
diff --git a/migration/migration.h b/migration/migration.h
index 5e8f09c6db..1fdd7b21fd 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -26,6 +26,23 @@ struct PostcopyBlocktimeContext;
#define MIGRATION_RESUME_ACK_VALUE (1)
+/*
+ * 1<<6=64 pages -> 256K chunk when the page size is 4K. This keeps
+ * all chunks aligned to 64 pages, so the bitmaps are always aligned
+ * to an unsigned long.
+ */
+#define CLEAR_BITMAP_SHIFT_MIN 6
+/*
+ * 1<<18=256K pages -> 1G chunk when the page size is 4K. This is
+ * the default value used when none is specified.
+ */
+#define CLEAR_BITMAP_SHIFT_DEFAULT 18
+/*
+ * 1<<31=2G pages -> 8T chunk when the page size is 4K. This should
+ * be big enough so that we won't overflow easily.
+ */
+#define CLEAR_BITMAP_SHIFT_MAX 31
+
/* State for the incoming migration */
struct MigrationIncomingState {
QEMUFile *from_src_file;
@@ -232,6 +249,16 @@ struct MigrationState
* do not trigger spurious decompression errors.
*/
bool decompress_error_check;
+
+    /*
+     * This decides the size of the guest memory chunks used to
+     * track dirty bitmap clearing. A chunk is GUEST_PAGE_SIZE << N
+     * bytes. For example, N=0 means we clear the dirty bitmap for
+     * each page sent (1<<0=1 page); N=10 means we clear the dirty
+     * bitmap only once per 1<<10=1K contiguous guest pages (a 4M
+     * chunk with 4K pages).
+     */
+ uint8_t clear_bitmap_shift;
};
void migrate_set_state(int *state, int old_state, int new_state);
diff --git a/migration/ram.c b/migration/ram.c
index 908517fc2b..2b0774c2bf 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1291,15 +1291,15 @@ static void multifd_recv_sync_main(void)
trace_multifd_recv_sync_main_wait(p->id);
qemu_sem_wait(&multifd_recv_state->sem_sync);
+ }
+ for (i = 0; i < migrate_multifd_channels(); i++) {
+ MultiFDRecvParams *p = &multifd_recv_state->params[i];
+
qemu_mutex_lock(&p->mutex);
if (multifd_recv_state->packet_num < p->packet_num) {
multifd_recv_state->packet_num = p->packet_num;
}
qemu_mutex_unlock(&p->mutex);
- }
- for (i = 0; i < migrate_multifd_channels(); i++) {
- MultiFDRecvParams *p = &multifd_recv_state->params[i];
-
trace_multifd_recv_sync_main_signal(p->id);
qemu_sem_post(&p->sem_sync);
}
@@ -1585,25 +1585,30 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
TARGET_PAGE_SIZE);
+
+ /*
+ * Update the cache contents, so that it corresponds to the data
+ * sent, in all cases except where we skip the page.
+ */
+ if (!last_stage && encoded_len != 0) {
+ memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
+ /*
+ * In the case where we couldn't compress, ensure that the caller
+ * sends the data from the cache, since the guest might have
+ * changed the RAM since we copied it.
+ */
+ *current_data = prev_cached_page;
+ }
+
if (encoded_len == 0) {
trace_save_xbzrle_page_skipping();
return 0;
} else if (encoded_len == -1) {
trace_save_xbzrle_page_overflow();
xbzrle_counters.overflow++;
- /* update data in the cache */
- if (!last_stage) {
- memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
- *current_data = prev_cached_page;
- }
return -1;
}
- /* we need to update the data in the cache, in order to get the same data */
- if (!last_stage) {
- memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
- }
-
/* Send XBZRLE based compressed page */
bytes_xbzrle = save_page_header(rs, rs->f, block,
offset | RAM_SAVE_FLAG_XBZRLE);
@@ -1659,6 +1664,33 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
bool ret;
qemu_mutex_lock(&rs->bitmap_mutex);
+
+    /*
+     * Clear the dirty bitmap if needed. This _must_ happen before we
+     * send any of the pages in the chunk, to make sure we capture
+     * further page content changes the next time we sync the dirty
+     * log. So as long as we are going to send any page in the chunk,
+     * we clear the remote dirty bitmap for the whole chunk. Clearing
+     * it earlier is not a problem, but clearing it too late is.
+     */
+ if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
+ uint8_t shift = rb->clear_bmap_shift;
+ hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
+ hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
+
+        /*
+         * CLEAR_BITMAP_SHIFT_MIN should always guarantee this. It
+         * also simplifies things: the start address of each small
+         * chunk is then always aligned to 64 pages, so the bitmap is
+         * always aligned to an unsigned long. We could probably even
+         * remove this restriction, but it is kept for simplicity.
+         */
+ assert(shift >= 6);
+ trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
+ memory_region_clear_dirty_bitmap(rb->mr, start, size);
+ }
+
ret = test_and_clear_bit(page, rb->bmap);
if (ret) {
@@ -1669,6 +1701,7 @@ static inline bool migration_bitmap_clear_dirty(RAMState *rs,
return ret;
}
+/* Called within an RCU critical section */
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
ram_addr_t length)
{
@@ -2281,6 +2314,12 @@ static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
*/
pss->block = block;
pss->page = offset >> TARGET_PAGE_BITS;
+
+ /*
+ * This unqueued page would break the "one round" check, even is
+         * This unqueued page would break the "one round" check, even
+         * if it is really rare.
+ pss->complete_round = false;
}
return !!block;
@@ -2675,6 +2714,8 @@ static void ram_save_cleanup(void *opaque)
memory_global_dirty_log_stop();
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
+ g_free(block->clear_bmap);
+ block->clear_bmap = NULL;
g_free(block->bmap);
block->bmap = NULL;
g_free(block->unsentmap);
@@ -2763,8 +2804,7 @@ void ram_postcopy_migrated_memory_release(MigrationState *ms)
*
* @ms: current migration state
* @pds: state for postcopy
- * @start: RAMBlock starting page
- * @length: RAMBlock size
+ * @block: RAMBlock to discard
*/
static int postcopy_send_discard_bm_ram(MigrationState *ms,
PostcopyDiscardState *pds,
@@ -2963,7 +3003,7 @@ static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
}
/**
- * postcopy_chuck_hostpages: discrad any partially sent host page
+ * postcopy_chunk_hostpages: discard any partially sent host page
*
* Utility for the outgoing postcopy code.
*
@@ -3173,11 +3213,11 @@ static int ram_state_init(RAMState **rsp)
QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
/*
+ * Count the total number of pages used by ram blocks not including any
+ * gaps due to alignment or unplugs.
* This must match with the initial values of dirty bitmap.
- * Currently we initialize the dirty bitmap to all zeros so
- * here the total dirty page count is zero.
*/
- (*rsp)->migration_dirty_pages = 0;
+ (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
ram_state_reset(*rsp);
return 0;
@@ -3185,23 +3225,39 @@ static int ram_state_init(RAMState **rsp)
static void ram_list_init_bitmaps(void)
{
+ MigrationState *ms = migrate_get_current();
RAMBlock *block;
unsigned long pages;
+ uint8_t shift;
/* Skip setting bitmap if there is no RAM */
if (ram_bytes_total()) {
+ shift = ms->clear_bitmap_shift;
+ if (shift > CLEAR_BITMAP_SHIFT_MAX) {
+ error_report("clear_bitmap_shift (%u) too big, using "
+ "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
+ shift = CLEAR_BITMAP_SHIFT_MAX;
+ } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
+ error_report("clear_bitmap_shift (%u) too small, using "
+ "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
+ shift = CLEAR_BITMAP_SHIFT_MIN;
+ }
+
RAMBLOCK_FOREACH_NOT_IGNORED(block) {
pages = block->max_length >> TARGET_PAGE_BITS;
/*
* The initial dirty bitmap for migration must be set with all
* ones to make sure we'll migrate every guest RAM page to
* destination.
- * Here we didn't set RAMBlock.bmap simply because it is already
- * set in ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] in
- * ram_block_add, and that's where we'll sync the dirty bitmaps.
- * Here setting RAMBlock.bmap would be fine too but not necessary.
+                 * Here we set RAMBlock.bmap all to 1 because when
+                 * restarting a migration after a failed one, ram_list.
+                 * dirty_memory[DIRTY_MEMORY_MIGRATION] may not cover
+                 * the whole guest memory.
*/
block->bmap = bitmap_new(pages);
+ bitmap_set(block->bmap, 0, pages);
+ block->clear_bmap_shift = shift;
+ block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
if (migrate_postcopy_ram()) {
block->unsentmap = bitmap_new(pages);
bitmap_set(block->unsentmap, 0, pages);
@@ -3370,7 +3426,6 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
}
if (migrate_ignore_shared()) {
qemu_put_be64(f, block->mr->addr);
- qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
}
}
@@ -3466,8 +3521,8 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
*/
ram_control_after_iterate(f, RAM_CONTROL_ROUND);
- multifd_send_sync_main();
out:
+ multifd_send_sync_main();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
qemu_fflush(f);
ram_counters.transferred += 8;
@@ -4337,12 +4392,6 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
}
if (migrate_ignore_shared()) {
hwaddr addr = qemu_get_be64(f);
- bool ignored = qemu_get_byte(f);
- if (ignored != ramblock_is_ignored(block)) {
- error_report("RAM block %s should %s be migrated",
- id, ignored ? "" : "not");
- ret = -EINVAL;
- }
if (ramblock_is_ignored(block) &&
block->mr->addr != addr) {
error_report("Mismatched GPAs for block %s "
diff --git a/migration/savevm.c b/migration/savevm.c
index c0e557b4c2..79ed44d475 100644
--- a/migration/savevm.c
+++ b/migration/savevm.c
@@ -1863,7 +1863,6 @@ static void loadvm_postcopy_handle_run_bh(void *opaque)
}
trace_loadvm_postcopy_handle_run_cpu_sync();
- cpu_synchronize_all_post_init();
trace_loadvm_postcopy_handle_run_vmstart();
diff --git a/migration/trace-events b/migration/trace-events
index de2e136e57..d8e54c367a 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -79,8 +79,9 @@ get_queued_page(const char *block_name, uint64_t tmp_offset, unsigned long page_
get_queued_page_not_dirty(const char *block_name, uint64_t tmp_offset, unsigned long page_abs, int sent) "%s/0x%" PRIx64 " page_abs=0x%lx (sent=%d)"
migration_bitmap_sync_start(void) ""
migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64
+migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx"
migration_throttle(void) ""
-multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet number %" PRIu64 " pages %d flags 0x%x next packet size %d"
+multifd_recv(uint8_t id, uint64_t packet_num, uint32_t used, uint32_t flags, uint32_t next_packet_size) "channel %d packet_num %" PRIu64 " pages %d flags 0x%x next packet size %d"
multifd_recv_sync_main(long packet_num) "packet num %ld"
multifd_recv_sync_main_signal(uint8_t id) "channel %d"
multifd_recv_sync_main_wait(uint8_t id) "channel %d"
diff --git a/tests/Makefile.include b/tests/Makefile.include
index a983dd32da..fd7fdb8658 100644
--- a/tests/Makefile.include
+++ b/tests/Makefile.include
@@ -65,6 +65,7 @@ check-unit-y += tests/test-opts-visitor$(EXESUF)
check-unit-$(CONFIG_BLOCK) += tests/test-coroutine$(EXESUF)
check-unit-y += tests/test-visitor-serialization$(EXESUF)
check-unit-y += tests/test-iov$(EXESUF)
+check-unit-y += tests/test-bitmap$(EXESUF)
check-unit-$(CONFIG_BLOCK) += tests/test-aio$(EXESUF)
check-unit-$(CONFIG_BLOCK) += tests/test-aio-multithread$(EXESUF)
check-unit-$(CONFIG_BLOCK) += tests/test-throttle$(EXESUF)
@@ -538,6 +539,7 @@ tests/test-image-locking$(EXESUF): tests/test-image-locking.o $(test-block-obj-y
tests/test-thread-pool$(EXESUF): tests/test-thread-pool.o $(test-block-obj-y)
tests/test-iov$(EXESUF): tests/test-iov.o $(test-util-obj-y)
tests/test-hbitmap$(EXESUF): tests/test-hbitmap.o $(test-util-obj-y) $(test-crypto-obj-y)
+tests/test-bitmap$(EXESUF): tests/test-bitmap.o $(test-util-obj-y)
tests/test-x86-cpuid$(EXESUF): tests/test-x86-cpuid.o
tests/test-xbzrle$(EXESUF): tests/test-xbzrle.o migration/xbzrle.o migration/page_cache.o $(test-util-obj-y)
tests/test-cutils$(EXESUF): tests/test-cutils.o util/cutils.o $(test-util-obj-y)
diff --git a/tests/migration-test.c b/tests/migration-test.c
index b6434628e1..a4feb9545d 100644
--- a/tests/migration-test.c
+++ b/tests/migration-test.c
@@ -398,7 +398,8 @@ static char *migrate_get_socket_address(QTestState *who, const char *parameter)
return result;
}
-static long long migrate_get_parameter(QTestState *who, const char *parameter)
+static long long migrate_get_parameter_int(QTestState *who,
+ const char *parameter)
{
QDict *rsp;
long long result;
@@ -409,17 +410,17 @@ static long long migrate_get_parameter(QTestState *who, const char *parameter)
return result;
}
-static void migrate_check_parameter(QTestState *who, const char *parameter,
- long long value)
+static void migrate_check_parameter_int(QTestState *who, const char *parameter,
+ long long value)
{
long long result;
- result = migrate_get_parameter(who, parameter);
+ result = migrate_get_parameter_int(who, parameter);
g_assert_cmpint(result, ==, value);
}
-static void migrate_set_parameter(QTestState *who, const char *parameter,
- long long value)
+static void migrate_set_parameter_int(QTestState *who, const char *parameter,
+ long long value)
{
QDict *rsp;
@@ -429,7 +430,7 @@ static void migrate_set_parameter(QTestState *who, const char *parameter,
parameter, value);
g_assert(qdict_haskey(rsp, "return"));
qobject_unref(rsp);
- migrate_check_parameter(who, parameter, value);
+ migrate_check_parameter_int(who, parameter, value);
}
static void migrate_pause(QTestState *who)
@@ -681,7 +682,7 @@ static void deprecated_set_downtime(QTestState *who, const double value)
" 'arguments': { 'value': %f } }", value);
g_assert(qdict_haskey(rsp, "return"));
qobject_unref(rsp);
- migrate_check_parameter(who, "downtime-limit", value * 1000);
+ migrate_check_parameter_int(who, "downtime-limit", value * 1000);
}
static void deprecated_set_speed(QTestState *who, long long value)
@@ -692,7 +693,7 @@ static void deprecated_set_speed(QTestState *who, long long value)
"'arguments': { 'value': %lld } }", value);
g_assert(qdict_haskey(rsp, "return"));
qobject_unref(rsp);
- migrate_check_parameter(who, "max-bandwidth", value);
+ migrate_check_parameter_int(who, "max-bandwidth", value);
}
static void deprecated_set_cache_size(QTestState *who, long long value)
@@ -703,7 +704,7 @@ static void deprecated_set_cache_size(QTestState *who, long long value)
"'arguments': { 'value': %lld } }", value);
g_assert(qdict_haskey(rsp, "return"));
qobject_unref(rsp);
- migrate_check_parameter(who, "xbzrle-cache-size", value);
+ migrate_check_parameter_int(who, "xbzrle-cache-size", value);
}
static void test_deprecated(void)
@@ -738,8 +739,8 @@ static int migrate_postcopy_prepare(QTestState **from_ptr,
* quickly, but that it doesn't complete precopy even on a slow
* machine, so also set the downtime.
*/
- migrate_set_parameter(from, "max-bandwidth", 100000000);
- migrate_set_parameter(from, "downtime-limit", 1);
+ migrate_set_parameter_int(from, "max-bandwidth", 100000000);
+ migrate_set_parameter_int(from, "downtime-limit", 1);
/* Wait for the first serial output from the source */
wait_for_serial("src_serial");
@@ -790,7 +791,7 @@ static void test_postcopy_recovery(void)
}
/* Turn postcopy speed down, 4K/s is slow enough on any machines */
- migrate_set_parameter(from, "max-postcopy-bandwidth", 4096);
+ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 4096);
/* Now we start the postcopy */
migrate_postcopy_start(from, to);
@@ -831,7 +832,7 @@ static void test_postcopy_recovery(void)
g_free(uri);
/* Restore the postcopy bandwidth to unlimited */
- migrate_set_parameter(from, "max-postcopy-bandwidth", 0);
+ migrate_set_parameter_int(from, "max-postcopy-bandwidth", 0);
migrate_postcopy_complete(from, to);
}
@@ -877,9 +878,9 @@ static void test_precopy_unix(void)
* machine, so also set the downtime.
*/
/* 1 ms should make it not converge*/
- migrate_set_parameter(from, "downtime-limit", 1);
+ migrate_set_parameter_int(from, "downtime-limit", 1);
/* 1GB/s */
- migrate_set_parameter(from, "max-bandwidth", 1000000000);
+ migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
/* Wait for the first serial output from the source */
wait_for_serial("src_serial");
@@ -889,7 +890,7 @@ static void test_precopy_unix(void)
wait_for_migration_pass(from);
/* 300 ms should converge */
- migrate_set_parameter(from, "downtime-limit", 300);
+ migrate_set_parameter_int(from, "downtime-limit", 300);
if (!got_stop) {
qtest_qmp_eventwait(from, "STOP");
@@ -956,11 +957,11 @@ static void test_xbzrle(const char *uri)
* machine, so also set the downtime.
*/
/* 1 ms should make it not converge*/
- migrate_set_parameter(from, "downtime-limit", 1);
+ migrate_set_parameter_int(from, "downtime-limit", 1);
/* 1GB/s */
- migrate_set_parameter(from, "max-bandwidth", 1000000000);
+ migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
- migrate_set_parameter(from, "xbzrle-cache-size", 33554432);
+ migrate_set_parameter_int(from, "xbzrle-cache-size", 33554432);
migrate_set_capability(from, "xbzrle", "true");
migrate_set_capability(to, "xbzrle", "true");
@@ -972,7 +973,7 @@ static void test_xbzrle(const char *uri)
wait_for_migration_pass(from);
/* 300ms should converge */
- migrate_set_parameter(from, "downtime-limit", 300);
+ migrate_set_parameter_int(from, "downtime-limit", 300);
if (!got_stop) {
qtest_qmp_eventwait(from, "STOP");
@@ -1008,9 +1009,9 @@ static void test_precopy_tcp(void)
* machine, so also set the downtime.
*/
/* 1 ms should make it not converge*/
- migrate_set_parameter(from, "downtime-limit", 1);
+ migrate_set_parameter_int(from, "downtime-limit", 1);
/* 1GB/s */
- migrate_set_parameter(from, "max-bandwidth", 1000000000);
+ migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
/* Wait for the first serial output from the source */
wait_for_serial("src_serial");
@@ -1022,7 +1023,7 @@ static void test_precopy_tcp(void)
wait_for_migration_pass(from);
/* 300ms should converge */
- migrate_set_parameter(from, "downtime-limit", 300);
+ migrate_set_parameter_int(from, "downtime-limit", 300);
if (!got_stop) {
qtest_qmp_eventwait(from, "STOP");
@@ -1054,9 +1055,9 @@ static void test_migrate_fd_proto(void)
* machine, so also set the downtime.
*/
/* 1 ms should make it not converge */
- migrate_set_parameter(from, "downtime-limit", 1);
+ migrate_set_parameter_int(from, "downtime-limit", 1);
/* 1GB/s */
- migrate_set_parameter(from, "max-bandwidth", 1000000000);
+ migrate_set_parameter_int(from, "max-bandwidth", 1000000000);
/* Wait for the first serial output from the source */
wait_for_serial("src_serial");
@@ -1090,7 +1091,7 @@ static void test_migrate_fd_proto(void)
wait_for_migration_pass(from);
/* 300ms should converge */
- migrate_set_parameter(from, "downtime-limit", 300);
+ migrate_set_parameter_int(from, "downtime-limit", 300);
if (!got_stop) {
qtest_qmp_eventwait(from, "STOP");
diff --git a/tests/test-bitmap.c b/tests/test-bitmap.c
new file mode 100644
index 0000000000..cb7c5e462d
--- /dev/null
+++ b/tests/test-bitmap.c
@@ -0,0 +1,72 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * Bitmap.c unit-tests.
+ *
+ * Copyright (C) 2019, Red Hat, Inc.
+ *
+ * Author: Peter Xu <peterx@redhat.com>
+ */
+
+#include "qemu/osdep.h"
+#include <stdlib.h>
+#include "qemu/bitmap.h"
+
+#define BMAP_SIZE 1024
+
+static void check_bitmap_copy_with_offset(void)
+{
+ unsigned long *bmap1, *bmap2, *bmap3, total;
+
+ bmap1 = bitmap_new(BMAP_SIZE);
+ bmap2 = bitmap_new(BMAP_SIZE);
+ bmap3 = bitmap_new(BMAP_SIZE);
+
+ bmap1[0] = random();
+ bmap1[1] = random();
+ bmap1[2] = random();
+ bmap1[3] = random();
+ total = BITS_PER_LONG * 4;
+
+ /* Shift 115 bits into bmap2 */
+ bitmap_copy_with_dst_offset(bmap2, bmap1, 115, total);
+ /* Shift another 85 bits into bmap3 */
+ bitmap_copy_with_dst_offset(bmap3, bmap2, 85, total + 115);
+    /* Shift 200 bits back */
+ bitmap_copy_with_src_offset(bmap2, bmap3, 200, total);
+
+    g_assert_cmpmem(bmap1, total / BITS_PER_BYTE,
+                    bmap2, total / BITS_PER_BYTE);
+
+ bitmap_clear(bmap1, 0, BMAP_SIZE);
+    /* Set bits in bmap1 are [100, 245) */
+ bitmap_set(bmap1, 100, 145);
+
+    /* Set bits in bmap2 are [60, 205) */
+ bitmap_copy_with_src_offset(bmap2, bmap1, 40, 250);
+ g_assert_cmpint(find_first_bit(bmap2, 60), ==, 60);
+ g_assert_cmpint(find_next_zero_bit(bmap2, 205, 60), ==, 205);
+ g_assert(test_bit(205, bmap2) == 0);
+
+    /* Set bits in bmap3 are [135, 280) */
+ bitmap_copy_with_dst_offset(bmap3, bmap1, 35, 250);
+ g_assert_cmpint(find_first_bit(bmap3, 135), ==, 135);
+ g_assert_cmpint(find_next_zero_bit(bmap3, 280, 135), ==, 280);
+ g_assert(test_bit(280, bmap3) == 0);
+
+ g_free(bmap1);
+ g_free(bmap2);
+ g_free(bmap3);
+}
+
+int main(int argc, char **argv)
+{
+ g_test_init(&argc, &argv, NULL);
+
+ g_test_add_func("/bitmap/bitmap_copy_with_offset",
+ check_bitmap_copy_with_offset);
+
+ g_test_run();
+
+ return 0;
+}
diff --git a/util/bitmap.c b/util/bitmap.c
index cb618c65a5..1753ff7f5b 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -402,3 +402,88 @@ void bitmap_to_le(unsigned long *dst, const unsigned long *src,
{
bitmap_to_from_le(dst, src, nbits);
}
+
+/*
+ * Copy "src" bitmap with a positive offset and put it into the "dst"
+ * bitmap. The caller needs to make sure the bitmap size of "src"
+ * is bigger than (shift + nbits).
+ */
+void bitmap_copy_with_src_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long shift, unsigned long nbits)
+{
+ unsigned long left_mask, right_mask, last_mask;
+
+    /* First shift the src pointer to the first word to copy from */
+ src += BIT_WORD(shift);
+ shift %= BITS_PER_LONG;
+
+ if (!shift) {
+ /* Fast path */
+ bitmap_copy(dst, src, nbits);
+ return;
+ }
+
+ right_mask = (1ul << shift) - 1;
+ left_mask = ~right_mask;
+
+ while (nbits >= BITS_PER_LONG) {
+ *dst = (*src & left_mask) >> shift;
+ *dst |= (src[1] & right_mask) << (BITS_PER_LONG - shift);
+ dst++;
+ src++;
+ nbits -= BITS_PER_LONG;
+ }
+
+ if (nbits > BITS_PER_LONG - shift) {
+ *dst = (*src & left_mask) >> shift;
+ nbits -= BITS_PER_LONG - shift;
+ last_mask = (1ul << nbits) - 1;
+ *dst |= (src[1] & last_mask) << (BITS_PER_LONG - shift);
+ } else if (nbits) {
+ last_mask = (1ul << nbits) - 1;
+ *dst = (*src >> shift) & last_mask;
+ }
+}
+
+/*
+ * Copy "src" bitmap into the "dst" bitmap with an offset in the
+ * "dst". The caller needs to make sure the bitmap size of "dst" is
+ * bigger than (shift + nbits).
+ */
+void bitmap_copy_with_dst_offset(unsigned long *dst, const unsigned long *src,
+ unsigned long shift, unsigned long nbits)
+{
+ unsigned long left_mask, right_mask, last_mask;
+
+    /* First shift the dst pointer to the first word to copy to */
+ dst += BIT_WORD(shift);
+ shift %= BITS_PER_LONG;
+
+ if (!shift) {
+ /* Fast path */
+ bitmap_copy(dst, src, nbits);
+ return;
+ }
+
+ right_mask = (1ul << (BITS_PER_LONG - shift)) - 1;
+ left_mask = ~right_mask;
+
+ *dst &= (1ul << shift) - 1;
+ while (nbits >= BITS_PER_LONG) {
+ *dst |= (*src & right_mask) << shift;
+ dst[1] = (*src & left_mask) >> (BITS_PER_LONG - shift);
+ dst++;
+ src++;
+ nbits -= BITS_PER_LONG;
+ }
+
+ if (nbits > BITS_PER_LONG - shift) {
+ *dst |= (*src & right_mask) << shift;
+ nbits -= BITS_PER_LONG - shift;
+ last_mask = ((1ul << nbits) - 1) << (BITS_PER_LONG - shift);
+ dst[1] = (*src & last_mask) >> (BITS_PER_LONG - shift);
+ } else if (nbits) {
+ last_mask = (1ul << nbits) - 1;
+ *dst |= (*src & last_mask) << shift;
+ }
+}
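A short usage sketch of the two new helpers (illustrative values; find_first_bit() comes from qemu/bitops.h):

    unsigned long *src = bitmap_new(256), *dst = bitmap_new(256);

    bitmap_set(src, 70, 20);                      /* src bits [70, 90) */

    /* dst bit i = src bit (i + 64): word-aligned shift, fast path */
    bitmap_copy_with_src_offset(dst, src, 64, 128);
    assert(find_first_bit(dst, 128) == 6);        /* 70 - 64           */

    /* dst bit (i + 3) = src bit i: sub-word shift, slow path */
    bitmap_zero(dst, 256);
    bitmap_copy_with_dst_offset(dst, src, 3, 128);
    assert(find_first_bit(dst, 256) == 73);       /* 70 + 3            */

    g_free(src);
    g_free(dst);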
diff --git a/util/cutils.c b/util/cutils.c
index dfc605f1ef..fd591cadf0 100644
--- a/util/cutils.c
+++ b/util/cutils.c
@@ -756,11 +756,11 @@ int uleb128_encode_small(uint8_t *out, uint32_t n)
{
g_assert(n <= 0x3fff);
if (n < 0x80) {
- *out++ = n;
+ *out = n;
return 1;
} else {
*out++ = (n & 0x7f) | 0x80;
- *out++ = n >> 7;
+ *out = n >> 7;
return 2;
}
}
@@ -768,7 +768,7 @@ int uleb128_encode_small(uint8_t *out, uint32_t n)
int uleb128_decode_small(const uint8_t *in, uint32_t *n)
{
if (!(*in & 0x80)) {
- *n = *in++;
+ *n = *in;
return 1;
} else {
*n = *in++ & 0x7f;
@@ -776,7 +776,7 @@ int uleb128_decode_small(const uint8_t *in, uint32_t *n)
if (*in & 0x80) {
return -1;
}
- *n |= *in++ << 7;
+ *n |= *in << 7;
return 2;
}
}
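For reference, a quick check of the two-byte case that these fixes touch (n = 300, within the asserted 0x3fff limit):

    uint8_t buf[2];
    uint32_t v;

    /* 300 = 0b1_0010_1100: the low 7 bits 0x2c plus the continuation
     * bit give 0xac, and the remaining bits are 300 >> 7 = 2 */
    g_assert(uleb128_encode_small(buf, 300) == 2);
    g_assert(buf[0] == 0xac && buf[1] == 0x02);
    g_assert(uleb128_decode_small(buf, &v) == 2 && v == 300);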