From aa777e297c8408ee5bebc4f6a2e00071224e4a64 Mon Sep 17 00:00:00 2001
From: "Dr. David Alan Gilbert"
Date: Wed, 3 Jan 2018 18:33:36 +0000
Subject: cpu_physical_memory_sync_dirty_bitmap: Another alignment fix

This code has an optimised, word aligned version, and a boring
unaligned version.  My commit f70d345 fixed one alignment issue, but
there's another.

The optimised version operates on 'longs', dealing with (typically) 64
pages at a time, replacing the whole long by a 0 and counting the bits.
If the RAMBlock is shorter than 64 pages (one long's worth of bitmap
bits), that long can contain bits representing two different RAMBlocks,
but the code will update the bmap belonging to the 1st RAMBlock only,
while having updated the total dirty page count for both.

This probably didn't matter prior to 6b6712ef, which split the dirty
bitmap up by RAMBlock, but now that the bitmaps are kept per RAMBlock
we end up with a count that doesn't match the state in the bitmaps.

Symptom:
  Migration showing a few dirty pages left to be sent constantly
  Seen on aarch64 and x86 with x86+ovmf

Signed-off-by: Dr. David Alan Gilbert
Reported-by: Wei Huang
Fixes: 6b6712efccd383b48a909bee0b29e079a57601ec
Signed-off-by: Paolo Bonzini
---
 include/exec/ram_addr.h | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 6cbc02aa0f..7633ef6342 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -391,9 +391,10 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(RAMBlock *rb,
     uint64_t num_dirty = 0;
     unsigned long *dest = rb->bmap;
 
-    /* start address is aligned at the start of a word? */
+    /* start address and length is aligned at the start of a word? */
     if (((word * BITS_PER_LONG) << TARGET_PAGE_BITS) ==
-        (start + rb->offset)) {
+         (start + rb->offset) &&
+        !(length & ((BITS_PER_LONG << TARGET_PAGE_BITS) - 1))) {
         int k;
         int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
         unsigned long * const *src;
--
cgit v1.2.3
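
Editor's note (not part of the patch): the sketch below is a minimal standalone
illustration of the alignment condition the patch adds.  It assumes
BITS_PER_LONG == 64 and TARGET_PAGE_BITS == 12, as on a typical x86_64
host/guest; real QEMU derives both from its build configuration, and the helper
name can_use_word_aligned_path() is hypothetical, not a QEMU function.

/*
 * Minimal sketch: when may the word-optimised path of the dirty-bitmap
 * sync be taken?  Only when the region both starts on a bitmap-word
 * boundary and is a whole number of words long; otherwise the last
 * long could also carry bits of a neighbouring RAMBlock, and clearing
 * the whole long would over-count dirty pages for this block.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BITS_PER_LONG    64          /* assumed, see note above */
#define TARGET_PAGE_BITS 12          /* assumed 4 KiB pages     */

/* One 'long' of the dirty bitmap covers this many guest-physical bytes. */
#define BYTES_PER_LONG   ((uint64_t)BITS_PER_LONG << TARGET_PAGE_BITS)

static bool can_use_word_aligned_path(uint64_t start_gpa, uint64_t length)
{
    return (start_gpa % BYTES_PER_LONG) == 0 &&        /* word-aligned start  */
           (length & (BYTES_PER_LONG - 1)) == 0;       /* whole words of pages */
}

int main(void)
{
    /* 64 pages = exactly one long of bitmap: fast path is safe (prints 1). */
    printf("%d\n", can_use_word_aligned_path(0, 64 * 4096));
    /* 68 pages: trailing partial long, must take the slow path (prints 0). */
    printf("%d\n", can_use_word_aligned_path(0, 68 * 4096));
    return 0;
}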