author     Chao Fan <fanc.fnst@cn.fujitsu.com>      2017-03-14 09:55:07 +0800
committer  Juan Quintela <quintela@redhat.com>      2017-03-16 08:55:56 +0100
commit     1ffb5dfd35888cd9de78cc97d3e3e3cb1f3c4887 (patch)
tree       17af5040ece1efd2a522516066a5c8591b35e26e /include/exec/ram_addr.h
parent     1883ff34b540daacae948f493b0ba525edf5f642 (diff)
Change the method to calculate dirty-pages-rate
In function cpu_physical_memory_sync_dirty_bitmap, file include/exec/ram_addr.h:

    if (src[idx][offset]) {
        unsigned long bits = atomic_xchg(&src[idx][offset], 0);
        unsigned long new_dirty;
        new_dirty = ~dest[k];
        dest[k] |= bits;
        new_dirty &= bits;
        num_dirty += ctpopl(new_dirty);
    }

After this code runs, only the pages that are dirty in
dirty_memory[DIRTY_MEMORY_MIGRATION] but not yet set in the destination
bitmap (dest) are counted.

For example, when ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION] = 0b00001111
and atomic_rcu_read(&migration_bitmap_rcu)->bmap = 0b00000011, new_dirty is
0b00001100 and the function returns 2 rather than the expected 4. All pages
marked dirty in dirty_memory[DIRTY_MEMORY_MIGRATION] since the last sync are
newly dirtied, so they should all be counted for the dirty-pages-rate
calculation as well.

Signed-off-by: Chao Fan <fanc.fnst@cn.fujitsu.com>
Signed-off-by: Li Zhijian <lizhijian@cn.fujitsu.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
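To see the arithmetic concretely, here is a minimal standalone sketch, not
part of the patch: it uses __builtin_popcountl in place of QEMU's ctpopl and
single-word bitmaps with the same values as the example above, purely for
illustration.

    /* Standalone illustration of the counting mismatch described above. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long dest = 0x3; /* migration bitmap: pages 0-1 already dirty */
        unsigned long bits = 0xf; /* DIRTY_MEMORY_MIGRATION: pages 0-3 dirty   */

        /* Pages that become newly set in dest by this sync. */
        unsigned long new_dirty = ~dest & bits;

        /* Old count: only pages not previously in dest -> prints 2. */
        printf("num_dirty        = %d\n", __builtin_popcountl(new_dirty));

        /* Count added by the patch: every page reported dirty -> prints 4. */
        printf("real_dirty_pages = %d\n", __builtin_popcountl(bits));

        return 0;
    }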
Diffstat (limited to 'include/exec/ram_addr.h')
-rw-r--r--  include/exec/ram_addr.h  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index cd432e73ae..b05dc84ab9 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -355,7 +355,8 @@ static inline void cpu_physical_memory_clear_dirty_range(ram_addr_t start,
static inline
uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
ram_addr_t start,
- ram_addr_t length)
+ ram_addr_t length,
+ int64_t *real_dirty_pages)
{
ram_addr_t addr;
unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
@@ -379,6 +380,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
if (src[idx][offset]) {
unsigned long bits = atomic_xchg(&src[idx][offset], 0);
unsigned long new_dirty;
+ *real_dirty_pages += ctpopl(bits);
new_dirty = ~dest[k];
dest[k] |= bits;
new_dirty &= bits;
@@ -398,6 +400,7 @@ uint64_t cpu_physical_memory_sync_dirty_bitmap(unsigned long *dest,
start + addr,
TARGET_PAGE_SIZE,
DIRTY_MEMORY_MIGRATION)) {
+ *real_dirty_pages += 1;
long k = (start + addr) >> TARGET_PAGE_BITS;
if (!test_and_set_bit(k, dest)) {
num_dirty++;