| author | Stefan Hajnoczi <stefanha@redhat.com> | 2014-12-02 11:23:16 +0000 |
|---|---|---|
| committer | Paolo Bonzini <pbonzini@redhat.com> | 2015-06-05 17:10:00 +0200 |
| commit | d114875b9a1c21162f69a12d72f69a22e7bab376 (patch) | |
| tree | 4e7dc4d1ae0b17be4d51d4aa529ecdbc6fa63ff0 /include/exec/ram_addr.h | |
| parent | 36546e5b803f6e363906607307f27c489441fd15 (diff) | |
memory: use atomic ops for setting dirty memory bits
Use set_bit_atomic() and bitmap_set_atomic() so that multiple threads
can dirty memory without race conditions.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-Id: <1417519399-3166-4-git-send-email-stefanha@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
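The race this patch closes is the classic lost update on a read-modify-write: a plain `|=` on a bitmap word compiles into separate load, OR, and store steps, so two vCPU threads dirtying pages whose bits share a word can silently drop each other's updates. The sketch below contrasts the two, using the GCC/Clang `__atomic_fetch_or()` builtin as a stand-in for QEMU's own `set_bit_atomic()`/`atomic_or()` wrappers; the function names and the relaxed memory order here are illustrative assumptions, not QEMU's actual definitions.

```c
#define BITS_PER_LONG (sizeof(unsigned long) * 8)

/* Racy: load, OR, and store are separate steps, so two threads
 * setting different bits in the same word can lose an update. */
static void set_bit_plain(unsigned long nr, unsigned long *map)
{
    map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Safe: the fetch-OR is one indivisible read-modify-write, so
 * concurrent setters in the same word cannot clobber each other. */
static void set_bit_atomic_sketch(unsigned long nr, unsigned long *map)
{
    __atomic_fetch_or(&map[nr / BITS_PER_LONG],
                      1UL << (nr % BITS_PER_LONG), __ATOMIC_RELAXED);
}
```

Relaxed ordering is enough to fix the lost-update problem itself, since each dirty bit is independent; QEMU's wrappers may impose stronger ordering than this sketch does.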
Diffstat (limited to 'include/exec/ram_addr.h')
| -rw-r--r-- | include/exec/ram_addr.h | 16 |

1 file changed, 9 insertions(+), 7 deletions(-)
```diff
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 98110108b7..9f73076044 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -114,7 +114,7 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
                                                       unsigned client)
 {
     assert(client < DIRTY_MEMORY_NUM);
-    set_bit(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
+    set_bit_atomic(addr >> TARGET_PAGE_BITS, ram_list.dirty_memory[client]);
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -122,17 +122,18 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
                                                        uint8_t mask)
 {
     unsigned long end, page;
+    unsigned long **d = ram_list.dirty_memory;
 
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
     if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_MIGRATION], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_VGA], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_VGA], page, end - page);
     }
     if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-        bitmap_set(ram_list.dirty_memory[DIRTY_MEMORY_CODE], page, end - page);
+        bitmap_set_atomic(d[DIRTY_MEMORY_CODE], page, end - page);
     }
     xen_modified_memory(start, length);
 }
@@ -159,11 +160,12 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         for (k = 0; k < nr; k++) {
             if (bitmap[k]) {
                 unsigned long temp = leul_to_cpu(bitmap[k]);
+                unsigned long **d = ram_list.dirty_memory;
 
-                ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION][page + k] |= temp;
-                ram_list.dirty_memory[DIRTY_MEMORY_VGA][page + k] |= temp;
+                atomic_or(&d[DIRTY_MEMORY_MIGRATION][page + k], temp);
+                atomic_or(&d[DIRTY_MEMORY_VGA][page + k], temp);
                 if (tcg_enabled()) {
-                    ram_list.dirty_memory[DIRTY_MEMORY_CODE][page + k] |= temp;
+                    atomic_or(&d[DIRTY_MEMORY_CODE][page + k], temp);
                 }
             }
         }
```
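The ranged updates in `cpu_physical_memory_set_dirty_range()` apply the same idea one word at a time. Below is a sketch of what a `bitmap_set_atomic()`-style range setter can look like: it masks the partially covered first and last words and atomically ORs every affected word. This is an illustration of the technique, not QEMU's util/bitmap.c implementation; `bitmap_set_atomic_sketch` is a hypothetical name, and a production version would typically fast-path fully covered words.

```c
#define BITS_PER_LONG (sizeof(unsigned long) * 8)
#define BIT_WORD(nr)  ((nr) / BITS_PER_LONG)

/* Atomically set 'nr' consecutive bits starting at bit 'start'.
 * Each affected word is updated with one atomic fetch-OR, so
 * concurrent callers cannot lose each other's updates. */
static void bitmap_set_atomic_sketch(unsigned long *map,
                                     unsigned long start, long nr)
{
    unsigned long *p = map + BIT_WORD(start);
    long bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
    unsigned long mask_to_set = ~0UL << (start % BITS_PER_LONG);

    while (nr > 0) {
        if (nr < bits_to_set) {
            /* Last word is only partially covered: trim the mask. */
            mask_to_set &= ~0UL >> (bits_to_set - nr);
        }
        __atomic_fetch_or(p, mask_to_set, __ATOMIC_RELAXED);
        nr -= bits_to_set;
        bits_to_set = BITS_PER_LONG;
        mask_to_set = ~0UL;
        p++;
    }
}
```

With this shape, the patched `cpu_physical_memory_set_dirty_range()` needs just one such call per dirty-memory client, and threads dirtying overlapping ranges can no longer lose bits to one another.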