author    | Peter Maydell <peter.maydell@linaro.org> | 2019-10-14 16:09:52 +0100
committer | Peter Maydell <peter.maydell@linaro.org> | 2019-10-14 16:09:52 +0100
commit    | c760cb77e511eb05094df67c1b30029a952efa35 (patch)
tree      | d57ed37e184c39091bc94f67a760e417b0fc2d3b /include
parent    | 22dbfdecc3c52228d3489da3fe81da92b21197bf (diff)
parent    | 9a85e4b8f672016adbf7b7d5beaab2a99b9b5615 (diff)
Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20191011a' into staging
Migration pull 2019-10-11
Mostly cleanups and minor fixes
[Note I'm seeing a hang on the aarch64 hosted x86-64 tcg migration
test in xbzrle; but I'm seeing that on current head as well]
# gpg: Signature made Fri 11 Oct 2019 20:14:31 BST
# gpg: using RSA key 45F5C71B4A0CB7FB977A9FA90516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>" [full]
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A 9FA9 0516 331E BC5B FDE7
* remotes/dgilbert/tags/pull-migration-20191011a: (21 commits)
migration: Support gtree migration
migration/multifd: pages->used would be cleared when attach to multifd_send_state
migration/multifd: initialize packet->magic/version once at setup stage
migration/multifd: use pages->allocated instead of the static max
migration/multifd: fix a typo in comment of multifd_recv_unfill_packet()
migration/postcopy: check PostcopyState before setting to POSTCOPY_INCOMING_RUNNING
migration/postcopy: rename postcopy_ram_enable_notify to postcopy_ram_incoming_setup
migration/postcopy: postpone setting PostcopyState to END
migration/postcopy: mis->have_listen_thread check will never be touched
migration: report SaveStateEntry id and name on failure
migration: pass in_postcopy instead of check state again
migration/postcopy: fix typo in mark_postcopy_blocktime_begin's comment
migration/postcopy: map large zero page in postcopy_ram_incoming_setup()
migration/postcopy: allocate tmp_page in setup stage
migration: Don't try and recover return path in non-postcopy
rcu: Use automatic rc_read unlock in core memory/exec code
migration: Use automatic rcu_read unlock in rdma.c
migration: Use automatic rcu_read unlock in ram.c
migration: Fix missing rcu_read_unlock
rcu: Add automatically released rcu_read_lock variants
...
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Diffstat (limited to 'include')
-rw-r--r-- | include/exec/ram_addr.h     | 138
-rw-r--r-- | include/migration/misc.h    |   1
-rw-r--r-- | include/migration/vmstate.h |  40
-rw-r--r-- | include/qemu/rcu.h          |  25
4 files changed, 131 insertions, 73 deletions
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index e96e621de5..ad158bb247 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -193,30 +193,29 @@ static inline bool cpu_physical_memory_get_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
-
-    blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+    WITH_RCU_READ_LOCK_GUARD() {
+        blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
+
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            unsigned long num = next - base;
+            unsigned long found = find_next_bit(blocks->blocks[idx],
+                                                num, offset);
+            if (found < num) {
+                dirty = true;
+                break;
+            }
 
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
-        unsigned long num = next - base;
-        unsigned long found = find_next_bit(blocks->blocks[idx], num, offset);
-        if (found < num) {
-            dirty = true;
-            break;
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
         }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
     }
-    rcu_read_unlock();
-
     return dirty;
 }
 
@@ -234,7 +233,7 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
@@ -256,8 +255,6 @@ static inline bool cpu_physical_memory_all_dirty(ram_addr_t start,
         base += DIRTY_MEMORY_BLOCK_SIZE;
     }
 
-    rcu_read_unlock();
-
     return dirty;
 }
 
@@ -309,13 +306,11 @@ static inline void cpu_physical_memory_set_dirty_flag(ram_addr_t addr,
     idx = page / DIRTY_MEMORY_BLOCK_SIZE;
     offset = page % DIRTY_MEMORY_BLOCK_SIZE;
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
 
     blocks = atomic_rcu_read(&ram_list.dirty_memory[client]);
 
     set_bit_atomic(offset, blocks->blocks[idx]);
-
-    rcu_read_unlock();
 }
 
 static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
@@ -334,39 +329,37 @@ static inline void cpu_physical_memory_set_dirty_range(ram_addr_t start,
     end = TARGET_PAGE_ALIGN(start + length) >> TARGET_PAGE_BITS;
     page = start >> TARGET_PAGE_BITS;
 
-    rcu_read_lock();
+    WITH_RCU_READ_LOCK_GUARD() {
+        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
+        }
 
-    for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-        blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i]);
-    }
+        idx = page / DIRTY_MEMORY_BLOCK_SIZE;
+        offset = page % DIRTY_MEMORY_BLOCK_SIZE;
+        base = page - offset;
+        while (page < end) {
+            unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
 
-    idx = page / DIRTY_MEMORY_BLOCK_SIZE;
-    offset = page % DIRTY_MEMORY_BLOCK_SIZE;
-    base = page - offset;
-    while (page < end) {
-        unsigned long next = MIN(end, base + DIRTY_MEMORY_BLOCK_SIZE);
+            if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
+                                  offset, next - page);
+            }
+            if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
+                bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
+                                  offset, next - page);
+            }
 
-        if (likely(mask & (1 << DIRTY_MEMORY_MIGRATION))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_MIGRATION]->blocks[idx],
-                              offset, next - page);
+            page = next;
+            idx++;
+            offset = 0;
+            base += DIRTY_MEMORY_BLOCK_SIZE;
         }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_VGA))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_VGA]->blocks[idx],
-                              offset, next - page);
-        }
-        if (unlikely(mask & (1 << DIRTY_MEMORY_CODE))) {
-            bitmap_set_atomic(blocks[DIRTY_MEMORY_CODE]->blocks[idx],
-                              offset, next - page);
-        }
-
-        page = next;
-        idx++;
-        offset = 0;
-        base += DIRTY_MEMORY_BLOCK_SIZE;
     }
-    rcu_read_unlock();
-
     xen_hvm_modified_memory(start, length);
 }
 
@@ -396,36 +389,35 @@ static inline void cpu_physical_memory_set_dirty_lebitmap(unsigned long *bitmap,
         offset = BIT_WORD((start >> TARGET_PAGE_BITS) % DIRTY_MEMORY_BLOCK_SIZE);
 
-        rcu_read_lock();
+        WITH_RCU_READ_LOCK_GUARD() {
+            for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
+                blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
+            }
 
-        for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
-            blocks[i] = atomic_rcu_read(&ram_list.dirty_memory[i])->blocks;
-        }
+            for (k = 0; k < nr; k++) {
+                if (bitmap[k]) {
+                    unsigned long temp = leul_to_cpu(bitmap[k]);
 
-        for (k = 0; k < nr; k++) {
-            if (bitmap[k]) {
-                unsigned long temp = leul_to_cpu(bitmap[k]);
+                    atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
 
-                atomic_or(&blocks[DIRTY_MEMORY_VGA][idx][offset], temp);
+                    if (global_dirty_log) {
+                        atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
+                                  temp);
+                    }
 
-                if (global_dirty_log) {
-                    atomic_or(&blocks[DIRTY_MEMORY_MIGRATION][idx][offset],
-                              temp);
+                    if (tcg_enabled()) {
+                        atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset],
+                                  temp);
+                    }
                 }
 
-                if (tcg_enabled()) {
-                    atomic_or(&blocks[DIRTY_MEMORY_CODE][idx][offset], temp);
+                if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
+                    offset = 0;
+                    idx++;
                 }
             }
-
-            if (++offset >= BITS_TO_LONGS(DIRTY_MEMORY_BLOCK_SIZE)) {
-                offset = 0;
-                idx++;
-            }
         }
-        rcu_read_unlock();
-
         xen_hvm_modified_memory(start, pages << TARGET_PAGE_BITS);
     } else {
         uint8_t clients = tcg_enabled() ? DIRTY_CLIENTS_ALL : DIRTY_CLIENTS_NOCODE;
diff --git a/include/migration/misc.h b/include/migration/misc.h
index b9d8e787af..d2762257aa 100644
--- a/include/migration/misc.h
+++ b/include/migration/misc.h
@@ -60,6 +60,7 @@ void migration_object_init(void);
 void migration_shutdown(void);
 void qemu_start_incoming_migration(const char *uri, Error **errp);
 bool migration_is_idle(void);
+bool migration_is_active(MigrationState *);
 void add_migration_state_change_notifier(Notifier *notify);
 void remove_migration_state_change_notifier(Notifier *notify);
 bool migration_in_setup(MigrationState *);
diff --git a/include/migration/vmstate.h b/include/migration/vmstate.h
index 1fbfd099dd..b9ee563aa4 100644
--- a/include/migration/vmstate.h
+++ b/include/migration/vmstate.h
@@ -224,6 +224,7 @@ extern const VMStateInfo vmstate_info_unused_buffer;
 extern const VMStateInfo vmstate_info_tmp;
 extern const VMStateInfo vmstate_info_bitmap;
 extern const VMStateInfo vmstate_info_qtailq;
+extern const VMStateInfo vmstate_info_gtree;
 
 #define type_check_2darray(t1,t2,n,m) ((t1(*)[n][m])0 - (t2*)0)
 /*
@@ -754,6 +755,45 @@ extern const VMStateInfo vmstate_info_qtailq;
     .start        = offsetof(_type, _next),                             \
 }
 
+/*
+ * For migrating a GTree whose key is a pointer to _key_type and the
+ * value, a pointer to _val_type
+ * The target tree must have been properly initialized
+ * _vmsd: Start address of the 2 element array containing the data vmsd
+ *        and the key vmsd, in that order
+ * _key_type: type of the key
+ * _val_type: type of the value
+ */
+#define VMSTATE_GTREE_V(_field, _state, _version, _vmsd,                \
+                        _key_type, _val_type)                           \
+{                                                                       \
+    .name         = (stringify(_field)),                                \
+    .version_id   = (_version),                                         \
+    .vmsd         = (_vmsd),                                            \
+    .info         = &vmstate_info_gtree,                                \
+    .start        = sizeof(_key_type),                                  \
+    .size         = sizeof(_val_type),                                  \
+    .offset       = offsetof(_state, _field),                           \
+}
+
+/*
+ * For migrating a GTree with direct key and the value a pointer
+ * to _val_type
+ * The target tree must have been properly initialized
+ * _vmsd: data vmsd
+ * _val_type: type of the value
+ */
+#define VMSTATE_GTREE_DIRECT_KEY_V(_field, _state, _version, _vmsd, _val_type) \
+{                                                                       \
+    .name         = (stringify(_field)),                                \
+    .version_id   = (_version),                                         \
+    .vmsd         = (_vmsd),                                            \
+    .info         = &vmstate_info_gtree,                                \
+    .start        = 0,                                                  \
+    .size         = sizeof(_val_type),                                  \
+    .offset       = offsetof(_state, _field),                           \
+}
+
 /* _f : field name
    _f_n : num of elements field_name
    _n : num of elements
diff --git a/include/qemu/rcu.h b/include/qemu/rcu.h
index 22876d1428..9c82683e37 100644
--- a/include/qemu/rcu.h
+++ b/include/qemu/rcu.h
@@ -154,6 +154,31 @@ extern void call_rcu1(struct rcu_head *head, RCUCBFunc *func);
       }),                                                               \
       (RCUCBFunc *)g_free);
 
+typedef void RCUReadAuto;
+static inline RCUReadAuto *rcu_read_auto_lock(void)
+{
+    rcu_read_lock();
+    /* Anything non-NULL causes the cleanup function to be called */
+    return (void *)(uintptr_t)0x1;
+}
+
+static inline void rcu_read_auto_unlock(RCUReadAuto *r)
+{
+    rcu_read_unlock();
+}
+
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(RCUReadAuto, rcu_read_auto_unlock)
+
+#define WITH_RCU_READ_LOCK_GUARD() \
+    WITH_RCU_READ_LOCK_GUARD_(_rcu_read_auto##__COUNTER__)
+
+#define WITH_RCU_READ_LOCK_GUARD_(var)                                  \
+    for (g_autoptr(RCUReadAuto) var = rcu_read_auto_lock();             \
+         (var); rcu_read_auto_unlock(var), (var) = NULL)
+
+#define RCU_READ_LOCK_GUARD() \
+    g_autoptr(RCUReadAuto) _rcu_read_auto __attribute__((unused)) = rcu_read_auto_lock()
+
 #ifdef __cplusplus
 }
 #endif
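
For orientation, a rough sketch of how the new gtree macros are meant to be wired into a VMStateDescription. The DemoKey/DemoVal/DemoState types and every name below are invented for this illustration; only VMSTATE_GTREE_V(), vmstate_info_gtree and the "data vmsd first, key vmsd second" array layout come from the patch itself.

/* Illustrative only: DemoKey, DemoVal and DemoState are made-up types. */
#include "qemu/osdep.h"
#include "migration/vmstate.h"

typedef struct DemoKey {
    uint32_t id;
} DemoKey;

typedef struct DemoVal {
    uint64_t data;
} DemoVal;

typedef struct DemoState {
    GTree *mappings;            /* DemoKey* -> DemoVal*, created before load */
} DemoState;

/* Two-element array: data (value) vmsd first, key vmsd second. */
static const VMStateDescription vmstate_demo_gtree[2] = {
    {
        .name = "demo-val",
        .version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT64(data, DemoVal),
            VMSTATE_END_OF_LIST()
        }
    },
    {
        .name = "demo-key",
        .version_id = 1,
        .fields = (VMStateField[]) {
            VMSTATE_UINT32(id, DemoKey),
            VMSTATE_END_OF_LIST()
        }
    }
};

static const VMStateDescription vmstate_demo = {
    .name = "demo-state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        /* _vmsd is the start address of the two-element array above. */
        VMSTATE_GTREE_V(mappings, DemoState, 1, vmstate_demo_gtree,
                        DemoKey, DemoVal),
        VMSTATE_END_OF_LIST()
    }
};

The pointer-key vs direct-key distinction is carried in the .start field: VMSTATE_GTREE_V() stores sizeof(_key_type) there, while VMSTATE_GTREE_DIRECT_KEY_V() stores 0.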
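
Likewise, a minimal sketch of the two RCU guard styles added to qemu/rcu.h, in the same shape as the ram_addr.h conversions above; counter_ptr and both functions are invented names, not code from this pull.

/* Illustrative only: counter_ptr and both functions are made-up names. */
#include "qemu/osdep.h"
#include "qemu/rcu.h"

static int *counter_ptr;    /* assume writers publish it with atomic_rcu_set() */

/* Whole-function critical section: unlocked automatically on every return path. */
static int read_counter_guarded(void)
{
    RCU_READ_LOCK_GUARD();
    int *p = atomic_rcu_read(&counter_ptr);

    if (!p) {
        return -1;          /* no explicit rcu_read_unlock() before the early return */
    }
    return *p;
}

/* Block-scoped critical section, as in the WITH_RCU_READ_LOCK_GUARD() hunks. */
static int read_counter_block(void)
{
    int val = -1;

    WITH_RCU_READ_LOCK_GUARD() {
        int *p = atomic_rcu_read(&counter_ptr);
        if (p) {
            val = *p;
        }
    }   /* rcu_read_unlock() runs here via the g_autoptr cleanup */

    return val;
}

Because the guards rely on g_autoptr() cleanup rather than a trailing unlock call, an early return or goto out of the guarded scope still releases the read lock, which is why the explicit rcu_read_unlock() calls disappear in the hunks above.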