author     Juan Quintela <quintela@redhat.com>   2012-06-28 15:31:37 +0200
committer  Juan Quintela <quintela@redhat.com>   2012-07-20 08:19:27 +0200
commit     16310a3cca7320edb9341c976f7819de0a8c27e0
tree       6caba265f6f9cf0462522f0179adb40724ae7bf9
parent     d1315aac6e4df1f472a6f87ef6e310b8c109f498
savevm: split save_live into stage2 and stage3
We split it into two functions: foo_live_iterate and foo_live_complete.
At this point we only remove the bits that are for the other stage;
functionally, this is equivalent to the previous code.
Signed-off-by: Juan Quintela <quintela@redhat.com>
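
To make the split concrete, this is roughly what a device's handlers look like against the post-patch SaveVMHandlers from vmstate.h. A minimal sketch: the Foo device and its foo_* helpers are hypothetical and only illustrate the callback contract; they are not part of this patch, and error handling is omitted.

/* Hypothetical device state; not part of this patch. */
typedef struct FooState FooState;

static int foo_save_iterate(QEMUFile *f, void *opaque)
{
    FooState *s = opaque;

    /* Former "stage 2" body: send data while the rate limiter allows. */
    while (qemu_file_rate_limit(f) == 0 && foo_has_dirty_data(s)) {
        foo_send_one_chunk(f, s);            /* hypothetical helper */
    }
    /* Non-zero return means "small enough to finish within downtime". */
    return foo_remaining_bytes(s) <= foo_downtime_budget(s);
}

static int foo_save_complete(QEMUFile *f, void *opaque)
{
    FooState *s = opaque;

    /* Former "stage 3" body: the guest is stopped, so flush everything
     * regardless of rate limiting. */
    while (foo_has_dirty_data(s)) {
        foo_send_one_chunk(f, s);
    }
    return 0;
}

static SaveVMHandlers savevm_foo_handlers = {
    .save_live_setup    = foo_save_setup,     /* unchanged by this patch */
    .save_live_iterate  = foo_save_iterate,   /* was: stage 2 of save_live */
    .save_live_complete = foo_save_complete,  /* was: stage 3 of save_live */
    .load_state         = foo_load_state,
};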
-rw-r--r--  arch_init.c        |  72
-rw-r--r--  block-migration.c  |  99
-rw-r--r--  savevm.c           |  10
-rw-r--r--  vmstate.h          |   3

4 files changed, 122 insertions(+), 62 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 2a4903c4fa..7e04ea5e77 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -374,12 +374,13 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     return 0;
 }
 
-static int ram_save_live(QEMUFile *f, int stage, void *opaque)
+static int ram_save_iterate(QEMUFile *f, void *opaque)
 {
     uint64_t bytes_transferred_last;
     double bwidth = 0;
     int ret;
     int i;
+    uint64_t expected_time;
 
     memory_global_sync_dirty_bitmap(get_system_memory());
 
@@ -424,28 +425,64 @@ static int ram_save_live(QEMUFile *f, int stage, void *opaque)
         bwidth = 0.000001;
     }
 
-    /* try transferring iterative blocks of memory */
-    if (stage == 3) {
-        int bytes_sent;
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
+    expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+
+    DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
+            expected_time, migrate_max_downtime());
+
+    return expected_time <= migrate_max_downtime();
+}
+
+static int ram_save_complete(QEMUFile *f, void *opaque)
+{
+    double bwidth = 0;
+    int ret;
+    int i;
+    int bytes_sent;
 
-        /* flush all remaining blocks regardless of rate limiting */
-        while ((bytes_sent = ram_save_block(f)) != 0) {
-            bytes_transferred += bytes_sent;
+    memory_global_sync_dirty_bitmap(get_system_memory());
+
+    bwidth = qemu_get_clock_ns(rt_clock);
+
+    i = 0;
+    while ((ret = qemu_file_rate_limit(f)) == 0) {
+        bytes_sent = ram_save_block(f);
+        bytes_transferred += bytes_sent;
+        if (bytes_sent == 0) { /* no more blocks */
+            break;
         }
-        memory_global_dirty_log_stop();
+        /* we want to check in the 1st loop, just in case it was the 1st time
+           and we had to sync the dirty bitmap.
+           qemu_get_clock_ns() is a bit expensive, so we only check each some
+           iterations
+        */
+        if ((i & 63) == 0) {
+            uint64_t t1 = (qemu_get_clock_ns(rt_clock) - bwidth) / 1000000;
+            if (t1 > MAX_WAIT) {
+                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
+                        t1, i);
+                break;
+            }
+        }
+        i++;
     }
 
-    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-
-    if (stage == 2) {
-        uint64_t expected_time;
-        expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
+    if (ret < 0) {
+        return ret;
+    }
 
-        DPRINTF("ram_save_live: expected(%" PRIu64 ") <= max(%" PRIu64 ")?\n",
-                expected_time, migrate_max_downtime());
+    /* try transferring iterative blocks of memory */
 
-        return expected_time <= migrate_max_downtime();
+    /* flush all remaining blocks regardless of rate limiting */
+    while ((bytes_sent = ram_save_block(f)) != 0) {
+        bytes_transferred += bytes_sent;
     }
+    memory_global_dirty_log_stop();
+
+    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+
     return 0;
 }
 
@@ -578,7 +615,8 @@ done:
 
 SaveVMHandlers savevm_ram_handlers = {
     .save_live_setup = ram_save_setup,
-    .save_live_state = ram_save_live,
+    .save_live_iterate = ram_save_iterate,
+    .save_live_complete = ram_save_complete,
     .load_state = ram_load,
     .cancel = ram_migration_cancel,
 };
diff --git a/block-migration.c b/block-migration.c
index fc3d1f46c6..7def8ab197 100644
--- a/block-migration.c
+++ b/block-migration.c
@@ -568,12 +568,12 @@ static int block_save_setup(QEMUFile *f, void *opaque)
     return 0;
 }
 
-static int block_save_live(QEMUFile *f, int stage, void *opaque)
+static int block_save_iterate(QEMUFile *f, void *opaque)
 {
     int ret;
 
-    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
-            stage, block_mig_state.submitted, block_mig_state.transferred);
+    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
 
     flush_blks(f);
 
@@ -585,56 +585,76 @@ static int block_save_live(QEMUFile *f, int stage, void *opaque)
 
     blk_mig_reset_dirty_cursor();
 
-    if (stage == 2) {
-        /* control the rate of transfer */
-        while ((block_mig_state.submitted +
-                block_mig_state.read_done) * BLOCK_SIZE <
-               qemu_file_get_rate_limit(f)) {
-            if (block_mig_state.bulk_completed == 0) {
-                /* first finish the bulk phase */
-                if (blk_mig_save_bulked_block(f) == 0) {
-                    /* finished saving bulk on all devices */
-                    block_mig_state.bulk_completed = 1;
-                }
-            } else {
-                if (blk_mig_save_dirty_block(f, 1) == 0) {
-                    /* no more dirty blocks */
-                    break;
+    /* control the rate of transfer */
+    while ((block_mig_state.submitted +
+            block_mig_state.read_done) * BLOCK_SIZE <
+           qemu_file_get_rate_limit(f)) {
+        if (block_mig_state.bulk_completed == 0) {
+            /* first finish the bulk phase */
+            if (blk_mig_save_bulked_block(f) == 0) {
+                /* finished saving bulk on all devices */
+                block_mig_state.bulk_completed = 1;
+            }
+        } else {
+            if (blk_mig_save_dirty_block(f, 1) == 0) {
+                /* no more dirty blocks */
+                break;
             }
         }
+    }
 
-        flush_blks(f);
+    flush_blks(f);
 
-        ret = qemu_file_get_error(f);
-        if (ret) {
-            blk_mig_cleanup();
-            return ret;
-        }
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        blk_mig_cleanup();
+        return ret;
     }
 
-    if (stage == 3) {
-        /* we know for sure that save bulk is completed and
-           all async read completed */
-        assert(block_mig_state.submitted == 0);
+    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
+
+    return is_stage2_completed();
+}
+
+static int block_save_complete(QEMUFile *f, void *opaque)
+{
+    int ret;
+
+    DPRINTF("Enter save live complete submitted %d transferred %d\n",
+            block_mig_state.submitted, block_mig_state.transferred);
+
+    flush_blks(f);
 
-        while (blk_mig_save_dirty_block(f, 0) != 0);
+    ret = qemu_file_get_error(f);
+    if (ret) {
         blk_mig_cleanup();
+        return ret;
+    }
 
-        /* report completion */
-        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
+    blk_mig_reset_dirty_cursor();
 
-        ret = qemu_file_get_error(f);
-        if (ret) {
-            return ret;
-        }
+    /* we know for sure that save bulk is completed and
+       all async read completed */
+    assert(block_mig_state.submitted == 0);
+
+    while (blk_mig_save_dirty_block(f, 0) != 0) {
+        /* Do nothing */
+    }
+    blk_mig_cleanup();
+
+    /* report completion */
+    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);
 
-        DPRINTF("Block migration completed\n");
+    ret = qemu_file_get_error(f);
+    if (ret) {
+        return ret;
     }
 
+    DPRINTF("Block migration completed\n");
+
     qemu_put_be64(f, BLK_MIG_FLAG_EOS);
 
-    return ((stage == 2) && is_stage2_completed());
+    return 0;
 }
 
 static int block_load(QEMUFile *f, void *opaque, int version_id)
@@ -731,7 +751,8 @@ static bool block_is_active(void *opaque)
 SaveVMHandlers savevm_block_handlers = {
     .set_params = block_set_params,
     .save_live_setup = block_save_setup,
-    .save_live_state = block_save_live,
+    .save_live_iterate = block_save_iterate,
+    .save_live_complete = block_save_complete,
     .load_state = block_load,
     .cancel = block_migration_cancel,
     .is_active = block_is_active,
diff --git a/savevm.c b/savevm.c
--- a/savevm.c
+++ b/savevm.c
@@ -1236,7 +1236,7 @@ int register_savevm_live(DeviceState *dev,
     se->vmsd = NULL;
     se->no_migrate = 0;
     /* if this is a live_savem then set is_ram */
-    if (ops->save_live_state != NULL) {
+    if (ops->save_live_setup != NULL) {
         se->is_ram = 1;
     }
 
@@ -1620,7 +1620,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
     int ret = 1;
 
    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if (!se->ops || !se->ops->save_live_state) {
+        if (!se->ops || !se->ops->save_live_iterate) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
@@ -1636,7 +1636,7 @@ int qemu_savevm_state_iterate(QEMUFile *f)
        qemu_put_byte(f, QEMU_VM_SECTION_PART);
        qemu_put_be32(f, se->section_id);
 
-        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_PART, se->opaque);
+        ret = se->ops->save_live_iterate(f, se->opaque);
        trace_savevm_section_end(se->section_id);
 
        if (ret <= 0) {
@@ -1665,7 +1665,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
    cpu_synchronize_all_states();
 
    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
-        if (!se->ops || !se->ops->save_live_state) {
+        if (!se->ops || !se->ops->save_live_complete) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
@@ -1678,7 +1678,7 @@ int qemu_savevm_state_complete(QEMUFile *f)
        qemu_put_byte(f, QEMU_VM_SECTION_END);
        qemu_put_be32(f, se->section_id);
 
-        ret = se->ops->save_live_state(f, QEMU_VM_SECTION_END, se->opaque);
+        ret = se->ops->save_live_complete(f, se->opaque);
        trace_savevm_section_end(se->section_id);
        if (ret < 0) {
            return ret;
diff --git a/vmstate.h b/vmstate.h
--- a/vmstate.h
+++ b/vmstate.h
@@ -33,7 +33,8 @@ typedef struct SaveVMHandlers {
     void (*set_params)(const MigrationParams *params, void * opaque);
     SaveStateHandler *save_state;
     int (*save_live_setup)(QEMUFile *f, void *opaque);
-    int (*save_live_state)(QEMUFile *f, int stage, void *opaque);
+    int (*save_live_iterate)(QEMUFile *f, void *opaque);
+    int (*save_live_complete)(QEMUFile *f, void *opaque);
     void (*cancel)(void *opaque);
     LoadStateHandler *load_state;
     bool (*is_active)(void *opaque);
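
A detail that is easy to miss in the savevm.c hunks above is that the two new callbacks keep different return conventions. A condensed sketch, not the literal QEMU loop (se, f, and the section markers are taken from the hunks; surrounding declarations and error paths are trimmed):

/* Iterative phase, one section (QEMU_VM_SECTION_PART). */
qemu_put_byte(f, QEMU_VM_SECTION_PART);
qemu_put_be32(f, se->section_id);
ret = se->ops->save_live_iterate(f, se->opaque);
/* ret > 0: converged, ready for the completion stage;
 * ret == 0: more passes needed; ret < 0: error. */

/* Completion phase, one section (QEMU_VM_SECTION_END). */
qemu_put_byte(f, QEMU_VM_SECTION_END);
qemu_put_be32(f, se->section_id);
ret = se->ops->save_live_complete(f, se->opaque);
/* ret < 0: error; anything else: section fully saved. */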