-rw-r--r-- | arch_init.c      | 38
-rw-r--r-- | hmp.c            |  6
-rw-r--r-- | migration.c      |  6
-rw-r--r-- | migration.h      |  5
-rw-r--r-- | qapi-schema.json | 11
-rw-r--r-- | qmp-commands.hx  | 27
6 files changed, 88 insertions(+), 5 deletions(-)
diff --git a/arch_init.c b/arch_init.c
index 7fbfe91ad5..7ddbd7fa11 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -201,6 +201,40 @@ int64_t xbzrle_cache_resize(int64_t new_size)
     return pow2floor(new_size);
 }
 
+/* accounting for migration statistics */
+typedef struct AccountingInfo {
+    uint64_t dup_pages;
+    uint64_t norm_pages;
+    uint64_t iterations;
+} AccountingInfo;
+
+static AccountingInfo acct_info;
+
+static void acct_clear(void)
+{
+    memset(&acct_info, 0, sizeof(acct_info));
+}
+
+uint64_t dup_mig_bytes_transferred(void)
+{
+    return acct_info.dup_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t dup_mig_pages_transferred(void)
+{
+    return acct_info.dup_pages;
+}
+
+uint64_t norm_mig_bytes_transferred(void)
+{
+    return acct_info.norm_pages * TARGET_PAGE_SIZE;
+}
+
+uint64_t norm_mig_pages_transferred(void)
+{
+    return acct_info.norm_pages;
+}
+
 static void save_block_hdr(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                            int cont, int flag)
 {
@@ -295,6 +329,7 @@ static int ram_save_block(QEMUFile *f)
             p = memory_region_get_ram_ptr(mr) + offset;
 
             if (is_dup_page(p)) {
+                acct_info.dup_pages++;
                 save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_COMPRESS);
                 qemu_put_byte(f, *p);
                 bytes_sent = 1;
@@ -310,6 +345,7 @@ static int ram_save_block(QEMUFile *f)
                 save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
                 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
                 bytes_sent = TARGET_PAGE_SIZE;
+                acct_info.norm_pages++;
             }
 
             /* if page is unmodified, continue to the next */
@@ -431,6 +467,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
         }
         XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
         XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
+        acct_clear();
     }
 
     /* Make sure all dirty bits are set */
@@ -479,6 +516,7 @@ static int ram_save_iterate(QEMUFile *f, void *opaque)
             break;
         }
         bytes_transferred += bytes_sent;
+        acct_info.iterations++;
        /* we want to check in the 1st loop, just in case it was the 1st time
           and we had to sync the dirty bitmap.
           qemu_get_clock_ns() is a bit expensive, so we only check each some
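
The accounting added to arch_init.c above is deliberately small: ram_save_block() bumps dup_pages on the is_dup_page() path and norm_pages when a full page is written, and ram_save_setup() zeroes everything through acct_clear() so the counters describe a single migration run. The standalone sketch below (not QEMU code) models that pattern; PAGE_SIZE, page_is_duplicate() and account_page() are hypothetical stand-ins for TARGET_PAGE_SIZE, is_dup_page() and the inline increments in the patch, and a 4 KiB page is assumed.

/* Standalone sketch of the accounting pattern above -- not QEMU code.
 * A page whose bytes are all identical counts as "duplicate" (it would be
 * sent as a single byte); anything else counts as a "normal" page. */
#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096          /* assumed stand-in for TARGET_PAGE_SIZE */

typedef struct AccountingInfo {
    uint64_t dup_pages;         /* pages sent as one repeated byte */
    uint64_t norm_pages;        /* pages sent in full */
} AccountingInfo;

static AccountingInfo acct_info;

/* Rough analogue of is_dup_page(): every byte equals the first one. */
static bool page_is_duplicate(const uint8_t *page)
{
    for (size_t i = 1; i < PAGE_SIZE; i++) {
        if (page[i] != page[0]) {
            return false;
        }
    }
    return true;
}

static void account_page(const uint8_t *page)
{
    if (page_is_duplicate(page)) {
        acct_info.dup_pages++;      /* dup path: 1 payload byte */
    } else {
        acct_info.norm_pages++;     /* normal path: PAGE_SIZE bytes */
    }
}

int main(void)
{
    uint8_t zero_page[PAGE_SIZE] = { 0 };
    uint8_t data_page[PAGE_SIZE] = { 0 };

    data_page[100] = 0xab;          /* one differing byte -> "normal" */

    account_page(zero_page);
    account_page(data_page);

    printf("duplicate: %" PRIu64 " pages\n", acct_info.dup_pages);
    printf("normal: %" PRIu64 " pages, %" PRIu64 " bytes\n",
           acct_info.norm_pages,
           acct_info.norm_pages * (uint64_t)PAGE_SIZE);
    return 0;
}

Keeping the struct private to one file and exposing only the *_transferred() accessors, as the patch does, means migration.c never touches acct_info directly.
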
diff --git a/hmp.c b/hmp.c
--- a/hmp.c
+++ b/hmp.c
@@ -160,6 +160,12 @@ void hmp_info_migrate(Monitor *mon)
                        info->ram->total >> 10);
         monitor_printf(mon, "total time: %" PRIu64 " milliseconds\n",
                        info->ram->total_time);
+        monitor_printf(mon, "duplicate: %" PRIu64 " pages\n",
+                       info->ram->duplicate);
+        monitor_printf(mon, "normal: %" PRIu64 " pages\n",
+                       info->ram->normal);
+        monitor_printf(mon, "normal bytes: %" PRIu64 " kbytes\n",
+                       info->ram->normal_bytes >> 10);
     }
 
     if (info->has_disk) {
diff --git a/migration.c b/migration.c
index a6c007215a..ab5e09d2cb 100644
--- a/migration.c
+++ b/migration.c
@@ -161,6 +161,9 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->total = ram_bytes_total();
         info->ram->total_time = qemu_get_clock_ms(rt_clock)
             - s->total_time;
+        info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->normal = norm_mig_pages_transferred();
+        info->ram->normal_bytes = norm_mig_bytes_transferred();
 
         if (blk_mig_active()) {
             info->has_disk = true;
@@ -180,6 +183,9 @@ MigrationInfo *qmp_query_migrate(Error **errp)
         info->ram->remaining = 0;
         info->ram->total = ram_bytes_total();
         info->ram->total_time = s->total_time;
+        info->ram->duplicate = dup_mig_pages_transferred();
+        info->ram->normal = norm_mig_pages_transferred();
+        info->ram->normal_bytes = norm_mig_bytes_transferred();
         break;
     case MIG_STATE_ERROR:
         info->has_status = true;
diff --git a/migration.h b/migration.h
index 337e2255a8..e4a7cd72fc 100644
--- a/migration.h
+++ b/migration.h
@@ -87,6 +87,11 @@ uint64_t ram_bytes_total(void);
 
 extern SaveVMHandlers savevm_ram_handlers;
 
+uint64_t dup_mig_bytes_transferred(void);
+uint64_t dup_mig_pages_transferred(void);
+uint64_t norm_mig_bytes_transferred(void);
+uint64_t norm_mig_pages_transferred(void);
+
 /**
  * @migrate_add_blocker - prevent migration from proceeding
  *
diff --git a/qapi-schema.json b/qapi-schema.json
index 901cdf1255..3f67b1ec59 100644
--- a/qapi-schema.json
+++ b/qapi-schema.json
@@ -264,11 +264,18 @@
 #        migration has ended, it returns the total migration
 #        time. (since 1.2)
 #
-# Since: 0.14.0.
+# @duplicate: number of duplicate pages (since 1.2)
+#
+# @normal: number of normal pages (since 1.2)
+#
+# @normal-bytes: number of normal bytes sent (since 1.2)
+#
+# Since: 0.14.0
 ##
 { 'type': 'MigrationStats',
   'data': {'transferred': 'int', 'remaining': 'int', 'total': 'int' ,
-           'total-time': 'int' } }
+           'total-time': 'int', 'duplicate': 'int', 'normal': 'int',
+           'normal-bytes': 'int' } }
 
 ##
 # @MigrationInfo
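
The schema only declares the three new members as integers; their meaning comes from the code above: normal-bytes is norm_pages * TARGET_PAGE_SIZE, and a duplicate page costs a single payload byte on the wire (the RAM_SAVE_FLAG_COMPRESS path in ram_save_block()). As a hedged illustration of how a query-migrate client could turn the counters into a savings estimate, assuming a 4 KiB target page and ignoring the per-page block header, which the stats do not cover:

/* Sketch only -- rough wire-cost arithmetic for the new stats fields,
 * assuming a 4 KiB target page; save_block_hdr() overhead is ignored. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096u  /* assumption for illustration */

int main(void)
{
    uint64_t duplicate = 123;   /* "duplicate" from query-migrate (pages) */
    uint64_t normal    = 123;   /* "normal" from query-migrate (pages) */

    /* In this patch normal-bytes is defined as normal * TARGET_PAGE_SIZE. */
    uint64_t normal_bytes = normal * TARGET_PAGE_SIZE;

    /* Guest RAM represented by the accounted pages. */
    uint64_t guest_bytes = (duplicate + normal) * TARGET_PAGE_SIZE;

    /* Approximate payload on the wire: one byte per duplicate page,
     * the full page for everything else. */
    uint64_t wire_bytes = duplicate + normal_bytes;

    printf("guest RAM covered:    %" PRIu64 " bytes\n", guest_bytes);
    printf("approx. payload sent: %" PRIu64 " bytes\n", wire_bytes);
    printf("saved by dup pages:   %" PRIu64 " bytes\n",
           duplicate * (TARGET_PAGE_SIZE - 1));
    return 0;
}
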
diff --git a/qmp-commands.hx b/qmp-commands.hx
index d426e567c3..c3368f3809 100644
--- a/qmp-commands.hx
+++ b/qmp-commands.hx
@@ -2125,6 +2125,9 @@ The main json-object contains the following:
          - "total-time": total amount of ms since migration started. If
                          migration has ended, it returns the total migration
                          time (json-int)
+         - "duplicate": number of duplicated pages (json-int)
+         - "normal": number of normal pages transferred (json-int)
+         - "normal-bytes": number of normal bytes transferred (json-int)
 - "disk": only present if "status" is "active" and it is a block migration,
   it is a json-object with the following disk information (in bytes):
          - "transferred": amount transferred (json-int)
@@ -2141,7 +2144,19 @@ Examples:
 
 2. Migration is done and has succeeded
 
 -> { "execute": "query-migrate" }
-<- { "return": { "status": "completed" } }
+<- { "return": {
+        "status": "completed",
+        "ram":{
+          "transferred":123,
+          "remaining":123,
+          "total":246,
+          "total-time":12345,
+          "duplicate":123,
+          "normal":123,
+          "normal-bytes":123456
+        }
+     }
+   }
 
 3. Migration is done and has failed
@@ -2158,7 +2173,10 @@ Examples:
            "transferred":123,
            "remaining":123,
            "total":246,
-           "total-time":12345
+           "total-time":12345,
+           "duplicate":123,
+           "normal":123,
+           "normal-bytes":123456
         }
      }
    }
@@ -2173,7 +2191,10 @@ Examples:
            "total":1057024,
            "remaining":1053304,
            "transferred":3720,
-           "total-time":12345
+           "total-time":12345,
+           "duplicate":123,
+           "normal":123,
+           "normal-bytes":123456
         },
         "disk":{
            "total":20971520,
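
For reference, the hmp.c hunk earlier in this patch renders the same three fields in "info migrate", with normal bytes shifted down to kbytes. A minimal sketch of that output using the placeholder values from the JSON examples above (plain printf() standing in for monitor_printf(), which needs a Monitor *):

/* Sketch only: how "info migrate" would print the new fields for the
 * placeholder values used in the query-migrate examples above. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t duplicate    = 123;     /* "duplicate" (pages) */
    uint64_t normal       = 123;     /* "normal" (pages) */
    uint64_t normal_bytes = 123456;  /* "normal-bytes" */

    printf("duplicate: %" PRIu64 " pages\n", duplicate);
    printf("normal: %" PRIu64 " pages\n", normal);
    printf("normal bytes: %" PRIu64 " kbytes\n", normal_bytes >> 10);
    return 0;
}
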