author     Anthony Liguori <aliguori@us.ibm.com>  2013-02-26 07:44:32 -0600
committer  Anthony Liguori <aliguori@us.ibm.com>  2013-02-26 07:44:32 -0600
commit     9a1d7f00efd4b69f051d4223a70ca91af0ccb19d (patch)
tree       8b0e390c0eafe651f6ebb9503d5759360b1a8fc7
parent     b1c07f06ed0e2094197274e636a90ad33674653c (diff)
parent     90f8ae724a575861f093fbdbfd49a925bcfec327 (diff)
Merge remote-tracking branch 'quintela/stats.next' into staging
# By Juan Quintela
# Via Juan Quintela
* quintela/stats.next:
  migration: calculate expected_downtime
  migration: don't account sleep time for calculating bandwidth
  migration: calculate end time after we have sent the data
  migration: change initial value of expected_downtime
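In rough terms, the series estimates downtime as the time needed to resend the memory dirtied per second at the measured transfer rate, where the rate excludes time spent sleeping in the rate limiter. The standalone sketch below is only an illustration of that arithmetic, not QEMU code; every value is invented.

/* Illustrative sketch of the estimate this series introduces; numbers
 * are made up and the QEMU state machine is omitted. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* one BUFFER_DELAY-sized measurement interval, values assumed */
    uint64_t transferred_bytes = 50u * 1024 * 1024;   /* bytes sent */
    uint64_t interval_ms       = 100;                 /* wall-clock time */
    uint64_t sleep_ms          = 30;                  /* throttled time */
    int64_t  dirty_bytes_rate  = 400LL * 1024 * 1024; /* bytes dirtied/s */

    /* bandwidth counts only the time actually spent sending (bytes/ms) */
    double bandwidth = (double)transferred_bytes / (interval_ms - sleep_ms);

    /* ms needed to push one second's worth of newly dirtied memory */
    int64_t expected_downtime = dirty_bytes_rate / bandwidth;

    printf("bandwidth %.0f bytes/ms, expected_downtime %lld ms\n",
           bandwidth, (long long)expected_downtime);
    return 0;
}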
-rw-r--r--  arch_init.c                     1
-rw-r--r--  include/migration/migration.h   1
-rw-r--r--  migration.c                    15
3 files changed, 15 insertions, 2 deletions
diff --git a/arch_init.c b/arch_init.c
index 8da868b988..8daeafaf5c 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -414,6 +414,7 @@ static void migration_bitmap_sync(void)
     if (end_time > start_time + 1000) {
         s->dirty_pages_rate = num_dirty_pages_period * 1000
             / (end_time - start_time);
+        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
         start_time = end_time;
         num_dirty_pages_period = 0;
     }
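The new field simply restates the dirty page rate in bytes so it can later be divided by a bandwidth expressed in bytes. A minimal sketch of that conversion, assuming a 4 KiB TARGET_PAGE_SIZE and invented counter values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TARGET_PAGE_SIZE 4096   /* assumed here; target-dependent in QEMU */

int main(void)
{
    uint64_t num_dirty_pages_period = 25000; /* pages dirtied this period */
    int64_t start_time = 0, end_time = 1250; /* ms, invented */

    /* same shape as migration_bitmap_sync(): pages per second, then bytes */
    int64_t dirty_pages_rate = num_dirty_pages_period * 1000
        / (end_time - start_time);
    int64_t dirty_bytes_rate = dirty_pages_rate * TARGET_PAGE_SIZE;

    printf("%" PRId64 " pages/s -> %" PRId64 " bytes/s\n",
           dirty_pages_rate, dirty_bytes_rate);
    return 0;
}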
diff --git a/include/migration/migration.h b/include/migration/migration.h
index a8c9639732..d1214097fe 100644
--- a/include/migration/migration.h
+++ b/include/migration/migration.h
@@ -51,6 +51,7 @@ struct MigrationState
     int64_t downtime;
     int64_t expected_downtime;
     int64_t dirty_pages_rate;
+    int64_t dirty_bytes_rate;
     bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
     int64_t xbzrle_cache_size;
     bool complete;
diff --git a/migration.c b/migration.c
index b1ebb01145..11725ae3fc 100644
--- a/migration.c
+++ b/migration.c
@@ -658,6 +658,7 @@ static void *buffered_file_thread(void *opaque)
 {
     MigrationState *s = opaque;
     int64_t initial_time = qemu_get_clock_ms(rt_clock);
+    int64_t sleep_time = 0;
     int64_t max_size = 0;
     bool last_round = false;
     int ret;
@@ -673,7 +674,7 @@ static void *buffered_file_thread(void *opaque)
     qemu_mutex_unlock_iothread();
     while (true) {
-        int64_t current_time = qemu_get_clock_ms(rt_clock);
+        int64_t current_time;
         uint64_t pending_size;
         qemu_mutex_lock_iothread();
@@ -727,22 +728,30 @@ static void *buffered_file_thread(void *opaque)
             }
         }
         qemu_mutex_unlock_iothread();
+        current_time = qemu_get_clock_ms(rt_clock);
         if (current_time >= initial_time + BUFFER_DELAY) {
             uint64_t transferred_bytes = s->bytes_xfer;
-            uint64_t time_spent = current_time - initial_time;
+            uint64_t time_spent = current_time - initial_time - sleep_time;
             double bandwidth = transferred_bytes / time_spent;
             max_size = bandwidth * migrate_max_downtime() / 1000000;
             DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
                     " bandwidth %g max_size %" PRId64 "\n",
                     transferred_bytes, time_spent, bandwidth, max_size);
+            /* if we haven't sent anything, we don't want to recalculate
+               10000 is a small enough number for our purposes */
+            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
+                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
+            }
             s->bytes_xfer = 0;
+            sleep_time = 0;
             initial_time = current_time;
         }
         if (!last_round && (s->bytes_xfer >= s->xfer_limit)) {
             /* usleep expects microseconds */
             g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
+            sleep_time += qemu_get_clock_ms(rt_clock) - current_time;
         }
         ret = buffered_flush(s);
         if (ret < 0) {
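The hunk above samples the clock only after the data has gone out and keeps the rate-limiter sleeps out of the bandwidth calculation. A minimal sketch of that accounting, with all timestamps invented:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t  initial_time = 0;    /* ms, start of the interval */
    int64_t  current_time = 100;  /* ms, sampled after the send completed */
    int64_t  sleep_time   = 40;   /* ms accumulated across g_usleep() calls */
    uint64_t bytes_xfer   = 6u * 1024 * 1024; /* bytes sent this interval */

    /* excluding the sleeps, the 6 MiB really took 60 ms, not 100 */
    uint64_t time_spent = current_time - initial_time - sleep_time;
    double bandwidth = (double)bytes_xfer / time_spent; /* bytes per ms */

    printf("time_spent %" PRIu64 " ms, bandwidth %.0f bytes/ms\n",
           time_spent, bandwidth);

    /* the real loop then resets bytes_xfer and sleep_time for the next
       interval and moves initial_time forward to current_time */
    return 0;
}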
@@ -774,6 +783,8 @@ void migrate_fd_connect(MigrationState *s)
     s->buffer = NULL;
     s->buffer_size = 0;
     s->buffer_capacity = 0;
+    /* This is a best 1st approximation. ns to ms */
+    s->expected_downtime = max_downtime/1000000;
     s->xfer_limit = s->bandwidth_limit / XFER_LIMIT_RATIO;
     s->complete = false;
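Until the first real measurement is available, expected_downtime starts from the configured downtime limit, converted from nanoseconds to milliseconds. A tiny sketch of that conversion; the 30 ms figure is only an assumed default, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t max_downtime = 30000000;  /* assumed default: 30 ms in ns */

    /* best first approximation until bandwidth data exists: ns -> ms */
    int64_t expected_downtime = max_downtime / 1000000;

    printf("initial expected_downtime: %lld ms\n",
           (long long)expected_downtime);
    return 0;
}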