author | Keqian Zhu <zhukeqian1@huawei.com> | 2020-02-24 10:31:42 +0800
---|---|---
committer | Dr. David Alan Gilbert <dgilbert@redhat.com> | 2020-03-13 09:36:30 +0000
commit | dc14a470763c96fd9d360e1028ce38e8c3613a77 (patch) |
tree | 94e4b1b2ae5f0d37d588db7beb45bfe56c6a4282 /migration/ram.c |
parent | 297254c71b302abf9f4f37c41e274b5722e1a5da (diff) |
migration/throttle: Add throttle-trig-thres migration parameter
Currently, if bytes_dirty_period is more than 50% of
bytes_xfer_period, we start or increase throttling.

If we make this percentage higher, we can tolerate a higher
dirty rate during migration, which means less impact on the
guest. The side effect of a higher percentage is a longer
migration time. Making the percentage configurable lets users
choose between prioritizing migration time and prioritizing
guest performance.

The default value is 50 and the valid range is 1 to 100.
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Message-Id: <20200224023142.39360-1-zhukeqian1@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
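Once merged, the knob is tuned like any other migration parameter. A usage sketch (the hyphenated QMP spelling `throttle-trigger-threshold` is inferred from the `throttle_trigger_threshold` parameter field read in the diff below; the QAPI plumbing itself lands outside migration/ram.c):

```
# HMP: tolerate more dirtying before auto-converge kicks in,
# favoring guest performance over total migration time
(qemu) migrate_set_parameter throttle-trigger-threshold 75

# QMP equivalent
{ "execute": "migrate-set-parameters",
  "arguments": { "throttle-trigger-threshold": 75 } }
```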
Diffstat (limited to 'migration/ram.c')
-rw-r--r-- | migration/ram.c | 52 |
1 file changed, 30 insertions(+), 22 deletions(-)
```diff
diff --git a/migration/ram.c b/migration/ram.c
index 0ef68798d2..02cfd76d19 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -896,11 +896,38 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
     }
 }
 
+static void migration_trigger_throttle(RAMState *rs)
+{
+    MigrationState *s = migrate_get_current();
+    uint64_t threshold = s->parameters.throttle_trigger_threshold;
+
+    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
+    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
+    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
+
+    /* During block migration the auto-converge logic incorrectly detects
+     * that ram migration makes no progress. Avoid this by disabling the
+     * throttling logic during the bulk phase of block migration. */
+    if (migrate_auto_converge() && !blk_mig_bulk_active()) {
+        /* The following detection logic can be refined later. For now:
+           Check to see if the ratio between dirtied bytes and the approx.
+           amount of bytes that just got transferred since the last time
+           we were in this routine reaches the threshold. If that happens
+           twice, start or increase throttling. */
+
+        if ((bytes_dirty_period > bytes_dirty_threshold) &&
+            (++rs->dirty_rate_high_cnt >= 2)) {
+            trace_migration_throttle();
+            rs->dirty_rate_high_cnt = 0;
+            mig_throttle_guest_down();
+        }
+    }
+}
+
 static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
     int64_t end_time;
-    uint64_t bytes_xfer_now;
 
     ram_counters.dirty_sync_count++;
 
@@ -927,26 +954,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     /* more than 1 second = 1000 millisecons */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
-        bytes_xfer_now = ram_counters.transferred;
-
-        /* During block migration the auto-converge logic incorrectly detects
-         * that ram migration makes no progress. Avoid this by disabling the
-         * throttling logic during the bulk phase of block migration. */
-        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
-            /* The following detection logic can be refined later. For now:
-               Check to see if the dirtied bytes is 50% more than the approx.
-               amount of bytes that just got transferred since the last time we
-               were in this routine. If that happens twice, start or increase
-               throttling */
-
-            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
-                 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-                (++rs->dirty_rate_high_cnt >= 2)) {
-                trace_migration_throttle();
-                rs->dirty_rate_high_cnt = 0;
-                mig_throttle_guest_down();
-            }
-        }
+        migration_trigger_throttle(rs);
 
         migration_update_rates(rs, end_time);
 
@@ -955,7 +963,7 @@ static void migration_bitmap_sync(RAMState *rs)
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
-        rs->bytes_xfer_prev = bytes_xfer_now;
+        rs->bytes_xfer_prev = ram_counters.transferred;
     }
     if (migrate_use_events()) {
         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
```
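To make the new arithmetic concrete, here is a minimal standalone sketch (a hypothetical harness, not QEMU code) of the refactored trigger condition. Note that the real migration_trigger_throttle() additionally requires the condition to hold on two consecutive sync periods (dirty_rate_high_cnt >= 2) before throttling:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the patched check: bytes dirtied in the period are
 * compared against threshold% of the bytes transferred in the period.
 * With threshold = 50 this reproduces the old hard-coded behavior
 * (dirty > xfer / 2). */
static bool over_threshold(uint64_t bytes_dirty_period,
                           uint64_t bytes_xfer_period,
                           uint64_t threshold /* valid range: 1..100 */)
{
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
    return bytes_dirty_period > bytes_dirty_threshold;
}

int main(void)
{
    /* Example sync period: 1024 MiB transferred, 600 MiB dirtied. */
    uint64_t xfer  = 1024ull << 20;
    uint64_t dirty = 600ull << 20;

    printf("threshold=50: %d\n", over_threshold(dirty, xfer, 50)); /* 1: would throttle */
    printf("threshold=75: %d\n", over_threshold(dirty, xfer, 75)); /* 0: tolerated */
    return 0;
}
```

Computing `bytes_xfer_period * threshold / 100` in 64-bit integer math only risks overflow after hundreds of petabytes transferred in a single period, so the simple multiply-then-divide form is safe in practice.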