author     Keqian Zhu <zhukeqian1@huawei.com>            2020-02-24 10:31:42 +0800
committer  Dr. David Alan Gilbert <dgilbert@redhat.com>  2020-03-13 09:36:30 +0000
commit     dc14a470763c96fd9d360e1028ce38e8c3613a77
tree       94e4b1b2ae5f0d37d588db7beb45bfe56c6a4282 /migration
parent     297254c71b302abf9f4f37c41e274b5722e1a5da
migration/throttle: Add throttle-trig-thres migration parameter
Currently, if bytes_dirty_period is more than 50% of bytes_xfer_period,
we start or increase throttling.

If we make this percentage higher, then we can tolerate a higher dirty
rate during migration, which means less impact on the guest. The side
effect of a higher percentage is a longer migration time. We therefore
make this parameter configurable, to switch between putting migration
time first or guest performance first.

The default value is 50 and the valid range is 1 to 100.
Signed-off-by: Keqian Zhu <zhukeqian1@huawei.com>
Message-Id: <20200224023142.39360-1-zhukeqian1@huawei.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
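For illustration, here is a minimal standalone sketch of the trigger condition this patch makes configurable. It is not QEMU code: `should_throttle` and its parameters are hypothetical names that mirror the arithmetic in the patch's `migration_trigger_throttle()`. Throttling starts or increases once the bytes dirtied in a sync period exceed `threshold` percent of the bytes transferred in the same period, for two consecutive periods; with the default threshold of 50 this is equivalent to the old hard-coded `dirty > xfer / 2` check.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper reproducing the patch's arithmetic: throttle when the
 * bytes dirtied in a sync period exceed `threshold` percent of the bytes
 * transferred in that period, twice in a row. */
static bool should_throttle(uint64_t bytes_xfer_period,
                            uint64_t bytes_dirty_period,
                            uint64_t threshold,   /* 1..100, default 50 */
                            unsigned *high_cnt)   /* consecutive-hit counter */
{
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;

    if (bytes_dirty_period > bytes_dirty_threshold) {
        if (++*high_cnt >= 2) {
            *high_cnt = 0;
            return true;   /* start or increase throttling */
        }
    }
    return false;
}

int main(void)
{
    /* Example numbers: 1 GiB transferred in the sync period, 600 MiB dirtied. */
    uint64_t xfer  = 1024ULL << 20;
    uint64_t dirty = 600ULL << 20;
    unsigned cnt;
    bool first, second;

    /* Default threshold of 50: 600 MiB > 512 MiB, so the second consecutive
     * "high dirty rate" period triggers throttling. */
    cnt = 0;
    first  = should_throttle(xfer, dirty, 50, &cnt);
    second = should_throttle(xfer, dirty, 50, &cnt);
    printf("threshold=50: period1=%d period2=%d\n", first, second);

    /* Higher threshold of 70: 600 MiB <= ~716 MiB, so the same dirty rate is
     * tolerated and the guest is never slowed down. */
    cnt = 0;
    first  = should_throttle(xfer, dirty, 70, &cnt);
    second = should_throttle(xfer, dirty, 70, &cnt);
    printf("threshold=70: period1=%d period2=%d\n", first, second);

    return 0;
}
```

Raising the threshold therefore trades migration convergence for guest performance, exactly the knob the commit message describes.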
Diffstat (limited to 'migration')
-rw-r--r-- | migration/migration.c | 24
-rw-r--r-- | migration/ram.c       | 52
2 files changed, 54 insertions, 22 deletions
diff --git a/migration/migration.c b/migration/migration.c
index 0b2045ccbd..59da5d0625 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -78,6 +78,7 @@
 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */
 #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
 /* Define default autoconverge cpu throttle migration parameters */
+#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
 #define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
 #define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
@@ -778,6 +779,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
     params->compress_wait_thread = s->parameters.compress_wait_thread;
     params->has_decompress_threads = true;
     params->decompress_threads = s->parameters.decompress_threads;
+    params->has_throttle_trigger_threshold = true;
+    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
     params->has_cpu_throttle_initial = true;
     params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
     params->has_cpu_throttle_increment = true;
@@ -1169,6 +1172,15 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
         return false;
     }
 
+    if (params->has_throttle_trigger_threshold &&
+        (params->throttle_trigger_threshold < 1 ||
+         params->throttle_trigger_threshold > 100)) {
+        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+                   "throttle_trigger_threshold",
+                   "an integer in the range of 1 to 100");
+        return false;
+    }
+
     if (params->has_cpu_throttle_initial &&
         (params->cpu_throttle_initial < 1 ||
          params->cpu_throttle_initial > 99)) {
@@ -1298,6 +1310,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
         dest->decompress_threads = params->decompress_threads;
     }
 
+    if (params->has_throttle_trigger_threshold) {
+        dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
+    }
+
     if (params->has_cpu_throttle_initial) {
         dest->cpu_throttle_initial = params->cpu_throttle_initial;
     }
@@ -1382,6 +1398,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
         s->parameters.decompress_threads = params->decompress_threads;
     }
 
+    if (params->has_throttle_trigger_threshold) {
+        s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
+    }
+
     if (params->has_cpu_throttle_initial) {
         s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
     }
@@ -3558,6 +3578,9 @@ static Property migration_properties[] = {
     DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
                       parameters.decompress_threads,
                       DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
+    DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
+                      parameters.throttle_trigger_threshold,
+                      DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
     DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
                       parameters.cpu_throttle_initial,
                       DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
@@ -3667,6 +3690,7 @@ static void migration_instance_init(Object *obj)
     params->has_compress_level = true;
     params->has_compress_threads = true;
     params->has_decompress_threads = true;
+    params->has_throttle_trigger_threshold = true;
     params->has_cpu_throttle_initial = true;
     params->has_cpu_throttle_increment = true;
     params->has_max_bandwidth = true;
diff --git a/migration/ram.c b/migration/ram.c
index 0ef68798d2..02cfd76d19 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -896,11 +896,38 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
     }
 }
 
+static void migration_trigger_throttle(RAMState *rs)
+{
+    MigrationState *s = migrate_get_current();
+    uint64_t threshold = s->parameters.throttle_trigger_threshold;
+
+    uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
+    uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
+    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
+
+    /* During block migration the auto-converge logic incorrectly detects
+     * that ram migration makes no progress. Avoid this by disabling the
+     * throttling logic during the bulk phase of block migration. */
+    if (migrate_auto_converge() && !blk_mig_bulk_active()) {
+        /* The following detection logic can be refined later. For now:
+           Check to see if the ratio between dirtied bytes and the approx.
+           amount of bytes that just got transferred since the last time
+           we were in this routine reaches the threshold. If that happens
+           twice, start or increase throttling. */
+
+        if ((bytes_dirty_period > bytes_dirty_threshold) &&
+            (++rs->dirty_rate_high_cnt >= 2)) {
+            trace_migration_throttle();
+            rs->dirty_rate_high_cnt = 0;
+            mig_throttle_guest_down();
+        }
+    }
+}
+
 static void migration_bitmap_sync(RAMState *rs)
 {
     RAMBlock *block;
     int64_t end_time;
-    uint64_t bytes_xfer_now;
 
     ram_counters.dirty_sync_count++;
 
@@ -927,26 +954,7 @@ static void migration_bitmap_sync(RAMState *rs)
 
     /* more than 1 second = 1000 millisecons */
     if (end_time > rs->time_last_bitmap_sync + 1000) {
-        bytes_xfer_now = ram_counters.transferred;
-
-        /* During block migration the auto-converge logic incorrectly detects
-         * that ram migration makes no progress. Avoid this by disabling the
-         * throttling logic during the bulk phase of block migration. */
-        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
-            /* The following detection logic can be refined later. For now:
-               Check to see if the dirtied bytes is 50% more than the approx.
-               amount of bytes that just got transferred since the last time we
-               were in this routine. If that happens twice, start or increase
-               throttling */
-
-            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
-                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
-                (++rs->dirty_rate_high_cnt >= 2)) {
-                trace_migration_throttle();
-                rs->dirty_rate_high_cnt = 0;
-                mig_throttle_guest_down();
-            }
-        }
+        migration_trigger_throttle(rs);
 
         migration_update_rates(rs, end_time);
 
@@ -955,7 +963,7 @@
         /* reset period counters */
         rs->time_last_bitmap_sync = end_time;
         rs->num_dirty_pages_period = 0;
-        rs->bytes_xfer_prev = bytes_xfer_now;
+        rs->bytes_xfer_prev = ram_counters.transferred;
     }
     if (migrate_use_events()) {
         qapi_event_send_migration_pass(ram_counters.dirty_sync_count);