Diffstat (limited to 'migration/ram.c')
-rw-r--r-- | migration/ram.c | 25 |
1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/migration/ram.c b/migration/ram.c
index 53166fc279..52fc032b83 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -616,20 +616,34 @@ static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
  * able to complete migration. Some workloads dirty memory way too
  * fast and will not effectively converge, even with auto-converge.
  */
-static void mig_throttle_guest_down(void)
+static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
+                                    uint64_t bytes_dirty_threshold)
 {
     MigrationState *s = migrate_get_current();
     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
-    uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
+    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
+    bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
     int pct_max = s->parameters.max_cpu_throttle;
 
+    uint64_t throttle_now = cpu_throttle_get_percentage();
+    uint64_t cpu_now, cpu_ideal, throttle_inc;
+
     /* We have not started throttling yet. Let's start it. */
     if (!cpu_throttle_active()) {
         cpu_throttle_set(pct_initial);
     } else {
         /* Throttling already on, just increase the rate */
-        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
-                             pct_max));
+        if (!pct_tailslow) {
+            throttle_inc = pct_increment;
+        } else {
+            /* Compute the ideal CPU percentage used by Guest, which may
+             * make the dirty rate match the dirty rate threshold. */
+            cpu_now = 100 - throttle_now;
+            cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
+                        bytes_dirty_period);
+            throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
+        }
+        cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
     }
 }
 
@@ -919,7 +933,8 @@ static void migration_trigger_throttle(RAMState *rs)
             (++rs->dirty_rate_high_cnt >= 2)) {
             trace_migration_throttle();
             rs->dirty_rate_high_cnt = 0;
-            mig_throttle_guest_down();
+            mig_throttle_guest_down(bytes_dirty_period,
+                                    bytes_dirty_threshold);
         }
     }
 }
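To make the effect of the new tailslow path concrete, here is a minimal standalone C sketch (not QEMU code; next_throttle() and the sample byte counts are invented for illustration) that mirrors the increment computation in the hunk above: when cpu_throttle_tailslow is set, the step is capped at the amount needed to bring the guest's dirty rate down to the threshold, instead of always adding the full cpu_throttle_increment.

/*
 * Standalone sketch of the tailslow computation above (not QEMU code;
 * next_throttle() and the sample numbers are made up for illustration).
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Given the current throttle percentage and the dirty-rate measurements,
 * return the next throttle percentage, mirroring the patched logic. */
static uint64_t next_throttle(uint64_t throttle_now, uint64_t pct_increment,
                              uint64_t pct_max, bool pct_tailslow,
                              uint64_t bytes_dirty_period,
                              uint64_t bytes_dirty_threshold)
{
    uint64_t cpu_now, cpu_ideal, throttle_inc;

    if (!pct_tailslow) {
        throttle_inc = pct_increment;
    } else {
        /* CPU share the guest currently gets, and the share that would
         * make its dirty rate match the threshold. */
        cpu_now = 100 - throttle_now;
        cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
                    bytes_dirty_period);
        throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
    }
    return MIN(throttle_now + throttle_inc, pct_max);
}

int main(void)
{
    /* Guest already throttled to 40%, dirtying 600 MiB per period against
     * a 525 MiB threshold, with increment 10 and max 99. The ideal guest
     * CPU share is 60 * 525/600 = 52.5 -> 52, so tailslow limits the step
     * to 8 points instead of the full 10, and this prints 48. */
    uint64_t next = next_throttle(40, 10, 99, true,
                                  600ULL << 20, 525ULL << 20);
    printf("next throttle: %" PRIu64 "%%\n", next);
    return 0;
}

As the formula in the patch shows, without tailslow the throttle always grows by the full increment, which can overshoot when the dirty rate is only slightly above the threshold; capping the step at cpu_now - cpu_ideal keeps the final adjustments small so the guest is not throttled more than convergence requires.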