author     Hyman Huang <yong.huang@smartx.com>   2024-10-17 14:42:54 +0800
committer  Peter Xu <peterx@redhat.com>          2024-10-31 15:48:18 -0400
commit     52ac968ab28b2e1eee2e083f19f2a70fdece5a2e
tree       9ccd48373aeb80ad9c269d5c41dac8f5459d2ff3
parent     6a39ba7cab67da05b91e215142ce5781e77e5d9f
migration: Support periodic RAMBlock dirty bitmap sync
When a VM is configured with huge memory, the current throttle logic doesn't appear to scale: migration_trigger_throttle() is only called once per iteration, so it may not be invoked for a long time if a single iteration takes a long time.

The periodic dirty sync aims to fix this by syncing the RAMBlock dirty bitmap from the remote dirty bitmap and, when necessary, triggering the CPU throttle multiple times during a long iteration. This is a trade-off between synchronization overhead and CPU throttle impact.

Signed-off-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Fabiano Rosas <farosas@suse.de>
Link: https://lore.kernel.org/r/f61f1b3653f2acf026901103e1c73d157d38b08f.1729146786.git.yong.huang@smartx.com
[peterx: make prev_cnt global, and reset for each migration]
Signed-off-by: Peter Xu <peterx@redhat.com>
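For context, the sketch below illustrates the self-rearming timer pattern the patch is built on: a millisecond timer on QEMU_CLOCK_VIRTUAL_RT whose callback does its periodic work and then re-arms itself for the next interval. Only the QEMU timer API calls (timer_new_ms(), timer_mod(), timer_del(), qemu_clock_get_ms()) mirror the patch; the periodic_sync_* names, the interval macro, and the stub callback body are illustrative placeholders.

#include "qemu/osdep.h"
#include "qemu/timer.h"

/* Same 5-second timeslice the patch uses for its dirty sync timer. */
#define PERIODIC_SYNC_TIMESLICE_MS 5000

static QEMUTimer *periodic_sync_timer;

/* The callback does its periodic work, then re-arms itself. */
static void periodic_sync_tick(void *opaque)
{
    /* ... periodic work goes here, e.g. a RAMBlock dirty bitmap sync ... */

    timer_mod(periodic_sync_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
              PERIODIC_SYNC_TIMESLICE_MS);
}

static void periodic_sync_start(void)
{
    periodic_sync_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
                                       periodic_sync_tick, NULL);
    timer_mod(periodic_sync_timer,
              qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
              PERIODIC_SYNC_TIMESLICE_MS);
}

static void periodic_sync_stop(void)
{
    timer_del(periodic_sync_timer);
}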
Diffstat (limited to 'migration')
 migration/cpu-throttle.c | 70
 migration/migration.c    | 14
 migration/migration.h    |  1
 migration/ram.c          |  2
 migration/trace-events   |  1
 5 files changed, 84 insertions(+), 4 deletions(-)
diff --git a/migration/cpu-throttle.c b/migration/cpu-throttle.c
index 7632dc6143..5179019e33 100644
--- a/migration/cpu-throttle.c
+++ b/migration/cpu-throttle.c
@@ -28,16 +28,23 @@
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "sysemu/cpu-throttle.h"
+#include "migration.h"
+#include "migration-stats.h"
#include "trace.h"
/* vcpu throttling controls */
-static QEMUTimer *throttle_timer;
+static QEMUTimer *throttle_timer, *throttle_dirty_sync_timer;
static unsigned int throttle_percentage;
+static bool throttle_dirty_sync_timer_active;
+static uint64_t throttle_dirty_sync_count_prev;
#define CPU_THROTTLE_PCT_MIN 1
#define CPU_THROTTLE_PCT_MAX 99
#define CPU_THROTTLE_TIMESLICE_NS 10000000
+/* Making sure RAMBlock dirty bitmap is synchronized every five seconds */
+#define CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS 5000
+
static void cpu_throttle_thread(CPUState *cpu, run_on_cpu_data opaque)
{
double pct;
@@ -112,6 +119,7 @@ void cpu_throttle_set(int new_throttle_pct)
void cpu_throttle_stop(void)
{
qatomic_set(&throttle_percentage, 0);
+ cpu_throttle_dirty_sync_timer(false);
}
bool cpu_throttle_active(void)
@@ -124,8 +132,68 @@ int cpu_throttle_get_percentage(void)
return qatomic_read(&throttle_percentage);
}
+void cpu_throttle_dirty_sync_timer_tick(void *opaque)
+{
+ uint64_t sync_cnt = stat64_get(&mig_stats.dirty_sync_count);
+
+ /*
+ * The first iteration copies all memory anyhow and has no
+ * effect on guest performance, therefore omit it to avoid
+ * paying extra for the sync penalty.
+ */
+ if (sync_cnt <= 1) {
+ goto end;
+ }
+
+ if (sync_cnt == throttle_dirty_sync_count_prev) {
+ trace_cpu_throttle_dirty_sync();
+ WITH_RCU_READ_LOCK_GUARD() {
+ migration_bitmap_sync_precopy(false);
+ }
+ }
+
+end:
+ throttle_dirty_sync_count_prev = stat64_get(&mig_stats.dirty_sync_count);
+
+ timer_mod(throttle_dirty_sync_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+}
+
+static bool cpu_throttle_dirty_sync_active(void)
+{
+ return qatomic_read(&throttle_dirty_sync_timer_active);
+}
+
+void cpu_throttle_dirty_sync_timer(bool enable)
+{
+ assert(throttle_dirty_sync_timer);
+
+ if (enable) {
+ if (!cpu_throttle_dirty_sync_active()) {
+ /*
+ * Always reset the dirty sync count cache, in case migration
+ * was cancelled once.
+ */
+ throttle_dirty_sync_count_prev = 0;
+ timer_mod(throttle_dirty_sync_timer,
+ qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) +
+ CPU_THROTTLE_DIRTY_SYNC_TIMESLICE_MS);
+ qatomic_set(&throttle_dirty_sync_timer_active, 1);
+ }
+ } else {
+ if (cpu_throttle_dirty_sync_active()) {
+ timer_del(throttle_dirty_sync_timer);
+ qatomic_set(&throttle_dirty_sync_timer_active, 0);
+ }
+ }
+}
+
void cpu_throttle_init(void)
{
throttle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
cpu_throttle_timer_tick, NULL);
+ throttle_dirty_sync_timer =
+ timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
+ cpu_throttle_dirty_sync_timer_tick, NULL);
}
diff --git a/migration/migration.c b/migration/migration.c
index 64c1d0c1db..9bcc9db4fb 100644
--- a/migration/migration.c
+++ b/migration/migration.c
@@ -3297,12 +3297,17 @@ static MigIterateState migration_iteration_run(MigrationState *s)
static void migration_iteration_finish(MigrationState *s)
{
- /* If we enabled cpu throttling for auto-converge, turn it off. */
+ bql_lock();
+
+ /*
+ * If we enabled cpu throttling for auto-converge, turn it off.
+ * Stopping CPU throttle should be serialized by BQL to avoid
+ * racing for the throttle_dirty_sync_timer.
+ */
if (migrate_auto_converge()) {
cpu_throttle_stop();
}
- bql_lock();
switch (s->state) {
case MIGRATION_STATUS_COMPLETED:
runstate_set(RUN_STATE_POSTMIGRATE);
@@ -3520,6 +3525,11 @@ static void *migration_thread(void *opaque)
qemu_savevm_send_colo_enable(s->to_dst_file);
}
+ if (migrate_auto_converge()) {
+ /* Start RAMBlock dirty bitmap sync timer */
+ cpu_throttle_dirty_sync_timer(true);
+ }
+
bql_lock();
ret = qemu_savevm_state_setup(s->to_dst_file, &local_err);
bql_unlock();
diff --git a/migration/migration.h b/migration/migration.h
index b9ce5aa4ff..7dc59c5e8d 100644
--- a/migration/migration.h
+++ b/migration/migration.h
@@ -551,4 +551,5 @@ int migration_rp_wait(MigrationState *s);
*/
void migration_rp_kick(MigrationState *s);
+void migration_bitmap_sync_precopy(bool last_stage);
#endif
diff --git a/migration/ram.c b/migration/ram.c
index 04fd805901..b24e45442c 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -1088,7 +1088,7 @@ static void migration_bitmap_sync(RAMState *rs, bool last_stage)
}
}
-static void migration_bitmap_sync_precopy(bool last_stage)
+void migration_bitmap_sync_precopy(bool last_stage)
{
Error *local_err = NULL;
assert(ram_state);
diff --git a/migration/trace-events b/migration/trace-events
index 9a19599804..0638183056 100644
--- a/migration/trace-events
+++ b/migration/trace-events
@@ -381,3 +381,4 @@ migration_pagecache_insert(void) "Error allocating page"
# cpu-throttle.c
cpu_throttle_set(int new_throttle_pct) "set guest CPU throttled by %d%%"
+cpu_throttle_dirty_sync(void) ""
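Usage note (an observation on the patch, not part of it): as the migration.c hunk shows, the periodic sync timer is only armed when migrate_auto_converge() returns true, so the auto-converge migration capability has to be enabled (for example through the migrate-set-capabilities QMP command) before migration starts for the new path to take effect; with auto-converge disabled, behaviour is unchanged. Firing of the periodic sync can be observed through the new cpu_throttle_dirty_sync trace event, e.g. by starting QEMU with -trace cpu_throttle_dirty_sync, assuming a trace backend such as the default "log" backend is built in.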