author     Stefan Hajnoczi <stefanha@redhat.com>  2023-11-06 08:37:22 +0800
committer  Stefan Hajnoczi <stefanha@redhat.com>  2023-11-06 08:37:22 +0800
commit     f3604191e296da90feb1b0bb24e986bc016809a8 (patch)
tree       bbb69ec98ae304416ab4c27e4e41f764321c5243 /tests
parent     1d6e13c1c7624244865bd3aa715b83e764085e3f (diff)
parent     0983125b405c479a6e7eb49b81cfdae969e28cee (diff)
Merge tag 'migration-20231103-pull-request' of https://gitlab.com/juan.quintela/qemu into staging
Migration Pull request (20231103)
Hi
In this PULL:
- dirty limit fixes (hyman)
- coverity issues (juan)
Please apply.
# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEGJn/jt6/WMzuA0uC9IfvGFhy1yMFAmVE4dgACgkQ9IfvGFhy
# 1yPBgxAAvrsCHwU6/m9y+XGokyHTKKKIVysLip/14jAjDL+viLYgxdVyOGQKQGBa
# +yV+XHTaEyKdihKG4Z5nWuC0yM+sdZQpWXQAcHJLPaPs5wDGICICpdAFY2LbAWSK
# jtX9uq7crywIL4mVKiX+HOjRUPCAYUx/2TcqJf2+0+MKDEVC33ikxNbcx8ZELY+Q
# +hGyOws3mkHSQjyaNUVgnnQtGzikYqcNO2efa+zVPdXYd+TUWW2e9I++Qf48r0Hv
# OqeZAB7bSAb39PNRuj0I1gt4d3WTHzHt7BSpX1OuFqQnzLw8vS5iDQH943WAyGkY
# NblZVb8pyzSg1Jy18H/SmrJDXeufRwqFwD+1NHyxGjsF89KOuVUqGrGpRXhMBtmA
# DSzdgn5jqW5lI1po9FqGdlPTFlhstpMH3DSfPQWurvJh42oM38gmSEHLBNpc4tXo
# 8udMYI09H/kHUoNMTZNGjnZO9LfarGsag6eOJP1bMMublhRlKCaL9RIyV9oOHycE
# IeOeQFeBP/BmYFLWbVPeZej7uiqsEc7VPDJK2QXns210UYanaWmggkmpdAr0I0EV
# pEKHSfVv1qlIlFH4d7MhcJzP2/rY62EC5tYQjT0UaBnCRcDInKrNWa3kbDL0akwr
# 0aJgpbT5ipknVChtwnMWJlbqpeW/VUF5g0jVpYQ3jbe/Zf+OtmU=
# =Pv8z
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 03 Nov 2023 20:04:40 HKT
# gpg: using RSA key 1899FF8EDEBF58CCEE034B82F487EF185872D723
# gpg: Good signature from "Juan Quintela <quintela@redhat.com>" [full]
# gpg: aka "Juan Quintela <quintela@trasno.org>" [full]
# Primary key fingerprint: 1899 FF8E DEBF 58CC EE03 4B82 F487 EF18 5872 D723
* tag 'migration-20231103-pull-request' of https://gitlab.com/juan.quintela/qemu:
migration: Unlock mutex in error case
docs/migration: Add the dirty limit section
tests/migration: Introduce dirty-limit into guestperf
tests/migration: Introduce dirty-ring-size option into guestperf
tests: Add migration dirty-limit capability test
system/dirtylimit: Drop the reduplicative check
system/dirtylimit: Fix a race situation
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
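
The dirty-limit capability exercised by this series throttles each vCPU according to its dirty page rate (measured via the KVM dirty ring) rather than by stealing CPU time the way auto-converge does. As orientation for the diffs below, enabling it on the migration source amounts to a QMP exchange along these lines; the capability and parameter names come from the patches themselves, while the values and replies are illustrative only:

    -> { "execute": "migrate-set-capabilities",
         "arguments": { "capabilities": [
             { "capability": "dirty-limit", "state": true } ] } }
    <- { "return": {} }
    -> { "execute": "migrate-set-parameters",
         "arguments": { "x-vcpu-dirty-limit-period": 1000,
                        "vcpu-dirty-limit": 50 } }
    <- { "return": {} }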
Diffstat (limited to 'tests')
 tests/migration/guestperf/comparison.py |  23
 tests/migration/guestperf/engine.py     |  23
 tests/migration/guestperf/hardware.py   |   8
 tests/migration/guestperf/progress.py   |  16
 tests/migration/guestperf/scenario.py   |  11
 tests/migration/guestperf/shell.py      |  24
 tests/qtest/migration-test.c            | 164
7 files changed, 261 insertions(+), 8 deletions(-)
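
In the qtest added below, throttling is observed through the dirty-limit-throttle-time-per-round migration statistic, and the configured per-vCPU rate is read back through get_limit_rate(), a helper wrapping the query-vcpu-dirty-limit QMP command. A sketch of that query with illustrative values (reply abridged; the field names are an assumption based on DirtyLimitInfo in qapi/migration.json):

    -> { "execute": "query-vcpu-dirty-limit" }
    <- { "return": [ { "cpu-index": 0, "limit-rate": 50, "current-rate": 49 } ] }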
diff --git a/tests/migration/guestperf/comparison.py b/tests/migration/guestperf/comparison.py
index c03b3f6d7e..42cc0372d1 100644
--- a/tests/migration/guestperf/comparison.py
+++ b/tests/migration/guestperf/comparison.py
@@ -135,4 +135,27 @@ COMPARISONS = [
         Scenario("compr-multifd-channels-64",
                  multifd=True, multifd_channels=64),
     ]),
+
+    # Looking at effect of dirty-limit with
+    # varying x_vcpu_dirty_limit_period
+    Comparison("compr-dirty-limit-period", scenarios = [
+        Scenario("compr-dirty-limit-period-500",
+                 dirty_limit=True, x_vcpu_dirty_limit_period=500),
+        Scenario("compr-dirty-limit-period-800",
+                 dirty_limit=True, x_vcpu_dirty_limit_period=800),
+        Scenario("compr-dirty-limit-period-1000",
+                 dirty_limit=True, x_vcpu_dirty_limit_period=1000),
+    ]),
+
+
+    # Looking at effect of dirty-limit with
+    # varying vcpu_dirty_limit
+    Comparison("compr-dirty-limit", scenarios = [
+        Scenario("compr-dirty-limit-10MB",
+                 dirty_limit=True, vcpu_dirty_limit=10),
+        Scenario("compr-dirty-limit-20MB",
+                 dirty_limit=True, vcpu_dirty_limit=20),
+        Scenario("compr-dirty-limit-50MB",
+                 dirty_limit=True, vcpu_dirty_limit=50),
+    ]),
 ]
diff --git a/tests/migration/guestperf/engine.py b/tests/migration/guestperf/engine.py
index da96ca034a..608d7270f6 100644
--- a/tests/migration/guestperf/engine.py
+++ b/tests/migration/guestperf/engine.py
@@ -102,6 +102,8 @@ class Engine(object):
             info.get("expected-downtime", 0),
             info.get("setup-time", 0),
             info.get("cpu-throttle-percentage", 0),
+            info.get("dirty-limit-throttle-time-per-round", 0),
+            info.get("dirty-limit-ring-full-time", 0),
         )

     def _migrate(self, hardware, scenario, src, dst, connect_uri):
@@ -203,6 +205,21 @@ class Engine(object):
             resp = dst.cmd("migrate-set-parameters",
                            multifd_channels=scenario._multifd_channels)

+        if scenario._dirty_limit:
+            if not hardware._dirty_ring_size:
+                raise Exception("dirty ring size must be configured when "
+                                "testing dirty limit migration")
+
+            resp = src.cmd("migrate-set-capabilities",
+                           capabilities = [
+                               { "capability": "dirty-limit",
+                                 "state": True }
+                           ])
+            resp = src.cmd("migrate-set-parameters",
+                x_vcpu_dirty_limit_period=scenario._x_vcpu_dirty_limit_period)
+            resp = src.cmd("migrate-set-parameters",
+                           vcpu_dirty_limit=scenario._vcpu_dirty_limit)
+
         resp = src.cmd("migrate", uri=connect_uri)

         post_copy = False
@@ -325,7 +342,6 @@ class Engine(object):
             cmdline = "'" + cmdline + "'"

         argv = [
-            "-accel", "kvm",
             "-cpu", "host",
             "-kernel", self._kernel,
             "-initrd", self._initrd,
@@ -333,6 +349,11 @@ class Engine(object):
             "-m", str((hardware._mem * 1024) + 512),
             "-smp", str(hardware._cpus),
         ]
+        if hardware._dirty_ring_size:
+            argv.extend(["-accel", "kvm,dirty-ring-size=%s" %
+                         hardware._dirty_ring_size])
+        else:
+            argv.extend(["-accel", "kvm"])

         argv.extend(self._get_qemu_serial_args())
diff --git a/tests/migration/guestperf/hardware.py b/tests/migration/guestperf/hardware.py
index 3145785ffd..f779cc050b 100644
--- a/tests/migration/guestperf/hardware.py
+++ b/tests/migration/guestperf/hardware.py
@@ -23,7 +23,8 @@ class Hardware(object):
                  src_cpu_bind=None, src_mem_bind=None,
                  dst_cpu_bind=None, dst_mem_bind=None,
                  prealloc_pages = False,
-                 huge_pages=False, locked_pages=False):
+                 huge_pages=False, locked_pages=False,
+                 dirty_ring_size=0):
         self._cpus = cpus
         self._mem = mem # GiB
         self._src_mem_bind = src_mem_bind # List of NUMA nodes
@@ -33,6 +34,7 @@ class Hardware(object):
         self._prealloc_pages = prealloc_pages
         self._huge_pages = huge_pages
         self._locked_pages = locked_pages
+        self._dirty_ring_size = dirty_ring_size

     def serialize(self):
@@ -46,6 +48,7 @@ class Hardware(object):
             "prealloc_pages": self._prealloc_pages,
             "huge_pages": self._huge_pages,
             "locked_pages": self._locked_pages,
+            "dirty_ring_size": self._dirty_ring_size,
         }

     @classmethod
@@ -59,4 +62,5 @@ class Hardware(object):
                         data["dst_mem_bind"],
                         data["prealloc_pages"],
                         data["huge_pages"],
-                        data["locked_pages"])
+                        data["locked_pages"],
+                        data["dirty_ring_size"])
diff --git a/tests/migration/guestperf/progress.py b/tests/migration/guestperf/progress.py
index ab1ee57273..d490584217 100644
--- a/tests/migration/guestperf/progress.py
+++ b/tests/migration/guestperf/progress.py
@@ -81,7 +81,9 @@ class Progress(object):
                  downtime,
                  downtime_expected,
                  setup_time,
-                 throttle_pcent):
+                 throttle_pcent,
+                 dirty_limit_throttle_time_per_round,
+                 dirty_limit_ring_full_time):

         self._status = status
         self._ram = ram
@@ -91,6 +93,10 @@ class Progress(object):
         self._downtime_expected = downtime_expected
         self._setup_time = setup_time
         self._throttle_pcent = throttle_pcent
+        self._dirty_limit_throttle_time_per_round = \
+            dirty_limit_throttle_time_per_round
+        self._dirty_limit_ring_full_time = \
+            dirty_limit_ring_full_time

     def serialize(self):
         return {
@@ -102,6 +108,10 @@ class Progress(object):
             "downtime_expected": self._downtime_expected,
             "setup_time": self._setup_time,
             "throttle_pcent": self._throttle_pcent,
+            "dirty_limit_throttle_time_per_round":
+                self._dirty_limit_throttle_time_per_round,
+            "dirty_limit_ring_full_time":
+                self._dirty_limit_ring_full_time,
         }

     @classmethod
@@ -114,4 +124,6 @@ class Progress(object):
                         data["downtime"],
                         data["downtime_expected"],
                         data["setup_time"],
-                        data["throttle_pcent"])
+                        data["throttle_pcent"],
+                        data["dirty_limit_throttle_time_per_round"],
+                        data["dirty_limit_ring_full_time"])
diff --git a/tests/migration/guestperf/scenario.py b/tests/migration/guestperf/scenario.py
index de70d9b2f5..154c4f5d5f 100644
--- a/tests/migration/guestperf/scenario.py
+++ b/tests/migration/guestperf/scenario.py
@@ -30,7 +30,9 @@ class Scenario(object):
                  auto_converge=False, auto_converge_step=10,
                  compression_mt=False, compression_mt_threads=1,
                  compression_xbzrle=False, compression_xbzrle_cache=10,
-                 multifd=False, multifd_channels=2):
+                 multifd=False, multifd_channels=2,
+                 dirty_limit=False, x_vcpu_dirty_limit_period=500,
+                 vcpu_dirty_limit=1):

         self._name = name
@@ -60,6 +62,10 @@ class Scenario(object):
         self._multifd = multifd
         self._multifd_channels = multifd_channels

+        self._dirty_limit = dirty_limit
+        self._x_vcpu_dirty_limit_period = x_vcpu_dirty_limit_period
+        self._vcpu_dirty_limit = vcpu_dirty_limit
+
     def serialize(self):
         return {
             "name": self._name,
@@ -79,6 +85,9 @@ class Scenario(object):
             "compression_xbzrle_cache": self._compression_xbzrle_cache,
             "multifd": self._multifd,
             "multifd_channels": self._multifd_channels,
+            "dirty_limit": self._dirty_limit,
+            "x_vcpu_dirty_limit_period": self._x_vcpu_dirty_limit_period,
+            "vcpu_dirty_limit": self._vcpu_dirty_limit,
         }

     @classmethod
diff --git a/tests/migration/guestperf/shell.py b/tests/migration/guestperf/shell.py
index 8a809e3dda..c85d89efec 100644
--- a/tests/migration/guestperf/shell.py
+++ b/tests/migration/guestperf/shell.py
@@ -60,6 +60,8 @@ class BaseShell(object):
         parser.add_argument("--prealloc-pages", dest="prealloc_pages", default=False)
         parser.add_argument("--huge-pages", dest="huge_pages", default=False)
         parser.add_argument("--locked-pages", dest="locked_pages", default=False)
+        parser.add_argument("--dirty-ring-size", dest="dirty_ring_size",
+                            default=0, type=int)

         self._parser = parser
@@ -89,7 +91,9 @@ class BaseShell(object):
                         locked_pages=args.locked_pages,
                         huge_pages=args.huge_pages,
-                        prealloc_pages=args.prealloc_pages)
+                        prealloc_pages=args.prealloc_pages,
+
+                        dirty_ring_size=args.dirty_ring_size)


 class Shell(BaseShell):
@@ -127,6 +131,17 @@ class Shell(BaseShell):
         parser.add_argument("--multifd-channels", dest="multifd_channels",
                             default=2, type=int)

+        parser.add_argument("--dirty-limit", dest="dirty_limit", default=False,
+                            action="store_true")
+
+        parser.add_argument("--x-vcpu-dirty-limit-period",
+                            dest="x_vcpu_dirty_limit_period",
+                            default=500, type=int)
+
+        parser.add_argument("--vcpu-dirty-limit",
+                            dest="vcpu_dirty_limit",
+                            default=1, type=int)
+
     def get_scenario(self, args):
         return Scenario(name="perfreport",
                         downtime=args.downtime,
@@ -150,7 +165,12 @@ class Shell(BaseShell):
                         compression_xbzrle_cache=args.compression_xbzrle_cache,

                         multifd=args.multifd,
-                        multifd_channels=args.multifd_channels)
+                        multifd_channels=args.multifd_channels,
+
+                        dirty_limit=args.dirty_limit,
+                        x_vcpu_dirty_limit_period=\
+                            args.x_vcpu_dirty_limit_period,
+                        vcpu_dirty_limit=args.vcpu_dirty_limit)

     def run(self, argv):
         args = self._parser.parse_args(argv)
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index e803b46039..5752412b64 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -3091,6 +3091,166 @@ static void test_vcpu_dirty_limit(void)
     dirtylimit_stop_vm(vm);
 }

+static void migrate_dirty_limit_wait_showup(QTestState *from,
+                                            const int64_t period,
+                                            const int64_t value)
+{
+    /* Enable dirty limit capability */
+    migrate_set_capability(from, "dirty-limit", true);
+
+    /* Set dirty limit parameters */
+    migrate_set_parameter_int(from, "x-vcpu-dirty-limit-period", period);
+    migrate_set_parameter_int(from, "vcpu-dirty-limit", value);
+
+    /* Make sure migrate can't converge */
+    migrate_ensure_non_converge(from);
+
+    /* To check limit rate after precopy */
+    migrate_set_capability(from, "pause-before-switchover", true);
+
+    /* Wait for the serial output from the source */
+    wait_for_serial("src_serial");
+}
+
+/*
+ * This test does:
+ *  source                          destination
+ *  start vm
+ *                                  start incoming vm
+ *  migrate
+ *  wait dirty limit to begin
+ *  cancel migrate
+ *  cancellation check
+ *                                  restart incoming vm
+ *  migrate
+ *  wait dirty limit to begin
+ *  wait pre-switchover event
+ *  convergence condition check
+ *
+ * And see if dirty limit migration works correctly.
+ * This test case involves many passes, so it runs in slow mode only.
+ */
+static void test_migrate_dirty_limit(void)
+{
+    g_autofree char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs);
+    QTestState *from, *to;
+    int64_t remaining;
+    uint64_t throttle_us_per_full;
+    /*
+     * We want the test to be stable and as fast as possible.
+     * E.g., with 1Gb/s bandwidth migration may pass without dirty limit,
+     * so we need to decrease the bandwidth.
+     */
+    const int64_t dirtylimit_period = 1000, dirtylimit_value = 50;
+    const int64_t max_bandwidth = 400000000; /* ~400Mb/s */
+    const int64_t downtime_limit = 250; /* 250ms */
+    /*
+     * We migrate through unix-socket (> 500Mb/s).
+     * Thus, expected migration speed ~= bandwidth limit (< 500Mb/s).
+     * So, we can predict expected_threshold.
+     */
+    const int64_t expected_threshold = max_bandwidth * downtime_limit / 1000;
+    int max_try_count = 10;
+    MigrateCommon args = {
+        .start = {
+            .hide_stderr = true,
+            .use_dirty_ring = true,
+        },
+        .listen_uri = uri,
+        .connect_uri = uri,
+    };
+
+    /* Start src, dst vm */
+    if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
+        return;
+    }
+
+    /* Prepare for dirty limit migration and wait for the src vm to show up */
+    migrate_dirty_limit_wait_showup(from, dirtylimit_period, dirtylimit_value);
+
+    /* Start migrate */
+    migrate_qmp(from, uri, "{}");
+
+    /* Wait for the dirty limit throttle to begin */
+    throttle_us_per_full = 0;
+    while (throttle_us_per_full == 0) {
+        throttle_us_per_full =
+            read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+        usleep(100);
+        g_assert_false(got_src_stop);
+    }
+
+    /* Now cancel migrate and wait for the dirty limit throttle to switch off */
+    migrate_cancel(from);
+    wait_for_migration_status(from, "cancelled", NULL);
+
+    /* Check if the dirty limit throttle switched off, set timeout 1ms */
+    do {
+        throttle_us_per_full =
+            read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+        usleep(100);
+        g_assert_false(got_src_stop);
+    } while (throttle_us_per_full != 0 && --max_try_count);
+
+    /* Assert dirty limit is not in service */
+    g_assert_cmpint(throttle_us_per_full, ==, 0);
+
+    args = (MigrateCommon) {
+        .start = {
+            .only_target = true,
+            .use_dirty_ring = true,
+        },
+        .listen_uri = uri,
+        .connect_uri = uri,
+    };
+
+    /* Restart dst vm; the src vm already showed up so we needn't wait anymore */
+    if (test_migrate_start(&from, &to, args.listen_uri, &args.start)) {
+        return;
+    }
+
+    /* Start migrate */
+    migrate_qmp(from, uri, "{}");
+
+    /* Wait for the dirty limit throttle to begin */
+    throttle_us_per_full = 0;
+    while (throttle_us_per_full == 0) {
+        throttle_us_per_full =
+            read_migrate_property_int(from, "dirty-limit-throttle-time-per-round");
+        usleep(100);
+        g_assert_false(got_src_stop);
+    }
+
+    /*
+     * The dirty limit rate should equal the return value of
+     * query-vcpu-dirty-limit if the dirty limit cap is set
+     */
+    g_assert_cmpint(dirtylimit_value, ==, get_limit_rate(from));
+
+    /* Now that we have tested that dirty limit works, let it converge */
+    migrate_set_parameter_int(from, "downtime-limit", downtime_limit);
+    migrate_set_parameter_int(from, "max-bandwidth", max_bandwidth);
+
+    /*
+     * Wait for pre-switchover status to check if migration
+     * satisfies the convergence condition
+     */
+    wait_for_migration_status(from, "pre-switchover", NULL);
+
+    remaining = read_ram_property_int(from, "remaining");
+    g_assert_cmpint(remaining, <,
+                    (expected_threshold + expected_threshold / 100));
+
+    migrate_continue(from, "pre-switchover");
+
+    qtest_qmp_eventwait(to, "RESUME");
+
+    wait_for_serial("dest_serial");
+    wait_for_migration_complete(from);
+
+    test_migrate_end(from, to, true);
+}
+
 static bool kvm_dirty_ring_supported(void)
 {
 #if defined(__linux__) && defined(HOST_X86_64)
@@ -3301,6 +3461,10 @@ int main(int argc, char **argv)
      */
     if (g_test_slow()) {
         qtest_add_func("/migration/auto_converge", test_migrate_auto_converge);
+        if (g_str_equal(arch, "x86_64") &&
+            has_kvm && kvm_dirty_ring_supported()) {
+            qtest_add_func("/migration/dirty_limit", test_migrate_dirty_limit);
+        }
     }
     qtest_add_func("/migration/multifd/tcp/plain/none",
                    test_multifd_tcp_none);
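
Taken together, the guestperf changes make a dirty-limit run drivable from the command line. A hypothetical invocation, assuming the usual guestperf prerequisites (QEMU binary, kernel, and initrd for the stress workload) are supplied through the existing options; only the flags added by this series are shown, with example values:

    # Hypothetical run; --dirty-ring-size is required whenever
    # --dirty-limit is used (the engine raises an exception otherwise).
    $ ./tests/migration/guestperf.py \
          --dirty-ring-size 4096 \
          --dirty-limit \
          --x-vcpu-dirty-limit-period 1000 \
          --vcpu-dirty-limit 50

As for the convergence check in the qtest: expected_threshold = 400000000 * 250 / 1000 = 100000000 bytes, so the test passes only if the RAM remaining at pre-switchover is no more than 1% above what the capped bandwidth could transfer within the 250 ms downtime window.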