From d00dd63135f7e18ddca2642d1933da1507f3f1cd Mon Sep 17 00:00:00 2001
From: Vladimir Sementsov-Ogievskiy
Date: Fri, 5 Feb 2021 19:37:18 +0300
Subject: iotests/264: add mirror-cancel test-case

Check that cancel doesn't wait for the 10s nbd reconnect timeout.

Signed-off-by: Vladimir Sementsov-Ogievskiy
Reviewed-by: Eric Blake
Message-Id: <20210205163720.887197-9-vsementsov@virtuozzo.com>
Signed-off-by: Eric Blake
---
 tests/qemu-iotests/264     | 38 ++++++++++++++++++++++++++++++--------
 tests/qemu-iotests/264.out |  4 ++--
 2 files changed, 32 insertions(+), 10 deletions(-)

(limited to 'tests/qemu-iotests')

diff --git a/tests/qemu-iotests/264 b/tests/qemu-iotests/264
index 6feeaa4056..347e53add5 100755
--- a/tests/qemu-iotests/264
+++ b/tests/qemu-iotests/264
@@ -27,25 +27,26 @@ from iotests import qemu_img_create, file_path, qemu_nbd_popen
 
 disk_a, disk_b, nbd_sock = file_path('disk_a', 'disk_b', 'nbd-sock')
 nbd_uri = 'nbd+unix:///?socket=' + nbd_sock
-size = 5 * 1024 * 1024
 wait_limit = 3.0
 wait_step = 0.2
 
 
 class TestNbdReconnect(iotests.QMPTestCase):
-    def setUp(self):
-        qemu_img_create('-f', iotests.imgfmt, disk_a, str(size))
-        qemu_img_create('-f', iotests.imgfmt, disk_b, str(size))
+    def init_vm(self, disk_size):
+        qemu_img_create('-f', iotests.imgfmt, disk_a, str(disk_size))
+        qemu_img_create('-f', iotests.imgfmt, disk_b, str(disk_size))
         self.vm = iotests.VM().add_drive(disk_a)
         self.vm.launch()
-        self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(size))
+        self.vm.hmp_qemu_io('drive0', 'write 0 {}'.format(disk_size))
 
     def tearDown(self):
         self.vm.shutdown()
         os.remove(disk_a)
         os.remove(disk_b)
 
-    def test(self):
+    def start_job(self, job):
+        """Start job with nbd target and kill the server"""
+        assert job in ('blockdev-backup', 'blockdev-mirror')
         with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
             result = self.vm.qmp('blockdev-add',
                                  **{'node_name': 'backup0',
@@ -55,7 +56,7 @@ class TestNbdReconnect(iotests.QMPTestCase):
                                                        'path': nbd_sock},
                                             'reconnect-delay': 10}})
             self.assert_qmp(result, 'return', {})
-            result = self.vm.qmp('blockdev-backup', device='drive0',
+            result = self.vm.qmp(job, device='drive0',
                                  sync='full', target='backup0',
                                  speed=(1 * 1024 * 1024))
             self.assert_qmp(result, 'return', {})
@@ -73,7 +74,8 @@ class TestNbdReconnect(iotests.QMPTestCase):
 
             jobs = self.vm.qmp('query-block-jobs')['return']
             # Check that job is still in progress
-            self.assertTrue(jobs and jobs[0]['offset'] < jobs[0]['len'])
+            self.assertTrue(jobs)
+            self.assertTrue(jobs[0]['offset'] < jobs[0]['len'])
 
             result = self.vm.qmp('block-job-set-speed', device='drive0', speed=0)
             self.assert_qmp(result, 'return', {})
@@ -81,12 +83,32 @@ class TestNbdReconnect(iotests.QMPTestCase):
         # Emulate server down time for 1 second
         time.sleep(1)
 
+    def test_backup(self):
+        size = 5 * 1024 * 1024
+        self.init_vm(size)
+        self.start_job('blockdev-backup')
+
         with qemu_nbd_popen('-k', nbd_sock, '-f', iotests.imgfmt, disk_b):
             e = self.vm.event_wait('BLOCK_JOB_COMPLETED')
             self.assertEqual(e['data']['offset'], size)
             result = self.vm.qmp('blockdev-del', node_name='backup0')
             self.assert_qmp(result, 'return', {})
 
+    def test_mirror_cancel(self):
+        # Mirror speed limit doesn't work well enough; it seems that mirror
+        # will run many parallel requests anyway. MAX_IN_FLIGHT is 16 and
+        # MAX_IO_BYTES is 1M in mirror.c, so let's use a 20M disk.
+        self.init_vm(20 * 1024 * 1024)
+        self.start_job('blockdev-mirror')
+
+        result = self.vm.qmp('block-job-cancel', device='drive0')
+        self.assert_qmp(result, 'return', {})
+
+        start_t = time.time()
+        self.vm.event_wait('BLOCK_JOB_CANCELLED')
+        delta_t = time.time() - start_t
+        self.assertTrue(delta_t < 2.0)
+
 
 if __name__ == '__main__':
     iotests.main(supported_fmts=['qcow2'])
diff --git a/tests/qemu-iotests/264.out b/tests/qemu-iotests/264.out
index ae1213e6f8..fbc63e62f8 100644
--- a/tests/qemu-iotests/264.out
+++ b/tests/qemu-iotests/264.out
@@ -1,5 +1,5 @@
-.
+..
 ----------------------------------------------------------------------
-Ran 1 tests
+Ran 2 tests
 
 OK
-- 
cgit v1.2.3
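
A note on the disk sizing in test_mirror_cancel: the in-code comment cites MAX_IN_FLIGHT (16) and MAX_IO_BYTES (1M) from mirror.c, which bound how much data mirror may keep in flight regardless of the 1 MiB/s speed limit set in start_job(). Below is a minimal sketch of that arithmetic, not part of the patch; the constants are copied from the comment and the variable names are illustrative only.

    # Rough sizing check behind the 20M figure: even when throttled, mirror can
    # keep MAX_IN_FLIGHT requests of up to MAX_IO_BYTES each in flight, so the
    # disk must be noticeably larger than that window for the job to still be
    # running when block-job-cancel is issued.
    MAX_IN_FLIGHT = 16                     # per the comment, from mirror.c
    MAX_IO_BYTES = 1 * 1024 * 1024         # 1M, per the comment
    in_flight_window = MAX_IN_FLIGHT * MAX_IO_BYTES    # 16M
    disk_size = 20 * 1024 * 1024                       # size used by test_mirror_cancel
    assert disk_size > in_flight_window

That margin is presumably why the 5M size used by test_backup is not reused here: a smaller disk could let the mirror job finish before the cancel lands, defeating the cancel-latency check.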