author     Alberto Garcia <berto@igalia.com>        2018-08-02 17:50:23 +0300
committer  Kevin Wolf <kwolf@redhat.com>            2018-08-15 12:50:39 +0200
commit     ef7a6a3c2a7725b169d054aa7487f9738bd6c4a6 (patch)
tree       5a753c244aab646df4da696fe32c75d3a0b86126 /tests/qemu-iotests
parent     f62492bb8d1ea7f7e156ffbdf411de46107072c5 (diff)
qemu-iotests: Test removing a throttle group member with a pending timer
A throttle group can have several members, and each one of them can
have several pending requests in the queue.
Requests are processed in round-robin fashion: the algorithm selects the
drive that will run the next request and sets a timer on it. Once the
timer fires and the throttled request has run, the next drive in the
group is selected and a new timer is set.
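
To make the round-robin/timer mechanism described above easier to follow,
here is a minimal Python sketch of the idea. It is not QEMU's implementation
(the real logic is C code in block/throttle-groups.c); ThrottleGroup, Member,
timer_armed and next_member_with_requests are purely illustrative names.

    # Illustrative sketch only: simplified round-robin scheduling of throttled
    # requests across the members of a group. These are hypothetical names,
    # not QEMU's real data structures.
    import collections

    class Member:
        def __init__(self, name):
            self.name = name
            self.queue = collections.deque()  # pending throttled requests
            self.timer_armed = False          # at most one member owns the timer

    class ThrottleGroup:
        def __init__(self, members):
            self.members = list(members)
            self.rr_index = 0                 # round-robin position

        def next_member_with_requests(self):
            # Pick the next member, in round-robin order, with queued requests.
            for i in range(len(self.members)):
                m = self.members[(self.rr_index + i) % len(self.members)]
                if m.queue:
                    self.rr_index = (self.rr_index + i + 1) % len(self.members)
                    return m
            return None

        def timer_fired(self, member):
            # Run one throttled request on the member whose timer fired, then
            # arm a timer on the next member that still has queued requests.
            member.timer_armed = False
            if member.queue:
                member.queue.popleft()()      # run the oldest request (a callable here)
            nxt = self.next_member_with_requests()
            if nxt:
                nxt.timer_armed = True        # the next wake-up happens on this member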
If the user removed a drive from a group while that drive had a timer
set, the code did not set up a new timer on one of the remaining members
of the group, freezing their I/O.
This problem was fixed in 6fccbb475bc6effc313ee9481726a1748b6dae57,
and this patch adds a new test case that reproduces this exact
scenario.
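
In terms of the sketch above, the bug boils down to a missing step in a
hypothetical remove_member() helper: if the departing member owns the group's
only pending timer, the timer must be re-armed on another member that still
has queued requests, otherwise nothing ever wakes them up. The following is
only a rough illustration of the idea behind the fix, not the actual change
made in 6fccbb475bc6:

    # Illustrative sketch only, continuing the hypothetical ThrottleGroup above.
    # The departing member may own the group's only pending timer; re-arm it on
    # another member with queued requests so the group keeps making progress.
    def remove_member(group, member):
        had_timer = member.timer_armed
        member.timer_armed = False
        group.members.remove(member)
        if had_timer:
            nxt = group.next_member_with_requests()
            if nxt:
                nxt.timer_armed = True  # without this, the remaining members' I/O froze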
Signed-off-by: Alberto Garcia <berto@igalia.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
Diffstat (limited to 'tests/qemu-iotests')
-rwxr-xr-x  tests/qemu-iotests/093      52
-rw-r--r--  tests/qemu-iotests/093.out   4
2 files changed, 54 insertions, 2 deletions
diff --git a/tests/qemu-iotests/093 b/tests/qemu-iotests/093
index 68e344f8c1..b26cd34e32 100755
--- a/tests/qemu-iotests/093
+++ b/tests/qemu-iotests/093
@@ -208,6 +208,58 @@ class ThrottleTestCase(iotests.QMPTestCase):
                 limits[tk] = rate
             self.do_test_throttle(ndrives, 5, limits)
 
+    # Test that removing a drive from a throttle group should not
+    # affect the remaining members of the group.
+    # https://bugzilla.redhat.com/show_bug.cgi?id=1535914
+    def test_remove_group_member(self):
+        # Create a throttle group with two drives
+        # and set a 4 KB/s read limit.
+        params = {"bps": 0,
+                  "bps_rd": 4096,
+                  "bps_wr": 0,
+                  "iops": 0,
+                  "iops_rd": 0,
+                  "iops_wr": 0 }
+        self.configure_throttle(2, params)
+
+        # Read 4KB from drive0. This is performed immediately.
+        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")
+
+        # Read 4KB again. The I/O limit has been exceeded so this
+        # request is throttled and a timer is set to wake it up.
+        self.vm.hmp_qemu_io("drive0", "aio_read 0 4096")
+
+        # Read from drive1. We're still over the I/O limit so this
+        # request is also throttled. There's no timer set in drive1
+        # because there's already one in drive0. Once the timer in
+        # drive0 fires and its throttled request is processed then the
+        # next request in the queue will be scheduled: this one.
+        self.vm.hmp_qemu_io("drive1", "aio_read 0 4096")
+
+        # At this point only the first 4KB have been read from drive0.
+        # The other requests are throttled.
+        self.assertEqual(self.blockstats('drive0')[0], 4096)
+        self.assertEqual(self.blockstats('drive1')[0], 0)
+
+        # Remove drive0 from the throttle group and disable its I/O limits.
+        # drive1 remains in the group with a throttled request.
+        params['bps_rd'] = 0
+        params['device'] = 'drive0'
+        result = self.vm.qmp("block_set_io_throttle", conv_keys=False, **params)
+        self.assert_qmp(result, 'return', {})
+
+        # Removing the I/O limits from drive0 drains its pending request.
+        # The read request in drive1 is still throttled.
+        self.assertEqual(self.blockstats('drive0')[0], 8192)
+        self.assertEqual(self.blockstats('drive1')[0], 0)
+
+        # Advance the clock 5 seconds. This completes the request in drive1
+        self.vm.qtest("clock_step %d" % (5 * nsec_per_sec))
+
+        # Now all requests have been processed.
+        self.assertEqual(self.blockstats('drive0')[0], 8192)
+        self.assertEqual(self.blockstats('drive1')[0], 4096)
+
 class ThrottleTestCoroutine(ThrottleTestCase):
     test_img = "null-co://"
diff --git a/tests/qemu-iotests/093.out b/tests/qemu-iotests/093.out
index 594c16f49f..36376bed87 100644
--- a/tests/qemu-iotests/093.out
+++ b/tests/qemu-iotests/093.out
@@ -1,5 +1,5 @@
-........
+..........
 ----------------------------------------------------------------------
-Ran 8 tests
+Ran 10 tests
 
 OK