about summary refs log tree commit diff
path: root/block/throttle-groups.c
diff options
context:
space:
mode:
authorStefan Hajnoczi <stefanha@redhat.com>2017-11-16 11:21:50 +0000
committerStefan Hajnoczi <stefanha@redhat.com>2017-11-16 14:12:57 +0000
commit341e0b5658681f46680024cdbfc998717d85cc35 (patch)
tree8fd842bb0bc340a224185f706cfda6cf4fc73e0b /block/throttle-groups.c
parent8048082f7a11040a366942a2de8abb4c3d0020c9 (diff)
throttle-groups: forget timer and schedule next TGM on detach
tg->any_timer_armed[] must be cleared when detaching pending timers from the AioContext. Failure to do so leads to hung I/O because it looks like there are still timers pending when in fact they have been removed. Other ThrottleGroupMembers might have requests pending too, so it's necessary to schedule the next TGM so it can set a timer.

This patch fixes hung I/O when QEMU is launched with drives that are in the same throttling group:

  (guest)$ dd if=/dev/zero of=/dev/vdb oflag=direct bs=512 &
  (guest)$ dd if=/dev/zero of=/dev/vdc oflag=direct bs=512 &
  (qemu) stop
  (qemu) cont
  ...I/O is stuck...

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Message-id: 20171116112150.27607-1-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'block/throttle-groups.c')
-rw-r--r--block/throttle-groups.c12
1 file changed, 12 insertions, 0 deletions
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index 2587f19ca3..f26bcb5eee 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -593,13 +593,25 @@ void throttle_group_attach_aio_context(ThrottleGroupMember *tgm,
void throttle_group_detach_aio_context(ThrottleGroupMember *tgm)
{
    ThrottleGroup *tg = container_of(tgm->throttle_state, ThrottleGroup, ts);
    ThrottleTimers *tt = &tgm->throttle_timers;
    int dir;

    /* The caller must already have drained this member's requests */
    assert(tgm->pending_reqs[0] == 0 && tgm->pending_reqs[1] == 0);
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[0]));
    assert(qemu_co_queue_empty(&tgm->throttled_reqs[1]));

    /*
     * For each direction with an armed timer: forget that a timer is
     * armed for the group and hand off to another ThrottleGroupMember,
     * so its pending requests are not left waiting on a timer that is
     * about to be detached.
     */
    qemu_mutex_lock(&tg->lock);
    for (dir = 0; dir < 2; dir++) {
        if (!timer_pending(tt->timers[dir])) {
            continue;
        }
        tg->any_timer_armed[dir] = false;
        schedule_next_request(tgm, dir);
    }
    qemu_mutex_unlock(&tg->lock);

    throttle_timers_detach_aio_context(tt);
    tgm->aio_context = NULL;
}