Diffstat (limited to 'block/throttle-groups.c')
-rw-r--r-- | block/throttle-groups.c | 91
1 file changed, 68 insertions, 23 deletions
diff --git a/block/throttle-groups.c b/block/throttle-groups.c
index b73e7a800b..a181cb1dee 100644
--- a/block/throttle-groups.c
+++ b/block/throttle-groups.c
@@ -240,7 +240,7 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
     bool must_wait;
 
-    if (blkp->io_limits_disabled) {
+    if (atomic_read(&blkp->io_limits_disabled)) {
         return false;
     }
 
@@ -260,6 +260,25 @@ static bool throttle_group_schedule_timer(BlockBackend *blk, bool is_write)
     return must_wait;
 }
 
+/* Start the next pending I/O request for a BlockBackend. Return whether
+ * any request was actually pending.
+ *
+ * @blk:       the current BlockBackend
+ * @is_write:  the type of operation (read/write)
+ */
+static bool coroutine_fn throttle_group_co_restart_queue(BlockBackend *blk,
+                                                         bool is_write)
+{
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    bool ret;
+
+    qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
+    ret = qemu_co_queue_next(&blkp->throttled_reqs[is_write]);
+    qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
+
+    return ret;
+}
+
 /* Look for the next pending I/O request and schedule it.
  *
  * This assumes that tg->lock is held.
@@ -287,12 +306,12 @@ static void schedule_next_request(BlockBackend *blk, bool is_write)
     if (!must_wait) {
         /* Give preference to requests from the current blk */
         if (qemu_in_coroutine() &&
-            qemu_co_queue_next(&blkp->throttled_reqs[is_write])) {
+            throttle_group_co_restart_queue(blk, is_write)) {
             token = blk;
         } else {
             ThrottleTimers *tt = &blk_get_public(token)->throttle_timers;
             int64_t now = qemu_clock_get_ns(tt->clock_type);
-            timer_mod(tt->timers[is_write], now + 1);
+            timer_mod(tt->timers[is_write], now);
             tg->any_timer_armed[is_write] = true;
         }
         tg->tokens[is_write] = token;
@@ -326,7 +345,10 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     if (must_wait || blkp->pending_reqs[is_write]) {
         blkp->pending_reqs[is_write]++;
         qemu_mutex_unlock(&tg->lock);
-        qemu_co_queue_wait(&blkp->throttled_reqs[is_write], NULL);
+        qemu_co_mutex_lock(&blkp->throttled_reqs_lock);
+        qemu_co_queue_wait(&blkp->throttled_reqs[is_write],
+                           &blkp->throttled_reqs_lock);
+        qemu_co_mutex_unlock(&blkp->throttled_reqs_lock);
         qemu_mutex_lock(&tg->lock);
         blkp->pending_reqs[is_write]--;
     }
@@ -340,15 +362,50 @@ void coroutine_fn throttle_group_co_io_limits_intercept(BlockBackend *blk,
     qemu_mutex_unlock(&tg->lock);
 }
 
+typedef struct {
+    BlockBackend *blk;
+    bool is_write;
+} RestartData;
+
+static void coroutine_fn throttle_group_restart_queue_entry(void *opaque)
+{
+    RestartData *data = opaque;
+    BlockBackend *blk = data->blk;
+    bool is_write = data->is_write;
+    BlockBackendPublic *blkp = blk_get_public(blk);
+    ThrottleGroup *tg = container_of(blkp->throttle_state, ThrottleGroup, ts);
+    bool empty_queue;
+
+    empty_queue = !throttle_group_co_restart_queue(blk, is_write);
+
+    /* If the request queue was empty then we have to take care of
+     * scheduling the next one */
+    if (empty_queue) {
+        qemu_mutex_lock(&tg->lock);
+        schedule_next_request(blk, is_write);
+        qemu_mutex_unlock(&tg->lock);
+    }
+}
+
+static void throttle_group_restart_queue(BlockBackend *blk, bool is_write)
+{
+    Coroutine *co;
+    RestartData rd = {
+        .blk = blk,
+        .is_write = is_write
+    };
+
+    co = qemu_coroutine_create(throttle_group_restart_queue_entry, &rd);
+    aio_co_enter(blk_get_aio_context(blk), co);
+}
+
 void throttle_group_restart_blk(BlockBackend *blk)
 {
     BlockBackendPublic *blkp = blk_get_public(blk);
-    int i;
 
-    for (i = 0; i < 2; i++) {
-        while (qemu_co_enter_next(&blkp->throttled_reqs[i])) {
-            ;
-        }
+    if (blkp->throttle_state) {
+        throttle_group_restart_queue(blk, 0);
+        throttle_group_restart_queue(blk, 1);
     }
 }
 
@@ -376,8 +433,7 @@ void throttle_group_config(BlockBackend *blk, ThrottleConfig *cfg)
     throttle_config(ts, tt, cfg);
     qemu_mutex_unlock(&tg->lock);
 
-    qemu_co_enter_next(&blkp->throttled_reqs[0]);
-    qemu_co_enter_next(&blkp->throttled_reqs[1]);
+    throttle_group_restart_blk(blk);
 }
 
 /* Get the throttle configuration from a particular group. Similar to
@@ -408,7 +464,6 @@ static void timer_cb(BlockBackend *blk, bool is_write)
     BlockBackendPublic *blkp = blk_get_public(blk);
     ThrottleState *ts = blkp->throttle_state;
    ThrottleGroup *tg = container_of(ts, ThrottleGroup, ts);
-    bool empty_queue;
 
     /* The timer has just been fired, so we can update the flag */
     qemu_mutex_lock(&tg->lock);
@@ -416,17 +471,7 @@ static void timer_cb(BlockBackend *blk, bool is_write)
     qemu_mutex_unlock(&tg->lock);
 
     /* Run the request that was waiting for this timer */
-    aio_context_acquire(blk_get_aio_context(blk));
-    empty_queue = !qemu_co_enter_next(&blkp->throttled_reqs[is_write]);
-    aio_context_release(blk_get_aio_context(blk));
-
-    /* If the request queue was empty then we have to take care of
-     * scheduling the next one */
-    if (empty_queue) {
-        qemu_mutex_lock(&tg->lock);
-        schedule_next_request(blk, is_write);
-        qemu_mutex_unlock(&tg->lock);
-    }
+    throttle_group_restart_queue(blk, is_write);
 }
 
 static void read_timer_cb(void *opaque)
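The recurring pattern in this patch is a CoQueue paired with a CoMutex: a request coroutine parks itself on blkp->throttled_reqs[is_write] while holding blkp->throttled_reqs_lock, and qemu_co_queue_wait() releases the mutex while the coroutine sleeps and takes it again on wakeup, much like a condition variable. Below is a minimal sketch of that wait/wake pairing, assuming QEMU's qemu/coroutine.h API; the ThrottledQueue wrapper and the helper names are hypothetical, introduced here only for illustration.

#include "qemu/osdep.h"
#include "qemu/coroutine.h"

/* Hypothetical wrapper mirroring blkp->throttled_reqs[] plus
 * blkp->throttled_reqs_lock from the patch. */
typedef struct {
    CoMutex lock;   /* protects 'queue' against concurrent AioContexts */
    CoQueue queue;  /* coroutines waiting for an I/O slot */
} ThrottledQueue;

static void throttled_queue_init(ThrottledQueue *tq)
{
    qemu_co_mutex_init(&tq->lock);
    qemu_co_queue_init(&tq->queue);
}

/* Park the calling coroutine, as throttle_group_co_io_limits_intercept()
 * does.  qemu_co_queue_wait() drops the mutex while sleeping and
 * re-acquires it before returning. */
static void coroutine_fn throttled_queue_wait(ThrottledQueue *tq)
{
    qemu_co_mutex_lock(&tq->lock);
    qemu_co_queue_wait(&tq->queue, &tq->lock);
    qemu_co_mutex_unlock(&tq->lock);
}

/* Wake the next parked coroutine, as throttle_group_co_restart_queue()
 * does.  Returns false if nothing was waiting. */
static bool coroutine_fn throttled_queue_next(ThrottledQueue *tq)
{
    bool woke;

    qemu_co_mutex_lock(&tq->lock);
    woke = qemu_co_queue_next(&tq->queue);
    qemu_co_mutex_unlock(&tq->lock);
    return woke;
}

Holding the CoMutex on both the wait and the wake side is what lets the patch drop qemu_co_enter_next(), which pops the queue with no lock and is therefore only safe when every user of the queue runs in the same AioContext.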
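A second pattern worth noting: qemu_co_queue_next() must run in coroutine context, so timer_cb(), an ordinary callback, can no longer pop the queue directly as the removed qemu_co_enter_next() call did. The patch instead spawns a one-shot coroutine and enters it in the BlockBackend's AioContext. Here is a sketch of that dispatch; apart from the QEMU coroutine/AioContext API, all names are hypothetical stand-ins for the patch's RestartData machinery.

#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/coroutine.h"

/* Hypothetical payload, mirroring the patch's RestartData. */
typedef struct {
    CoQueue *queue;
    CoMutex *lock;   /* mutex guarding 'queue' */
} PopWork;

/* Runs in coroutine context, where qemu_co_queue_next() is allowed. */
static void coroutine_fn pop_entry(void *opaque)
{
    PopWork *w = opaque;
    bool woke;

    qemu_co_mutex_lock(w->lock);
    woke = qemu_co_queue_next(w->queue);
    qemu_co_mutex_unlock(w->lock);

    if (!woke) {
        /* Queue was empty; at this point the patch calls
         * schedule_next_request() under tg->lock. */
    }
}

/* Callable from plain (non-coroutine) context such as a timer callback. */
static void pop_in_coroutine(AioContext *ctx, CoQueue *queue, CoMutex *lock)
{
    PopWork w = { .queue = queue, .lock = lock };
    Coroutine *co = qemu_coroutine_create(pop_entry, &w);

    /* The payload lives on this stack frame, as in the patch's
     * throttle_group_restart_queue(); this relies on the coroutine
     * consuming it before the frame goes away. */
    aio_co_enter(ctx, co);
}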