author | Stefan Hajnoczi <stefanha@redhat.com> | 2013-04-11 15:41:13 +0200 |
---|---|---|
committer | Stefan Hajnoczi <stefanha@redhat.com> | 2013-08-19 15:45:34 +0200 |
commit | 88266f5aa70fa71fd5cc20aa4dbeb7a7bd8d2e92 | |
tree | 59a8cc958f76256abfa69357edc131b69c566d8d /block.c | |
parent | e1b5c52e04d04bb93546c6e37e8884889d047cb1 | |
block: stop relying on io_flush() in bdrv_drain_all()
If a block driver has no file descriptors to monitor but there are still
active requests, it can return 1 from .io_flush(). This is used to spin
during synchronous I/O.
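For context, an .io_flush() callback of this era is just a "do I have requests in flight?" predicate registered alongside the fd handlers. A minimal sketch with a hypothetical driver state (names are illustrative, not from this patch):

```c
/* Hypothetical driver state: a descriptor plus an in-flight counter. */
typedef struct {
    int fd;          /* descriptor registered with the AIO event loop */
    int in_flight;   /* outstanding asynchronous requests */
} MyDriverState;

/* .io_flush() callback: returning non-zero tells the synchronous I/O
 * loop to keep spinning even when the fd itself is idle. */
static int mydriver_io_flush(void *opaque)
{
    MyDriverState *s = opaque;

    return s->in_flight > 0;
}
```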
Stop relying on .io_flush() and instead check
QLIST_EMPTY(&bs->tracked_requests) to decide whether there are active
requests.
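tracked_requests is the list the coroutine read/write path already maintains for request serialization: every request is linked in for its entire lifetime, so list emptiness is an accurate "no active requests" test. Roughly, paraphrased from block.c around this commit:

```c
/* Paraphrased from block.c of this period: each read/write request is
 * wrapped in a BdrvTrackedRequest that sits on bs->tracked_requests
 * from submission until completion. */
static void tracked_request_begin(BdrvTrackedRequest *req,
                                  BlockDriverState *bs,
                                  int64_t sector_num,
                                  int nb_sectors, bool is_write)
{
    *req = (BdrvTrackedRequest){
        .bs = bs,
        .sector_num = sector_num,
        .nb_sectors = nb_sectors,
        .is_write = is_write,
        .co = qemu_coroutine_self(),
    };

    qemu_co_queue_init(&req->wait_queue);

    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
}
```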
This is the first step in removing .io_flush() so that event loops no
longer need to have the concept of synchronous I/O. Eventually we may
be able to kill synchronous I/O completely by running everything in a
coroutine, but that is future work.
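Concretely, "synchronous I/O" here means emulating bdrv_read()/bdrv_write() on top of the coroutine path by spinning in the event loop until the request finishes. Condensed and paraphrased from bdrv_rw_co() in block.c of this period (error handling omitted):

```c
/* Paraphrased: run a coroutine request and spin until it completes. */
static int bdrv_rw_co(BlockDriverState *bs, int64_t sector_num, uint8_t *buf,
                      int nb_sectors, bool is_write)
{
    RwCo rwco = { /* ... request parameters ..., */ .ret = NOT_DONE };

    if (qemu_in_coroutine()) {
        /* Fast path: already in coroutine context, just call in. */
        bdrv_rw_co_entry(&rwco);
    } else {
        Coroutine *co = qemu_coroutine_create(bdrv_rw_co_entry);
        qemu_coroutine_enter(co, &rwco);
        while (rwco.ret == NOT_DONE) {
            qemu_aio_wait();   /* the spin that .io_flush() keeps alive */
        }
    }
    return rwco.ret;
}
```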
Note this patch moves bs->throttled_reqs initialization to bdrv_new() so
that bdrv_requests_pending(bs) can safely access it. In practice bs is
allocated with g_malloc0(), so the memory is already zeroed, but it is
safer to initialize the queue properly.
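The "safer" wording can be made precise: the QTAILQ backing a CoQueue is only coincidentally usable when zero-filled. The emptiness check happens to work on g_malloc0()ed memory, but a properly initialized head must point tqh_last back at itself, which zeroing cannot produce. Approximately as defined in include/qemu/queue.h (lightly annotated):

```c
#define QTAILQ_INIT(head) do {                                  \
        (head)->tqh_first = NULL;                               \
        (head)->tqh_last = &(head)->tqh_first;  /* not NULL */  \
} while (/*CONSTCOND*/0)

/* Emptiness only looks at tqh_first, so it tolerates zeroed memory;
 * insertion would dereference the bogus NULL tqh_last. */
#define QTAILQ_EMPTY(head)  ((head)->tqh_first == NULL)
```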
We also need to fix up block/stream.c:close_unused_images() to prevent
traversing a dangling pointer while it rearranges the backing file
chain. This is necessary since the new bdrv_drain_all() traverses the
backing file chain.
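The stream.c hunk is outside this diffstat, but the shape of the fix is simple: detach the chain before deleting the intermediate images, so a concurrent walker (such as the new recursive bdrv_requests_pending()) never follows a freed backing_hd pointer. A sketch of the idea, condensed and not the verbatim patch:

```c
static void close_unused_images(BlockDriverState *top, BlockDriverState *base,
                                const char *base_id)
{
    BlockDriverState *intermediate = top->backing_hd;

    /* Reassign before any bdrv_delete() so nothing traversing the
     * backing chain can reach a dangling pointer. */
    top->backing_hd = base;

    while (intermediate && intermediate != base) {
        BlockDriverState *unused = intermediate;

        intermediate = intermediate->backing_hd;
        unused->backing_hd = NULL;
        bdrv_delete(unused);
    }
}
```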
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Diffstat (limited to 'block.c')
-rw-r--r-- | block.c | 45 |
1 file changed, 35 insertions, 10 deletions
```diff
@@ -148,7 +148,6 @@ static void bdrv_block_timer(void *opaque)
 
 void bdrv_io_limits_enable(BlockDriverState *bs)
 {
-    qemu_co_queue_init(&bs->throttled_reqs);
     bs->block_timer = qemu_new_timer_ns(vm_clock, bdrv_block_timer, bs);
     bs->io_limits_enabled = true;
 }
@@ -306,6 +305,7 @@ BlockDriverState *bdrv_new(const char *device_name)
     bdrv_iostatus_disable(bs);
     notifier_list_init(&bs->close_notifiers);
     notifier_with_return_list_init(&bs->before_write_notifiers);
+    qemu_co_queue_init(&bs->throttled_reqs);
 
     return bs;
 }
@@ -1428,6 +1428,35 @@ void bdrv_close_all(void)
     }
 }
 
+/* Check if any requests are in-flight (including throttled requests) */
+static bool bdrv_requests_pending(BlockDriverState *bs)
+{
+    if (!QLIST_EMPTY(&bs->tracked_requests)) {
+        return true;
+    }
+    if (!qemu_co_queue_empty(&bs->throttled_reqs)) {
+        return true;
+    }
+    if (bs->file && bdrv_requests_pending(bs->file)) {
+        return true;
+    }
+    if (bs->backing_hd && bdrv_requests_pending(bs->backing_hd)) {
+        return true;
+    }
+    return false;
+}
+
+static bool bdrv_requests_pending_all(void)
+{
+    BlockDriverState *bs;
+    QTAILQ_FOREACH(bs, &bdrv_states, list) {
+        if (bdrv_requests_pending(bs)) {
+            return true;
+        }
+    }
+    return false;
+}
+
 /*
  * Wait for pending requests to complete across all BlockDriverStates
  *
@@ -1442,12 +1471,11 @@ void bdrv_close_all(void)
  */
 void bdrv_drain_all(void)
 {
+    /* Always run first iteration so any pending completion BHs run */
+    bool busy = true;
     BlockDriverState *bs;
-    bool busy;
-
-    do {
-        busy = qemu_aio_wait();
 
+    while (busy) {
         /* FIXME: We do not have timer support here, so this is effectively
          * a busy wait.
          */
@@ -1456,12 +1484,9 @@ void bdrv_drain_all(void)
                 busy = true;
             }
         }
-    } while (busy);
 
-    /* If requests are still pending there is a bug somewhere */
-    QTAILQ_FOREACH(bs, &bdrv_states, list) {
-        assert(QLIST_EMPTY(&bs->tracked_requests));
-        assert(qemu_co_queue_empty(&bs->throttled_reqs));
+        busy = bdrv_requests_pending_all();
+        busy |= aio_poll(qemu_get_aio_context(), busy);
     }
 }
```
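A note on the new loop's idiom: aio_poll() returns true when it made progress (dispatched an fd handler or ran a bottom half). While bdrv_requests_pending_all() reports outstanding work, busy is true and aio_poll() blocks waiting for completions; once nothing is pending, the loop still performs one final non-blocking poll, and the busy |= ... keeps iterating only if that poll did something. That is also why busy starts as true: the first iteration always runs, flushing any completion BHs that were already scheduled.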