author     Paolo Bonzini <pbonzini@redhat.com>    2016-10-27 12:48:50 +0200
committer  Fam Zheng <famz@redhat.com>            2016-10-28 21:50:18 +0800
commit     bae8196d9f97916de6323e70e3e374362ee16ec4 (patch)
tree       d733f20bed98d2ddaf2a66709fb81f20c9f31d2b /blockjob.c
parent     50ab0e0908d592b8bda56c2d7495e1190d734b0b (diff)
blockjob: introduce .drain callback for jobs
This is required to decouple block jobs from running in an AioContext. With
multiqueue block devices, a BlockDriverState does not really belong to a
single AioContext.

The solution is to first wait until all I/O operations are complete; then
loop in the main thread for the block job to complete entirely.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Fam Zheng <famz@redhat.com>
Message-Id: <1477565348-5458-3-git-send-email-pbonzini@redhat.com>
Signed-off-by: Fam Zheng <famz@redhat.com>
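The driver-side half of the interface is not visible here, since the diffstat
below is limited to blockjob.c; the new .drain member is only consumed via
job->driver->drain in block_job_drain(). As a rough sketch of how a job driver
could hook it (the ExampleJob type, the example_* names and the include path
are assumptions for illustration, not code from the QEMU tree):

#include "block/blockjob.h"   /* assumed header providing BlockJob/BlockJobDriver */

typedef struct ExampleJob {
    BlockJob common;          /* embedded base job, following QEMU convention */
    bool must_flush;          /* illustrative per-job state */
} ExampleJob;

/* Invoked from block_job_drain() after blk_drain(job->blk); its task is to
 * push forward requests that the job issues internally and that a plain
 * blk_drain() cannot see. */
static void example_job_drain(BlockJob *job)
{
    ExampleJob *s = container_of(job, ExampleJob, common);

    s->must_flush = true;     /* ask the job coroutine to flush its own queue */
    block_job_enter(job);     /* wake the coroutine if it is sleeping */
}

static const BlockJobDriver example_job_driver = {
    .instance_size = sizeof(ExampleJob),
    .drain         = example_job_drain,
};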
Diffstat (limited to 'blockjob.c')
-rw-r--r--    blockjob.c    37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/blockjob.c b/blockjob.c
index 43fecbe13e..7c88b30074 100644
--- a/blockjob.c
+++ b/blockjob.c
@@ -74,17 +74,6 @@ BlockJob *block_job_get(const char *id)
     return NULL;
 }
 
-/* Normally the job runs in its BlockBackend's AioContext. The exception is
- * block_job_defer_to_main_loop() where it runs in the QEMU main loop. Code
- * that supports both cases uses this helper function.
- */
-static AioContext *block_job_get_aio_context(BlockJob *job)
-{
-    return job->deferred_to_main_loop ?
-           qemu_get_aio_context() :
-           blk_get_aio_context(job->blk);
-}
-
 static void block_job_attached_aio_context(AioContext *new_context,
                                            void *opaque)
 {
@@ -97,6 +86,17 @@ static void block_job_attached_aio_context(AioContext *new_context,
     block_job_resume(job);
 }
 
+static void block_job_drain(BlockJob *job)
+{
+    /* If job is !job->busy this kicks it into the next pause point. */
+    block_job_enter(job);
+
+    blk_drain(job->blk);
+    if (job->driver->drain) {
+        job->driver->drain(job);
+    }
+}
+
 static void block_job_detach_aio_context(void *opaque)
 {
     BlockJob *job = opaque;
@@ -106,12 +106,8 @@ static void block_job_detach_aio_context(void *opaque)
     block_job_pause(job);
-    if (!job->paused) {
-        /* If job is !job->busy this kicks it into the next pause point. */
-        block_job_enter(job);
-    }
     while (!job->paused && !job->completed) {
-        aio_poll(block_job_get_aio_context(job), true);
+        block_job_drain(job);
     }
 
     block_job_unref(job);
@@ -413,14 +409,21 @@ static int block_job_finish_sync(BlockJob *job,
     assert(blk_bs(job->blk)->job == job);
     block_job_ref(job);
+
     finish(job, &local_err);
     if (local_err) {
         error_propagate(errp, local_err);
         block_job_unref(job);
         return -EBUSY;
     }
+    /* block_job_drain calls block_job_enter, and it should be enough to
+     * induce progress until the job completes or moves to the main thread.
+     */
+    while (!job->deferred_to_main_loop && !job->completed) {
+        block_job_drain(job);
+    }
     while (!job->completed) {
-        aio_poll(block_job_get_aio_context(job), true);
+        aio_poll(qemu_get_aio_context(), true);
     }
     ret = (job->cancelled && job->ret == 0) ? -ECANCELED : job->ret;
     block_job_unref(job);
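The synchronous helpers elsewhere in blockjob.c reach this code through
block_job_finish_sync(), so they inherit the drain-based wait without further
changes. A simplified, hypothetical caller (the example_* names are invented;
the real cancel/complete wrappers are not part of this diff):

/* Illustrative only: mirrors the calling pattern of the synchronous
 * wrappers in blockjob.c. */
static void example_cancel_cb(BlockJob *job, Error **errp)
{
    block_job_cancel(job);
}

static int example_job_cancel_sync(BlockJob *job)
{
    /* Blocks until the job is gone: drain until it completes or defers to
     * the main loop, then poll the main-loop AioContext (see above). */
    return block_job_finish_sync(job, example_cancel_cb, NULL);
}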