-rw-r--r--   block/mirror.c     | 10
-rw-r--r--   include/qemu/job.h |  8
-rw-r--r--   job.c              | 14
3 files changed, 23 insertions, 9 deletions
diff --git a/block/mirror.c b/block/mirror.c
index 010b9e1672..2d9642cb00 100644
--- a/block/mirror.c
+++ b/block/mirror.c
@@ -943,7 +943,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
         /* Transition to the READY state and wait for complete. */
         job_transition_to_ready(&s->common.job);
         s->actively_synced = true;
-        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
+        while (!job_cancel_requested(&s->common.job) && !s->should_complete) {
             job_yield(&s->common.job);
         }
         s->common.job.cancelled = false;
@@ -1050,7 +1050,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
             }
 
             should_complete = s->should_complete ||
-                job_is_cancelled(&s->common.job);
+                job_cancel_requested(&s->common.job);
             cnt = bdrv_get_dirty_count(s->dirty_bitmap);
         }
@@ -1094,7 +1094,7 @@ static int coroutine_fn mirror_run(Job *job, Error **errp)
         trace_mirror_before_sleep(s, cnt, job_is_ready(&s->common.job),
                                   delay_ns);
         job_sleep_ns(&s->common.job, delay_ns);
-        if (job_is_cancelled(&s->common.job) && s->common.job.force_cancel) {
+        if (job_is_cancelled(&s->common.job)) {
             break;
         }
         s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
@@ -1106,9 +1106,7 @@ immediate_exit:
          * or it was cancelled prematurely so that we do not guarantee that
          * the target is a copy of the source.
          */
-        assert(ret < 0 ||
-               (s->common.job.force_cancel &&
-                job_is_cancelled(&s->common.job)));
+        assert(ret < 0 || job_is_cancelled(&s->common.job));
         assert(need_drain);
         mirror_wait_for_all_io(s);
     }
diff --git a/include/qemu/job.h b/include/qemu/job.h
index 90f6abbd6a..6e67b6977f 100644
--- a/include/qemu/job.h
+++ b/include/qemu/job.h
@@ -436,9 +436,15 @@ const char *job_type_str(const Job *job);
 /** Returns true if the job should not be visible to the management layer. */
 bool job_is_internal(Job *job);
 
-/** Returns whether the job is scheduled for cancellation. */
+/** Returns whether the job is being cancelled. */
 bool job_is_cancelled(Job *job);
 
+/**
+ * Returns whether the job is scheduled for cancellation (at an
+ * indefinite point).
+ */
+bool job_cancel_requested(Job *job);
+
 /** Returns whether the job is in a completed state. */
 bool job_is_completed(Job *job);
 
diff --git a/job.c b/job.c
--- a/job.c
+++ b/job.c
@@ -217,6 +217,11 @@ const char *job_type_str(const Job *job)
 
 bool job_is_cancelled(Job *job)
 {
+    return job->cancelled && job->force_cancel;
+}
+
+bool job_cancel_requested(Job *job)
+{
     return job->cancelled;
 }
 
@@ -798,7 +803,7 @@ static void job_completed_txn_abort(Job *job)
         ctx = other_job->aio_context;
         aio_context_acquire(ctx);
         if (!job_is_completed(other_job)) {
-            assert(job_is_cancelled(other_job));
+            assert(job_cancel_requested(other_job));
             job_finish_sync(other_job, NULL, NULL);
         }
         job_finalize_single(other_job);
@@ -977,6 +982,11 @@ void job_cancel(Job *job, bool force)
          * job_cancel_async() ignores soft-cancel requests for jobs
          * that are already done (i.e. deferred to the main loop).  We
          * have to check again whether the job is really cancelled.
+         * (job_cancel_requested() and job_is_cancelled() are equivalent
+         * here, because job_cancel_async() will make soft-cancel
+         * requests no-ops when deferred_to_main_loop is true.  We
+         * choose to call job_is_cancelled() to show that we invoke
+         * job_completed_txn_abort() only for force-cancelled jobs.)
          */
         if (job_is_cancelled(job)) {
             job_completed_txn_abort(job);
@@ -1044,7 +1054,7 @@ void job_complete(Job *job, Error **errp)
     if (job_apply_verb(job, JOB_VERB_COMPLETE, errp)) {
         return;
     }
-    if (job_is_cancelled(job) || !job->driver->complete) {
+    if (job_cancel_requested(job) || !job->driver->complete) {
         error_setg(errp, "The active block job '%s' cannot be completed",
                    job->id);
         return;
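To make the semantic split easier to see in isolation, here is a minimal standalone C sketch (not QEMU code; the Job struct is reduced to just the cancelled and force_cancel fields used in the hunks above, and everything else is simplified for illustration). It shows that job_cancel_requested() reports any cancel request, while job_is_cancelled() after this patch reports only a forced cancel:

/* Standalone illustration of the two predicates introduced by the patch. */
#include <stdbool.h>
#include <stdio.h>

typedef struct Job {
    bool cancelled;    /* a cancel request (soft or forced) was made */
    bool force_cancel; /* the request was a forced cancel */
} Job;

/* True only for forced cancellation: the job must stop as soon as possible. */
static bool job_is_cancelled(Job *job)
{
    return job->cancelled && job->force_cancel;
}

/* True for any cancel request, including a soft one that a READY mirror
 * job treats as "complete early". */
static bool job_cancel_requested(Job *job)
{
    return job->cancelled;
}

int main(void)
{
    Job soft = { .cancelled = true, .force_cancel = false };
    Job hard = { .cancelled = true, .force_cancel = true };

    /* Soft cancel: requested, but not "cancelled" in the new sense. */
    printf("soft: requested=%d cancelled=%d\n",
           job_cancel_requested(&soft), job_is_cancelled(&soft));
    /* Forced cancel: both predicates are true. */
    printf("hard: requested=%d cancelled=%d\n",
           job_cancel_requested(&hard), job_is_cancelled(&hard));
    return 0;
}

Under this split, a soft cancel of a READY mirror job only sets cancelled, so the loops in mirror_run() above keep copying until the job can complete consistently, whereas a forced cancel makes job_is_cancelled() true and breaks out of the copy loop immediately.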