Diffstat (limited to 'block')
-rw-r--r--  block/block-copy.c  37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index f3550d0825..0becad52da 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -52,9 +52,9 @@ typedef struct BlockCopyCallState {
Coroutine *co;
/* Fields whose state changes throughout the execution */
- bool finished;
+ bool finished; /* atomic */
QemuCoSleep sleep; /* TODO: protect API with a lock */
- bool cancelled;
+ bool cancelled; /* atomic */
/* To reference all call states from BlockCopyState */
QLIST_ENTRY(BlockCopyCallState) list;
@@ -667,7 +667,8 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
- while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
+ while (bytes && aio_task_pool_status(aio) == 0 &&
+ !qatomic_read(&call_state->cancelled)) {
BlockCopyTask *task;
int64_t status_bytes;
@@ -779,7 +780,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
do {
ret = block_copy_dirty_clusters(call_state);
- if (ret == 0 && !call_state->cancelled) {
+ if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
WITH_QEMU_LOCK_GUARD(&s->lock) {
/*
* Check that there is no task we still need to
@@ -815,9 +816,9 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
* 2. We have waited for some intersecting block-copy request
* It may have failed and produced new dirty bits.
*/
- } while (ret > 0 && !call_state->cancelled);
+ } while (ret > 0 && !qatomic_read(&call_state->cancelled));
- call_state->finished = true;
+ qatomic_store_release(&call_state->finished, true);
if (call_state->cb) {
call_state->cb(call_state->cb_opaque);
@@ -880,44 +881,50 @@ void block_copy_call_free(BlockCopyCallState *call_state)
return;
}
- assert(call_state->finished);
+ assert(qatomic_read(&call_state->finished));
g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
- return call_state->finished;
+ return qatomic_read(&call_state->finished);
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret == 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret < 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
- return call_state->cancelled;
+ return qatomic_read(&call_state->cancelled);
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
- assert(call_state->finished);
+ assert(qatomic_load_acquire(&call_state->finished));
if (error_is_read) {
*error_is_read = call_state->error_is_read;
}
return call_state->ret;
}
+/*
+ * Note that cancelling and finishing are racy: a user can cancel a
+ * block-copy that has already finished.
+ */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
- call_state->cancelled = true;
+ qatomic_set(&call_state->cancelled, true);
block_copy_kick(call_state);
}
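
For readers unfamiliar with the pattern, the sketch below (not part of the patch, and not QEMU code) models the acquire/release pairing the diff introduces: a release store on ->finished publishes the plain writes to ->ret and ->error_is_read, and the matching acquire load on the reader side guarantees those writes are visible. C11 stdatomic primitives stand in for QEMU's qatomic_store_release()/qatomic_load_acquire()/qatomic_read(), and CallState, worker() and the polling loop are hypothetical stand-ins for BlockCopyCallState, block_copy_common() and block_copy_call_finished().

/*
 * Minimal standalone sketch of the release/acquire publication
 * pattern; build with: cc -std=c11 -pthread sketch.c
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

typedef struct CallState {
    int ret;                 /* plain write, published by 'finished' */
    bool error_is_read;      /* likewise */
    atomic_bool cancelled;   /* standalone flag: relaxed accesses */
    atomic_bool finished;    /* store-release / load-acquire pair */
} CallState;

static void *worker(void *opaque)
{
    CallState *s = opaque;

    /* ... the copy loop would run here, checking 'cancelled' ... */
    if (!atomic_load_explicit(&s->cancelled, memory_order_relaxed)) {
        s->ret = 0;              /* plain writes ... */
        s->error_is_read = false;
    }
    /* ... made visible to acquire loaders by this release store. */
    atomic_store_explicit(&s->finished, true, memory_order_release);
    return NULL;
}

int main(void)
{
    CallState s = { .ret = -1 };
    pthread_t t;

    pthread_create(&t, NULL, worker, &s);

    /*
     * Poll as block_copy_call_finished() does; the acquire load pairs
     * with the worker's release store, so once it returns true the
     * reads of 'ret' and 'error_is_read' below are safe.
     */
    while (!atomic_load_explicit(&s.finished, memory_order_acquire)) {
        /* spin; a real caller would yield or sleep */
    }
    printf("ret=%d error_is_read=%d\n", s.ret, s.error_is_read);

    pthread_join(t, NULL);
    return 0;
}

Note the asymmetry the patch encodes: ->cancelled carries only its own value, so relaxed accesses suffice (a stale read merely delays cancellation by one loop iteration), while ->finished acts as a publication flag for ->ret and ->error_is_read and therefore needs the release/acquire pair. This also matches the racy-cancel comment above: setting ->cancelled after ->finished is already true is harmless, since the call has simply completed.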