author    Emanuele Giuseppe Esposito <eesposit@redhat.com>  2021-06-24 09:20:43 +0200
committer Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>  2021-06-25 14:33:51 +0300
commit    149009bef4b4b4db37b3cf72b41dc2c6e8ca1885 (patch)
tree      b07a043b4259059d8eec8dd6bbf0ae733899b03e /block
parent    d0c389d2ce6031d80e872e8e1b6ebb0f96afbe69 (diff)
block-copy: atomic .cancelled and .finished fields in BlockCopyCallState
By adding acquire/release pairs, we ensure that the .ret and
.error_is_read fields are written by block_copy_dirty_clusters()
before .finished is true, and that they are read by the API user
only after .finished is true. The atomics are necessary because the
fields are modified concurrently in coroutines and read outside of
coroutine context.

Signed-off-by: Emanuele Giuseppe Esposito <eesposit@redhat.com>
Message-Id: <20210624072043.180494-6-eesposit@redhat.com>
Reviewed-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
Signed-off-by: Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
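The ordering described above is the classic release/acquire publication
pattern. A minimal standalone sketch in plain C11 atomics (not QEMU code;
the struct and values are illustrative, and QEMU's qatomic_store_release()/
qatomic_load_acquire() provide the same memory_order_release/
memory_order_acquire semantics):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct {
        int ret;             /* plain field, published by the release store */
        bool error_is_read;  /* likewise ordered before 'finished' */
        atomic_bool finished;
    } CallState;

    /* Writer (the coroutine): fill in the results, then release-store
     * the flag. All writes above the store become visible to any thread
     * that acquire-loads 'finished' as true. */
    static void writer(CallState *s)
    {
        s->ret = -5;             /* e.g. -EIO; illustrative value */
        s->error_is_read = true;
        atomic_store_explicit(&s->finished, true, memory_order_release);
    }

    /* Reader (the API user): acquire-load the flag, then it is safe to
     * read the plain result fields. */
    static bool reader(CallState *s)
    {
        if (!atomic_load_explicit(&s->finished, memory_order_acquire)) {
            return false;  /* not done yet; ret/error_is_read not valid */
        }
        printf("ret=%d error_is_read=%d\n", s->ret, (int)s->error_is_read);
        return true;
    }

Without the release/acquire pair, the compiler or CPU could reorder the
store to .finished before the stores to .ret and .error_is_read, letting a
reader observe .finished == true with stale result fields.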
Diffstat (limited to 'block')
-rw-r--r--  block/block-copy.c | 37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/block/block-copy.c b/block/block-copy.c
index f3550d0825..0becad52da 100644
--- a/block/block-copy.c
+++ b/block/block-copy.c
@@ -52,9 +52,9 @@ typedef struct BlockCopyCallState {
Coroutine *co;
/* Fields whose state changes throughout the execution */
- bool finished;
+ bool finished; /* atomic */
QemuCoSleep sleep; /* TODO: protect API with a lock */
- bool cancelled;
+ bool cancelled; /* atomic */
/* To reference all call states from BlockCopyState */
QLIST_ENTRY(BlockCopyCallState) list;
@@ -667,7 +667,8 @@ block_copy_dirty_clusters(BlockCopyCallState *call_state)
assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
- while (bytes && aio_task_pool_status(aio) == 0 && !call_state->cancelled) {
+ while (bytes && aio_task_pool_status(aio) == 0 &&
+ !qatomic_read(&call_state->cancelled)) {
BlockCopyTask *task;
int64_t status_bytes;
@@ -779,7 +780,7 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
do {
ret = block_copy_dirty_clusters(call_state);
- if (ret == 0 && !call_state->cancelled) {
+ if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
WITH_QEMU_LOCK_GUARD(&s->lock) {
/*
* Check that there is no task we still need to
@@ -815,9 +816,9 @@ static int coroutine_fn block_copy_common(BlockCopyCallState *call_state)
* 2. We have waited for some intersecting block-copy request
* It may have failed and produced new dirty bits.
*/
- } while (ret > 0 && !call_state->cancelled);
+ } while (ret > 0 && !qatomic_read(&call_state->cancelled));
- call_state->finished = true;
+ qatomic_store_release(&call_state->finished, true);
if (call_state->cb) {
call_state->cb(call_state->cb_opaque);
@@ -880,44 +881,50 @@ void block_copy_call_free(BlockCopyCallState *call_state)
return;
}
- assert(call_state->finished);
+ assert(qatomic_read(&call_state->finished));
g_free(call_state);
}
bool block_copy_call_finished(BlockCopyCallState *call_state)
{
- return call_state->finished;
+ return qatomic_read(&call_state->finished);
}
bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret == 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret == 0;
}
bool block_copy_call_failed(BlockCopyCallState *call_state)
{
- return call_state->finished && !call_state->cancelled &&
- call_state->ret < 0;
+ return qatomic_load_acquire(&call_state->finished) &&
+ !qatomic_read(&call_state->cancelled) &&
+ call_state->ret < 0;
}
bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
- return call_state->cancelled;
+ return qatomic_read(&call_state->cancelled);
}
int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
- assert(call_state->finished);
+ assert(qatomic_load_acquire(&call_state->finished));
if (error_is_read) {
*error_is_read = call_state->error_is_read;
}
return call_state->ret;
}
+/*
+ * Note that cancelling and finishing are racy.
+ * User can cancel a block-copy that is already finished.
+ */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
- call_state->cancelled = true;
+ qatomic_set(&call_state->cancelled, true);
block_copy_kick(call_state);
}
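To see how the acquire side protects callers, here is a hypothetical
consumer written against the accessors from this patch. The wait_for_copy()
wrapper and the aio_poll() waiting loop are illustrative assumptions about
a main-loop user, not part of the patch:

    #include "qemu/osdep.h"        /* QEMU build context assumed */
    #include "block/block-copy.h"

    static int wait_for_copy(BlockCopyCallState *call_state)
    {
        bool error_is_read;
        int ret;

        /* Poll until the worker's release store makes .finished true. */
        while (!block_copy_call_finished(call_state)) {
            aio_poll(qemu_get_aio_context(), true);
        }

        if (block_copy_call_cancelled(call_state)) {
            /* Cancel may race with completion; see the comment above
             * block_copy_call_cancel(). */
            ret = -ECANCELED;
        } else {
            /* block_copy_call_status() acquire-loads .finished, pairing
             * with the release store in block_copy_common(), so reading
             * .ret and .error_is_read here is safe. */
            ret = block_copy_call_status(call_state, &error_is_read);
        }

        block_copy_call_free(call_state);
        return ret;
    }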