diff options
author | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 11:56:46 +0100 |
---|---|---|
committer | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 16:07:44 +0100 |
commit | d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch) | |
tree | bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /block | |
parent | ed7db34b5aedba4487fd949b2e545eef954f093e (diff) |
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses direct types (int, etc.) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
This patch was generated using:
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
sed -i "s%\<$identifier\>%q$identifier%g" \
$(git grep -I -l "\<$identifier\>")
done
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/block-backend.c | 15 | ||||
-rw-r--r-- | block/io.c | 48 | ||||
-rw-r--r-- | block/nfs.c | 2 | ||||
-rw-r--r-- | block/sheepdog.c | 2 | ||||
-rw-r--r-- | block/throttle-groups.c | 12 | ||||
-rw-r--r-- | block/throttle.c | 4 |
6 files changed, 42 insertions, 41 deletions
diff --git a/block/block-backend.c b/block/block-backend.c index 24dd0670d1..ce78d30794 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -1353,12 +1353,12 @@ int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags) void blk_inc_in_flight(BlockBackend *blk) { - atomic_inc(&blk->in_flight); + qatomic_inc(&blk->in_flight); } void blk_dec_in_flight(BlockBackend *blk) { - atomic_dec(&blk->in_flight); + qatomic_dec(&blk->in_flight); aio_wait_kick(); } @@ -1720,7 +1720,7 @@ void blk_drain(BlockBackend *blk) /* We may have -ENOMEDIUM completions in flight */ AIO_WAIT_WHILE(blk_get_aio_context(blk), - atomic_mb_read(&blk->in_flight) > 0); + qatomic_mb_read(&blk->in_flight) > 0); if (bs) { bdrv_drained_end(bs); @@ -1739,7 +1739,7 @@ void blk_drain_all(void) aio_context_acquire(ctx); /* We may have -ENOMEDIUM completions in flight */ - AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0); + AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0); aio_context_release(ctx); } @@ -2346,6 +2346,7 @@ void blk_io_limits_update_group(BlockBackend *blk, const char *group) static void blk_root_drained_begin(BdrvChild *child) { BlockBackend *blk = child->opaque; + ThrottleGroupMember *tgm = &blk->public.throttle_group_member; if (++blk->quiesce_counter == 1) { if (blk->dev_ops && blk->dev_ops->drained_begin) { @@ -2356,8 +2357,8 @@ static void blk_root_drained_begin(BdrvChild *child) /* Note that blk->root may not be accessible here yet if we are just * attaching to a BlockDriverState that is drained. Use child instead. 
*/ - if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) { - throttle_group_restart_tgm(&blk->public.throttle_group_member); + if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { + throttle_group_restart_tgm(tgm); } } @@ -2374,7 +2375,7 @@ static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter) assert(blk->quiesce_counter); assert(blk->public.throttle_group_member.io_limits_disabled); - atomic_dec(&blk->public.throttle_group_member.io_limits_disabled); + qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled); if (--blk->quiesce_counter == 0) { if (blk->dev_ops && blk->dev_ops->drained_end) { diff --git a/block/io.c b/block/io.c index a2389bb38c..11df1889f1 100644 --- a/block/io.c +++ b/block/io.c @@ -69,7 +69,7 @@ void bdrv_parent_drained_end_single(BdrvChild *c) { int drained_end_counter = 0; bdrv_parent_drained_end_single_no_poll(c, &drained_end_counter); - BDRV_POLL_WHILE(c->bs, atomic_read(&drained_end_counter) > 0); + BDRV_POLL_WHILE(c->bs, qatomic_read(&drained_end_counter) > 0); } static void bdrv_parent_drained_end(BlockDriverState *bs, BdrvChild *ignore, @@ -186,12 +186,12 @@ void bdrv_refresh_limits(BlockDriverState *bs, Error **errp) */ void bdrv_enable_copy_on_read(BlockDriverState *bs) { - atomic_inc(&bs->copy_on_read); + qatomic_inc(&bs->copy_on_read); } void bdrv_disable_copy_on_read(BlockDriverState *bs) { - int old = atomic_fetch_dec(&bs->copy_on_read); + int old = qatomic_fetch_dec(&bs->copy_on_read); assert(old >= 1); } @@ -219,9 +219,9 @@ static void coroutine_fn bdrv_drain_invoke_entry(void *opaque) } /* Set data->done and decrement drained_end_counter before bdrv_wakeup() */ - atomic_mb_set(&data->done, true); + qatomic_mb_set(&data->done, true); if (!data->begin) { - atomic_dec(data->drained_end_counter); + qatomic_dec(data->drained_end_counter); } bdrv_dec_in_flight(bs); @@ -248,7 +248,7 @@ static void bdrv_drain_invoke(BlockDriverState *bs, bool begin, }; if (!begin) { - 
atomic_inc(drained_end_counter); + qatomic_inc(drained_end_counter); } /* Make sure the driver callback completes during the polling phase for @@ -268,7 +268,7 @@ bool bdrv_drain_poll(BlockDriverState *bs, bool recursive, return true; } - if (atomic_read(&bs->in_flight)) { + if (qatomic_read(&bs->in_flight)) { return true; } @@ -382,7 +382,7 @@ void bdrv_do_drained_begin_quiesce(BlockDriverState *bs, assert(!qemu_in_coroutine()); /* Stop things in parent-to-child order */ - if (atomic_fetch_inc(&bs->quiesce_counter) == 0) { + if (qatomic_fetch_inc(&bs->quiesce_counter) == 0) { aio_disable_external(bdrv_get_aio_context(bs)); } @@ -473,7 +473,7 @@ static void bdrv_do_drained_end(BlockDriverState *bs, bool recursive, bdrv_parent_drained_end(bs, parent, ignore_bds_parents, drained_end_counter); - old_quiesce_counter = atomic_fetch_dec(&bs->quiesce_counter); + old_quiesce_counter = qatomic_fetch_dec(&bs->quiesce_counter); if (old_quiesce_counter == 1) { aio_enable_external(bdrv_get_aio_context(bs)); } @@ -492,7 +492,7 @@ void bdrv_drained_end(BlockDriverState *bs) { int drained_end_counter = 0; bdrv_do_drained_end(bs, false, NULL, false, &drained_end_counter); - BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); + BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); } void bdrv_drained_end_no_poll(BlockDriverState *bs, int *drained_end_counter) @@ -504,7 +504,7 @@ void bdrv_subtree_drained_end(BlockDriverState *bs) { int drained_end_counter = 0; bdrv_do_drained_end(bs, true, NULL, false, &drained_end_counter); - BDRV_POLL_WHILE(bs, atomic_read(&drained_end_counter) > 0); + BDRV_POLL_WHILE(bs, qatomic_read(&drained_end_counter) > 0); } void bdrv_apply_subtree_drain(BdrvChild *child, BlockDriverState *new_parent) @@ -526,7 +526,7 @@ void bdrv_unapply_subtree_drain(BdrvChild *child, BlockDriverState *old_parent) &drained_end_counter); } - BDRV_POLL_WHILE(child->bs, atomic_read(&drained_end_counter) > 0); + BDRV_POLL_WHILE(child->bs, 
qatomic_read(&drained_end_counter) > 0); } /* @@ -553,7 +553,7 @@ static void bdrv_drain_assert_idle(BlockDriverState *bs) { BdrvChild *child, *next; - assert(atomic_read(&bs->in_flight) == 0); + assert(qatomic_read(&bs->in_flight) == 0); QLIST_FOREACH_SAFE(child, &bs->children, next, next) { bdrv_drain_assert_idle(child->bs); } @@ -655,7 +655,7 @@ void bdrv_drain_all_end(void) } assert(qemu_get_current_aio_context() == qemu_get_aio_context()); - AIO_WAIT_WHILE(NULL, atomic_read(&drained_end_counter) > 0); + AIO_WAIT_WHILE(NULL, qatomic_read(&drained_end_counter) > 0); assert(bdrv_drain_all_count > 0); bdrv_drain_all_count--; @@ -675,7 +675,7 @@ void bdrv_drain_all(void) static void tracked_request_end(BdrvTrackedRequest *req) { if (req->serialising) { - atomic_dec(&req->bs->serialising_in_flight); + qatomic_dec(&req->bs->serialising_in_flight); } qemu_co_mutex_lock(&req->bs->reqs_lock); @@ -777,7 +777,7 @@ bool bdrv_mark_request_serialising(BdrvTrackedRequest *req, uint64_t align) qemu_co_mutex_lock(&bs->reqs_lock); if (!req->serialising) { - atomic_inc(&req->bs->serialising_in_flight); + qatomic_inc(&req->bs->serialising_in_flight); req->serialising = true; } @@ -841,7 +841,7 @@ static int bdrv_get_cluster_size(BlockDriverState *bs) void bdrv_inc_in_flight(BlockDriverState *bs) { - atomic_inc(&bs->in_flight); + qatomic_inc(&bs->in_flight); } void bdrv_wakeup(BlockDriverState *bs) @@ -851,7 +851,7 @@ void bdrv_wakeup(BlockDriverState *bs) void bdrv_dec_in_flight(BlockDriverState *bs) { - atomic_dec(&bs->in_flight); + qatomic_dec(&bs->in_flight); bdrv_wakeup(bs); } @@ -860,7 +860,7 @@ static bool coroutine_fn bdrv_wait_serialising_requests(BdrvTrackedRequest *self BlockDriverState *bs = self->bs; bool waited = false; - if (!atomic_read(&bs->serialising_in_flight)) { + if (!qatomic_read(&bs->serialising_in_flight)) { return false; } @@ -1747,7 +1747,7 @@ int coroutine_fn bdrv_co_preadv_part(BdrvChild *child, bdrv_inc_in_flight(bs); /* Don't do copy-on-read if we 
read data before write operation */ - if (atomic_read(&bs->copy_on_read)) { + if (qatomic_read(&bs->copy_on_read)) { flags |= BDRV_REQ_COPY_ON_READ; } @@ -1935,7 +1935,7 @@ bdrv_co_write_req_finish(BdrvChild *child, int64_t offset, uint64_t bytes, int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE); BlockDriverState *bs = child->bs; - atomic_inc(&bs->write_gen); + qatomic_inc(&bs->write_gen); /* * Discard cannot extend the image, but in error handling cases, such as @@ -2768,7 +2768,7 @@ int coroutine_fn bdrv_co_flush(BlockDriverState *bs) } qemu_co_mutex_lock(&bs->reqs_lock); - current_gen = atomic_read(&bs->write_gen); + current_gen = qatomic_read(&bs->write_gen); /* Wait until any previous flushes are completed */ while (bs->active_flush_req) { @@ -3116,7 +3116,7 @@ void bdrv_io_plug(BlockDriverState *bs) bdrv_io_plug(child->bs); } - if (atomic_fetch_inc(&bs->io_plugged) == 0) { + if (qatomic_fetch_inc(&bs->io_plugged) == 0) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_plug) { drv->bdrv_io_plug(bs); @@ -3129,7 +3129,7 @@ void bdrv_io_unplug(BlockDriverState *bs) BdrvChild *child; assert(bs->io_plugged); - if (atomic_fetch_dec(&bs->io_plugged) == 1) { + if (qatomic_fetch_dec(&bs->io_plugged) == 1) { BlockDriver *drv = bs->drv; if (drv && drv->bdrv_io_unplug) { drv->bdrv_io_unplug(bs); diff --git a/block/nfs.c b/block/nfs.c index 61a249a9fc..f86e660374 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -721,7 +721,7 @@ nfs_get_allocated_file_size_cb(int ret, struct nfs_context *nfs, void *data, } /* Set task->complete before reading bs->wakeup. */ - atomic_mb_set(&task->complete, 1); + qatomic_mb_set(&task->complete, 1); bdrv_wakeup(task->bs); } diff --git a/block/sheepdog.c b/block/sheepdog.c index cbbebc1aaf..2f5c0eb376 100644 --- a/block/sheepdog.c +++ b/block/sheepdog.c @@ -665,7 +665,7 @@ out: srco->co = NULL; srco->ret = ret; /* Set srco->finished before reading bs->wakeup. 
*/ - atomic_mb_set(&srco->finished, true); + qatomic_mb_set(&srco->finished, true); if (srco->bs) { bdrv_wakeup(srco->bs); } diff --git a/block/throttle-groups.c b/block/throttle-groups.c index 4e28365d8d..e2f2813c0f 100644 --- a/block/throttle-groups.c +++ b/block/throttle-groups.c @@ -228,7 +228,7 @@ static ThrottleGroupMember *next_throttle_token(ThrottleGroupMember *tgm, * immediately if it has pending requests. Otherwise we could be * forcing it to wait for other member's throttled requests. */ if (tgm_has_pending_reqs(tgm, is_write) && - atomic_read(&tgm->io_limits_disabled)) { + qatomic_read(&tgm->io_limits_disabled)) { return tgm; } @@ -272,7 +272,7 @@ static bool throttle_group_schedule_timer(ThrottleGroupMember *tgm, ThrottleTimers *tt = &tgm->throttle_timers; bool must_wait; - if (atomic_read(&tgm->io_limits_disabled)) { + if (qatomic_read(&tgm->io_limits_disabled)) { return false; } @@ -417,7 +417,7 @@ static void coroutine_fn throttle_group_restart_queue_entry(void *opaque) g_free(data); - atomic_dec(&tgm->restart_pending); + qatomic_dec(&tgm->restart_pending); aio_wait_kick(); } @@ -434,7 +434,7 @@ static void throttle_group_restart_queue(ThrottleGroupMember *tgm, bool is_write * be no timer pending on this tgm at this point */ assert(!timer_pending(tgm->throttle_timers.timers[is_write])); - atomic_inc(&tgm->restart_pending); + qatomic_inc(&tgm->restart_pending); co = qemu_coroutine_create(throttle_group_restart_queue_entry, rd); aio_co_enter(tgm->aio_context, co); @@ -544,7 +544,7 @@ void throttle_group_register_tgm(ThrottleGroupMember *tgm, tgm->throttle_state = ts; tgm->aio_context = ctx; - atomic_set(&tgm->restart_pending, 0); + qatomic_set(&tgm->restart_pending, 0); qemu_mutex_lock(&tg->lock); /* If the ThrottleGroup is new set this ThrottleGroupMember as the token */ @@ -592,7 +592,7 @@ void throttle_group_unregister_tgm(ThrottleGroupMember *tgm) } /* Wait for throttle_group_restart_queue_entry() coroutines to finish */ - 
AIO_WAIT_WHILE(tgm->aio_context, atomic_read(&tgm->restart_pending) > 0); + AIO_WAIT_WHILE(tgm->aio_context, qatomic_read(&tgm->restart_pending) > 0); qemu_mutex_lock(&tg->lock); for (i = 0; i < 2; i++) { diff --git a/block/throttle.c b/block/throttle.c index 9a0f38149a..b685166ad4 100644 --- a/block/throttle.c +++ b/block/throttle.c @@ -217,7 +217,7 @@ static void throttle_reopen_abort(BDRVReopenState *reopen_state) static void coroutine_fn throttle_co_drain_begin(BlockDriverState *bs) { ThrottleGroupMember *tgm = bs->opaque; - if (atomic_fetch_inc(&tgm->io_limits_disabled) == 0) { + if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) { throttle_group_restart_tgm(tgm); } } @@ -226,7 +226,7 @@ static void coroutine_fn throttle_co_drain_end(BlockDriverState *bs) { ThrottleGroupMember *tgm = bs->opaque; assert(tgm->io_limits_disabled); - atomic_dec(&tgm->io_limits_disabled); + qatomic_dec(&tgm->io_limits_disabled); } static const char *const throttle_strong_runtime_opts[] = { |