| author | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 11:56:46 +0100 |
|---|---|---|
| committer | Stefan Hajnoczi <stefanha@redhat.com> | 2020-09-23 16:07:44 +0100 |
| commit | d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch) | |
| tree | bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /util/rcu.c | |
| parent | ed7db34b5aedba4487fd949b2e545eef954f093e (diff) | |
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses plain types (int, etc.) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
```
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
```
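The clash is easy to reproduce outside QEMU. The file below is a hypothetical stand-alone example, not part of the patch: <stdatomic.h> claims the atomic_* namespace, and its atomic_fetch_add() requires an _Atomic-qualified object, so passing a plain unsigned int * fails the same way the QEMU build does above.

```c
/* clash.c -- hypothetical reproducer; compile with: clang -std=c11 -c clash.c */
#include <stdatomic.h>          /* owns the atomic_* namespace */

static unsigned int counter;    /* plain type, not _Atomic */

unsigned int bump(void)
{
    /* clang: error: address argument to atomic operation must be
     * a pointer to _Atomic type ('unsigned int *' invalid) */
    return atomic_fetch_add(&counter, 1);
}
```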
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
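QEMU's real include/qemu/atomic.h defines many more variants with stricter type checking; a simplified sketch of the renamed wrappers, assuming the GCC/clang __atomic builtins (which, unlike <stdatomic.h>, accept plain types), might look like:

```c
/* Simplified sketch only -- not the actual QEMU definitions.
 * The __atomic builtins work on ordinary (non-_Atomic) objects, so
 * these q-prefixed macros can be used in files that also include
 * <stdatomic.h> without any name clash.
 */
#define qatomic_read(ptr)    __atomic_load_n(ptr, __ATOMIC_RELAXED)
#define qatomic_set(ptr, i)  __atomic_store_n(ptr, i, __ATOMIC_RELAXED)
#define qatomic_inc(ptr)     ((void) __atomic_fetch_add(ptr, 1, __ATOMIC_SEQ_CST))
#define qatomic_sub(ptr, n)  ((void) __atomic_fetch_sub(ptr, n, __ATOMIC_SEQ_CST))
```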
This patch was generated using:

```
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
      sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
      sed -i "s%\<$identifier\>%q$identifier%g" \
          $(git grep -I -l "\<$identifier\>")
  done
```
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
Diffstat (limited to 'util/rcu.c')
| -rw-r--r-- | util/rcu.c | 36 |
1 file changed, 18 insertions, 18 deletions
```diff
diff --git a/util/rcu.c b/util/rcu.c
index c4fefa9333..13ac0f75cb 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -57,7 +57,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
 {
     unsigned long v;
 
-    v = atomic_read(ctr);
+    v = qatomic_read(ctr);
     return v && (v != rcu_gp_ctr);
 }
 
@@ -82,14 +82,14 @@ static void wait_for_readers(void)
          */
         qemu_event_reset(&rcu_gp_event);
 
-        /* Instead of using atomic_mb_set for index->waiting, and
-         * atomic_mb_read for index->ctr, memory barriers are placed
+        /* Instead of using qatomic_mb_set for index->waiting, and
+         * qatomic_mb_read for index->ctr, memory barriers are placed
          * manually since writes to different threads are independent.
          * qemu_event_reset has acquire semantics, so no memory barrier
          * is needed here.
          */
         QLIST_FOREACH(index, &registry, node) {
-            atomic_set(&index->waiting, true);
+            qatomic_set(&index->waiting, true);
         }
 
         /* Here, order the stores to index->waiting before the loads of
@@ -106,7 +106,7 @@ static void wait_for_readers(void)
                 /* No need for mb_set here, worst of all we
                  * get some extra futex wakeups.
                  */
-                atomic_set(&index->waiting, false);
+                qatomic_set(&index->waiting, false);
             }
         }
 
@@ -151,7 +151,7 @@ void synchronize_rcu(void)
     QEMU_LOCK_GUARD(&rcu_registry_lock);
 
     if (!QLIST_EMPTY(&registry)) {
-        /* In either case, the atomic_mb_set below blocks stores that free
+        /* In either case, the qatomic_mb_set below blocks stores that free
          * old RCU-protected pointers.
          */
         if (sizeof(rcu_gp_ctr) < 8) {
@@ -160,12 +160,12 @@ void synchronize_rcu(void)
              *
              * Switch parity: 0 -> 1, 1 -> 0.
              */
-            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
             wait_for_readers();
-            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
         } else {
             /* Increment current grace period.  */
-            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
         }
 
         wait_for_readers();
@@ -188,8 +188,8 @@ static void enqueue(struct rcu_head *node)
     struct rcu_head **old_tail;
 
     node->next = NULL;
-    old_tail = atomic_xchg(&tail, &node->next);
-    atomic_mb_set(old_tail, node);
+    old_tail = qatomic_xchg(&tail, &node->next);
+    qatomic_mb_set(old_tail, node);
 }
 
 static struct rcu_head *try_dequeue(void)
@@ -203,7 +203,7 @@ retry:
      * The tail, because it is the first step in the enqueuing.
      * It is only the next pointers that might be inconsistent.
      */
-    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
+    if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) {
         abort();
     }
 
@@ -211,7 +211,7 @@ retry:
      * wrong and we need to wait until its enqueuer finishes the update.
      */
     node = head;
-    next = atomic_mb_read(&head->next);
+    next = qatomic_mb_read(&head->next);
     if (!next) {
         return NULL;
     }
@@ -240,7 +240,7 @@ static void *call_rcu_thread(void *opaque)
 
     for (;;) {
         int tries = 0;
-        int n = atomic_read(&rcu_call_count);
+        int n = qatomic_read(&rcu_call_count);
 
         /* Heuristically wait for a decent number of callbacks to pile up.
          * Fetch rcu_call_count now, we only must process elements that were
@@ -250,7 +250,7 @@ static void *call_rcu_thread(void *opaque)
             g_usleep(10000);
             if (n == 0) {
                 qemu_event_reset(&rcu_call_ready_event);
-                n = atomic_read(&rcu_call_count);
+                n = qatomic_read(&rcu_call_count);
                 if (n == 0) {
 #if defined(CONFIG_MALLOC_TRIM)
                     malloc_trim(4 * 1024 * 1024);
@@ -258,10 +258,10 @@ static void *call_rcu_thread(void *opaque)
                     qemu_event_wait(&rcu_call_ready_event);
                 }
             }
-            n = atomic_read(&rcu_call_count);
+            n = qatomic_read(&rcu_call_count);
         }
 
-        atomic_sub(&rcu_call_count, n);
+        qatomic_sub(&rcu_call_count, n);
         synchronize_rcu();
         qemu_mutex_lock_iothread();
         while (n > 0) {
@@ -289,7 +289,7 @@ void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
 {
     node->func = func;
     enqueue(node);
-    atomic_inc(&rcu_call_count);
+    qatomic_inc(&rcu_call_count);
     qemu_event_set(&rcu_call_ready_event);
 }
 
```
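Since the rename is purely mechanical, callers of the RCU API are unchanged. For context, here is a hypothetical reader/updater pair built on these primitives, assuming QEMU's post-rename headers; the MyState type and the function names are invented for illustration and do not appear in the patch:

```c
#include "qemu/osdep.h"
#include "qemu/rcu.h"       /* rcu_read_lock(), g_free_rcu(), ... */
#include "qemu/atomic.h"    /* qatomic_rcu_read(), qatomic_rcu_set() */

typedef struct MyState {
    int value;
    struct rcu_head rcu;    /* needed by g_free_rcu()/call_rcu() */
} MyState;

static MyState *global_state;

/* Reader: dereference the shared pointer inside an RCU critical
 * section; the grace period machinery in util/rcu.c keeps the old
 * object alive until all such readers have finished. */
static int read_value(void)
{
    int v;

    rcu_read_lock();
    v = qatomic_rcu_read(&global_state) ?
        qatomic_rcu_read(&global_state)->value : 0;
    rcu_read_unlock();
    return v;
}

/* Updater (assumed to be serialized by a lock elsewhere): publish
 * the new state, then reclaim the old one after a grace period;
 * g_free_rcu() funnels into the call_rcu1() shown in the diff. */
static void update_value(MyState *new_state)
{
    MyState *old = qatomic_rcu_read(&global_state);

    qatomic_rcu_set(&global_state, new_state);
    if (old) {
        g_free_rcu(old, rcu);
    }
}
```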