Diffstat (limited to 'util')
-rw-r--r--  util/aio-posix.c            | 14
-rw-r--r--  util/aio-wait.c             |  2
-rw-r--r--  util/aio-win32.c            |  5
-rw-r--r--  util/async.c                | 28
-rw-r--r--  util/atomic64.c             | 10
-rw-r--r--  util/bitmap.c               | 14
-rw-r--r--  util/cacheinfo.c            |  2
-rw-r--r--  util/fdmon-epoll.c          |  4
-rw-r--r--  util/fdmon-io_uring.c       | 12
-rw-r--r--  util/fdmon-poll.c           |  1
-rw-r--r--  util/iov.c                  | 50
-rw-r--r--  util/lockcnt.c              | 52
-rw-r--r--  util/log.c                  | 10
-rw-r--r--  util/qemu-coroutine-lock.c  | 18
-rw-r--r--  util/qemu-coroutine-sleep.c |  4
-rw-r--r--  util/qemu-coroutine.c       |  6
-rw-r--r--  util/qemu-sockets.c         |  4
-rw-r--r--  util/qemu-thread-posix.c    | 12
-rw-r--r--  util/qemu-thread-win32.c    | 12
-rw-r--r--  util/qemu-timer.c           | 12
-rw-r--r--  util/qht.c                  | 57
-rw-r--r--  util/qsp.c                  | 50
-rw-r--r--  util/rcu.c                  | 36
-rw-r--r--  util/stats64.c              | 34
24 files changed, 248 insertions, 201 deletions
diff --git a/util/aio-posix.c b/util/aio-posix.c
index f7f13ebfc2..280f27bb99 100644
--- a/util/aio-posix.c
+++ b/util/aio-posix.c
@@ -27,7 +27,7 @@
bool aio_poll_disabled(AioContext *ctx)
{
- return atomic_read(&ctx->poll_disable_cnt);
+ return qatomic_read(&ctx->poll_disable_cnt);
}
void aio_add_ready_handler(AioHandlerList *ready_list,
@@ -148,8 +148,8 @@ void aio_set_fd_handler(AioContext *ctx,
* Changing handlers is a rare event, and a little wasted polling until
* the aio_notify below is not an issue.
*/
- atomic_set(&ctx->poll_disable_cnt,
- atomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
+ qatomic_set(&ctx->poll_disable_cnt,
+ qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);
ctx->fdmon_ops->update(ctx, node, new_node);
if (node) {
@@ -581,7 +581,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
*/
use_notify_me = timeout != 0;
if (use_notify_me) {
- atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+ qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
/*
* Write ctx->notify_me before reading ctx->notified. Pairs with
* smp_mb in aio_notify().
@@ -589,7 +589,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
smp_mb();
/* Don't block if aio_notify() was called */
- if (atomic_read(&ctx->notified)) {
+ if (qatomic_read(&ctx->notified)) {
timeout = 0;
}
}
@@ -603,8 +603,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
if (use_notify_me) {
/* Finish the poll before clearing the flag. */
- atomic_store_release(&ctx->notify_me,
- atomic_read(&ctx->notify_me) - 2);
+ qatomic_store_release(&ctx->notify_me,
+ qatomic_read(&ctx->notify_me) - 2);
}
aio_notify_accept(ctx);
diff --git a/util/aio-wait.c b/util/aio-wait.c
index b4877493f8..bdb3d3af22 100644
--- a/util/aio-wait.c
+++ b/util/aio-wait.c
@@ -36,7 +36,7 @@ static void dummy_bh_cb(void *opaque)
void aio_wait_kick(void)
{
/* The barrier (or an atomic op) is in the caller. */
- if (atomic_read(&global_aio_wait.num_waiters)) {
+ if (qatomic_read(&global_aio_wait.num_waiters)) {
aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
}
}
diff --git a/util/aio-win32.c b/util/aio-win32.c
index 49bd90e62e..e7b1d649e9 100644
--- a/util/aio-win32.c
+++ b/util/aio-win32.c
@@ -345,7 +345,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
* so disable the optimization now.
*/
if (blocking) {
- atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) + 2);
+ qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
/*
* Write ctx->notify_me before computing the timeout
* (reading bottom half flags, etc.). Pairs with
@@ -384,7 +384,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
ret = WaitForMultipleObjects(count, events, FALSE, timeout);
if (blocking) {
assert(first);
- atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) - 2);
+ qatomic_store_release(&ctx->notify_me,
+ qatomic_read(&ctx->notify_me) - 2);
aio_notify_accept(ctx);
}
diff --git a/util/async.c b/util/async.c
index 4266745dee..f758354c6a 100644
--- a/util/async.c
+++ b/util/async.c
@@ -70,13 +70,13 @@ static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
unsigned old_flags;
/*
- * The memory barrier implicit in atomic_fetch_or makes sure that:
+ * The memory barrier implicit in qatomic_fetch_or makes sure that:
* 1. idle & any writes needed by the callback are done before the
* locations are read in the aio_bh_poll.
* 2. ctx is loaded before the callback has a chance to execute and bh
* could be freed.
*/
- old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
+ old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
if (!(old_flags & BH_PENDING)) {
QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
}
@@ -96,13 +96,13 @@ static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
QSLIST_REMOVE_HEAD(head, next);
/*
- * The atomic_and is paired with aio_bh_enqueue(). The implicit memory
+ * The qatomic_and is paired with aio_bh_enqueue(). The implicit memory
* barrier ensures that the callback sees all writes done by the scheduling
* thread. It also ensures that the scheduling thread sees the cleared
* flag before bh->cb has run, and thus will call aio_notify again if
* necessary.
*/
- *flags = atomic_fetch_and(&bh->flags,
+ *flags = qatomic_fetch_and(&bh->flags,
~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
return bh;
}
@@ -185,7 +185,7 @@ void qemu_bh_schedule(QEMUBH *bh)
*/
void qemu_bh_cancel(QEMUBH *bh)
{
- atomic_and(&bh->flags, ~BH_SCHEDULED);
+ qatomic_and(&bh->flags, ~BH_SCHEDULED);
}
/* This func is async.The bottom half will do the delete action at the finial
@@ -249,7 +249,7 @@ aio_ctx_prepare(GSource *source, gint *timeout)
{
AioContext *ctx = (AioContext *) source;
- atomic_set(&ctx->notify_me, atomic_read(&ctx->notify_me) | 1);
+ qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);
/*
* Write ctx->notify_me before computing the timeout
@@ -276,7 +276,7 @@ aio_ctx_check(GSource *source)
BHListSlice *s;
/* Finish computing the timeout before clearing the flag. */
- atomic_store_release(&ctx->notify_me, atomic_read(&ctx->notify_me) & ~1);
+ qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
aio_notify_accept(ctx);
QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
@@ -424,21 +424,21 @@ void aio_notify(AioContext *ctx)
* aio_notify_accept.
*/
smp_wmb();
- atomic_set(&ctx->notified, true);
+ qatomic_set(&ctx->notified, true);
/*
* Write ctx->notified before reading ctx->notify_me. Pairs
* with smp_mb in aio_ctx_prepare or aio_poll.
*/
smp_mb();
- if (atomic_read(&ctx->notify_me)) {
+ if (qatomic_read(&ctx->notify_me)) {
event_notifier_set(&ctx->notifier);
}
}
void aio_notify_accept(AioContext *ctx)
{
- atomic_set(&ctx->notified, false);
+ qatomic_set(&ctx->notified, false);
/*
* Write ctx->notified before reading e.g. bh->flags. Pairs with smp_wmb
@@ -465,7 +465,7 @@ static bool aio_context_notifier_poll(void *opaque)
EventNotifier *e = opaque;
AioContext *ctx = container_of(e, AioContext, notifier);
- return atomic_read(&ctx->notified);
+ return qatomic_read(&ctx->notified);
}
static void co_schedule_bh_cb(void *opaque)
@@ -489,7 +489,7 @@ static void co_schedule_bh_cb(void *opaque)
aio_context_acquire(ctx);
/* Protected by write barrier in qemu_aio_coroutine_enter */
- atomic_set(&co->scheduled, NULL);
+ qatomic_set(&co->scheduled, NULL);
qemu_aio_coroutine_enter(ctx, co);
aio_context_release(ctx);
}
@@ -546,7 +546,7 @@ fail:
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
trace_aio_co_schedule(ctx, co);
- const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL,
+ const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
__func__);
if (scheduled) {
@@ -577,7 +577,7 @@ void aio_co_wake(struct Coroutine *co)
* qemu_coroutine_enter.
*/
smp_read_barrier_depends();
- ctx = atomic_read(&co->ctx);
+ ctx = qatomic_read(&co->ctx);
aio_co_enter(ctx, co);
}
diff --git a/util/atomic64.c b/util/atomic64.c
index b198a6c9c8..93037d5b11 100644
--- a/util/atomic64.c
+++ b/util/atomic64.c
@@ -51,8 +51,8 @@ static QemuSpin *addr_to_lock(const void *addr)
return ret; \
}
-GEN_READ(atomic_read_i64, int64_t)
-GEN_READ(atomic_read_u64, uint64_t)
+GEN_READ(qatomic_read_i64, int64_t)
+GEN_READ(qatomic_read_u64, uint64_t)
#undef GEN_READ
#define GEN_SET(name, type) \
@@ -65,11 +65,11 @@ GEN_READ(atomic_read_u64, uint64_t)
qemu_spin_unlock(lock); \
}
-GEN_SET(atomic_set_i64, int64_t)
-GEN_SET(atomic_set_u64, uint64_t)
+GEN_SET(qatomic_set_i64, int64_t)
+GEN_SET(qatomic_set_u64, uint64_t)
#undef GEN_SET
-void atomic64_init(void)
+void qatomic64_init(void)
{
int i;
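
atomic64.c generates qatomic_read_i64/u64 and qatomic_set_i64/u64 as lock-based fallbacks for hosts without native 64-bit atomics, hashing the address to one spinlock in a small array (addr_to_lock). A rough standalone equivalent, using a single pthread mutex instead of the per-address QemuSpin array, might look like this:

    #include <pthread.h>
    #include <stdint.h>

    static pthread_mutex_t lock64 = PTHREAD_MUTEX_INITIALIZER;

    /* Read a 64-bit value that other threads may update concurrently. */
    static int64_t read_i64(const int64_t *ptr)
    {
        pthread_mutex_lock(&lock64);
        int64_t ret = *ptr;
        pthread_mutex_unlock(&lock64);
        return ret;
    }

    /* Write a 64-bit value; every accessor must take the same lock. */
    static void set_i64(int64_t *ptr, int64_t val)
    {
        pthread_mutex_lock(&lock64);
        *ptr = val;
        pthread_mutex_unlock(&lock64);
    }

Picking one lock per hashed address, as the real code does, only reduces contention; the correctness argument is the same.
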
diff --git a/util/bitmap.c b/util/bitmap.c
index 1753ff7f5b..1f201393ae 100644
--- a/util/bitmap.c
+++ b/util/bitmap.c
@@ -190,7 +190,7 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
/* First word */
if (nr - bits_to_set > 0) {
- atomic_or(p, mask_to_set);
+ qatomic_or(p, mask_to_set);
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
@@ -209,9 +209,9 @@ void bitmap_set_atomic(unsigned long *map, long start, long nr)
/* Last word */
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
- atomic_or(p, mask_to_set);
+ qatomic_or(p, mask_to_set);
} else {
- /* If we avoided the full barrier in atomic_or(), issue a
+ /* If we avoided the full barrier in qatomic_or(), issue a
* barrier to account for the assignments in the while loop.
*/
smp_mb();
@@ -253,7 +253,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
/* First word */
if (nr - bits_to_clear > 0) {
- old_bits = atomic_fetch_and(p, ~mask_to_clear);
+ old_bits = qatomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
@@ -265,7 +265,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
if (bits_to_clear == BITS_PER_LONG) {
while (nr >= BITS_PER_LONG) {
if (*p) {
- old_bits = atomic_xchg(p, 0);
+ old_bits = qatomic_xchg(p, 0);
dirty |= old_bits;
}
nr -= BITS_PER_LONG;
@@ -276,7 +276,7 @@ bool bitmap_test_and_clear_atomic(unsigned long *map, long start, long nr)
/* Last word */
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
- old_bits = atomic_fetch_and(p, ~mask_to_clear);
+ old_bits = qatomic_fetch_and(p, ~mask_to_clear);
dirty |= old_bits & mask_to_clear;
} else {
if (!dirty) {
@@ -291,7 +291,7 @@ void bitmap_copy_and_clear_atomic(unsigned long *dst, unsigned long *src,
long nr)
{
while (nr > 0) {
- *dst = atomic_xchg(src, 0);
+ *dst = qatomic_xchg(src, 0);
dst++;
src++;
nr -= BITS_PER_LONG;
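
bitmap_set_atomic updates the partial first and last words with qatomic_or so that concurrent writers of neighbouring bits in the same word are not lost. Reduced to a single-bit helper in plain C11 (illustrative names, not the QEMU bitmap API):

    #include <stdatomic.h>
    #include <limits.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    /* Atomically set bit 'nr' without disturbing other bits in the word. */
    static void set_bit_atomic(unsigned long nr, _Atomic unsigned long *map)
    {
        _Atomic unsigned long *word = &map[nr / BITS_PER_LONG];
        unsigned long mask = 1UL << (nr % BITS_PER_LONG);

        atomic_fetch_or(word, mask);
    }

A plain *word |= mask would be a racy read-modify-write; the fetch_or makes the whole update one atomic step.
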
diff --git a/util/cacheinfo.c b/util/cacheinfo.c
index d94dc6adc8..7804c186b6 100644
--- a/util/cacheinfo.c
+++ b/util/cacheinfo.c
@@ -193,5 +193,5 @@ static void __attribute__((constructor)) init_cache_info(void)
qemu_dcache_linesize = dsize;
qemu_dcache_linesize_log = ctz32(dsize);
- atomic64_init();
+ qatomic64_init();
}
diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c
index fcd989d47d..e11a8a022e 100644
--- a/util/fdmon-epoll.c
+++ b/util/fdmon-epoll.c
@@ -65,7 +65,7 @@ static int fdmon_epoll_wait(AioContext *ctx, AioHandlerList *ready_list,
struct epoll_event events[128];
/* Fall back while external clients are disabled */
- if (atomic_read(&ctx->external_disable_cnt)) {
+ if (qatomic_read(&ctx->external_disable_cnt)) {
return fdmon_poll_ops.wait(ctx, ready_list, timeout);
}
@@ -132,7 +132,7 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd)
}
/* Do not upgrade while external clients are disabled */
- if (atomic_read(&ctx->external_disable_cnt)) {
+ if (qatomic_read(&ctx->external_disable_cnt)) {
return false;
}
diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c
index 1d14177df0..1461dfa407 100644
--- a/util/fdmon-io_uring.c
+++ b/util/fdmon-io_uring.c
@@ -103,7 +103,7 @@ static void enqueue(AioHandlerSList *head, AioHandler *node, unsigned flags)
{
unsigned old_flags;
- old_flags = atomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags);
+ old_flags = qatomic_fetch_or(&node->flags, FDMON_IO_URING_PENDING | flags);
if (!(old_flags & FDMON_IO_URING_PENDING)) {
QSLIST_INSERT_HEAD_ATOMIC(head, node, node_submitted);
}
@@ -127,7 +127,7 @@ static AioHandler *dequeue(AioHandlerSList *head, unsigned *flags)
* telling process_cqe() to delete the AioHandler when its
* IORING_OP_POLL_ADD completes.
*/
- *flags = atomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING |
+ *flags = qatomic_fetch_and(&node->flags, ~(FDMON_IO_URING_PENDING |
FDMON_IO_URING_ADD));
return node;
}
@@ -233,7 +233,7 @@ static bool process_cqe(AioContext *ctx,
* with enqueue() here then we can safely clear the FDMON_IO_URING_REMOVE
* bit before IORING_OP_POLL_REMOVE is submitted.
*/
- flags = atomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE);
+ flags = qatomic_fetch_and(&node->flags, ~FDMON_IO_URING_REMOVE);
if (flags & FDMON_IO_URING_REMOVE) {
QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
return false;
@@ -273,7 +273,7 @@ static int fdmon_io_uring_wait(AioContext *ctx, AioHandlerList *ready_list,
int ret;
/* Fall back while external clients are disabled */
- if (atomic_read(&ctx->external_disable_cnt)) {
+ if (qatomic_read(&ctx->external_disable_cnt)) {
return fdmon_poll_ops.wait(ctx, ready_list, timeout);
}
@@ -312,7 +312,7 @@ static bool fdmon_io_uring_need_wait(AioContext *ctx)
}
/* Are we falling back to fdmon-poll? */
- return atomic_read(&ctx->external_disable_cnt);
+ return qatomic_read(&ctx->external_disable_cnt);
}
static const FDMonOps fdmon_io_uring_ops = {
@@ -344,7 +344,7 @@ void fdmon_io_uring_destroy(AioContext *ctx)
/* Move handlers due to be removed onto the deleted list */
while ((node = QSLIST_FIRST_RCU(&ctx->submit_list))) {
- unsigned flags = atomic_fetch_and(&node->flags,
+ unsigned flags = qatomic_fetch_and(&node->flags,
~(FDMON_IO_URING_PENDING |
FDMON_IO_URING_ADD |
FDMON_IO_URING_REMOVE));
diff --git a/util/fdmon-poll.c b/util/fdmon-poll.c
index 488067b679..5fe3b47865 100644
--- a/util/fdmon-poll.c
+++ b/util/fdmon-poll.c
@@ -73,6 +73,7 @@ static int fdmon_poll_wait(AioContext *ctx, AioHandlerList *ready_list,
/* epoll(7) is faster above a certain number of fds */
if (fdmon_epoll_try_upgrade(ctx, npfd)) {
+ npfd = 0; /* we won't need pollfds[], reset npfd */
return ctx->fdmon_ops->wait(ctx, ready_list, timeout);
}
diff --git a/util/iov.c b/util/iov.c
index ae61d696aa..f3a9e92a37 100644
--- a/util/iov.c
+++ b/util/iov.c
@@ -636,14 +636,33 @@ void qemu_iovec_clone(QEMUIOVector *dest, const QEMUIOVector *src, void *buf)
}
}
-size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
- size_t bytes)
+void iov_discard_undo(IOVDiscardUndo *undo)
+{
+ /* Restore original iovec if it was modified */
+ if (undo->modified_iov) {
+ *undo->modified_iov = undo->orig;
+ }
+}
+
+size_t iov_discard_front_undoable(struct iovec **iov,
+ unsigned int *iov_cnt,
+ size_t bytes,
+ IOVDiscardUndo *undo)
{
size_t total = 0;
struct iovec *cur;
+ if (undo) {
+ undo->modified_iov = NULL;
+ }
+
for (cur = *iov; *iov_cnt > 0; cur++) {
if (cur->iov_len > bytes) {
+ if (undo) {
+ undo->modified_iov = cur;
+ undo->orig = *cur;
+ }
+
cur->iov_base += bytes;
cur->iov_len -= bytes;
total += bytes;
@@ -659,12 +678,24 @@ size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
return total;
}
-size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
- size_t bytes)
+size_t iov_discard_front(struct iovec **iov, unsigned int *iov_cnt,
+ size_t bytes)
+{
+ return iov_discard_front_undoable(iov, iov_cnt, bytes, NULL);
+}
+
+size_t iov_discard_back_undoable(struct iovec *iov,
+ unsigned int *iov_cnt,
+ size_t bytes,
+ IOVDiscardUndo *undo)
{
size_t total = 0;
struct iovec *cur;
+ if (undo) {
+ undo->modified_iov = NULL;
+ }
+
if (*iov_cnt == 0) {
return 0;
}
@@ -673,6 +704,11 @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
while (*iov_cnt > 0) {
if (cur->iov_len > bytes) {
+ if (undo) {
+ undo->modified_iov = cur;
+ undo->orig = *cur;
+ }
+
cur->iov_len -= bytes;
total += bytes;
break;
@@ -687,6 +723,12 @@ size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
return total;
}
+size_t iov_discard_back(struct iovec *iov, unsigned int *iov_cnt,
+ size_t bytes)
+{
+ return iov_discard_back_undoable(iov, iov_cnt, bytes, NULL);
+}
+
void qemu_iovec_discard_back(QEMUIOVector *qiov, size_t bytes)
{
size_t total;
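
The new *_undoable variants record the single iovec element they shrink so the caller can restore it later with iov_discard_undo(). A usage sketch, assuming the declarations live in "qemu/iov.h" next to the rest of the iov helpers (the surrounding function and scenario are made up):

    #include "qemu/osdep.h"
    #include "qemu/iov.h"

    /* Strip a header from the front of the vector, hand the payload on,
     * then restore the original element so the request can be retried. */
    static void submit_with_possible_retry(struct iovec *iov,
                                           unsigned int iov_cnt,
                                           size_t hdr_len)
    {
        IOVDiscardUndo undo;
        struct iovec *payload = iov;
        unsigned int payload_cnt = iov_cnt;

        iov_discard_front_undoable(&payload, &payload_cnt, hdr_len, &undo);

        /* ... submit payload / payload_cnt ... */

        /* Put back the element that was modified in place, e.g. when the
         * request has to be requeued unmodified. */
        iov_discard_undo(&undo);
    }

Note that only the element saved in undo->orig is restored; the caller keeps its own copies of the original iov pointer and count, as above.
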
diff --git a/util/lockcnt.c b/util/lockcnt.c
index 4f88dcf8b8..5da36946b1 100644
--- a/util/lockcnt.c
+++ b/util/lockcnt.c
@@ -61,7 +61,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
int expected = *val;
trace_lockcnt_fast_path_attempt(lockcnt, expected, new_if_free);
- *val = atomic_cmpxchg(&lockcnt->count, expected, new_if_free);
+ *val = qatomic_cmpxchg(&lockcnt->count, expected, new_if_free);
if (*val == expected) {
trace_lockcnt_fast_path_success(lockcnt, expected, new_if_free);
*val = new_if_free;
@@ -81,7 +81,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
int new = expected - QEMU_LOCKCNT_STATE_LOCKED + QEMU_LOCKCNT_STATE_WAITING;
trace_lockcnt_futex_wait_prepare(lockcnt, expected, new);
- *val = atomic_cmpxchg(&lockcnt->count, expected, new);
+ *val = qatomic_cmpxchg(&lockcnt->count, expected, new);
if (*val == expected) {
*val = new;
}
@@ -92,7 +92,7 @@ static bool qemu_lockcnt_cmpxchg_or_wait(QemuLockCnt *lockcnt, int *val,
*waited = true;
trace_lockcnt_futex_wait(lockcnt, *val);
qemu_futex_wait(&lockcnt->count, *val);
- *val = atomic_read(&lockcnt->count);
+ *val = qatomic_read(&lockcnt->count);
trace_lockcnt_futex_wait_resume(lockcnt, *val);
continue;
}
@@ -110,13 +110,14 @@ static void lockcnt_wake(QemuLockCnt *lockcnt)
void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
bool waited = false;
for (;;) {
if (val >= QEMU_LOCKCNT_COUNT_STEP) {
int expected = val;
- val = atomic_cmpxchg(&lockcnt->count, val, val + QEMU_LOCKCNT_COUNT_STEP);
+ val = qatomic_cmpxchg(&lockcnt->count, val,
+ val + QEMU_LOCKCNT_COUNT_STEP);
if (val == expected) {
break;
}
@@ -142,7 +143,7 @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
- atomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
+ qatomic_sub(&lockcnt->count, QEMU_LOCKCNT_COUNT_STEP);
}
/* Decrement a counter, and return locked if it is decremented to zero.
@@ -151,14 +152,15 @@ void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
*/
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
bool waited = false;
for (;;) {
if (val >= 2 * QEMU_LOCKCNT_COUNT_STEP) {
int expected = val;
- val = atomic_cmpxchg(&lockcnt->count, val, val - QEMU_LOCKCNT_COUNT_STEP);
+ val = qatomic_cmpxchg(&lockcnt->count, val,
+ val - QEMU_LOCKCNT_COUNT_STEP);
if (val == expected) {
break;
}
@@ -199,7 +201,7 @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
*/
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
int locked_state = QEMU_LOCKCNT_STATE_LOCKED;
bool waited = false;
@@ -233,7 +235,7 @@ bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
{
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
int step = QEMU_LOCKCNT_STATE_LOCKED;
bool waited = false;
@@ -255,12 +257,12 @@ void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
int expected, new, val;
- val = atomic_read(&lockcnt->count);
+ val = qatomic_read(&lockcnt->count);
do {
expected = val;
new = (val + QEMU_LOCKCNT_COUNT_STEP) & ~QEMU_LOCKCNT_STATE_MASK;
trace_lockcnt_unlock_attempt(lockcnt, val, new);
- val = atomic_cmpxchg(&lockcnt->count, val, new);
+ val = qatomic_cmpxchg(&lockcnt->count, val, new);
} while (val != expected);
trace_lockcnt_unlock_success(lockcnt, val, new);
@@ -273,12 +275,12 @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
{
int expected, new, val;
- val = atomic_read(&lockcnt->count);
+ val = qatomic_read(&lockcnt->count);
do {
expected = val;
new = val & ~QEMU_LOCKCNT_STATE_MASK;
trace_lockcnt_unlock_attempt(lockcnt, val, new);
- val = atomic_cmpxchg(&lockcnt->count, val, new);
+ val = qatomic_cmpxchg(&lockcnt->count, val, new);
} while (val != expected);
trace_lockcnt_unlock_success(lockcnt, val, new);
@@ -289,7 +291,7 @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
- return atomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
+ return qatomic_read(&lockcnt->count) >> QEMU_LOCKCNT_COUNT_SHIFT;
}
#else
void qemu_lockcnt_init(QemuLockCnt *lockcnt)
@@ -307,13 +309,13 @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
{
int old;
for (;;) {
- old = atomic_read(&lockcnt->count);
+ old = qatomic_read(&lockcnt->count);
if (old == 0) {
qemu_lockcnt_lock(lockcnt);
qemu_lockcnt_inc_and_unlock(lockcnt);
return;
} else {
- if (atomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
+ if (qatomic_cmpxchg(&lockcnt->count, old, old + 1) == old) {
return;
}
}
@@ -322,7 +324,7 @@ void qemu_lockcnt_inc(QemuLockCnt *lockcnt)
void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
{
- atomic_dec(&lockcnt->count);
+ qatomic_dec(&lockcnt->count);
}
/* Decrement a counter, and return locked if it is decremented to zero.
@@ -331,9 +333,9 @@ void qemu_lockcnt_dec(QemuLockCnt *lockcnt)
*/
bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
{
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
while (val > 1) {
- int old = atomic_cmpxchg(&lockcnt->count, val, val - 1);
+ int old = qatomic_cmpxchg(&lockcnt->count, val, val - 1);
if (old != val) {
val = old;
continue;
@@ -343,7 +345,7 @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
}
qemu_lockcnt_lock(lockcnt);
- if (atomic_fetch_dec(&lockcnt->count) == 1) {
+ if (qatomic_fetch_dec(&lockcnt->count) == 1) {
return true;
}
@@ -360,13 +362,13 @@ bool qemu_lockcnt_dec_and_lock(QemuLockCnt *lockcnt)
bool qemu_lockcnt_dec_if_lock(QemuLockCnt *lockcnt)
{
/* No need for acquire semantics if we return false. */
- int val = atomic_read(&lockcnt->count);
+ int val = qatomic_read(&lockcnt->count);
if (val > 1) {
return false;
}
qemu_lockcnt_lock(lockcnt);
- if (atomic_fetch_dec(&lockcnt->count) == 1) {
+ if (qatomic_fetch_dec(&lockcnt->count) == 1) {
return true;
}
@@ -381,7 +383,7 @@ void qemu_lockcnt_lock(QemuLockCnt *lockcnt)
void qemu_lockcnt_inc_and_unlock(QemuLockCnt *lockcnt)
{
- atomic_inc(&lockcnt->count);
+ qatomic_inc(&lockcnt->count);
qemu_mutex_unlock(&lockcnt->mutex);
}
@@ -392,6 +394,6 @@ void qemu_lockcnt_unlock(QemuLockCnt *lockcnt)
unsigned qemu_lockcnt_count(QemuLockCnt *lockcnt)
{
- return atomic_read(&lockcnt->count);
+ return qatomic_read(&lockcnt->count);
}
#endif
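
QemuLockCnt packs a reference count and a lock together (count plus state bits in one word on the futex path, count plus mutex otherwise). The usual pattern is that visitors take a reference around their accesses, while a thread that wants to reclaim uses qemu_lockcnt_dec_and_lock(), which per the comments above returns true with the lock held exactly when the count dropped to zero. A sketch, assuming the QemuLockCnt declarations from "qemu/thread.h" and with hypothetical helpers:

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuLockCnt lockcnt;

    /* Visitor: keep the protected structures alive while walking them. */
    static void visit(void)
    {
        qemu_lockcnt_inc(&lockcnt);
        walk_handlers();              /* hypothetical */
        qemu_lockcnt_dec(&lockcnt);
    }

    /* Reclaimer: drop a reference and clean up only if we were the last user. */
    static void put_and_maybe_reclaim(void)
    {
        if (qemu_lockcnt_dec_and_lock(&lockcnt)) {
            free_removed_handlers();  /* hypothetical */
            qemu_lockcnt_unlock(&lockcnt);
        }
    }
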
diff --git a/util/log.c b/util/log.c
index bdb3d712e8..4b423062aa 100644
--- a/util/log.c
+++ b/util/log.c
@@ -41,7 +41,7 @@ int qemu_log(const char *fmt, ...)
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
va_list ap;
va_start(ap, fmt);
@@ -98,7 +98,7 @@ void qemu_set_log(int log_flags)
QEMU_LOCK_GUARD(&qemu_logfile_mutex);
if (qemu_logfile && !need_to_open_file) {
logfile = qemu_logfile;
- atomic_rcu_set(&qemu_logfile, NULL);
+ qatomic_rcu_set(&qemu_logfile, NULL);
call_rcu(logfile, qemu_logfile_free, rcu);
} else if (!qemu_logfile && need_to_open_file) {
logfile = g_new0(QemuLogFile, 1);
@@ -135,7 +135,7 @@ void qemu_set_log(int log_flags)
#endif
log_append = 1;
}
- atomic_rcu_set(&qemu_logfile, logfile);
+ qatomic_rcu_set(&qemu_logfile, logfile);
}
}
@@ -272,7 +272,7 @@ void qemu_log_flush(void)
QemuLogFile *logfile;
rcu_read_lock();
- logfile = atomic_rcu_read(&qemu_logfile);
+ logfile = qatomic_rcu_read(&qemu_logfile);
if (logfile) {
fflush(logfile->fd);
}
@@ -288,7 +288,7 @@ void qemu_log_close(void)
logfile = qemu_logfile;
if (logfile) {
- atomic_rcu_set(&qemu_logfile, NULL);
+ qatomic_rcu_set(&qemu_logfile, NULL);
call_rcu(logfile, qemu_logfile_free, rcu);
}
qemu_mutex_unlock(&qemu_logfile_mutex);
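
The log.c hunks show the standard RCU publish/retire pattern: readers dereference the pointer with qatomic_rcu_read() inside an rcu_read_lock()/rcu_read_unlock() section, and writers install a new version with qatomic_rcu_set() and defer freeing the old one with call_rcu(). A condensed sketch of the same pattern (struct and helper names are illustrative, not the QemuLogFile code; writers are assumed to be serialized by a mutex, as they are here):

    #include "qemu/osdep.h"
    #include "qemu/atomic.h"
    #include "qemu/rcu.h"

    struct MyConf {
        struct rcu_head rcu;              /* required by call_rcu() */
        int value;
    };

    static struct MyConf *current_conf;   /* RCU-protected pointer */

    static void myconf_free(struct MyConf *c)
    {
        g_free(c);
    }

    static int read_value(void)
    {
        int val = 0;

        rcu_read_lock();
        struct MyConf *c = qatomic_rcu_read(&current_conf);
        if (c) {
            val = c->value;
        }
        rcu_read_unlock();
        return val;
    }

    static void update_value(int value)   /* called with the writer lock held */
    {
        struct MyConf *new_conf = g_new0(struct MyConf, 1);
        struct MyConf *old = current_conf;

        new_conf->value = value;
        qatomic_rcu_set(&current_conf, new_conf);
        if (old) {
            call_rcu(old, myconf_free, rcu);   /* freed after a grace period */
        }
    }
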
diff --git a/util/qemu-coroutine-lock.c b/util/qemu-coroutine-lock.c
index 5da5234155..36927b5f88 100644
--- a/util/qemu-coroutine-lock.c
+++ b/util/qemu-coroutine-lock.c
@@ -212,10 +212,10 @@ static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
/* This is the "Responsibility Hand-Off" protocol; a lock() picks from
* a concurrent unlock() the responsibility of waking somebody up.
*/
- old_handoff = atomic_mb_read(&mutex->handoff);
+ old_handoff = qatomic_mb_read(&mutex->handoff);
if (old_handoff &&
has_waiters(mutex) &&
- atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
+ qatomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
/* There can be no concurrent pops, because there can be only
* one active handoff at a time.
*/
@@ -250,18 +250,18 @@ void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
*/
i = 0;
retry_fast_path:
- waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
+ waiters = qatomic_cmpxchg(&mutex->locked, 0, 1);
if (waiters != 0) {
while (waiters == 1 && ++i < 1000) {
- if (atomic_read(&mutex->ctx) == ctx) {
+ if (qatomic_read(&mutex->ctx) == ctx) {
break;
}
- if (atomic_read(&mutex->locked) == 0) {
+ if (qatomic_read(&mutex->locked) == 0) {
goto retry_fast_path;
}
cpu_relax();
}
- waiters = atomic_fetch_inc(&mutex->locked);
+ waiters = qatomic_fetch_inc(&mutex->locked);
}
if (waiters == 0) {
@@ -288,7 +288,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
mutex->ctx = NULL;
mutex->holder = NULL;
self->locks_held--;
- if (atomic_fetch_dec(&mutex->locked) == 1) {
+ if (qatomic_fetch_dec(&mutex->locked) == 1) {
/* No waiting qemu_co_mutex_lock(). Pfew, that was easy! */
return;
}
@@ -311,7 +311,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
}
our_handoff = mutex->sequence;
- atomic_mb_set(&mutex->handoff, our_handoff);
+ qatomic_mb_set(&mutex->handoff, our_handoff);
if (!has_waiters(mutex)) {
/* The concurrent lock has not added itself yet, so it
* will be able to pick our handoff.
@@ -322,7 +322,7 @@ void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
/* Try to do the handoff protocol ourselves; if somebody else has
* already taken it, however, we're done and they're responsible.
*/
- if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
+ if (qatomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
break;
}
}
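
The CoMutex fast path above is a bounded spin: try a cmpxchg, then briefly spin reading the lock word (with cpu_relax()) before falling back to the slow path that queues the coroutine. The full algorithm also counts waiters and uses the handoff protocol; the skeleton of just the bounded test-and-test-and-set, in standalone C11 form, is roughly:

    #include <stdatomic.h>

    #define SPIN_LIMIT 1000

    static atomic_int locked;

    static void lock_slow_path(void)
    {
        /* QEMU parks the coroutine on a wait queue here; to stay
         * self-contained this sketch just keeps retrying. */
        int expected = 0;
        while (!atomic_compare_exchange_weak(&locked, &expected, 1)) {
            expected = 0;
        }
    }

    static void lock_with_bounded_spin(void)
    {
        for (int i = 0; i < SPIN_LIMIT; i++) {
            int expected = 0;

            /* Only attempt the RMW when the lock looks free; spinning on
             * the plain load keeps the cache line in shared state. */
            if (atomic_load_explicit(&locked, memory_order_relaxed) == 0 &&
                atomic_compare_exchange_strong(&locked, &expected, 1)) {
                return;                    /* fast path succeeded */
            }
        }
        lock_slow_path();                  /* contended: give up spinning */
    }

    static void unlock(void)
    {
        atomic_store_explicit(&locked, 0, memory_order_release);
    }
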
diff --git a/util/qemu-coroutine-sleep.c b/util/qemu-coroutine-sleep.c
index 769a76e57d..8c4dac4fd7 100644
--- a/util/qemu-coroutine-sleep.c
+++ b/util/qemu-coroutine-sleep.c
@@ -28,7 +28,7 @@ struct QemuCoSleepState {
void qemu_co_sleep_wake(QemuCoSleepState *sleep_state)
{
/* Write of schedule protected by barrier write in aio_co_schedule */
- const char *scheduled = atomic_cmpxchg(&sleep_state->co->scheduled,
+ const char *scheduled = qatomic_cmpxchg(&sleep_state->co->scheduled,
qemu_co_sleep_ns__scheduled, NULL);
assert(scheduled == qemu_co_sleep_ns__scheduled);
@@ -54,7 +54,7 @@ void coroutine_fn qemu_co_sleep_ns_wakeable(QEMUClockType type, int64_t ns,
.user_state_pointer = sleep_state,
};
- const char *scheduled = atomic_cmpxchg(&state.co->scheduled, NULL,
+ const char *scheduled = qatomic_cmpxchg(&state.co->scheduled, NULL,
qemu_co_sleep_ns__scheduled);
if (scheduled) {
fprintf(stderr,
diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c
index c3caa6c770..38fb6d3084 100644
--- a/util/qemu-coroutine.c
+++ b/util/qemu-coroutine.c
@@ -60,7 +60,7 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
* release_pool_size and the actual size of release_pool. But
* it is just a heuristic, it does not need to be perfect.
*/
- alloc_pool_size = atomic_xchg(&release_pool_size, 0);
+ alloc_pool_size = qatomic_xchg(&release_pool_size, 0);
QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
co = QSLIST_FIRST(&alloc_pool);
}
@@ -88,7 +88,7 @@ static void coroutine_delete(Coroutine *co)
if (CONFIG_COROUTINE_POOL) {
if (release_pool_size < POOL_BATCH_SIZE * 2) {
QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
- atomic_inc(&release_pool_size);
+ qatomic_inc(&release_pool_size);
return;
}
if (alloc_pool_size < POOL_BATCH_SIZE) {
@@ -115,7 +115,7 @@ void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
/* Cannot rely on the read barrier for to in aio_co_wake(), as there are
* callers outside of aio_co_wake() */
- const char *scheduled = atomic_mb_read(&to->scheduled);
+ const char *scheduled = qatomic_mb_read(&to->scheduled);
QSIMPLEQ_REMOVE_HEAD(&pending, co_queue_next);
diff --git a/util/qemu-sockets.c b/util/qemu-sockets.c
index 99ce2fd5e6..de4bf7616e 100644
--- a/util/qemu-sockets.c
+++ b/util/qemu-sockets.c
@@ -395,7 +395,7 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
memset(&ai, 0, sizeof(ai));
ai.ai_flags = AI_CANONNAME | AI_ADDRCONFIG;
- if (atomic_read(&useV4Mapped)) {
+ if (qatomic_read(&useV4Mapped)) {
ai.ai_flags |= AI_V4MAPPED;
}
ai.ai_family = inet_ai_family_from_address(saddr, &err);
@@ -421,7 +421,7 @@ static struct addrinfo *inet_parse_connect_saddr(InetSocketAddress *saddr,
*/
if (rc == EAI_BADFLAGS &&
(ai.ai_flags & AI_V4MAPPED)) {
- atomic_set(&useV4Mapped, 0);
+ qatomic_set(&useV4Mapped, 0);
ai.ai_flags &= ~AI_V4MAPPED;
rc = getaddrinfo(saddr->host, saddr->port, &ai, &res);
}
diff --git a/util/qemu-thread-posix.c b/util/qemu-thread-posix.c
index b4c2359272..dcff5e7c5d 100644
--- a/util/qemu-thread-posix.c
+++ b/util/qemu-thread-posix.c
@@ -414,8 +414,8 @@ void qemu_event_set(QemuEvent *ev)
*/
assert(ev->initialized);
smp_mb();
- if (atomic_read(&ev->value) != EV_SET) {
- if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+ if (qatomic_read(&ev->value) != EV_SET) {
+ if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
/* There were waiters, wake them up. */
qemu_futex_wake(ev, INT_MAX);
}
@@ -427,14 +427,14 @@ void qemu_event_reset(QemuEvent *ev)
unsigned value;
assert(ev->initialized);
- value = atomic_read(&ev->value);
+ value = qatomic_read(&ev->value);
smp_mb_acquire();
if (value == EV_SET) {
/*
* If there was a concurrent reset (or even reset+wait),
* do nothing. Otherwise change EV_SET->EV_FREE.
*/
- atomic_or(&ev->value, EV_FREE);
+ qatomic_or(&ev->value, EV_FREE);
}
}
@@ -443,7 +443,7 @@ void qemu_event_wait(QemuEvent *ev)
unsigned value;
assert(ev->initialized);
- value = atomic_read(&ev->value);
+ value = qatomic_read(&ev->value);
smp_mb_acquire();
if (value != EV_SET) {
if (value == EV_FREE) {
@@ -453,7 +453,7 @@ void qemu_event_wait(QemuEvent *ev)
* a concurrent busy->free transition. After the CAS, the
* event will be either set or busy.
*/
- if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
+ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
return;
}
}
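
QemuEvent is a three-state event (free, busy, set); the atomics above let qemu_event_set() and qemu_event_wait() skip the futex entirely when there is no contention. The intended usage is reset, re-check the condition, then wait, with the condition itself accessed atomically. A sketch (work_done and the producer/consumer split are illustrative):

    #include "qemu/osdep.h"
    #include "qemu/thread.h"

    static QemuEvent done_ev;
    static bool work_done;               /* illustrative flag guarded by the event */

    static void setup(void)
    {
        qemu_event_init(&done_ev, false);
    }

    static void producer(void)
    {
        qatomic_set(&work_done, true);   /* publish the result... */
        qemu_event_set(&done_ev);        /* ...then wake any waiter */
    }

    static void consumer(void)
    {
        qemu_event_reset(&done_ev);      /* reset first */
        if (!qatomic_read(&work_done)) { /* then re-check before sleeping */
            qemu_event_wait(&done_ev);
        }
    }

The reset-before-check order matters: checking first and resetting afterwards can wipe out a qemu_event_set() that arrived in between, and the waiter would then sleep even though the condition already holds.
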
diff --git a/util/qemu-thread-win32.c b/util/qemu-thread-win32.c
index d207b0cb58..cb5aa2018c 100644
--- a/util/qemu-thread-win32.c
+++ b/util/qemu-thread-win32.c
@@ -250,8 +250,8 @@ void qemu_event_set(QemuEvent *ev)
* ev->value we need a full memory barrier here.
*/
smp_mb();
- if (atomic_read(&ev->value) != EV_SET) {
- if (atomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
+ if (qatomic_read(&ev->value) != EV_SET) {
+ if (qatomic_xchg(&ev->value, EV_SET) == EV_BUSY) {
/* There were waiters, wake them up. */
SetEvent(ev->event);
}
@@ -263,13 +263,13 @@ void qemu_event_reset(QemuEvent *ev)
unsigned value;
assert(ev->initialized);
- value = atomic_read(&ev->value);
+ value = qatomic_read(&ev->value);
smp_mb_acquire();
if (value == EV_SET) {
/* If there was a concurrent reset (or even reset+wait),
* do nothing. Otherwise change EV_SET->EV_FREE.
*/
- atomic_or(&ev->value, EV_FREE);
+ qatomic_or(&ev->value, EV_FREE);
}
}
@@ -278,7 +278,7 @@ void qemu_event_wait(QemuEvent *ev)
unsigned value;
assert(ev->initialized);
- value = atomic_read(&ev->value);
+ value = qatomic_read(&ev->value);
smp_mb_acquire();
if (value != EV_SET) {
if (value == EV_FREE) {
@@ -292,7 +292,7 @@ void qemu_event_wait(QemuEvent *ev)
* because there cannot be a concurrent busy->free transition.
* After the CAS, the event will be either set or busy.
*/
- if (atomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
+ if (qatomic_cmpxchg(&ev->value, EV_FREE, EV_BUSY) == EV_SET) {
value = EV_SET;
} else {
value = EV_BUSY;
diff --git a/util/qemu-timer.c b/util/qemu-timer.c
index 878d80fd5e..ca677836cb 100644
--- a/util/qemu-timer.c
+++ b/util/qemu-timer.c
@@ -170,7 +170,7 @@ void qemu_clock_enable(QEMUClockType type, bool enabled)
bool timerlist_has_timers(QEMUTimerList *timer_list)
{
- return !!atomic_read(&timer_list->active_timers);
+ return !!qatomic_read(&timer_list->active_timers);
}
bool qemu_clock_has_timers(QEMUClockType type)
@@ -183,7 +183,7 @@ bool timerlist_expired(QEMUTimerList *timer_list)
{
int64_t expire_time;
- if (!atomic_read(&timer_list->active_timers)) {
+ if (!qatomic_read(&timer_list->active_timers)) {
return false;
}
@@ -213,7 +213,7 @@ int64_t timerlist_deadline_ns(QEMUTimerList *timer_list)
int64_t delta;
int64_t expire_time;
- if (!atomic_read(&timer_list->active_timers)) {
+ if (!qatomic_read(&timer_list->active_timers)) {
return -1;
}
@@ -385,7 +385,7 @@ static void timer_del_locked(QEMUTimerList *timer_list, QEMUTimer *ts)
if (!t)
break;
if (t == ts) {
- atomic_set(pt, t->next);
+ qatomic_set(pt, t->next);
break;
}
pt = &t->next;
@@ -408,7 +408,7 @@ static bool timer_mod_ns_locked(QEMUTimerList *timer_list,
}
ts->expire_time = MAX(expire_time, 0);
ts->next = *pt;
- atomic_set(pt, ts);
+ qatomic_set(pt, ts);
return pt == &timer_list->active_timers;
}
@@ -502,7 +502,7 @@ bool timerlist_run_timers(QEMUTimerList *timer_list)
QEMUTimerCB *cb;
void *opaque;
- if (!atomic_read(&timer_list->active_timers)) {
+ if (!qatomic_read(&timer_list->active_timers)) {
return false;
}
diff --git a/util/qht.c b/util/qht.c
index b2e020c398..079605121b 100644
--- a/util/qht.c
+++ b/util/qht.c
@@ -131,11 +131,11 @@ static inline void qht_unlock(struct qht *ht)
/*
* Note: reading partially-updated pointers in @pointers could lead to
- * segfaults. We thus access them with atomic_read/set; this guarantees
+ * segfaults. We thus access them with qatomic_read/set; this guarantees
* that the compiler makes all those accesses atomic. We also need the
- * volatile-like behavior in atomic_read, since otherwise the compiler
+ * volatile-like behavior in qatomic_read, since otherwise the compiler
* might refetch the pointer.
- * atomic_read's are of course not necessary when the bucket lock is held.
+ * qatomic_read's are of course not necessary when the bucket lock is held.
*
* If both ht->lock and b->lock are grabbed, ht->lock should always
* be grabbed first.
@@ -286,7 +286,7 @@ void qht_map_lock_buckets__no_stale(struct qht *ht, struct qht_map **pmap)
{
struct qht_map *map;
- map = atomic_rcu_read(&ht->map);
+ map = qatomic_rcu_read(&ht->map);
qht_map_lock_buckets(map);
if (likely(!qht_map_is_stale__locked(ht, map))) {
*pmap = map;
@@ -318,7 +318,7 @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
struct qht_bucket *b;
struct qht_map *map;
- map = atomic_rcu_read(&ht->map);
+ map = qatomic_rcu_read(&ht->map);
b = qht_map_to_bucket(map, hash);
qemu_spin_lock(&b->lock);
@@ -340,7 +340,8 @@ struct qht_bucket *qht_bucket_lock__no_stale(struct qht *ht, uint32_t hash,
static inline bool qht_map_needs_resize(const struct qht_map *map)
{
- return atomic_read(&map->n_added_buckets) > map->n_added_buckets_threshold;
+ return qatomic_read(&map->n_added_buckets) >
+ map->n_added_buckets_threshold;
}
static inline void qht_chain_destroy(const struct qht_bucket *head)
@@ -404,7 +405,7 @@ void qht_init(struct qht *ht, qht_cmp_func_t cmp, size_t n_elems,
ht->mode = mode;
qemu_mutex_init(&ht->lock);
map = qht_map_create(n_buckets);
- atomic_rcu_set(&ht->map, map);
+ qatomic_rcu_set(&ht->map, map);
}
/* call only when there are no readers/writers left */
@@ -425,8 +426,8 @@ static void qht_bucket_reset__locked(struct qht_bucket *head)
if (b->pointers[i] == NULL) {
goto done;
}
- atomic_set(&b->hashes[i], 0);
- atomic_set(&b->pointers[i], NULL);
+ qatomic_set(&b->hashes[i], 0);
+ qatomic_set(&b->pointers[i], NULL);
}
b = b->next;
} while (b);
@@ -492,19 +493,19 @@ void *qht_do_lookup(const struct qht_bucket *head, qht_lookup_func_t func,
do {
for (i = 0; i < QHT_BUCKET_ENTRIES; i++) {
- if (atomic_read(&b->hashes[i]) == hash) {
+ if (qatomic_read(&b->hashes[i]) == hash) {
/* The pointer is dereferenced before seqlock_read_retry,
* so (unlike qht_insert__locked) we need to use
- * atomic_rcu_read here.
+ * qatomic_rcu_read here.
*/
- void *p = atomic_rcu_read(&b->pointers[i]);
+ void *p = qatomic_rcu_read(&b->pointers[i]);
if (likely(p) && likely(func(p, userp))) {
return p;
}
}
}
- b = atomic_rcu_read(&b->next);
+ b = qatomic_rcu_read(&b->next);
} while (b);
return NULL;
@@ -532,7 +533,7 @@ void *qht_lookup_custom(const struct qht *ht, const void *userp, uint32_t hash,
unsigned int version;
void *ret;
- map = atomic_rcu_read(&ht->map);
+ map = qatomic_rcu_read(&ht->map);
b = qht_map_to_bucket(map, hash);
version = seqlock_read_begin(&b->sequence);
@@ -584,7 +585,7 @@ static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
memset(b, 0, sizeof(*b));
new = b;
i = 0;
- atomic_inc(&map->n_added_buckets);
+ qatomic_inc(&map->n_added_buckets);
if (unlikely(qht_map_needs_resize(map)) && needs_resize) {
*needs_resize = true;
}
@@ -593,11 +594,11 @@ static void *qht_insert__locked(const struct qht *ht, struct qht_map *map,
/* found an empty key: acquire the seqlock and write */
seqlock_write_begin(&head->sequence);
if (new) {
- atomic_rcu_set(&prev->next, b);
+ qatomic_rcu_set(&prev->next, b);
}
/* smp_wmb() implicit in seqlock_write_begin. */
- atomic_set(&b->hashes[i], hash);
- atomic_set(&b->pointers[i], p);
+ qatomic_set(&b->hashes[i], hash);
+ qatomic_set(&b->pointers[i], p);
seqlock_write_end(&head->sequence);
return NULL;
}
@@ -668,11 +669,11 @@ qht_entry_move(struct qht_bucket *to, int i, struct qht_bucket *from, int j)
qht_debug_assert(to->pointers[i]);
qht_debug_assert(from->pointers[j]);
- atomic_set(&to->hashes[i], from->hashes[j]);
- atomic_set(&to->pointers[i], from->pointers[j]);
+ qatomic_set(&to->hashes[i], from->hashes[j]);
+ qatomic_set(&to->pointers[i], from->pointers[j]);
- atomic_set(&from->hashes[j], 0);
- atomic_set(&from->pointers[j], NULL);
+ qatomic_set(&from->hashes[j], 0);
+ qatomic_set(&from->pointers[j], NULL);
}
/*
@@ -687,7 +688,7 @@ static inline void qht_bucket_remove_entry(struct qht_bucket *orig, int pos)
if (qht_entry_is_last(orig, pos)) {
orig->hashes[pos] = 0;
- atomic_set(&orig->pointers[pos], NULL);
+ qatomic_set(&orig->pointers[pos], NULL);
return;
}
do {
@@ -803,7 +804,7 @@ do_qht_iter(struct qht *ht, const struct qht_iter *iter, void *userp)
{
struct qht_map *map;
- map = atomic_rcu_read(&ht->map);
+ map = qatomic_rcu_read(&ht->map);
qht_map_lock_buckets(map);
qht_map_iter__all_locked(map, iter, userp);
qht_map_unlock_buckets(map);
@@ -876,7 +877,7 @@ static void qht_do_resize_reset(struct qht *ht, struct qht_map *new, bool reset)
qht_map_iter__all_locked(old, &iter, &data);
qht_map_debug__all_locked(new);
- atomic_rcu_set(&ht->map, new);
+ qatomic_rcu_set(&ht->map, new);
qht_map_unlock_buckets(old);
call_rcu(old, qht_map_destroy, rcu);
}
@@ -905,7 +906,7 @@ void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
const struct qht_map *map;
int i;
- map = atomic_rcu_read(&ht->map);
+ map = qatomic_rcu_read(&ht->map);
stats->used_head_buckets = 0;
stats->entries = 0;
@@ -933,13 +934,13 @@ void qht_statistics_init(const struct qht *ht, struct qht_stats *stats)
b = head;
do {
for (j = 0; j < QHT_BUCKET_ENTRIES; j++) {
- if (atomic_read(&b->pointers[j]) == NULL) {
+ if (qatomic_read(&b->pointers[j]) == NULL) {
break;
}
entries++;
}
buckets++;
- b = atomic_rcu_read(&b->next);
+ b = qatomic_rcu_read(&b->next);
} while (b);
} while (seqlock_read_retry(&head->sequence, version));
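
qht_lookup_custom reads buckets under a seqlock: sample the sequence counter, read, and retry if a writer raced with the read. A minimal standalone seqlock for a two-field record, written with C11 atomics rather than QEMU's seqlock.h API and assuming a single writer (QHT holds the bucket spinlock while writing):

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint seq;              /* even = stable, odd = writer active */
    static _Atomic uint32_t rec_hash;    /* the protected fields */
    static _Atomic uintptr_t rec_ptr;

    static void writer_update(uint32_t hash, uintptr_t p)
    {
        unsigned s = atomic_load_explicit(&seq, memory_order_relaxed);

        atomic_store_explicit(&seq, s + 1, memory_order_relaxed);   /* -> odd */
        atomic_thread_fence(memory_order_release);                  /* smp_wmb */
        atomic_store_explicit(&rec_hash, hash, memory_order_relaxed);
        atomic_store_explicit(&rec_ptr, p, memory_order_relaxed);
        atomic_store_explicit(&seq, s + 2, memory_order_release);   /* -> even */
    }

    static uintptr_t reader_lookup(uint32_t *hash_out)
    {
        unsigned s1, s2;
        uint32_t h;
        uintptr_t p;

        do {
            s1 = atomic_load_explicit(&seq, memory_order_acquire);
            h  = atomic_load_explicit(&rec_hash, memory_order_relaxed);
            p  = atomic_load_explicit(&rec_ptr, memory_order_relaxed);
            atomic_thread_fence(memory_order_acquire);               /* smp_rmb */
            s2 = atomic_load_explicit(&seq, memory_order_relaxed);
        } while ((s1 & 1) || s1 != s2);  /* writer active or torn read: retry */

        *hash_out = h;
        return p;
    }

An odd sequence value or a mismatch between the two samples means a writer was active, so the reader retries, which is why the bucket fields themselves are only ever accessed with qatomic_read/qatomic_set.
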
diff --git a/util/qsp.c b/util/qsp.c
index 7d5147f1b2..bacc5fa2f6 100644
--- a/util/qsp.c
+++ b/util/qsp.c
@@ -245,11 +245,11 @@ static void qsp_do_init(void)
static __attribute__((noinline)) void qsp_init__slowpath(void)
{
- if (atomic_cmpxchg(&qsp_initializing, false, true) == false) {
+ if (qatomic_cmpxchg(&qsp_initializing, false, true) == false) {
qsp_do_init();
- atomic_set(&qsp_initialized, true);
+ qatomic_set(&qsp_initialized, true);
} else {
- while (!atomic_read(&qsp_initialized)) {
+ while (!qatomic_read(&qsp_initialized)) {
cpu_relax();
}
}
@@ -258,7 +258,7 @@ static __attribute__((noinline)) void qsp_init__slowpath(void)
/* qsp_init() must be called from _all_ exported functions */
static inline void qsp_init(void)
{
- if (likely(atomic_read(&qsp_initialized))) {
+ if (likely(qatomic_read(&qsp_initialized))) {
return;
}
qsp_init__slowpath();
@@ -346,9 +346,9 @@ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line,
*/
static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq)
{
- atomic_set_u64(&e->ns, e->ns + delta);
+ qatomic_set_u64(&e->ns, e->ns + delta);
if (acq) {
- atomic_set_u64(&e->n_acqs, e->n_acqs + 1);
+ qatomic_set_u64(&e->n_acqs, e->n_acqs + 1);
}
}
@@ -432,29 +432,29 @@ qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms,
bool qsp_is_enabled(void)
{
- return atomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock;
+ return qatomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock;
}
void qsp_enable(void)
{
- atomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
- atomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
- atomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
- atomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
- atomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
- atomic_set(&qemu_cond_wait_func, qsp_cond_wait);
- atomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait);
+ qatomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
+ qatomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
+ qatomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
+ qatomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
+ qatomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
+ qatomic_set(&qemu_cond_wait_func, qsp_cond_wait);
+ qatomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait);
}
void qsp_disable(void)
{
- atomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
- atomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
- atomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
- atomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
- atomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
- atomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
- atomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl);
+ qatomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
+ qatomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
+ qatomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
+ qatomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
+ qatomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
+ qatomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
+ qatomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl);
}
static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up)
@@ -538,8 +538,8 @@ static void qsp_aggregate(void *p, uint32_t h, void *up)
* The entry is in the global hash table; read from it atomically (as in
* "read once").
*/
- agg->ns += atomic_read_u64(&e->ns);
- agg->n_acqs += atomic_read_u64(&e->n_acqs);
+ agg->ns += qatomic_read_u64(&e->ns);
+ agg->n_acqs += qatomic_read_u64(&e->n_acqs);
}
static void qsp_iter_diff(void *p, uint32_t hash, void *htp)
@@ -610,7 +610,7 @@ static void qsp_mktree(GTree *tree, bool callsite_coalesce)
* with the snapshot.
*/
WITH_RCU_READ_LOCK_GUARD() {
- QSPSnapshot *snap = atomic_rcu_read(&qsp_snapshot);
+ QSPSnapshot *snap = qatomic_rcu_read(&qsp_snapshot);
/* Aggregate all results from the global hash table into a local one */
qht_init(&ht, qsp_entry_no_thread_cmp, QSP_INITIAL_SIZE,
@@ -806,7 +806,7 @@ void qsp_reset(void)
qht_iter(&qsp_ht, qsp_aggregate, &new->ht);
/* replace the previous snapshot, if any */
- old = atomic_xchg(&qsp_snapshot, new);
+ old = qatomic_xchg(&qsp_snapshot, new);
if (old) {
call_rcu(old, qsp_snapshot_destroy, rcu);
}
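
qsp_init uses a small lock-free "once" pattern: a fast check of an initialized flag, plus a slow path where the first caller to win a cmpxchg on an initializing flag does the setup while everyone else spins. A standalone C11 version of the same structure (pthread_once would normally do this job; the memory orders here are a conservative choice, QEMU relies on its own qatomic primitives):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool initializing;
    static atomic_bool initialized;

    static void do_init(void)
    {
        /* expensive one-time setup goes here */
    }

    static void ensure_init(void)
    {
        bool expected = false;

        if (atomic_load_explicit(&initialized, memory_order_acquire)) {
            return;                              /* common fast path */
        }
        if (atomic_compare_exchange_strong(&initializing, &expected, true)) {
            do_init();                           /* we won the race */
            atomic_store_explicit(&initialized, true, memory_order_release);
        } else {
            /* Somebody else is initializing; wait for them to finish
             * (cpu_relax() in the QEMU version). */
            while (!atomic_load_explicit(&initialized, memory_order_acquire)) {
            }
        }
    }
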
diff --git a/util/rcu.c b/util/rcu.c
index c4fefa9333..13ac0f75cb 100644
--- a/util/rcu.c
+++ b/util/rcu.c
@@ -57,7 +57,7 @@ static inline int rcu_gp_ongoing(unsigned long *ctr)
{
unsigned long v;
- v = atomic_read(ctr);
+ v = qatomic_read(ctr);
return v && (v != rcu_gp_ctr);
}
@@ -82,14 +82,14 @@ static void wait_for_readers(void)
*/
qemu_event_reset(&rcu_gp_event);
- /* Instead of using atomic_mb_set for index->waiting, and
- * atomic_mb_read for index->ctr, memory barriers are placed
+ /* Instead of using qatomic_mb_set for index->waiting, and
+ * qatomic_mb_read for index->ctr, memory barriers are placed
* manually since writes to different threads are independent.
* qemu_event_reset has acquire semantics, so no memory barrier
* is needed here.
*/
QLIST_FOREACH(index, &registry, node) {
- atomic_set(&index->waiting, true);
+ qatomic_set(&index->waiting, true);
}
/* Here, order the stores to index->waiting before the loads of
@@ -106,7 +106,7 @@ static void wait_for_readers(void)
/* No need for mb_set here, worst of all we
* get some extra futex wakeups.
*/
- atomic_set(&index->waiting, false);
+ qatomic_set(&index->waiting, false);
}
}
@@ -151,7 +151,7 @@ void synchronize_rcu(void)
QEMU_LOCK_GUARD(&rcu_registry_lock);
if (!QLIST_EMPTY(&registry)) {
- /* In either case, the atomic_mb_set below blocks stores that free
+ /* In either case, the qatomic_mb_set below blocks stores that free
* old RCU-protected pointers.
*/
if (sizeof(rcu_gp_ctr) < 8) {
@@ -160,12 +160,12 @@ void synchronize_rcu(void)
*
* Switch parity: 0 -> 1, 1 -> 0.
*/
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
wait_for_readers();
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
} else {
/* Increment current grace period. */
- atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
+ qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
}
wait_for_readers();
@@ -188,8 +188,8 @@ static void enqueue(struct rcu_head *node)
struct rcu_head **old_tail;
node->next = NULL;
- old_tail = atomic_xchg(&tail, &node->next);
- atomic_mb_set(old_tail, node);
+ old_tail = qatomic_xchg(&tail, &node->next);
+ qatomic_mb_set(old_tail, node);
}
static struct rcu_head *try_dequeue(void)
@@ -203,7 +203,7 @@ retry:
* The tail, because it is the first step in the enqueuing.
* It is only the next pointers that might be inconsistent.
*/
- if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
+ if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) {
abort();
}
@@ -211,7 +211,7 @@ retry:
* wrong and we need to wait until its enqueuer finishes the update.
*/
node = head;
- next = atomic_mb_read(&head->next);
+ next = qatomic_mb_read(&head->next);
if (!next) {
return NULL;
}
@@ -240,7 +240,7 @@ static void *call_rcu_thread(void *opaque)
for (;;) {
int tries = 0;
- int n = atomic_read(&rcu_call_count);
+ int n = qatomic_read(&rcu_call_count);
/* Heuristically wait for a decent number of callbacks to pile up.
* Fetch rcu_call_count now, we only must process elements that were
@@ -250,7 +250,7 @@ static void *call_rcu_thread(void *opaque)
g_usleep(10000);
if (n == 0) {
qemu_event_reset(&rcu_call_ready_event);
- n = atomic_read(&rcu_call_count);
+ n = qatomic_read(&rcu_call_count);
if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
malloc_trim(4 * 1024 * 1024);
@@ -258,10 +258,10 @@ static void *call_rcu_thread(void *opaque)
qemu_event_wait(&rcu_call_ready_event);
}
}
- n = atomic_read(&rcu_call_count);
+ n = qatomic_read(&rcu_call_count);
}
- atomic_sub(&rcu_call_count, n);
+ qatomic_sub(&rcu_call_count, n);
synchronize_rcu();
qemu_mutex_lock_iothread();
while (n > 0) {
@@ -289,7 +289,7 @@ void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
node->func = func;
enqueue(node);
- atomic_inc(&rcu_call_count);
+ qatomic_inc(&rcu_call_count);
qemu_event_set(&rcu_call_ready_event);
}
diff --git a/util/stats64.c b/util/stats64.c
index 389c365a9e..897613c949 100644
--- a/util/stats64.c
+++ b/util/stats64.c
@@ -18,27 +18,27 @@
static inline void stat64_rdlock(Stat64 *s)
{
/* Keep out incoming writers to avoid them starving us. */
- atomic_add(&s->lock, 2);
+ qatomic_add(&s->lock, 2);
/* If there is a concurrent writer, wait for it. */
- while (atomic_read(&s->lock) & 1) {
+ while (qatomic_read(&s->lock) & 1) {
cpu_relax();
}
}
static inline void stat64_rdunlock(Stat64 *s)
{
- atomic_sub(&s->lock, 2);
+ qatomic_sub(&s->lock, 2);
}
static inline bool stat64_wrtrylock(Stat64 *s)
{
- return atomic_cmpxchg(&s->lock, 0, 1) == 0;
+ return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}
static inline void stat64_wrunlock(Stat64 *s)
{
- atomic_dec(&s->lock);
+ qatomic_dec(&s->lock);
}
uint64_t stat64_get(const Stat64 *s)
@@ -50,8 +50,8 @@ uint64_t stat64_get(const Stat64 *s)
/* 64-bit writes always take the lock, so we can read in
* any order.
*/
- high = atomic_read(&s->high);
- low = atomic_read(&s->low);
+ high = qatomic_read(&s->high);
+ low = qatomic_read(&s->low);
stat64_rdunlock((Stat64 *)s);
return ((uint64_t)high << 32) | low;
@@ -70,9 +70,9 @@ bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
* order of our update. By updating s->low first, we can check
* whether we have to carry into s->high.
*/
- old = atomic_fetch_add(&s->low, low);
+ old = qatomic_fetch_add(&s->low, low);
high += (old + low) < old;
- atomic_add(&s->high, high);
+ qatomic_add(&s->high, high);
stat64_wrunlock(s);
return true;
}
@@ -87,8 +87,8 @@ bool stat64_min_slow(Stat64 *s, uint64_t value)
return false;
}
- high = atomic_read(&s->high);
- low = atomic_read(&s->low);
+ high = qatomic_read(&s->high);
+ low = qatomic_read(&s->low);
orig = ((uint64_t)high << 32) | low;
if (value < orig) {
@@ -98,9 +98,9 @@ bool stat64_min_slow(Stat64 *s, uint64_t value)
* effect on stat64_min is that the slow path may be triggered
* unnecessarily.
*/
- atomic_set(&s->low, (uint32_t)value);
+ qatomic_set(&s->low, (uint32_t)value);
smp_wmb();
- atomic_set(&s->high, value >> 32);
+ qatomic_set(&s->high, value >> 32);
}
stat64_wrunlock(s);
return true;
@@ -116,8 +116,8 @@ bool stat64_max_slow(Stat64 *s, uint64_t value)
return false;
}
- high = atomic_read(&s->high);
- low = atomic_read(&s->low);
+ high = qatomic_read(&s->high);
+ low = qatomic_read(&s->low);
orig = ((uint64_t)high << 32) | low;
if (value > orig) {
@@ -127,9 +127,9 @@ bool stat64_max_slow(Stat64 *s, uint64_t value)
* effect on stat64_max is that the slow path may be triggered
* unnecessarily.
*/
- atomic_set(&s->low, (uint32_t)value);
+ qatomic_set(&s->low, (uint32_t)value);
smp_wmb();
- atomic_set(&s->high, value >> 32);
+ qatomic_set(&s->high, value >> 32);
}
stat64_wrunlock(s);
return true;
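
stat64_add32_carry shows the trick used when only 32-bit atomics are available: add to the low word, detect wrap-around, and carry into the high word, all while holding the counter's write lock so readers never see a torn pair. The carry detection in isolation, with C11 atomics:

    #include <stdatomic.h>
    #include <stdint.h>

    static _Atomic uint32_t low32;
    static _Atomic uint32_t high32;

    /* Add a 64-bit delta using two 32-bit atomics; mirrors the structure of
     * stat64_add32_carry(), minus the stat64_wrtrylock()/wrunlock() framing. */
    static void split_add(uint64_t delta)
    {
        uint32_t lo = (uint32_t)delta;
        uint32_t hi = (uint32_t)(delta >> 32);
        uint32_t old = atomic_fetch_add(&low32, lo);

        /* If the low word wrapped, old + lo is smaller than old. */
        hi += (uint32_t)(old + lo) < old;
        atomic_fetch_add(&high32, hi);
    }
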