Diffstat (limited to 'util/stats64.c')
-rw-r--r-- | util/stats64.c | 34
1 file changed, 17 insertions, 17 deletions
diff --git a/util/stats64.c b/util/stats64.c
index 389c365a9e..897613c949 100644
--- a/util/stats64.c
+++ b/util/stats64.c
@@ -18,27 +18,27 @@
 static inline void stat64_rdlock(Stat64 *s)
 {
     /* Keep out incoming writers to avoid them starving us. */
-    atomic_add(&s->lock, 2);
+    qatomic_add(&s->lock, 2);
 
     /* If there is a concurrent writer, wait for it. */
-    while (atomic_read(&s->lock) & 1) {
+    while (qatomic_read(&s->lock) & 1) {
         cpu_relax();
     }
 }
 
 static inline void stat64_rdunlock(Stat64 *s)
 {
-    atomic_sub(&s->lock, 2);
+    qatomic_sub(&s->lock, 2);
 }
 
 static inline bool stat64_wrtrylock(Stat64 *s)
 {
-    return atomic_cmpxchg(&s->lock, 0, 1) == 0;
+    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
 }
 
 static inline void stat64_wrunlock(Stat64 *s)
 {
-    atomic_dec(&s->lock);
+    qatomic_dec(&s->lock);
 }
 
 uint64_t stat64_get(const Stat64 *s)
@@ -50,8 +50,8 @@ uint64_t stat64_get(const Stat64 *s)
     /* 64-bit writes always take the lock, so we can read in
      * any order.
      */
-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);
     stat64_rdunlock((Stat64 *)s);
 
     return ((uint64_t)high << 32) | low;
@@ -70,9 +70,9 @@ bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
      * order of our update.  By updating s->low first, we can check
      * whether we have to carry into s->high.
      */
-    old = atomic_fetch_add(&s->low, low);
+    old = qatomic_fetch_add(&s->low, low);
     high += (old + low) < old;
-    atomic_add(&s->high, high);
+    qatomic_add(&s->high, high);
     stat64_wrunlock(s);
     return true;
 }
@@ -87,8 +87,8 @@ bool stat64_min_slow(Stat64 *s, uint64_t value)
         return false;
     }
 
-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);
 
     orig = ((uint64_t)high << 32) | low;
     if (value < orig) {
@@ -98,9 +98,9 @@ bool stat64_min_slow(Stat64 *s, uint64_t value)
          * effect on stat64_min is that the slow path may be triggered
          * unnecessarily.
          */
-        atomic_set(&s->low, (uint32_t)value);
+        qatomic_set(&s->low, (uint32_t)value);
         smp_wmb();
-        atomic_set(&s->high, value >> 32);
+        qatomic_set(&s->high, value >> 32);
     }
     stat64_wrunlock(s);
     return true;
@@ -116,8 +116,8 @@ bool stat64_max_slow(Stat64 *s, uint64_t value)
         return false;
     }
 
-    high = atomic_read(&s->high);
-    low = atomic_read(&s->low);
+    high = qatomic_read(&s->high);
+    low = qatomic_read(&s->low);
 
     orig = ((uint64_t)high << 32) | low;
     if (value > orig) {
@@ -127,9 +127,9 @@ bool stat64_max_slow(Stat64 *s, uint64_t value)
          * effect on stat64_max is that the slow path may be triggered
          * unnecessarily.
          */
-        atomic_set(&s->low, (uint32_t)value);
+        qatomic_set(&s->low, (uint32_t)value);
         smp_wmb();
-        atomic_set(&s->high, value >> 32);
+        qatomic_set(&s->high, value >> 32);
     }
     stat64_wrunlock(s);
     return true;
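
The hunks above are a mechanical rename of QEMU's atomic_*() macros to the qatomic_*() prefix; the Stat64 locking protocol itself is unchanged. As a rough illustration of the read side those hunks touch, here is a minimal standalone sketch: C11 <stdatomic.h> operations stand in for the qatomic_*() macros, a plain spin stands in for cpu_relax(), and the Stat64Sketch type and sketch_*() names are hypothetical, not part of QEMU.

/* Standalone sketch of the Stat64 read-side protocol (illustration only). */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

typedef struct {
    _Atomic uint32_t low;
    _Atomic uint32_t high;
    _Atomic uint32_t lock;   /* bit 0: writer active, higher bits: 2 * reader count */
} Stat64Sketch;

static void sketch_rdlock(Stat64Sketch *s)
{
    /* Announce this reader first, so incoming writers cannot starve it... */
    atomic_fetch_add(&s->lock, 2);

    /* ...then wait for any writer that is already in flight. */
    while (atomic_load(&s->lock) & 1) {
        /* spin; the real code calls cpu_relax() here */
    }
}

static void sketch_rdunlock(Stat64Sketch *s)
{
    atomic_fetch_sub(&s->lock, 2);
}

static uint64_t sketch_get(Stat64Sketch *s)
{
    uint32_t high, low;

    sketch_rdlock(s);
    /* Writers hold the lock while touching both halves, so this pair is consistent. */
    high = atomic_load(&s->high);
    low = atomic_load(&s->low);
    sketch_rdunlock(s);

    return ((uint64_t)high << 32) | low;
}

int main(void)
{
    Stat64Sketch s = { .low = 5, .high = 1, .lock = 0 };

    printf("value = %llu\n", (unsigned long long)sketch_get(&s));
    return 0;
}

The sketch mirrors the structure of stat64_rdlock()/stat64_get() in the diff: readers register before checking the writer bit, which is why the rename from atomic_add() to qatomic_add() carries no behavioral change.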