author     Stefan Hajnoczi <stefanha@redhat.com>    2020-09-23 11:56:46 +0100
committer  Stefan Hajnoczi <stefanha@redhat.com>    2020-09-23 16:07:44 +0100
commit     d73415a315471ac0b127ed3fad45c8ec5d711de1 (patch)
tree       bae20b3a39968fdfb4340b1a39b533333a8e6fd0 /tcg
parent     ed7db34b5aedba4487fd949b2e545eef954f093e (diff)
qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only accept a pointer to a C11
_Atomic type as their address argument. QEMU uses plain, non-_Atomic
types (int, etc.), and this causes a compiler error when QEMU code calls
these functions in a source file that also includes <stdatomic.h> via a
system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
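As an aside, the clash can be reproduced with a minimal standalone file
outside the QEMU tree (the file, variable, and function names below are
illustrative only, not taken from QEMU). With <stdatomic.h> in scope,
clang expands atomic_fetch_or() to its __c11_atomic_fetch_or() builtin,
which rejects a pointer to a plain unsigned int:

    /* repro.c -- illustrative sketch; compile with: clang -c repro.c */
    #include <stdatomic.h>

    static unsigned int flags;      /* plain (non-_Atomic) type, as in QEMU */

    void set_flag(unsigned int mask)
    {
        /* clang: error: address argument to atomic operation must be a
         * pointer to _Atomic type ('unsigned int *' invalid) */
        atomic_fetch_or(&flags, mask);
    }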
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
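The intended end state is sketched below with illustrative variable and
function names (only the qatomic_read()/qatomic_set()/qatomic_fetch_inc()
wrappers are taken from this patch): both families can now appear in the
same translation unit without clashing.

    /* coexistence sketch inside a hypothetical QEMU source file */
    #include "qemu/osdep.h"
    #include "qemu/atomic.h"        /* QEMU qatomic_*() on plain types */
    #include <stdatomic.h>          /* C11 atomic_*() on _Atomic types */

    static unsigned int plain_count;    /* accessed via qatomic_*() */
    static atomic_uint c11_count;       /* accessed via C11 atomic_*() */

    static void example(void)
    {
        /* read/write of a plain type; note this pair is not an atomic RMW */
        unsigned int n = qatomic_read(&plain_count);
        qatomic_set(&plain_count, n + 1);

        /* C11 generic from <stdatomic.h>, requires the _Atomic type */
        atomic_fetch_add(&c11_count, 1);
    }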
This patch was generated using:
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
sed -i "s%\<$identifier\>%q$identifier%g" \
$(git grep -I -l "\<$identifier\>")
done
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
Diffstat (limited to 'tcg')
-rw-r--r--  tcg/aarch64/tcg-target.c.inc |  2
-rw-r--r--  tcg/i386/tcg-target.h        |  2
-rw-r--r--  tcg/mips/tcg-target.c.inc    |  2
-rw-r--r--  tcg/ppc/tcg-target.c.inc     |  6
-rw-r--r--  tcg/s390/tcg-target.h        |  2
-rw-r--r--  tcg/sparc/tcg-target.c.inc   |  5
-rw-r--r--  tcg/tcg.c                    | 58
-rw-r--r--  tcg/tci.c                    |  2
-rw-r--r--  tcg/tci/tcg-target.h         |  2
9 files changed, 41 insertions, 40 deletions
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 948c35d825..2607fe4ab9 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -1365,7 +1365,7 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
         i2 = I3401_ADDI | rt << 31 | (addr & 0xfff) << 10 | rd << 5 | rd;
     }
     pair = (uint64_t)i2 << 32 | i1;
-    atomic_set((uint64_t *)jmp_addr, pair);
+    qatomic_set((uint64_t *)jmp_addr, pair);
     flush_icache_range(jmp_addr, jmp_addr + 8);
 }
diff --git a/tcg/i386/tcg-target.h b/tcg/i386/tcg-target.h
index 99ac1e3958..d2baf796b0 100644
--- a/tcg/i386/tcg-target.h
+++ b/tcg/i386/tcg-target.h
@@ -215,7 +215,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
                                             uintptr_t jmp_addr, uintptr_t addr)
 {
     /* patch the branch destination */
-    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
+    qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
     /* no need to flush icache explicitly */
 }
diff --git a/tcg/mips/tcg-target.c.inc b/tcg/mips/tcg-target.c.inc
index bd5b8e09a0..7aa2073520 100644
--- a/tcg/mips/tcg-target.c.inc
+++ b/tcg/mips/tcg-target.c.inc
@@ -2662,7 +2662,7 @@ static void tcg_target_init(TCGContext *s)
 void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
                               uintptr_t addr)
 {
-    atomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
+    qatomic_set((uint32_t *)jmp_addr, deposit32(OPC_J, 0, 26, addr >> 2));
     flush_icache_range(jmp_addr, jmp_addr + 4);
 }
diff --git a/tcg/ppc/tcg-target.c.inc b/tcg/ppc/tcg-target.c.inc
index 393c4b30e0..7cb40b0466 100644
--- a/tcg/ppc/tcg-target.c.inc
+++ b/tcg/ppc/tcg-target.c.inc
@@ -1756,13 +1756,13 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
 #endif
 
         /* As per the enclosing if, this is ppc64. Avoid the _Static_assert
-           within atomic_set that would fail to build a ppc32 host. */
-        atomic_set__nocheck((uint64_t *)jmp_addr, pair);
+           within qatomic_set that would fail to build a ppc32 host. */
+        qatomic_set__nocheck((uint64_t *)jmp_addr, pair);
         flush_icache_range(jmp_addr, jmp_addr + 8);
     } else {
         intptr_t diff = addr - jmp_addr;
         tcg_debug_assert(in_range_b(diff));
-        atomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
+        qatomic_set((uint32_t *)jmp_addr, B | (diff & 0x3fffffc));
         flush_icache_range(jmp_addr, jmp_addr + 4);
     }
 }
diff --git a/tcg/s390/tcg-target.h b/tcg/s390/tcg-target.h
index 07accabbd1..63c8797bd3 100644
--- a/tcg/s390/tcg-target.h
+++ b/tcg/s390/tcg-target.h
@@ -154,7 +154,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
 {
     /* patch the branch destination */
     intptr_t disp = addr - (jmp_addr - 2);
-    atomic_set((int32_t *)jmp_addr, disp / 2);
+    qatomic_set((int32_t *)jmp_addr, disp / 2);
     /* no need to flush icache explicitly */
 }
diff --git a/tcg/sparc/tcg-target.c.inc b/tcg/sparc/tcg-target.c.inc
index 0f1d91fc21..40bc12290c 100644
--- a/tcg/sparc/tcg-target.c.inc
+++ b/tcg/sparc/tcg-target.c.inc
@@ -1839,7 +1839,8 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
     tcg_debug_assert(br_disp == (int32_t)br_disp);
 
     if (!USE_REG_TB) {
-        atomic_set((uint32_t *)jmp_addr, deposit32(CALL, 0, 30, br_disp >> 2));
+        qatomic_set((uint32_t *)jmp_addr,
+                    deposit32(CALL, 0, 30, br_disp >> 2));
         flush_icache_range(jmp_addr, jmp_addr + 4);
         return;
     }
@@ -1863,6 +1864,6 @@ void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_addr,
               | INSN_IMM13((tb_disp & 0x3ff) | -0x400));
     }
 
-    atomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
+    qatomic_set((uint64_t *)jmp_addr, deposit64(i2, 32, 32, i1));
     flush_icache_range(jmp_addr, jmp_addr + 8);
 }
diff --git a/tcg/tcg.c b/tcg/tcg.c
--- a/tcg/tcg.c
+++ b/tcg/tcg.c
@@ -597,7 +597,7 @@ static inline bool tcg_region_initial_alloc__locked(TCGContext *s)
 /* Call from a safe-work context */
 void tcg_region_reset_all(void)
 {
-    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
     unsigned int i;
 
     qemu_mutex_lock(&region.lock);
@@ -605,7 +605,7 @@ void tcg_region_reset_all(void)
     region.agg_size_full = 0;
 
     for (i = 0; i < n_ctxs; i++) {
-        TCGContext *s = atomic_read(&tcg_ctxs[i]);
+        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
         bool err = tcg_region_initial_alloc__locked(s);
 
         g_assert(!err);
@@ -794,9 +794,9 @@ void tcg_register_thread(void)
     }
 
     /* Claim an entry in tcg_ctxs */
-    n = atomic_fetch_inc(&n_tcg_ctxs);
+    n = qatomic_fetch_inc(&n_tcg_ctxs);
     g_assert(n < ms->smp.max_cpus);
-    atomic_set(&tcg_ctxs[n], s);
+    qatomic_set(&tcg_ctxs[n], s);
 
     if (n > 0) {
         alloc_tcg_plugin_context(s);
@@ -819,17 +819,17 @@ void tcg_register_thread(void)
  */
 size_t tcg_code_size(void)
 {
-    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
     unsigned int i;
     size_t total;
 
     qemu_mutex_lock(&region.lock);
     total = region.agg_size_full;
     for (i = 0; i < n_ctxs; i++) {
-        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
         size_t size;
 
-        size = atomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
+        size = qatomic_read(&s->code_gen_ptr) - s->code_gen_buffer;
         g_assert(size <= s->code_gen_buffer_size);
         total += size;
     }
@@ -855,14 +855,14 @@ size_t tcg_code_capacity(void)
 
 size_t tcg_tb_phys_invalidate_count(void)
 {
-    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
     unsigned int i;
     size_t total = 0;
 
     for (i = 0; i < n_ctxs; i++) {
-        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
 
-        total += atomic_read(&s->tb_phys_invalidate_count);
+        total += qatomic_read(&s->tb_phys_invalidate_count);
     }
     return total;
 }
@@ -1041,7 +1041,7 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s)
         }
         goto retry;
     }
-    atomic_set(&s->code_gen_ptr, next);
+    qatomic_set(&s->code_gen_ptr, next);
     s->data_gen_ptr = NULL;
     return tb;
 }
@@ -2134,7 +2134,7 @@ static void tcg_dump_ops(TCGContext *s, bool have_prefs)
             QemuLogFile *logfile;
 
             rcu_read_lock();
-            logfile = atomic_rcu_read(&qemu_logfile);
+            logfile = qatomic_rcu_read(&qemu_logfile);
             if (logfile) {
                 for (; col < 40; ++col) {
                     putc(' ', logfile->fd);
@@ -2341,7 +2341,7 @@ void tcg_op_remove(TCGContext *s, TCGOp *op)
     s->nb_ops--;
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
+    qatomic_set(&s->prof.del_op_count, s->prof.del_op_count + 1);
 #endif
 }
@@ -3964,12 +3964,12 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
 /* avoid copy/paste errors */
 #define PROF_ADD(to, from, field)                                       \
     do {                                                                \
-        (to)->field += atomic_read(&((from)->field));                   \
+        (to)->field += qatomic_read(&((from)->field));                  \
     } while (0)
 
 #define PROF_MAX(to, from, field)                                       \
     do {                                                                \
-        typeof((from)->field) val__ = atomic_read(&((from)->field));    \
+        typeof((from)->field) val__ = qatomic_read(&((from)->field));   \
         if (val__ > (to)->field) {                                      \
             (to)->field = val__;                                        \
         }                                                               \
@@ -3979,11 +3979,11 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
 static inline
 void tcg_profile_snapshot(TCGProfile *prof, bool counters, bool table)
 {
-    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
     unsigned int i;
 
     for (i = 0; i < n_ctxs; i++) {
-        TCGContext *s = atomic_read(&tcg_ctxs[i]);
+        TCGContext *s = qatomic_read(&tcg_ctxs[i]);
         const TCGProfile *orig = &s->prof;
 
         if (counters) {
@@ -4042,15 +4042,15 @@ void tcg_dump_op_count(void)
 
 int64_t tcg_cpu_exec_time(void)
 {
-    unsigned int n_ctxs = atomic_read(&n_tcg_ctxs);
+    unsigned int n_ctxs = qatomic_read(&n_tcg_ctxs);
     unsigned int i;
     int64_t ret = 0;
 
     for (i = 0; i < n_ctxs; i++) {
-        const TCGContext *s = atomic_read(&tcg_ctxs[i]);
+        const TCGContext *s = qatomic_read(&tcg_ctxs[i]);
         const TCGProfile *prof = &s->prof;
 
-        ret += atomic_read(&prof->cpu_exec_time);
+        ret += qatomic_read(&prof->cpu_exec_time);
     }
     return ret;
 }
@@ -4083,15 +4083,15 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
         QTAILQ_FOREACH(op, &s->ops, link) {
             n++;
         }
-        atomic_set(&prof->op_count, prof->op_count + n);
+        qatomic_set(&prof->op_count, prof->op_count + n);
         if (n > prof->op_count_max) {
-            atomic_set(&prof->op_count_max, n);
+            qatomic_set(&prof->op_count_max, n);
         }
 
         n = s->nb_temps;
-        atomic_set(&prof->temp_count, prof->temp_count + n);
+        qatomic_set(&prof->temp_count, prof->temp_count + n);
         if (n > prof->temp_count_max) {
-            atomic_set(&prof->temp_count_max, n);
+            qatomic_set(&prof->temp_count_max, n);
         }
     }
 #endif
@@ -4125,7 +4125,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
 #endif
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
+    qatomic_set(&prof->opt_time, prof->opt_time - profile_getclock());
 #endif
 
 #ifdef USE_TCG_OPTIMIZATIONS
@@ -4133,8 +4133,8 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
 #endif
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
-    atomic_set(&prof->la_time, prof->la_time - profile_getclock());
+    qatomic_set(&prof->opt_time, prof->opt_time + profile_getclock());
+    qatomic_set(&prof->la_time, prof->la_time - profile_getclock());
 #endif
 
     reachable_code_pass(s);
@@ -4159,7 +4159,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
     }
 
 #ifdef CONFIG_PROFILER
-    atomic_set(&prof->la_time, prof->la_time + profile_getclock());
+    qatomic_set(&prof->la_time, prof->la_time + profile_getclock());
 #endif
 
 #ifdef DEBUG_DISAS
@@ -4190,7 +4190,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb)
         TCGOpcode opc = op->opc;
 
 #ifdef CONFIG_PROFILER
-        atomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
+        qatomic_set(&prof->table_op_count[opc], prof->table_op_count[opc] + 1);
 #endif
 
         switch (opc) {
diff --git a/tcg/tci.c b/tcg/tci.c
--- a/tcg/tci.c
+++ b/tcg/tci.c
@@ -1115,7 +1115,7 @@ uintptr_t tcg_qemu_tb_exec(CPUArchState *env, uint8_t *tb_ptr)
         case INDEX_op_goto_tb:
             /* Jump address is aligned */
            tb_ptr = QEMU_ALIGN_PTR_UP(tb_ptr, 4);
-            t0 = atomic_read((int32_t *)tb_ptr);
+            t0 = qatomic_read((int32_t *)tb_ptr);
             tb_ptr += sizeof(int32_t);
             tci_assert(tb_ptr == old_code_ptr + op_size);
             tb_ptr += (int32_t)t0;
diff --git a/tcg/tci/tcg-target.h b/tcg/tci/tcg-target.h
index 8b90ab71cb..8c1c1d265d 100644
--- a/tcg/tci/tcg-target.h
+++ b/tcg/tci/tcg-target.h
@@ -206,7 +206,7 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
                                             uintptr_t jmp_addr, uintptr_t addr)
 {
     /* patch the branch destination */
-    atomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
+    qatomic_set((int32_t *)jmp_addr, addr - (jmp_addr + 4));
     /* no need to flush icache explicitly */
 }