diff options
Diffstat (limited to 'accel/tcg/cputlb.c')
-rw-r--r-- | accel/tcg/cputlb.c | 24 |
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index 6489abbf8c..aaf8e46ae5 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -312,9 +312,9 @@ void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) CPU_FOREACH(cpu) { CPUArchState *env = cpu->env_ptr; - full += atomic_read(&env_tlb(env)->c.full_flush_count); - part += atomic_read(&env_tlb(env)->c.part_flush_count); - elide += atomic_read(&env_tlb(env)->c.elide_flush_count); + full += qatomic_read(&env_tlb(env)->c.full_flush_count); + part += qatomic_read(&env_tlb(env)->c.part_flush_count); + elide += qatomic_read(&env_tlb(env)->c.elide_flush_count); } *pfull = full; *ppart = part; @@ -349,13 +349,13 @@ static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) cpu_tb_jmp_cache_clear(cpu); if (to_clean == ALL_MMUIDX_BITS) { - atomic_set(&env_tlb(env)->c.full_flush_count, + qatomic_set(&env_tlb(env)->c.full_flush_count, env_tlb(env)->c.full_flush_count + 1); } else { - atomic_set(&env_tlb(env)->c.part_flush_count, + qatomic_set(&env_tlb(env)->c.part_flush_count, env_tlb(env)->c.part_flush_count + ctpop16(to_clean)); if (to_clean != asked) { - atomic_set(&env_tlb(env)->c.elide_flush_count, + qatomic_set(&env_tlb(env)->c.elide_flush_count, env_tlb(env)->c.elide_flush_count + ctpop16(asked & ~to_clean)); } @@ -693,7 +693,7 @@ void tlb_unprotect_code(ram_addr_t ram_addr) * generated code. * * Other vCPUs might be reading their TLBs during guest execution, so we update - * te->addr_write with atomic_set. We don't need to worry about this for + * te->addr_write with qatomic_set. We don't need to worry about this for * oversized guests as MTTCG is disabled for them. * * Called with tlb_c.lock held. 
@@ -711,7 +711,7 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, #if TCG_OVERSIZED_GUEST tlb_entry->addr_write |= TLB_NOTDIRTY; #else - atomic_set(&tlb_entry->addr_write, + qatomic_set(&tlb_entry->addr_write, tlb_entry->addr_write | TLB_NOTDIRTY); #endif } @@ -1138,8 +1138,8 @@ static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) #if TCG_OVERSIZED_GUEST return *(target_ulong *)((uintptr_t)entry + ofs); #else - /* ofs might correspond to .addr_write, so use atomic_read */ - return atomic_read((target_ulong *)((uintptr_t)entry + ofs)); + /* ofs might correspond to .addr_write, so use qatomic_read */ + return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); #endif } @@ -1155,11 +1155,11 @@ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; target_ulong cmp; - /* elt_ofs might correspond to .addr_write, so use atomic_read */ + /* elt_ofs might correspond to .addr_write, so use qatomic_read */ #if TCG_OVERSIZED_GUEST cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); #else - cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); + cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); #endif if (cmp == page) { |