Diffstat (limited to 'accel/tcg')
-rw-r--r--  accel/tcg/cpu-exec.c             | 26 +++++++++++++-------------
-rw-r--r--  accel/tcg/cputlb.c               | 16 ++++++++--------
-rw-r--r--  accel/tcg/tcg-accel-ops-icount.c |  4 ++--
-rw-r--r--  accel/tcg/tcg-accel-ops-mttcg.c  | 12 ++++++------
-rw-r--r--  accel/tcg/tcg-accel-ops-rr.c     | 14 +++++++-------
-rw-r--r--  accel/tcg/tcg-accel-ops.c        |  2 +-
-rw-r--r--  accel/tcg/translate-all.c        |  2 +-
7 files changed, 38 insertions(+), 38 deletions(-)
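
This is the accel/tcg portion of the tree-wide rename of the "iothread
lock" helpers to the BQL ("Big QEMU Lock") naming. Every hunk below is a
mechanical one-for-one substitution: qemu_mutex_lock_iothread() becomes
bql_lock(), qemu_mutex_unlock_iothread() becomes bql_unlock(), and
qemu_mutex_iothread_locked() becomes bql_locked(). As a rough sketch of
the renamed API surface (approximating include/qemu/main-loop.h after
this series; the exact declarations may differ):

    /* Sketch of the BQL API assumed by the hunks below. */
    void bql_lock_impl(const char *file, int line);
    #define bql_lock() bql_lock_impl(__FILE__, __LINE__) /* records call site */

    void bql_unlock(void);  /* release the Big QEMU Lock */
    bool bql_locked(void);  /* true iff the calling thread holds the BQL */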
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index c938eb96f8..67eda9865e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -558,8 +558,8 @@ static void cpu_exec_longjmp_cleanup(CPUState *cpu)
tcg_ctx->gen_tb = NULL;
}
#endif
- if (qemu_mutex_iothread_locked()) {
- qemu_mutex_unlock_iothread();
+ if (bql_locked()) {
+ bql_unlock();
}
assert_no_pages_locked();
}
@@ -680,10 +680,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
#if defined(TARGET_I386)
if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
X86CPU *x86_cpu = X86_CPU(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
apic_poll_irq(x86_cpu->apic_state);
cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
#endif /* TARGET_I386 */
if (!cpu_has_work(cpu)) {
@@ -749,9 +749,9 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
#else
if (replay_exception()) {
CPUClass *cc = CPU_GET_CLASS(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
cc->tcg_ops->do_interrupt(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
cpu->exception_index = -1;
if (unlikely(cpu->singlestep_enabled)) {
@@ -812,7 +812,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (unlikely(qatomic_read(&cpu->interrupt_request))) {
int interrupt_request;
- qemu_mutex_lock_iothread();
+ bql_lock();
interrupt_request = cpu->interrupt_request;
if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
/* Mask out external interrupts for this step. */
@@ -821,7 +821,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
if (interrupt_request & CPU_INTERRUPT_DEBUG) {
cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
cpu->exception_index = EXCP_DEBUG;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#if !defined(CONFIG_USER_ONLY)
@@ -832,7 +832,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
cpu->halted = 1;
cpu->exception_index = EXCP_HLT;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#if defined(TARGET_I386)
@@ -843,14 +843,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
do_cpu_init(x86_cpu);
cpu->exception_index = EXCP_HALTED;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#else
else if (interrupt_request & CPU_INTERRUPT_RESET) {
replay_interrupt();
cpu_reset(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
#endif /* !TARGET_I386 */
@@ -873,7 +873,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
*/
if (unlikely(cpu->singlestep_enabled)) {
cpu->exception_index = EXCP_DEBUG;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return true;
}
cpu->exception_index = -1;
@@ -892,7 +892,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
}
/* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
/* Finally, check if we need to exit to the main loop. */
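
Every early return in cpu_handle_interrupt() above has to pair its
bql_lock() with an explicit bql_unlock() before dropping back into the
translator loop; only the longjmp cleanup at the top of the file probes
the lock state first, because after a longjmp the thread cannot know
whether it was holding the BQL. A condensed, hypothetical sketch of the
dominant pattern in this file:

    /* Hypothetical condensation of cpu_handle_interrupt(): the BQL guards
     * interrupt_request, and every exit path must release it. */
    static bool handle_interrupt_sketch(CPUState *cpu)
    {
        bql_lock();
        if (cpu->interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            bql_unlock();   /* drop before returning to the exec loop */
            return true;
        }
        bql_unlock();
        return false;
    }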
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index db3f93fda9..5698a9fd8e 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -2030,10 +2030,10 @@ static uint64_t do_ld_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
type, ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
@@ -2054,12 +2054,12 @@ static Int128 do_ld16_mmio_beN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
a = int_ld_mmio_beN(cpu, full, ret_be, addr, size - 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset);
b = int_ld_mmio_beN(cpu, full, ret_be, addr + size - 8, 8, mmu_idx,
MMU_DATA_LOAD, ra, mr, mr_offset + size - 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return int128_make128(b, a);
}
@@ -2577,10 +2577,10 @@ static uint64_t do_st_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
ret = int_st_mmio_leN(cpu, full, val_le, addr, size, mmu_idx,
ra, mr, mr_offset);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
@@ -2601,12 +2601,12 @@ static uint64_t do_st16_mmio_leN(CPUState *cpu, CPUTLBEntryFull *full,
section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
mr = section->mr;
- qemu_mutex_lock_iothread();
+ bql_lock();
int_st_mmio_leN(cpu, full, int128_getlo(val_le), addr, 8,
mmu_idx, ra, mr, mr_offset);
ret = int_st_mmio_leN(cpu, full, int128_gethi(val_le), addr + 8,
size - 8, mmu_idx, ra, mr, mr_offset + 8);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
return ret;
}
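
All four MMIO slow paths in cputlb.c share one shape: the address is
translated outside the lock, and the BQL is held only around the device
access itself. A simplified sketch of that shape, reusing the helper
names from the hunks above (argument details elided where this diff does
not show them):

    /* Sketch of the MMIO locking shape in do_ld_mmio_beN() and friends:
     * translation is lock-free, the device callback is BQL-protected. */
    section = io_prepare(&mr_offset, cpu, full->xlat_section, attrs, addr, ra);
    mr = section->mr;

    bql_lock();                         /* device models expect the BQL */
    ret = int_ld_mmio_beN(cpu, full, ret_be, addr, size, mmu_idx,
                          MMU_DATA_LOAD, ra, mr, mr_offset);
    bql_unlock();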
diff --git a/accel/tcg/tcg-accel-ops-icount.c b/accel/tcg/tcg-accel-ops-icount.c
index b25685fb71..5824d92580 100644
--- a/accel/tcg/tcg-accel-ops-icount.c
+++ b/accel/tcg/tcg-accel-ops-icount.c
@@ -126,9 +126,9 @@ void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
* We're called without the iothread lock, so must take it while
* we're calling timer handlers.
*/
- qemu_mutex_lock_iothread();
+ bql_lock();
icount_notify_aio_contexts();
- qemu_mutex_unlock_iothread();
+ bql_unlock();
}
}
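
Note that the comment in this hunk still says "iothread lock": the
rename was mechanical and left prose comments untouched. The lock is
taken here only when timer handlers actually need to run; a hedged
sketch of that conditional window (the surrounding condition is not
shown in the hunk and is assumed):

    /* Assumed context: only when the icount budget came out as zero is a
     * virtual-time deadline already due, and the timer handlers that
     * service it require the BQL. */
    if (cpu->icount_budget == 0) {
        bql_lock();                 /* timer callbacks expect the BQL */
        icount_notify_aio_contexts();
        bql_unlock();
    }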
diff --git a/accel/tcg/tcg-accel-ops-mttcg.c b/accel/tcg/tcg-accel-ops-mttcg.c
index fac80095bb..af7307013a 100644
--- a/accel/tcg/tcg-accel-ops-mttcg.c
+++ b/accel/tcg/tcg-accel-ops-mttcg.c
@@ -76,7 +76,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu.notifier);
tcg_register_thread();
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@@ -91,9 +91,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
do {
if (cpu_can_run(cpu)) {
int r;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
r = tcg_cpus_exec(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
switch (r) {
case EXCP_DEBUG:
cpu_handle_guest_debug(cpu);
@@ -105,9 +105,9 @@ static void *mttcg_cpu_thread_fn(void *arg)
*/
break;
case EXCP_ATOMIC:
- qemu_mutex_unlock_iothread();
+ bql_unlock();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
default:
/* Ignore everything else? */
break;
@@ -119,7 +119,7 @@ static void *mttcg_cpu_thread_fn(void *arg)
} while (!cpu->unplug || cpu_can_run(cpu));
tcg_cpus_destroy(cpu);
- qemu_mutex_unlock_iothread();
+ bql_unlock();
rcu_remove_force_rcu_notifier(&force_rcu.notifier);
rcu_unregister_thread();
return NULL;
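
Taken together, the mttcg hunks define the per-vCPU thread's lock
discipline: the thread holds the BQL for all of its bookkeeping and
releases it only while guest code actually executes. Condensed into a
hedged sketch (qemu_wait_io_event() is the wait step from the
surrounding function, not shown in these hunks):

    /* Sketch of the MTTCG vCPU loop's BQL discipline; error handling
     * and the exception dispatch are omitted. */
    bql_lock();                        /* held for all bookkeeping */
    do {
        if (cpu_can_run(cpu)) {
            bql_unlock();              /* guest code runs without the BQL */
            r = tcg_cpus_exec(cpu);
            bql_lock();
            /* ... dispatch on r: EXCP_DEBUG, EXCP_HALTED, EXCP_ATOMIC ... */
        }
        qemu_wait_io_event(cpu);       /* sleeps with the BQL held */
    } while (!cpu->unplug || cpu_can_run(cpu));
    bql_unlock();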
diff --git a/accel/tcg/tcg-accel-ops-rr.c b/accel/tcg/tcg-accel-ops-rr.c
index 611932f3c3..c4ea372a3f 100644
--- a/accel/tcg/tcg-accel-ops-rr.c
+++ b/accel/tcg/tcg-accel-ops-rr.c
@@ -188,7 +188,7 @@ static void *rr_cpu_thread_fn(void *arg)
rcu_add_force_rcu_notifier(&force_rcu);
tcg_register_thread();
- qemu_mutex_lock_iothread();
+ bql_lock();
qemu_thread_get_self(cpu->thread);
cpu->thread_id = qemu_get_thread_id();
@@ -218,9 +218,9 @@ static void *rr_cpu_thread_fn(void *arg)
/* Only used for icount_enabled() */
int64_t cpu_budget = 0;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
replay_mutex_lock();
- qemu_mutex_lock_iothread();
+ bql_lock();
if (icount_enabled()) {
int cpu_count = rr_cpu_count();
@@ -254,7 +254,7 @@ static void *rr_cpu_thread_fn(void *arg)
if (cpu_can_run(cpu)) {
int r;
- qemu_mutex_unlock_iothread();
+ bql_unlock();
if (icount_enabled()) {
icount_prepare_for_run(cpu, cpu_budget);
}
@@ -262,15 +262,15 @@ static void *rr_cpu_thread_fn(void *arg)
if (icount_enabled()) {
icount_process_data(cpu);
}
- qemu_mutex_lock_iothread();
+ bql_lock();
if (r == EXCP_DEBUG) {
cpu_handle_guest_debug(cpu);
break;
} else if (r == EXCP_ATOMIC) {
- qemu_mutex_unlock_iothread();
+ bql_unlock();
cpu_exec_step_atomic(cpu);
- qemu_mutex_lock_iothread();
+ bql_lock();
break;
}
} else if (cpu->stop) {
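
One detail worth calling out in the rr hunks: replay_mutex must be
acquired before the BQL, so rather than nesting the other way the thread
drops the BQL, takes replay_mutex, and then re-takes the BQL. The
ordering rule, as a sketch:

    /* Lock-ordering dance from rr_cpu_thread_fn() above: replay_mutex
     * outranks the BQL, so it must never be acquired while the BQL is
     * held. */
    bql_unlock();           /* release the lower-ranked lock first */
    replay_mutex_lock();    /* take the higher-ranked lock */
    bql_lock();             /* re-acquire the BQL underneath it */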
diff --git a/accel/tcg/tcg-accel-ops.c b/accel/tcg/tcg-accel-ops.c
index 1b57290682..813065c0ec 100644
--- a/accel/tcg/tcg-accel-ops.c
+++ b/accel/tcg/tcg-accel-ops.c
@@ -88,7 +88,7 @@ static void tcg_cpu_reset_hold(CPUState *cpu)
/* mask must never be zero, except for A20 change call */
void tcg_handle_interrupt(CPUState *cpu, int mask)
{
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
cpu->interrupt_request |= mask;
diff --git a/accel/tcg/translate-all.c b/accel/tcg/translate-all.c
index 79a88f5fb7..1737bb3da5 100644
--- a/accel/tcg/translate-all.c
+++ b/accel/tcg/translate-all.c
@@ -649,7 +649,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
void cpu_interrupt(CPUState *cpu, int mask)
{
- g_assert(qemu_mutex_iothread_locked());
+ g_assert(bql_locked());
cpu->interrupt_request |= mask;
qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
}
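
The two assertion sites above turn the rename into an enforced
precondition: anything that raises a guest interrupt must already hold
the BQL. A hypothetical caller-side sketch (the device function is
invented for illustration; BQL_LOCK_GUARD() is assumed to be the scoped
lock guard from the same renaming series):

    /* Hypothetical device code: the guard takes the BQL for this scope,
     * satisfying cpu_interrupt()'s g_assert(bql_locked()). */
    static void example_device_raise_irq(CPUState *cs)
    {
        BQL_LOCK_GUARD();
        cpu_interrupt(cs, CPU_INTERRUPT_HARD);
    }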