author     Stefan Hajnoczi <stefanha@redhat.com>    2024-01-02 10:35:25 -0500
committer  Stefan Hajnoczi <stefanha@redhat.com>    2024-01-08 10:45:43 -0500
commit     195801d700c008b6a8d8acfa299aa5f177446647 (patch)
tree       7ab423e4a773b818f6c6d65f2fa06dc4517cad24 /accel/kvm
parent     897a06c6d7ce8fb962a33cea1910d17218c746e9 (diff)
system/cpus: rename qemu_mutex_lock_iothread() to bql_lock()
The Big QEMU Lock (BQL) has many names and they are confusing.

The actual QemuMutex variable is called qemu_global_mutex but it's commonly referred to as the BQL in discussions and some code comments. The locking APIs, however, are called qemu_mutex_lock_iothread() and qemu_mutex_unlock_iothread().

The "iothread" name is historic and comes from when the main thread was split into KVM vcpu threads and the "iothread" (now called the main loop thread). I have contributed to the confusion myself by introducing --object iothread, a separate concept unrelated to the BQL.

The "iothread" name is no longer appropriate for the BQL. Rename the locking APIs to:

- void bql_lock(void)
- void bql_unlock(void)
- bool bql_locked(void)

There are more APIs with "iothread" in their names. Subsequent patches will rename them. There are also comments and documentation that will be updated in later patches.

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Paul Durrant <paul@xen.org>
Acked-by: Fabiano Rosas <farosas@suse.de>
Acked-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Acked-by: Peter Xu <peterx@redhat.com>
Acked-by: Eric Farman <farman@linux.ibm.com>
Reviewed-by: Harsh Prateek Bora <harshpb@linux.ibm.com>
Acked-by: Hyman Huang <yong.huang@smartx.com>
Reviewed-by: Akihiko Odaki <akihiko.odaki@daynix.com>
Message-id: 20240102153529.486531-2-stefanha@redhat.com
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
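Since only the wrapper names change, the calling pattern is unchanged. A minimal sketch of that pattern with the new names (illustrative only: example_mmio_write() and example_vcpu_thread_fn() are hypothetical, and the bql_*() prototypes are repeated here just to keep the sketch self-contained; in QEMU they come from a header):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Prototypes repeated for self-containment; in the QEMU tree these are
 * provided by the main-loop header after this patch. */
void bql_lock(void);
void bql_unlock(void);
bool bql_locked(void);

/* Hypothetical device callback: entered with the BQL already held, so it
 * asserts instead of locking (compare kvm_dirty_ring_flush() below). */
static void example_mmio_write(void *opaque, uint64_t addr,
                               uint64_t val, unsigned size)
{
    assert(bql_locked());
    /* ... access device state protected by the BQL ... */
}

/* Hypothetical vCPU thread: holds the BQL for setup, drops it before
 * entering the guest (compare kvm_vcpu_thread_fn() below). */
static void *example_vcpu_thread_fn(void *arg)
{
    bql_lock();            /* per-thread setup runs under the BQL */
    /* ... initialize CPU state, signal thread creation ... */
    bql_unlock();          /* never hold the BQL while the guest runs */

    /* Run loop: reacquire the BQL only around work that needs it. */
    return NULL;
}

The same division shows up in the hunks below: kvm_dirty_ring_flush() asserts bql_locked() because its callers already hold the lock, while kvm_vcpu_thread_fn() and kvm_cpu_exec() take and drop the BQL explicitly around guest execution.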
Diffstat (limited to 'accel/kvm')
-rw-r--r--  accel/kvm/kvm-accel-ops.c   4
-rw-r--r--  accel/kvm/kvm-all.c        22
2 files changed, 13 insertions, 13 deletions
diff --git a/accel/kvm/kvm-accel-ops.c b/accel/kvm/kvm-accel-ops.c
index 6195150a0b..45ff06e953 100644
--- a/accel/kvm/kvm-accel-ops.c
+++ b/accel/kvm/kvm-accel-ops.c
@@ -33,7 +33,7 @@ static void *kvm_vcpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
     cpu->neg.can_do_io = true;
@@ -58,7 +58,7 @@ static void *kvm_vcpu_thread_fn(void *arg)
     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c
index eb17773f0b..bbc60146d1 100644
--- a/accel/kvm/kvm-all.c
+++ b/accel/kvm/kvm-all.c
@@ -806,7 +806,7 @@ static void kvm_dirty_ring_flush(void)
      * should always be with BQL held, serialization is guaranteed.
      * However, let's be sure of it.
      */
-    assert(qemu_mutex_iothread_locked());
+    assert(bql_locked());
     /*
      * First make sure to flush the hardware buffers by kicking all
      * vcpus out in a synchronous way.
@@ -1391,9 +1391,9 @@ static void *kvm_dirty_ring_reaper_thread(void *data)
         trace_kvm_dirty_ring_reaper("wakeup");
         r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING;

-        qemu_mutex_lock_iothread();
+        bql_lock();
         kvm_dirty_ring_reap(s, NULL);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();

         r->reaper_iteration++;
     }
@@ -2817,7 +2817,7 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     cpu_exec_start(cpu);

     do {
@@ -2857,11 +2857,11 @@ int kvm_cpu_exec(CPUState *cpu)

 #ifdef KVM_HAVE_MCE_INJECTION
         if (unlikely(have_sigbus_pending)) {
-            qemu_mutex_lock_iothread();
+            bql_lock();
             kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                     pending_sigbus_addr);
             have_sigbus_pending = false;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
         }
 #endif
@@ -2927,7 +2927,7 @@ int kvm_cpu_exec(CPUState *cpu)
              * still full. Got kicked by KVM_RESET_DIRTY_RINGS.
              */
             trace_kvm_dirty_ring_full(cpu->cpu_index);
-            qemu_mutex_lock_iothread();
+            bql_lock();
             /*
              * We throttle vCPU by making it sleep once it exit from kernel
              * due to dirty ring full. In the dirtylimit scenario, reaping
@@ -2939,7 +2939,7 @@ int kvm_cpu_exec(CPUState *cpu)
             } else {
                 kvm_dirty_ring_reap(kvm_state, NULL);
             }
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             dirtylimit_vcpu_execute(cpu);
             ret = 0;
             break;
@@ -2956,9 +2956,9 @@ int kvm_cpu_exec(CPUState *cpu)
                 break;
             case KVM_SYSTEM_EVENT_CRASH:
                 kvm_cpu_synchronize_state(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
                 qemu_system_guest_panicked(cpu_get_crash_info(cpu));
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 ret = 0;
                 break;
             default:
@@ -2973,7 +2973,7 @@ int kvm_cpu_exec(CPUState *cpu)
     } while (ret == 0);

     cpu_exec_end(cpu);
-    qemu_mutex_lock_iothread();
+    bql_lock();

     if (ret < 0) {
         cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);