author    Jan Kiszka <jan.kiszka@siemens.com>      2015-06-18 18:47:23 +0200
committer Paolo Bonzini <pbonzini@redhat.com>      2015-07-01 15:45:51 +0200
commit    4b8523ee896750c37b4fa224a40d34703cbdf4c6 (patch)
tree      1083d1e6c59b33c68808094d4b6844ddf935c9f0
parent    4840f10eff37eebc609fcc933ab985dc66df95c6 (diff)
kvm: First step to push iothread lock out of inner run loop
This opens the path to get rid of the iothread lock on vmexits in KVM
mode. On x86, the in-kernel irqchip has to be used because we otherwise
need to synchronize APIC and other per-cpu state accesses that could be
changed concurrently.

Regarding pre/post-run callbacks, s390x and ARM should be fine without
specific locking as the callbacks are empty. MIPS and POWER require
locking for the pre-run callback.

For the handle_exit callback, it is non-empty on x86, POWER and s390.
Some POWER cases could do without the locking, but it is left in place
for now.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1434646046-27150-7-git-send-email-pbonzini@redhat.com>
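The core of the change is the new shape of kvm_cpu_exec(): the iothread
lock is dropped once before the run loop and reacquired once after it,
and the individual exit handlers take it only for the duration of work
that touches shared device state. The self-contained sketch below models
that shape with a plain pthread mutex standing in for QEMU's iothread
lock (BQL); run_guest(), the exit kinds and cpu_exec_loop() are
hypothetical placeholders, not QEMU APIs.

/* Minimal sketch of the restructured run loop, not the actual QEMU code.
 * A pthread mutex stands in for the iothread lock (BQL). */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER;

enum exit_kind { EXIT_IO, EXIT_IRQ_WINDOW, EXIT_SHUTDOWN };

/* Placeholder for the KVM_RUN ioctl: runs guest code until a vmexit. */
static enum exit_kind run_guest(int *steps)
{
    return (*steps)-- > 0 ? EXIT_IO : EXIT_SHUTDOWN;
}

static int cpu_exec_loop(void)
{
    int steps = 3, ret = 0;

    /* New shape: drop the lock once, outside the loop ... */
    pthread_mutex_unlock(&iothread_lock);

    do {
        enum exit_kind exit = run_guest(&steps); /* lock NOT held here */

        switch (exit) {
        case EXIT_IO:
            /* ... and reacquire it only for exits that touch shared
             * device state, releasing it again before re-entering. */
            pthread_mutex_lock(&iothread_lock);
            printf("handling I/O exit under the lock\n");
            pthread_mutex_unlock(&iothread_lock);
            break;
        case EXIT_IRQ_WINDOW:
            break;          /* lock-free exit: no shared state touched */
        case EXIT_SHUTDOWN:
            ret = 1;
            break;
        }
    } while (ret == 0);

    pthread_mutex_lock(&iothread_lock); /* caller expects the lock held */
    return ret;
}

int main(void)
{
    pthread_mutex_lock(&iothread_lock); /* enter like kvm_cpu_exec: held */
    cpu_exec_loop();
    pthread_mutex_unlock(&iothread_lock);
    return 0;
}

Compared with the old shape (unlock immediately before KVM_RUN, relock
immediately after), this moves the lock boundary out of the hot path, so
exits that need no device access never take the lock at all.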
-rw-r--r--  kvm-all.c           10
-rw-r--r--  target-i386/kvm.c   24
-rw-r--r--  target-mips/kvm.c    4
-rw-r--r--  target-ppc/kvm.c     7
-rw-r--r--  target-s390x/kvm.c   3
5 files changed, 46 insertions(+), 2 deletions(-)
diff --git a/kvm-all.c b/kvm-all.c
index e98b08def2..ca428ca298 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -1755,6 +1755,8 @@ int kvm_cpu_exec(CPUState *cpu)
return EXCP_HLT;
}
+ qemu_mutex_unlock_iothread();
+
do {
MemTxAttrs attrs;
@@ -1773,11 +1775,9 @@ int kvm_cpu_exec(CPUState *cpu)
*/
qemu_cpu_kick_self();
}
- qemu_mutex_unlock_iothread();
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
- qemu_mutex_lock_iothread();
attrs = kvm_arch_post_run(cpu, run);
if (run_ret < 0) {
@@ -1804,20 +1804,24 @@ int kvm_cpu_exec(CPUState *cpu)
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
+ qemu_mutex_lock_iothread();
kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
run->io.size,
run->io.count);
+ qemu_mutex_unlock_iothread();
ret = 0;
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
+ qemu_mutex_lock_iothread();
address_space_rw(&address_space_memory,
run->mmio.phys_addr, attrs,
run->mmio.data,
run->mmio.len,
run->mmio.is_write);
+ qemu_mutex_unlock_iothread();
ret = 0;
break;
case KVM_EXIT_IRQ_WINDOW_OPEN:
@@ -1860,6 +1864,8 @@ int kvm_cpu_exec(CPUState *cpu)
}
} while (ret == 0);
+ qemu_mutex_lock_iothread();
+
if (ret < 0) {
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index daced5cb94..6426600c63 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -2191,7 +2191,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
/* Inject NMI */
if (cpu->interrupt_request & CPU_INTERRUPT_NMI) {
+ qemu_mutex_lock_iothread();
cpu->interrupt_request &= ~CPU_INTERRUPT_NMI;
+ qemu_mutex_unlock_iothread();
+
DPRINTF("injected NMI\n");
ret = kvm_vcpu_ioctl(cpu, KVM_NMI);
if (ret < 0) {
@@ -2200,6 +2203,10 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
}
}
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
+
/* Force the VCPU out of its inner loop to process any INIT requests
* or (for userspace APIC, but it is cheap to combine the checks here)
* pending TPR access reports.
@@ -2243,6 +2250,8 @@ void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
DPRINTF("setting tpr\n");
run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state);
+
+ qemu_mutex_unlock_iothread();
}
}
@@ -2256,8 +2265,17 @@ MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
} else {
env->eflags &= ~IF_MASK;
}
+
+ /* We need to protect the apic state against concurrent accesses from
+ * different threads in case the userspace irqchip is used. */
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_lock_iothread();
+ }
cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8);
cpu_set_apic_base(x86_cpu->apic_state, run->apic_base);
+ if (!kvm_irqchip_in_kernel()) {
+ qemu_mutex_unlock_iothread();
+ }
return cpu_get_mem_attrs(env);
}
@@ -2550,13 +2568,17 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
switch (run->exit_reason) {
case KVM_EXIT_HLT:
DPRINTF("handle_hlt\n");
+ qemu_mutex_lock_iothread();
ret = kvm_handle_halt(cpu);
+ qemu_mutex_unlock_iothread();
break;
case KVM_EXIT_SET_TPR:
ret = 0;
break;
case KVM_EXIT_TPR_ACCESS:
+ qemu_mutex_lock_iothread();
ret = kvm_handle_tpr_access(cpu);
+ qemu_mutex_unlock_iothread();
break;
case KVM_EXIT_FAIL_ENTRY:
code = run->fail_entry.hardware_entry_failure_reason;
@@ -2582,7 +2604,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
case KVM_EXIT_DEBUG:
DPRINTF("kvm_exit_debug\n");
+ qemu_mutex_lock_iothread();
ret = kvm_handle_debug(cpu, &run->debug.arch);
+ qemu_mutex_unlock_iothread();
break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
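The x86 hunks above lean on one idiom repeatedly: take the iothread lock
around APIC accesses only when the irqchip is emulated in userspace,
since with the in-kernel irqchip no other QEMU thread reads or writes
that state. A minimal sketch of the idiom, again assuming a pthread
mutex in place of the BQL; irqchip_in_kernel, apic_tpr and
post_run_sync_tpr() are hypothetical stand-ins, not QEMU functions.

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t iothread_lock = PTHREAD_MUTEX_INITIALIZER;
static bool irqchip_in_kernel;   /* decided once at VM setup time */
static int apic_tpr;             /* stand-in for userspace APIC state */

static void post_run_sync_tpr(int cr8)
{
    /* With the in-kernel irqchip the kernel owns the APIC, so no other
     * thread touches this state and the lock can be skipped entirely. */
    if (!irqchip_in_kernel) {
        pthread_mutex_lock(&iothread_lock);
    }
    apic_tpr = cr8;              /* mirrors the cpu_set_apic_tpr() call */
    if (!irqchip_in_kernel) {
        pthread_mutex_unlock(&iothread_lock);
    }
}

int main(void)
{
    irqchip_in_kernel = false;   /* userspace APIC: locking is required */
    post_run_sync_tpr(5);
    return apic_tpr == 5 ? 0 : 1;
}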
diff --git a/target-mips/kvm.c b/target-mips/kvm.c
index 948619fbab..7d2293d934 100644
--- a/target-mips/kvm.c
+++ b/target-mips/kvm.c
@@ -99,6 +99,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
int r;
struct kvm_mips_interrupt intr;
+ qemu_mutex_lock_iothread();
+
if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
cpu_mips_io_interrupts_pending(cpu)) {
intr.cpu = -1;
@@ -109,6 +111,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
__func__, cs->cpu_index, intr.irq);
}
}
+
+ qemu_mutex_unlock_iothread();
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
diff --git a/target-ppc/kvm.c b/target-ppc/kvm.c
index afb4696b8a..ddf469fe09 100644
--- a/target-ppc/kvm.c
+++ b/target-ppc/kvm.c
@@ -1242,6 +1242,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
int r;
unsigned irq;
+ qemu_mutex_lock_iothread();
+
/* PowerPC QEMU tracks the various core input pins (interrupt, critical
* interrupt, reset, etc) in PPC-specific env->irq_input_state. */
if (!cap_interrupt_level &&
@@ -1269,6 +1271,8 @@ void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
/* We don't know if there are more interrupts pending after this. However,
* the guest will return to userspace in the course of handling this one
* anyways, so we will get a chance to deliver the rest. */
+
+ qemu_mutex_unlock_iothread();
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
@@ -1570,6 +1574,8 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
CPUPPCState *env = &cpu->env;
int ret;
+ qemu_mutex_lock_iothread();
+
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
@@ -1620,6 +1626,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
break;
}
+ qemu_mutex_unlock_iothread();
return ret;
}
diff --git a/target-s390x/kvm.c b/target-s390x/kvm.c
index 135111a2c4..ae3a0affec 100644
--- a/target-s390x/kvm.c
+++ b/target-s390x/kvm.c
@@ -2007,6 +2007,8 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
S390CPU *cpu = S390_CPU(cs);
int ret = 0;
+ qemu_mutex_lock_iothread();
+
switch (run->exit_reason) {
case KVM_EXIT_S390_SIEIC:
ret = handle_intercept(cpu);
@@ -2027,6 +2029,7 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
break;
}
+ qemu_mutex_unlock_iothread();
if (ret == 0) {
ret = EXCP_INTERRUPT;