| author    | Jan Kiszka <jan.kiszka@siemens.com>              | 2010-03-01 19:10:29 +0100 |
|-----------|--------------------------------------------------|---------------------------|
| committer | Marcelo Tosatti <mtosatti@redhat.com>            | 2010-03-04 00:29:26 -0300 |
| commit    | b0b1d69079fcb9453f45aade9e9f6b71422147b0 (patch)  |                           |
| tree      | d49073cd79a4987ded9b57a1b057a6329877160f          |                           |
| parent    | c902760fb25f9c490af01e8f6bccaa8dd71cc224 (diff)   |                           |
KVM: Rework of guest debug state writing
So far we synchronized any dirty VCPU state back into the kernel before
updating the guest debug state. This was a tribute to a deficit in x86
kernels before 2.6.33. But as this is an arch-dependent issue, it is
better handled in the x86 part of KVM, so the writeback point is removed
from the generic code. This also avoids overwriting the flushed state
later on if user space decides to change some more registers before
resuming the guest.
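
For context, the write-back point mentioned above follows the dirty-flag pattern sketched below (the same pattern appears in the hunk removed from kvm_invoke_set_guest_debug() further down). This is only a condensed illustration: run_vcpu_once() is a made-up name, not a QEMU function, and the real flush sits in QEMU's vCPU run loop.

```c
/*
 * Condensed, illustrative-only sketch of the single remaining write-back
 * point: dirty VCPU state is flushed exactly once, right before KVM_RUN.
 */
static void run_vcpu_once(CPUState *env)
{
    if (env->kvm_vcpu_dirty) {
        kvm_arch_put_registers(env);   /* now also covers the debug state, as last step */
        env->kvm_vcpu_dirty = 0;
    }
    kvm_vcpu_ioctl(env, KVM_RUN, 0);   /* enter the guest */
}
```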
We furthermore need to reinject guest exceptions via the appropriate
mechanism: KVM_SET_GUEST_DEBUG for older kernels, KVM_SET_VCPU_EVENTS
for recent ones. Using both mechanisms at the same time would cause
state corruption.
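
The sketch below illustrates the choice between the two reinjection paths. It is a simplified, hypothetical helper (reinject_debug_trap() does not exist in QEMU); the real logic is split across kvm_arch_debug() and kvm_guest_debug_workarounds() in the diff that follows, and exception numbers 1 and 3 are the x86 #DB and #BP traps.

```c
/* Hypothetical helper showing the mechanism selection; the real code is
 * kvm_guest_debug_workarounds() in target-i386/kvm.c below. */
static int reinject_debug_trap(CPUState *env)
{
    unsigned long reinject;

    if (kvm_has_vcpu_events()) {
        /* Recent kernels: leave env->exception_injected set; it reaches the
         * kernel via KVM_SET_VCPU_EVENTS on the next register write-back,
         * so KVM_SET_GUEST_DEBUG must not be used for it as well. */
        return 0;
    }

    /* Older kernels: translate the pending #DB/#BP into the corresponding
     * KVM_SET_GUEST_DEBUG reinjection flag and clear the soft state. */
    reinject = (env->exception_injected == 1) ? KVM_GUESTDBG_INJECT_DB
                                              : KVM_GUESTDBG_INJECT_BP;
    env->exception_injected = -1;
    return kvm_update_guest_debug(env, reinject);
}
```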
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
| mode       | file              | lines changed |
|------------|-------------------|---------------|
| -rw-r--r-- | kvm-all.c         | 24            |
| -rw-r--r-- | kvm.h             | 1             |
| -rw-r--r-- | target-i386/kvm.c | 47            |

3 files changed, 60 insertions, 12 deletions
diff --git a/kvm-all.c b/kvm-all.c
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -65,6 +65,7 @@ struct KVMState
     int broken_set_mem_region;
     int migration_log;
     int vcpu_events;
+    int robust_singlestep;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
 #endif
@@ -659,6 +660,12 @@ int kvm_init(int smp_cpus)
     s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
 #endif
 
+    s->robust_singlestep = 0;
+#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
+    s->robust_singlestep =
+        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
+#endif
+
     ret = kvm_arch_init(s, smp_cpus);
     if (ret < 0)
         goto err;
@@ -917,6 +924,11 @@ int kvm_has_vcpu_events(void)
     return kvm_state->vcpu_events;
 }
 
+int kvm_has_robust_singlestep(void)
+{
+    return kvm_state->robust_singlestep;
+}
+
 void kvm_setup_guest_memory(void *start, size_t size)
 {
     if (!kvm_has_sync_mmu()) {
@@ -974,10 +986,6 @@ static void kvm_invoke_set_guest_debug(void *data)
     struct kvm_set_guest_debug_data *dbg_data = data;
     CPUState *env = dbg_data->env;
 
-    if (env->kvm_vcpu_dirty) {
-        kvm_arch_put_registers(env);
-        env->kvm_vcpu_dirty = 0;
-    }
     dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
 }
 
@@ -985,12 +993,12 @@ int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
 {
     struct kvm_set_guest_debug_data data;
 
-    data.dbg.control = 0;
-    if (env->singlestep_enabled)
-        data.dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+    data.dbg.control = reinject_trap;
+    if (env->singlestep_enabled) {
+        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
+    }
 
     kvm_arch_update_guest_debug(env, &data.dbg);
-    data.dbg.control |= reinject_trap;
 
     data.env = env;
     on_vcpu(env, kvm_invoke_set_guest_debug, &data);
diff --git a/kvm.h b/kvm.h
--- a/kvm.h
+++ b/kvm.h
@@ -40,6 +40,7 @@ int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size);
 
 int kvm_has_sync_mmu(void);
 int kvm_has_vcpu_events(void);
+int kvm_has_robust_singlestep(void);
 
 void kvm_setup_guest_memory(void *start, size_t size);
 
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index d2116a7b3f..e0247ea631 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -852,6 +852,37 @@ static int kvm_get_vcpu_events(CPUState *env)
     return 0;
 }
 
+static int kvm_guest_debug_workarounds(CPUState *env)
+{
+    int ret = 0;
+#ifdef KVM_CAP_SET_GUEST_DEBUG
+    unsigned long reinject_trap = 0;
+
+    if (!kvm_has_vcpu_events()) {
+        if (env->exception_injected == 1) {
+            reinject_trap = KVM_GUESTDBG_INJECT_DB;
+        } else if (env->exception_injected == 3) {
+            reinject_trap = KVM_GUESTDBG_INJECT_BP;
+        }
+        env->exception_injected = -1;
+    }
+
+    /*
+     * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF
+     * injected via SET_GUEST_DEBUG while updating GP regs. Work around this
+     * by updating the debug state once again if single-stepping is on.
+     * Another reason to call kvm_update_guest_debug here is a pending debug
+     * trap raise by the guest. On kernels without SET_VCPU_EVENTS we have to
+     * reinject them via SET_GUEST_DEBUG.
+     */
+    if (reinject_trap ||
+        (!kvm_has_robust_singlestep() && env->singlestep_enabled)) {
+        ret = kvm_update_guest_debug(env, reinject_trap);
+    }
+#endif /* KVM_CAP_SET_GUEST_DEBUG */
+    return ret;
+}
+
 int kvm_arch_put_registers(CPUState *env)
 {
     int ret;
@@ -880,6 +911,11 @@ int kvm_arch_put_registers(CPUState *env)
     if (ret < 0)
         return ret;
 
+    /* must be last */
+    ret = kvm_guest_debug_workarounds(env);
+    if (ret < 0)
+        return ret;
+
     return 0;
 }
 
@@ -1123,10 +1159,13 @@ int kvm_arch_debug(struct kvm_debug_exit_arch *arch_info)
     } else if (kvm_find_sw_breakpoint(cpu_single_env, arch_info->pc))
         handle = 1;
 
-    if (!handle)
-        kvm_update_guest_debug(cpu_single_env,
-                               (arch_info->exception == 1) ?
-                               KVM_GUESTDBG_INJECT_DB : KVM_GUESTDBG_INJECT_BP);
+    if (!handle) {
+        cpu_synchronize_state(cpu_single_env);
+        assert(cpu_single_env->exception_injected == -1);
+
+        cpu_single_env->exception_injected = arch_info->exception;
+        cpu_single_env->has_error_code = 0;
+    }
     return handle;
 }