author | Paolo Bonzini <pbonzini@redhat.com> | 2018-08-21 15:31:24 +0200
committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-10-02 19:09:12 +0200
commit | 92d5f1a4147c3722b5e9a8bcfb7dc261b7a8b855 (patch)
tree | a82c08f864204798283f7621c2c7ffd9458275d2 /target/i386/seg_helper.c
parent | 27e18b8952f8b7a1e26350846f8a0d5a9b33bfb8 (diff)
target/i386: unify masking of interrupts
Interrupt handling depends on various flags in env->hflags or env->hflags2,
and the exact details were not replicated identically between x86_cpu_has_work
and x86_cpu_exec_interrupt. Create a new function that extracts the
highest-priority non-masked interrupt, and use it in both functions.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
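For context, the helper this commit introduces is not part of the seg_helper.c hunk shown below (the diffstat is limited to that file). The following is therefore only a rough sketch of what x86_cpu_pending_interrupt plausibly looks like, reconstructed from the masking checks performed by the old else-if chain that this patch removes; the exact body and priority order are an assumption, not the committed code.

/* Sketch only: reconstructed from the masking conditions visible in the
 * removed code below. The real x86_cpu_pending_interrupt added by this
 * commit lives outside target/i386/seg_helper.c and may differ in detail.
 * It returns the highest-priority interrupt bit that is both requested
 * and not masked, or 0 if everything is masked.
 */
int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if !defined(CONFIG_USER_ONLY)
    /* APIC poll requests are never masked. */
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        return CPU_INTERRUPT_POLL;
    }
#endif
    /* SIPI is not gated by GIF or the interrupt flag. */
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        return CPU_INTERRUPT_SIPI;
    }

    if (env->hflags2 & HF2_GIF_MASK) {
        /* SMI is blocked while already in SMM. */
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            return CPU_INTERRUPT_SMI;
        /* NMI is blocked while a previous NMI is still being handled. */
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            return CPU_INTERRUPT_NMI;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            return CPU_INTERRUPT_MCE;
        /* External interrupts honour V_INTR_MASKING/HIF under SVM,
         * otherwise EFLAGS.IF and the STI/MOV SS interrupt shadow. */
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            return CPU_INTERRUPT_HARD;
#if !defined(CONFIG_USER_ONLY)
        /* Virtual interrupts injected by the hypervisor via the VMCB. */
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            return CPU_INTERRUPT_VIRQ;
#endif
        }
    }

    return 0;
}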
Diffstat (limited to 'target/i386/seg_helper.c')
-rw-r--r-- | target/i386/seg_helper.c | 106
1 file changed, 49 insertions(+), 57 deletions(-)
diff --git a/target/i386/seg_helper.c b/target/i386/seg_helper.c
index d1cbc6ebf0..ba9cc688ac 100644
--- a/target/i386/seg_helper.c
+++ b/target/i386/seg_helper.c
@@ -1319,74 +1319,66 @@ bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
-    bool ret = false;
+    int intno;
 
+    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
+    if (!interrupt_request) {
+        return false;
+    }
+
+    /* Don't process multiple interrupt requests in a single call.
+     * This is required to make icount-driven execution deterministic.
+     */
+    switch (interrupt_request) {
 #if !defined(CONFIG_USER_ONLY)
-    if (interrupt_request & CPU_INTERRUPT_POLL) {
+    case CPU_INTERRUPT_POLL:
         cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
         apic_poll_irq(cpu->apic_state);
-        /* Don't process multiple interrupt requests in a single call.
-           This is required to make icount-driven execution deterministic. */
-        return true;
-    }
+        break;
 #endif
-    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+    case CPU_INTERRUPT_SIPI:
         do_cpu_sipi(cpu);
-        ret = true;
-    } else if (env->hflags2 & HF2_GIF_MASK) {
-        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
-            do_smm_enter(cpu);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
-                   !(env->hflags2 & HF2_NMI_MASK)) {
-            cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
-            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
-            env->hflags2 |= HF2_NMI_MASK;
-            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
-            ret = true;
-        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
-            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
-            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
-            ret = true;
-        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                   (((env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->hflags2 & HF2_HIF_MASK)) ||
-                    (!(env->hflags2 & HF2_VINTR_MASK) &&
-                     (env->eflags & IF_MASK &&
-                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
-            int intno;
-            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
-            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
-                                       CPU_INTERRUPT_VIRQ);
-            intno = cpu_get_pic_interrupt(env);
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            /* ensure that no TB jump will be modified as
-               the program flow was changed */
-            ret = true;
+        break;
+    case CPU_INTERRUPT_SMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
+        do_smm_enter(cpu);
+        break;
+    case CPU_INTERRUPT_NMI:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
+        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
+        env->hflags2 |= HF2_NMI_MASK;
+        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
+        break;
+    case CPU_INTERRUPT_MCE:
+        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
+        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
+        break;
+    case CPU_INTERRUPT_HARD:
+        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
+        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
+                                   CPU_INTERRUPT_VIRQ);
+        intno = cpu_get_pic_interrupt(env);
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        break;
 #if !defined(CONFIG_USER_ONLY)
-        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
-                   (env->eflags & IF_MASK) &&
-                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
-            int intno;
-            /* FIXME: this should respect TPR */
-            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
-            intno = x86_ldl_phys(cs, env->vm_vmcb
+    case CPU_INTERRUPT_VIRQ:
+        /* FIXME: this should respect TPR */
+        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
+        intno = x86_ldl_phys(cs, env->vm_vmcb
                              + offsetof(struct vmcb, control.int_vector));
-            qemu_log_mask(CPU_LOG_TB_IN_ASM,
-                          "Servicing virtual hardware INT=0x%02x\n", intno);
-            do_interrupt_x86_hardirq(env, intno, 1);
-            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
-            ret = true;
+        qemu_log_mask(CPU_LOG_TB_IN_ASM,
+                      "Servicing virtual hardware INT=0x%02x\n", intno);
+        do_interrupt_x86_hardirq(env, intno, 1);
+        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
+        break;
 #endif
-        }
     }
 
-    return ret;
+    /* Ensure that no TB jump will be modified as the program flow was changed. */
+    return true;
 }
 
 void helper_lldt(CPUX86State *env, int selector)
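The commit message says the same helper also backs x86_cpu_has_work. That function is outside this file's diff, so the one-liner below is only a sketch of the shape it would take once the masking logic is shared, not the committed code.

/* Hypothetical sketch, not taken from this diff: with the masking
 * centralized, "has work" reduces to asking whether any unmasked
 * interrupt is pending.
 */
bool x86_cpu_has_work(CPUState *cs)
{
    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
}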