author      Paolo Bonzini <pbonzini@redhat.com>    2018-08-21 15:31:24 +0200
committer   Paolo Bonzini <pbonzini@redhat.com>    2018-10-02 19:09:12 +0200
commit      92d5f1a4147c3722b5e9a8bcfb7dc261b7a8b855 (patch)
tree        a82c08f864204798283f7621c2c7ffd9458275d2 /target/i386/cpu.c
parent      27e18b8952f8b7a1e26350846f8a0d5a9b33bfb8 (diff)
target/i386: unify masking of interrupts
Interrupt handling depends on various flags in env->hflags or env->hflags2, and the exact details were not replicated identically between x86_cpu_has_work and x86_cpu_exec_interrupt. Create a new function that extracts the highest-priority non-masked interrupt, and use it in both functions.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'target/i386/cpu.c')
-rw-r--r--   target/i386/cpu.c   51
1 file changed, 41 insertions(+), 10 deletions(-)
diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index f24295e6e4..c88876dfe3 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -5429,20 +5429,51 @@ static void x86_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
     cpu->env.eip = tb->pc - tb->cs_base;
 }
 
-static bool x86_cpu_has_work(CPUState *cs)
+int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request)
 {
     X86CPU *cpu = X86_CPU(cs);
     CPUX86State *env = &cpu->env;
 
-    return ((cs->interrupt_request & (CPU_INTERRUPT_HARD |
-                                      CPU_INTERRUPT_POLL)) &&
-            (env->eflags & IF_MASK)) ||
-           (cs->interrupt_request & (CPU_INTERRUPT_NMI |
-                                     CPU_INTERRUPT_INIT |
-                                     CPU_INTERRUPT_SIPI |
-                                     CPU_INTERRUPT_MCE)) ||
-           ((cs->interrupt_request & CPU_INTERRUPT_SMI) &&
-            !(env->hflags & HF_SMM_MASK));
+#if !defined(CONFIG_USER_ONLY)
+    if (interrupt_request & CPU_INTERRUPT_POLL) {
+        return CPU_INTERRUPT_POLL;
+    }
+#endif
+    if (interrupt_request & CPU_INTERRUPT_SIPI) {
+        return CPU_INTERRUPT_SIPI;
+    }
+
+    if (env->hflags2 & HF2_GIF_MASK) {
+        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
+            !(env->hflags & HF_SMM_MASK)) {
+            return CPU_INTERRUPT_SMI;
+        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
+                   !(env->hflags2 & HF2_NMI_MASK)) {
+            return CPU_INTERRUPT_NMI;
+        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
+            return CPU_INTERRUPT_MCE;
+        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
+                   (((env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->hflags2 & HF2_HIF_MASK)) ||
+                    (!(env->hflags2 & HF2_VINTR_MASK) &&
+                     (env->eflags & IF_MASK &&
+                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
+            return CPU_INTERRUPT_HARD;
+#if !defined(CONFIG_USER_ONLY)
+        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
+                   (env->eflags & IF_MASK) &&
+                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
+            return CPU_INTERRUPT_VIRQ;
+#endif
+        }
+    }
+
+    return 0;
+}
+
+static bool x86_cpu_has_work(CPUState *cs)
+{
+    return x86_cpu_pending_interrupt(cs, cs->interrupt_request) != 0;
 }
 
 static void x86_disas_set_info(CPUState *cs, disassemble_info *info)
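
The hunk above covers only target/i386/cpu.c, i.e. the x86_cpu_has_work() side; the commit message's "use it in both functions" also refers to x86_cpu_exec_interrupt(), which lives outside this file and is not shown here. A minimal sketch of how such a consumer might dispatch on the helper's return value follows. The delivery helpers it calls (do_cpu_sipi, do_smm_enter, cpu_get_pic_interrupt, do_interrupt_x86_hardirq) are assumed from elsewhere in the i386 target; the exact call sequence is illustrative, not the committed code.

/* Illustrative sketch only -- not part of this commit.  The real
 * x86_cpu_exec_interrupt() in target/i386/seg_helper.c handles more
 * cases (NMI, MCE, VIRQ, ...) and SVM intercepts. */
static bool example_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    /* Ask the new helper which request, if any, is deliverable now. */
    switch (x86_cpu_pending_interrupt(cs, interrupt_request)) {
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);                        /* deliver startup IPI */
        return true;
    case CPU_INTERRUPT_SMI:
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);                       /* enter system management mode */
        return true;
    case CPU_INTERRUPT_HARD: {
        int intno = cpu_get_pic_interrupt(env);  /* vector from the PIC/APIC */
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
        do_interrupt_x86_hardirq(env, intno, 1); /* inject the external IRQ */
        return true;
    }
    default:
        return false;                            /* nothing deliverable right now */
    }
}

Returning the highest-priority non-masked request as a single value is what lets both callers share one masking policy: x86_cpu_has_work() only needs to check for a non-zero result, while a dispatcher along the lines of the sketch acts on the specific value.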