author    | bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162> | 2003-08-20 23:02:09 +0000
committer | bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162> | 2003-08-20 23:02:09 +0000
commit    | 3f3373166227b13e762e20d2fb51eadfa6a2d653
tree      | 61211017f0428c56b245c36b357aa0e2de4ec91e
parent    | d05e66d217f8f83487c3b1d3015a67316b47645f
pop ss, mov ss, x and sti disable irqs for the next instruction - began dispatch optimization by adding new x86 cpu 'hidden' flags
git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@372 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r-- | cpu-exec.c         | 26
-rw-r--r-- | cpu-i386.h         | 36
-rw-r--r-- | exec.h             | 10
-rw-r--r-- | helper-i386.c      | 22
-rw-r--r-- | helper2-i386.c     |  8
-rw-r--r-- | op-i386.c          | 10
-rw-r--r-- | softmmu_template.h |  8
-rw-r--r-- | translate-i386.c   | 47
8 files changed, 109 insertions, 58 deletions
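
The heart of the change is a one-instruction "interrupt shadow": pop ss, mov ss,x and sti must keep hardware interrupts off until the following instruction has executed. Condensed into a standalone C sketch (the HF_* names and the IF_MASK value come from the patch below; the toy_cpu struct and both helpers are illustrative, not QEMU code):

    #define IF_MASK              0x00000200   /* eflags interrupt-enable bit */
    #define HF_INHIBIT_IRQ_SHIFT 3
    #define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)

    struct toy_cpu {
        unsigned int eflags;   /* architectural flags */
        unsigned int hflags;   /* qemu-internal hidden flags */
    };

    /* emitted after sti, pop ss and mov ss,x: open a one-insn shadow */
    static void set_inhibit_irq(struct toy_cpu *env)
    {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    }

    /* main-loop test: a pending hard interrupt is taken only when IF
       is set and no shadow is active */
    static int can_take_hard_irq(const struct toy_cpu *env)
    {
        return (env->eflags & IF_MASK) &&
               !(env->hflags & HF_INHIBIT_IRQ_MASK);
    }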
diff --git a/cpu-exec.c b/cpu-exec.c
index 39bb933f51..7899a90862 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -186,7 +186,8 @@ int cpu_exec(CPUState *env1)
 #if defined(TARGET_I386)
             /* if hardware interrupt pending, we execute it */
             if ((interrupt_request & CPU_INTERRUPT_HARD) &&
-                (env->eflags & IF_MASK)) {
+                (env->eflags & IF_MASK) &&
+                !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
                 int intno;
                 intno = cpu_x86_get_pic_interrupt(env);
                 if (loglevel) {
@@ -233,21 +234,20 @@ int cpu_exec(CPUState *env1)
 #endif
         }
 #endif
-        /* we compute the CPU state. We assume it will not
-           change during the whole generated block. */
+        /* we record a subset of the CPU state. It will
+           always be the same before a given translated block
+           is executed. */
 #if defined(TARGET_I386)
         flags = (env->segs[R_CS].flags & DESC_B_MASK)
-            >> (DESC_B_SHIFT - GEN_FLAG_CODE32_SHIFT);
+            >> (DESC_B_SHIFT - HF_CS32_SHIFT);
         flags |= (env->segs[R_SS].flags & DESC_B_MASK)
-            >> (DESC_B_SHIFT - GEN_FLAG_SS32_SHIFT);
+            >> (DESC_B_SHIFT - HF_SS32_SHIFT);
         flags |= (((unsigned long)env->segs[R_DS].base |
                    (unsigned long)env->segs[R_ES].base |
                    (unsigned long)env->segs[R_SS].base) != 0) <<
-            GEN_FLAG_ADDSEG_SHIFT;
-        flags |= env->cpl << GEN_FLAG_CPL_SHIFT;
-        flags |= env->soft_mmu << GEN_FLAG_SOFT_MMU_SHIFT;
-        flags |= (env->eflags & VM_MASK) >> (17 - GEN_FLAG_VM_SHIFT);
-        flags |= (env->eflags & (IOPL_MASK | TF_MASK));
+            HF_ADDSEG_SHIFT;
+        flags |= env->hflags;
+        flags |= (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
         cs_base = env->segs[R_CS].base;
         pc = cs_base + env->eip;
 #elif defined(TARGET_ARM)
@@ -337,8 +337,8 @@ int cpu_exec(CPUState *env1)
         /* reset soft MMU for next block (it can currently
            only be set by a memory fault) */
 #if defined(TARGET_I386) && !defined(CONFIG_SOFTMMU)
-        if (env->soft_mmu) {
-            env->soft_mmu = 0;
+        if (env->hflags & HF_SOFTMMU_MASK) {
+            env->hflags &= ~HF_SOFTMMU_MASK;
             /* do not allow linking to another block */
             T0 = 0;
         }
@@ -499,7 +499,7 @@ static inline int handle_cpu_signal(unsigned long pc, unsigned long address,
         raise_exception_err(EXCP0E_PAGE, env->error_code);
     } else {
         /* activate soft MMU for this block */
-        env->soft_mmu = 1;
+        env->hflags |= HF_SOFTMMU_MASK;
         sigprocmask(SIG_SETMASK, old_set, NULL);
         cpu_loop_exit();
     }
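
A design note on the hunk above: the new HF_* bits are chosen to avoid the eflags positions of TF (bit 8), IOPL (bits 12-13) and VM (bit 17), so the per-block flags word can be assembled with plain ORs instead of the old shift-by-shift repacking. A hedged sketch of the resulting key construction (mask values from cpu-i386.h; toy_cpu as in the sketch above):

    #define TF_MASK   0x00000100
    #define IOPL_MASK 0x00003000
    #define VM_MASK   0x00020000

    /* hflags and these eflags bits occupy disjoint bit positions, so a
       single OR yields a collision-free block-selection key */
    static unsigned int tb_lookup_flags(const struct toy_cpu *env)
    {
        return env->hflags | (env->eflags & (IOPL_MASK | TF_MASK | VM_MASK));
    }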
diff --git a/cpu-i386.h b/cpu-i386.h
index a60e959150..ad3a6b801b 100644
--- a/cpu-i386.h
+++ b/cpu-i386.h
@@ -73,6 +73,10 @@
 #define CC_S    0x0080
 #define CC_O    0x0800
 
+#define TF_SHIFT   8
+#define IOPL_SHIFT 12
+#define VM_SHIFT   17
+
 #define TF_MASK                 0x00000100
 #define IF_MASK                 0x00000200
 #define DF_MASK                 0x00000400
@@ -85,6 +89,29 @@
 #define VIP_MASK                0x00100000
 #define ID_MASK                 0x00200000
 
+/* hidden flags - used internally by qemu to represent additional cpu
+   states. Only the CPL and INHIBIT_IRQ flags are not redundant. We
+   avoid using the IOPL_MASK, TF_MASK and VM_MASK bit positions to
+   ease oring with eflags. */
+/* current cpl */
+#define HF_CPL_SHIFT         0
+/* true if soft mmu is being used */
+#define HF_SOFTMMU_SHIFT     2
+/* true if hardware interrupts must be disabled for next instruction */
+#define HF_INHIBIT_IRQ_SHIFT 3
+/* 16 or 32 segments */
+#define HF_CS32_SHIFT        4
+#define HF_SS32_SHIFT        5
+/* zero base for DS, ES and SS */
+#define HF_ADDSEG_SHIFT      6
+
+#define HF_CPL_MASK          (3 << HF_CPL_SHIFT)
+#define HF_SOFTMMU_MASK      (1 << HF_SOFTMMU_SHIFT)
+#define HF_INHIBIT_IRQ_MASK  (1 << HF_INHIBIT_IRQ_SHIFT)
+#define HF_CS32_MASK         (1 << HF_CS32_SHIFT)
+#define HF_SS32_MASK         (1 << HF_SS32_SHIFT)
+#define HF_ADDSEG_MASK       (1 << HF_ADDSEG_SHIFT)
+
 #define CR0_PE_MASK  (1 << 0)
 #define CR0_TS_MASK  (1 << 3)
 #define CR0_WP_MASK  (1 << 16)
@@ -226,6 +253,7 @@ typedef struct CPUX86State {
     uint32_t cc_dst;
     uint32_t cc_op;
     int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */
+    uint32_t hflags; /* hidden flags, see HF_xxx constants */
 
     /* FPU state */
     unsigned int fpstt; /* top of stack index */
@@ -249,8 +277,6 @@ typedef struct CPUX86State {
     SegmentCache tr;
     SegmentCache gdt; /* only base and limit are used */
     SegmentCache idt; /* only base and limit are used */
-    int cpl; /* current cpl */
-    int soft_mmu; /* TRUE if soft mmu is being used */
 
     /* sysenter registers */
     uint32_t sysenter_cs;
@@ -303,7 +329,11 @@ void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector);
 /* wrapper, just in case memory mappings must be changed */
 static inline void cpu_x86_set_cpl(CPUX86State *s, int cpl)
 {
-    s->cpl = cpl;
+#if HF_CPL_MASK == 3
+    s->hflags = (s->hflags & ~HF_CPL_MASK) | cpl;
+#else
+#error HF_CPL_MASK is hardcoded
+#endif
 }
 
 /* simulate fsave/frstor */
diff --git a/exec.h b/exec.h
--- a/exec.h
+++ b/exec.h
@@ -61,16 +61,6 @@ extern uint8_t gen_opc_instr_start[OPC_BUF_SIZE];
 
 #if defined(TARGET_I386)
 
-#define GEN_FLAG_CODE32_SHIFT 0
-#define GEN_FLAG_ADDSEG_SHIFT 1
-#define GEN_FLAG_SS32_SHIFT 2
-#define GEN_FLAG_VM_SHIFT 3
-#define GEN_FLAG_ST_SHIFT 4
-#define GEN_FLAG_TF_SHIFT 8 /* same position as eflags */
-#define GEN_FLAG_CPL_SHIFT 9
-#define GEN_FLAG_SOFT_MMU_SHIFT 11
-#define GEN_FLAG_IOPL_SHIFT 12 /* same position as eflags */
-
 void optimize_flags_init(void);
 
 #endif
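
Since cpl now lives in hflags bits 0-1 and HF_CPL_SHIFT is 0, cpu_x86_set_cpl can OR the two-bit value straight in; the #if/#error guard turns that layout assumption into a compile-time check. The same read-modify-write as a standalone sketch (toy_cpu as above; toy_set_cpl is an illustrative name, not the QEMU function):

    #define HF_CPL_MASK (3 << 0)   /* cpl occupies hflags bits 0-1 */

    static void toy_set_cpl(struct toy_cpu *env, int cpl)
    {
        /* clear the old cpl field, then OR in the new value; correct
           only because the field sits at bit 0 and cpl is 0..3 */
        env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl;
    }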
diff --git a/helper-i386.c b/helper-i386.c
index fdbb885b12..0d128ea448 100644
--- a/helper-i386.c
+++ b/helper-i386.c
@@ -189,7 +189,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr, *ssp;
-    int type, dpl, selector, ss_dpl;
+    int type, dpl, selector, ss_dpl, cpl;
     int has_error_code, new_stack, shift;
     uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2, push_size;
     uint32_t old_cs, old_ss, old_esp, old_eip;
@@ -216,8 +216,9 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         break;
     }
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
     /* check privilege if software int */
-    if (is_int && dpl < env->cpl)
+    if (is_int && dpl < cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
     /* check valid bit */
     if (!(e2 & DESC_P_MASK))
@@ -232,11 +233,11 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
     if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
-    if (dpl > env->cpl)
+    if (dpl > cpl)
         raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
     if (!(e2 & DESC_P_MASK))
         raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
-    if (!(e2 & DESC_C_MASK) && dpl < env->cpl) {
+    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
         /* to inner privilege */
         get_ss_esp_from_tss(&ss, &esp, dpl);
         if ((ss & 0xfffc) == 0)
@@ -255,7 +256,7 @@ static void do_interrupt_protected(int intno, int is_int, int error_code,
         if (!(ss_e2 & DESC_P_MASK))
             raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
         new_stack = 1;
-    } else if ((e2 & DESC_C_MASK) || dpl == env->cpl) {
+    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
         /* to same privilege */
         new_stack = 0;
     } else {
@@ -402,7 +403,7 @@ void do_interrupt_user(int intno, int is_int, int error_code,
 {
     SegmentCache *dt;
     uint8_t *ptr;
-    int dpl;
+    int dpl, cpl;
     uint32_t e2;
 
     dt = &env->idt;
@@ -410,8 +411,9 @@ void do_interrupt_user(int intno, int is_int, int error_code,
     e2 = ldl(ptr + 4);
 
     dpl = (e2 >> DESC_DPL_SHIFT) & 3;
+    cpl = env->hflags & HF_CPL_MASK;
     /* check privilege if software int */
-    if (is_int && dpl < env->cpl)
+    if (is_int && dpl < cpl)
         raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
 
     /* Since we emulate only user space, we cannot do more than
@@ -742,7 +744,7 @@ void helper_ljmp_protected_T0_T1(void)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -826,7 +828,7 @@ void helper_lcall_protected_T0_T1(int shift, int next_eip)
         raise_exception_err(EXCP0D_GPF, 0);
     if (load_segment(&e1, &e2, new_cs) != 0)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     if (e2 & DESC_S_MASK) {
         if (!(e2 & DESC_CS_MASK))
             raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
@@ -1079,7 +1081,7 @@ static inline void helper_ret_protected(int shift, int is_iret, int addend)
     if (!(e2 & DESC_S_MASK) ||
         !(e2 & DESC_CS_MASK))
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     rpl = new_cs & 3;
     if (rpl < cpl)
         raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
diff --git a/helper2-i386.c b/helper2-i386.c
index 7a6fc13b52..f006c72a28 100644
--- a/helper2-i386.c
+++ b/helper2-i386.c
@@ -52,7 +52,7 @@ CPUX86State *cpu_x86_init(void)
     tlb_flush(env);
 
 #ifdef CONFIG_SOFTMMU
-    env->soft_mmu = 1;
+    env->hflags |= HF_SOFTMMU_MASK;
 #endif
     /* init various static tables */
     if (!inited) {
@@ -228,7 +228,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     int cpl, error_code, is_dirty, is_user, prot, page_size, ret;
     unsigned long pd;
 
-    cpl = env->cpl;
+    cpl = env->hflags & HF_CPL_MASK;
     is_user = (cpl == 3);
 
 #ifdef DEBUG_MMU
@@ -325,7 +325,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
     }
 
  do_mapping:
-    if (env->soft_mmu) {
+    if (env->hflags & HF_SOFTMMU_MASK) {
         unsigned long paddr, vaddr, address, addend, page_offset;
         int index;
 
@@ -359,7 +359,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, uint32_t addr, int is_write)
         if ((pd & 0xfff) != 0) {
             /* IO access: no mapping is done as it will be handled by the
                soft MMU */
-            if (!env->soft_mmu)
+            if (!(env->hflags & HF_SOFTMMU_MASK))
                 ret = 2;
         } else {
             void *map_addr;
diff --git a/op-i386.c b/op-i386.c
--- a/op-i386.c
+++ b/op-i386.c
@@ -457,6 +457,16 @@ void OPPROTO op_sti(void)
     env->eflags |= IF_MASK;
 }
 
+void OPPROTO op_set_inhibit_irq(void)
+{
+    env->hflags |= HF_INHIBIT_IRQ_MASK;
+}
+
+void OPPROTO op_reset_inhibit_irq(void)
+{
+    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
+}
+
 #if 0
 /* vm86plus instructions */
 void OPPROTO op_cli_vm(void)
diff --git a/softmmu_template.h b/softmmu_template.h
index c36e25ae20..765e913a66 100644
--- a/softmmu_template.h
+++ b/softmmu_template.h
@@ -90,7 +90,7 @@ DATA_TYPE REGPARM(1) glue(glue(__ld, SUFFIX), _mmu)(unsigned long addr)
 
     /* test if there is match for unaligned or IO access */
     /* XXX: could be done more in memory macro in a non portable way */
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -126,7 +126,7 @@ static DATA_TYPE glue(slow_ld, SUFFIX)(unsigned long addr, void *retaddr)
     int is_user, index, shift;
     unsigned long physaddr, tlb_addr, addr1, addr2;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_read[is_user][index].address;
@@ -169,7 +169,7 @@ void REGPARM(2) glue(glue(__st, SUFFIX), _mmu)(unsigned long addr, DATA_TYPE val
     void *retaddr;
     int is_user, index;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_write[is_user][index].address;
@@ -203,7 +203,7 @@ static void glue(slow_st, SUFFIX)(unsigned long addr, DATA_TYPE val,
     unsigned long physaddr, tlb_addr;
     int is_user, index, i;
 
-    is_user = (env->cpl == 3);
+    is_user = ((env->hflags & HF_CPL_MASK) == 3);
     index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
     tlb_addr = env->tlb_write[is_user][index].address;
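
Every former env->cpl read above becomes a mask of hflags, again with no shift needed because HF_CPL_SHIFT is 0. The recurring user-mode test from the soft MMU, as a one-line illustrative helper (HF_CPL_MASK as defined in the earlier sketch):

    /* picks the user half of the TLB tables in softmmu_template.h */
    static int toy_is_user(const struct toy_cpu *env)
    {
        return (env->hflags & HF_CPL_MASK) == 3;
    }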
diff --git a/translate-i386.c b/translate-i386.c
index e527c901d2..0e72acf199 100644
--- a/translate-i386.c
+++ b/translate-i386.c
@@ -1552,7 +1552,9 @@ static void gen_movl_seg_T0(DisasContext *s, int seg_reg, unsigned int cur_eip)
     else
         gen_op_movl_seg_T0_vm(offsetof(CPUX86State,segs[seg_reg]));
     /* abort translation because the register may have a non zero base
-       or because ss32 may change */
+       or because ss32 may change. For R_SS, translation must always
+       stop, as special handling is needed to disable hardware
+       interrupts for the next instruction */
     if (seg_reg == R_SS || (!s->addseg && seg_reg < R_FS))
         s->is_jmp = 2;
 }
@@ -2356,10 +2358,14 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
     case 0x07: /* pop es */
     case 0x17: /* pop ss */
     case 0x1f: /* pop ds */
+        reg = b >> 3;
        gen_pop_T0(s);
-        gen_movl_seg_T0(s, b >> 3, pc_start - s->cs_base);
+        gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
         gen_pop_update(s);
-        /* XXX: if reg == SS, inhibit interrupts/trace */
+        if (reg == R_SS) {
+            /* if reg == SS, inhibit interrupts/trace */
+            gen_op_set_inhibit_irq();
+        }
         break;
     case 0x1a1: /* pop fs */
     case 0x1a9: /* pop gs */
@@ -2418,7 +2424,10 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
             goto illegal_op;
         gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
         gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
-        /* XXX: if reg == SS, inhibit interrupts/trace */
+        if (reg == R_SS) {
+            /* if reg == SS, inhibit interrupts/trace */
+            gen_op_set_inhibit_irq();
+        }
         break;
     case 0x8c: /* mov Gv, seg */
         modrm = ldub(s->pc++);
@@ -3704,6 +3713,8 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         if (!s->vm86) {
             if (s->cpl <= s->iopl) {
                 gen_op_sti();
+                /* hardware interrupts are inhibited for the insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
@@ -3711,12 +3722,13 @@ long disas_insn(DisasContext *s, uint8_t *pc_start)
         } else {
             if (s->iopl == 3) {
                 gen_op_sti();
+                /* hardware interrupts are inhibited for the insn after sti */
+                gen_op_set_inhibit_irq();
                 s->is_jmp = 2; /* give a chance to handle pending irqs */
             } else {
                 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
             }
         }
-        /* XXX: interruptions are enabled only the first insn after sti */
         break;
     case 0x62: /* bound */
         ot = dflag ? OT_LONG : OT_WORD;
@@ -4380,21 +4392,21 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     flags = tb->flags;
 
     dc->pe = env->cr[0] & CR0_PE_MASK;
-    dc->code32 = (flags >> GEN_FLAG_CODE32_SHIFT) & 1;
-    dc->ss32 = (flags >> GEN_FLAG_SS32_SHIFT) & 1;
-    dc->addseg = (flags >> GEN_FLAG_ADDSEG_SHIFT) & 1;
-    dc->f_st = (flags >> GEN_FLAG_ST_SHIFT) & 7;
-    dc->vm86 = (flags >> GEN_FLAG_VM_SHIFT) & 1;
-    dc->cpl = (flags >> GEN_FLAG_CPL_SHIFT) & 3;
-    dc->iopl = (flags >> GEN_FLAG_IOPL_SHIFT) & 3;
-    dc->tf = (flags >> GEN_FLAG_TF_SHIFT) & 1;
+    dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
+    dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
+    dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
+    dc->f_st = 0;
+    dc->vm86 = (flags >> VM_SHIFT) & 1;
+    dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
+    dc->iopl = (flags >> IOPL_SHIFT) & 3;
+    dc->tf = (flags >> TF_SHIFT) & 1;
     dc->cc_op = CC_OP_DYNAMIC;
     dc->cs_base = cs_base;
     dc->tb = tb;
     dc->popl_esp_hack = 0;
     /* select memory access functions */
     dc->mem_index = 0;
-    if ((flags >> GEN_FLAG_SOFT_MMU_SHIFT) & 1) {
+    if (flags & HF_SOFTMMU_MASK) {
         if (dc->cpl == 3)
             dc->mem_index = 6;
         else
@@ -4408,6 +4420,13 @@ static inline int gen_intermediate_code_internal(CPUState *env,
     dc->is_jmp = DISAS_NEXT;
     pc_ptr = pc_start;
     lj = -1;
+
+    /* if irqs were inhibited for the next instruction, we clear the
+       flag here as it is simpler (otherwise jumps would have to be
+       handled as a special case) */
+    if (flags & HF_INHIBIT_IRQ_MASK) {
+        gen_op_reset_inhibit_irq();
+    }
     do {
         if (env->nb_breakpoints > 0) {
             for(j = 0; j < env->nb_breakpoints; j++) {
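
Taken together, the inhibit bit lives for exactly one translated instruction: the block that translates sti, pop ss or mov ss sets it, the main loop's interrupt test observes it, and because it is part of tb->flags the next translated block begins by clearing it. A hypothetical driver tracing that lifetime (real QEMU emits these steps as generated micro-ops, not direct calls; helpers from the first sketch above):

    static void toy_shadow_lifetime(struct toy_cpu *env)
    {
        set_inhibit_irq(env);          /* block N ends with sti or mov ss,x */

        if (!can_take_hard_irq(env)) { /* main loop: delivery is blocked */
            /* block N+1 was translated with HF_INHIBIT_IRQ_MASK set in
               tb->flags, so it starts by clearing the bit */
            env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        }

        /* pending hard interrupts can now be taken again */
    }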