author    | Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 2013-12-17 14:05:40 +1000
committer | Edgar E. Iglesias <edgar.iglesias@xilinx.com> | 2014-02-11 22:57:00 +1000
commit    | 2c17449b3022ca9623c4a7e2a504a4150ac4ad30 (patch)
tree      | 2d40d6a844c6116673776a8de73e2d0de28fc6ed /target-i386
parent    | fdfba1a298ae26dd44bcfdb0429314139a0bc55a (diff)
exec: Make ldq/ldub_*_phys input an AddressSpace
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
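
For readers skimming the hunks below, the mechanical change is that ldq_phys() and ldub_phys() gain an explicit AddressSpace argument, matching the ldl_phys() calls in this diff that were already converted by a parent commit. A minimal sketch of the new shape; the prototypes are paraphrased from how the call sites change, not copied from a header in this patch:

```c
/* Paraphrased; the header change itself is not part of this diff. */
uint64_t ldq_phys(AddressSpace *as, hwaddr addr);   /* was: ldq_phys(hwaddr addr)  */
uint32_t ldub_phys(AddressSpace *as, hwaddr addr);  /* was: ldub_phys(hwaddr addr) */

/* A page-table-walk call site therefore changes from
 *     pte = ldq_phys(pte_addr);
 * to
 *     pte = ldq_phys(cs->as, pte_addr);
 * where cs->as is the address space of the CPU performing the access. */
```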
Diffstat (limited to 'target-i386')
-rw-r--r-- | target-i386/arch_memory_mapping.c |  10
-rw-r--r-- | target-i386/helper.c              |  20
-rw-r--r-- | target-i386/smm_helper.c          |  32
-rw-r--r-- | target-i386/svm_helper.c          | 117
4 files changed, 103 insertions, 76 deletions
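
Every file in the diffstat above follows the same pattern: helpers that reached guest physical memory through the implicit global address space now pass the CPU's own address space, fetching the owning CPUState first where only a CPUX86State was in scope (as helper_vmload(), helper_vmsave() and the intercept helpers do below via ENV_GET_CPU()). A minimal sketch of that pattern; the wrapper function name is illustrative, not taken from the patch:

```c
/* Illustrative wrapper: the name is made up, but the body mirrors the
 * conversion applied throughout the hunks below. */
static void load_tr_base_from_smram(CPUX86State *env, target_ulong sm_state)
{
    CPUState *cs = ENV_GET_CPU(env);   /* CPU that owns this env */

    /* before: env->tr.base = ldq_phys(sm_state + 0x7e98); */
    env->tr.base = ldq_phys(cs->as, sm_state + 0x7e98);
}
```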
diff --git a/target-i386/arch_memory_mapping.c b/target-i386/arch_memory_mapping.c index a1947092fc..2d35f63e1e 100644 --- a/target-i386/arch_memory_mapping.c +++ b/target-i386/arch_memory_mapping.c @@ -27,7 +27,7 @@ static void walk_pte(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pte_addr = (pte_start_addr + i * 8) & a20_mask; - pte = ldq_phys(pte_addr); + pte = ldq_phys(as, pte_addr); if (!(pte & PG_PRESENT_MASK)) { /* not present */ continue; @@ -89,7 +89,7 @@ static void walk_pde(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pde_addr = (pde_start_addr + i * 8) & a20_mask; - pde = ldq_phys(pde_addr); + pde = ldq_phys(as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { /* not present */ continue; @@ -167,7 +167,7 @@ static void walk_pdpe2(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 4; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -192,7 +192,7 @@ static void walk_pdpe(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pdpe_addr = (pdpe_start_addr + i * 8) & a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { /* not present */ continue; @@ -228,7 +228,7 @@ static void walk_pml4e(MemoryMappingList *list, AddressSpace *as, for (i = 0; i < 512; i++) { pml4e_addr = (pml4e_start_addr + i * 8) & a20_mask; - pml4e = ldq_phys(pml4e_addr); + pml4e = ldq_phys(as, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { /* not present */ continue; diff --git a/target-i386/helper.c b/target-i386/helper.c index 2899779502..02a68bd681 100644 --- a/target-i386/helper.c +++ b/target-i386/helper.c @@ -563,7 +563,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; - pml4e = ldq_phys(pml4e_addr); + pml4e = ldq_phys(cs->as, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; @@ -579,7 +579,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, ptep = pml4e ^ PG_NX_MASK; pdpe_addr = ((pml4e & PHYS_ADDR_MASK) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; @@ -599,7 +599,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, /* XXX: load them when cr3 is loaded ? 
*/ pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & env->a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; @@ -609,7 +609,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, pde_addr = ((pdpe & PHYS_ADDR_MASK) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; - pde = ldq_phys(pde_addr); + pde = ldq_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; @@ -674,7 +674,7 @@ int cpu_x86_handle_mmu_fault(CPUX86State *env, target_ulong addr, } pte_addr = ((pde & PHYS_ADDR_MASK) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; - pte = ldq_phys(pte_addr); + pte = ldq_phys(cs->as, pte_addr); if (!(pte & PG_PRESENT_MASK)) { error_code = 0; goto do_fault; @@ -920,13 +920,13 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) pml4e_addr = ((env->cr[3] & ~0xfff) + (((addr >> 39) & 0x1ff) << 3)) & env->a20_mask; - pml4e = ldq_phys(pml4e_addr); + pml4e = ldq_phys(cs->as, pml4e_addr); if (!(pml4e & PG_PRESENT_MASK)) return -1; pdpe_addr = ((pml4e & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + (((addr >> 30) & 0x1ff) << 3)) & env->a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; } else @@ -934,14 +934,14 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) { pdpe_addr = ((env->cr[3] & ~0x1f) + ((addr >> 27) & 0x18)) & env->a20_mask; - pdpe = ldq_phys(pdpe_addr); + pdpe = ldq_phys(cs->as, pdpe_addr); if (!(pdpe & PG_PRESENT_MASK)) return -1; } pde_addr = ((pdpe & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + (((addr >> 21) & 0x1ff) << 3)) & env->a20_mask; - pde = ldq_phys(pde_addr); + pde = ldq_phys(cs->as, pde_addr); if (!(pde & PG_PRESENT_MASK)) { return -1; } @@ -954,7 +954,7 @@ hwaddr x86_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) pte_addr = ((pde & ~0xfff & ~(PG_NX_MASK | PG_HI_USER_MASK)) + (((addr >> 12) & 0x1ff) << 3)) & env->a20_mask; page_size = 4096; - pte = ldq_phys(pte_addr); + pte = ldq_phys(cs->as, pte_addr); } pte &= ~(PG_NX_MASK | PG_HI_USER_MASK); if (!(pte & PG_PRESENT_MASK)) diff --git a/target-i386/smm_helper.c b/target-i386/smm_helper.c index 67a73c06d1..17a568cfc5 100644 --- a/target-i386/smm_helper.c +++ b/target-i386/smm_helper.c @@ -188,46 +188,46 @@ void helper_rsm(CPUX86State *env) sm_state = env->smbase + 0x8000; #ifdef TARGET_X86_64 - cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0)); + cpu_load_efer(env, ldq_phys(cs->as, sm_state + 0x7ed0)); for (i = 0; i < 6; i++) { offset = 0x7e00 + i * 16; cpu_x86_load_seg_cache(env, i, lduw_phys(sm_state + offset), - ldq_phys(sm_state + offset + 8), + ldq_phys(cs->as, sm_state + offset + 8), ldl_phys(cs->as, sm_state + offset + 4), (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8); } - env->gdt.base = ldq_phys(sm_state + 0x7e68); + env->gdt.base = ldq_phys(cs->as, sm_state + 0x7e68); env->gdt.limit = ldl_phys(cs->as, sm_state + 0x7e64); env->ldt.selector = lduw_phys(sm_state + 0x7e70); - env->ldt.base = ldq_phys(sm_state + 0x7e78); + env->ldt.base = ldq_phys(cs->as, sm_state + 0x7e78); env->ldt.limit = ldl_phys(cs->as, sm_state + 0x7e74); env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8; - env->idt.base = ldq_phys(sm_state + 0x7e88); + env->idt.base = ldq_phys(cs->as, sm_state + 0x7e88); env->idt.limit = ldl_phys(cs->as, sm_state + 0x7e84); env->tr.selector = lduw_phys(sm_state + 0x7e90); - env->tr.base = ldq_phys(sm_state + 0x7e98); + env->tr.base = ldq_phys(cs->as, sm_state + 
0x7e98); env->tr.limit = ldl_phys(cs->as, sm_state + 0x7e94); env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8; - env->regs[R_EAX] = ldq_phys(sm_state + 0x7ff8); - env->regs[R_ECX] = ldq_phys(sm_state + 0x7ff0); - env->regs[R_EDX] = ldq_phys(sm_state + 0x7fe8); - env->regs[R_EBX] = ldq_phys(sm_state + 0x7fe0); - env->regs[R_ESP] = ldq_phys(sm_state + 0x7fd8); - env->regs[R_EBP] = ldq_phys(sm_state + 0x7fd0); - env->regs[R_ESI] = ldq_phys(sm_state + 0x7fc8); - env->regs[R_EDI] = ldq_phys(sm_state + 0x7fc0); + env->regs[R_EAX] = ldq_phys(cs->as, sm_state + 0x7ff8); + env->regs[R_ECX] = ldq_phys(cs->as, sm_state + 0x7ff0); + env->regs[R_EDX] = ldq_phys(cs->as, sm_state + 0x7fe8); + env->regs[R_EBX] = ldq_phys(cs->as, sm_state + 0x7fe0); + env->regs[R_ESP] = ldq_phys(cs->as, sm_state + 0x7fd8); + env->regs[R_EBP] = ldq_phys(cs->as, sm_state + 0x7fd0); + env->regs[R_ESI] = ldq_phys(cs->as, sm_state + 0x7fc8); + env->regs[R_EDI] = ldq_phys(cs->as, sm_state + 0x7fc0); for (i = 8; i < 16; i++) { - env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8); + env->regs[i] = ldq_phys(cs->as, sm_state + 0x7ff8 - i * 8); } - env->eip = ldq_phys(sm_state + 0x7f78); + env->eip = ldq_phys(cs->as, sm_state + 0x7f78); cpu_load_eflags(env, ldl_phys(cs->as, sm_state + 0x7f70), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); env->dr[6] = ldl_phys(cs->as, sm_state + 0x7f68); diff --git a/target-i386/svm_helper.c b/target-i386/svm_helper.c index 6c3c8bf4e3..cc6de20964 100644 --- a/target-i386/svm_helper.c +++ b/target-i386/svm_helper.c @@ -105,7 +105,7 @@ static inline void svm_load_seg(CPUX86State *env, hwaddr addr, unsigned int flags; sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector)); - sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base)); + sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base)); sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit)); flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib)); sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); @@ -178,7 +178,7 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) /* load the interception bitmaps so we do not need to access the vmcb in svm mode */ - env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.intercept)); env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, @@ -200,15 +200,15 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) /* enable intercepts */ env->hflags |= HF_SVMI_MASK; - env->tsc_offset = ldq_phys(env->vm_vmcb + + env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset)); - env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base)); env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit)); - env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base)); env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit)); @@ -216,13 +216,17 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) /* clear exit_info_2 so we behave like the real hardware */ stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); - cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + cpu_x86_update_cr0(env, 
ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr0))); - cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + cpu_x86_update_cr4(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + cpu_x86_update_cr3(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr3))); - env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2)); + env->cr[2] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cr2)); int_ctl = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); @@ -235,9 +239,11 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) } cpu_load_efer(env, - ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer))); + ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.efer))); env->eflags = 0; - cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + cpu_load_eflags(env, ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); CC_OP = CC_OP_EFLAGS; @@ -251,18 +257,25 @@ void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS); - env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip)); - - env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp)); - env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax)); - env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7)); - env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6)); - cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, + env->eip = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rip)); + + env->regs[R_ESP] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rsp)); + env->regs[R_EAX] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.rax)); + env->dr[7] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr7)); + env->dr[6] = ldq_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.dr6)); + cpu_x86_set_cpl(env, ldub_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, save.cpl))); /* FIXME: guest state consistency checks */ - switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { + switch (ldub_phys(cs->as, + env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { case TLB_CONTROL_DO_NOTHING: break; case TLB_CONTROL_FLUSH_ALL_ASID: @@ -339,6 +352,7 @@ void helper_vmmcall(CPUX86State *env) void helper_vmload(CPUX86State *env, int aflag) { + CPUState *cs = ENV_GET_CPU(env); target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0); @@ -351,7 +365,7 @@ void helper_vmload(CPUX86State *env, int aflag) qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! 
" TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(addr + offsetof(struct vmcb, + addr, ldq_phys(cs->as, addr + offsetof(struct vmcb, save.fs.base)), env->segs[R_FS].base); @@ -361,22 +375,24 @@ void helper_vmload(CPUX86State *env, int aflag) svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt); #ifdef TARGET_X86_64 - env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, + env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base)); - env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar)); - env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar)); - env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask)); + env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar)); + env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar)); + env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask)); #endif - env->star = ldq_phys(addr + offsetof(struct vmcb, save.star)); - env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs)); - env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, + env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star)); + env->sysenter_cs = ldq_phys(cs->as, + addr + offsetof(struct vmcb, save.sysenter_cs)); + env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp)); - env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, + env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip)); } void helper_vmsave(CPUX86State *env, int aflag) { + CPUState *cs = ENV_GET_CPU(env); target_ulong addr; cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0); @@ -389,7 +405,8 @@ void helper_vmsave(CPUX86State *env, int aflag) qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n", - addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)), + addr, ldq_phys(cs->as, + addr + offsetof(struct vmcb, save.fs.base)), env->segs[R_FS].base); svm_save_seg(env, addr + offsetof(struct vmcb, save.fs), @@ -455,6 +472,8 @@ void helper_invlpga(CPUX86State *env, int aflag) void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, uint64_t param) { + CPUState *cs = ENV_GET_CPU(env); + if (likely(!(env->hflags & HF_SVMI_MASK))) { return; } @@ -487,7 +506,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, case SVM_EXIT_MSR: if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) */ - uint64_t addr = ldq_phys(env->vm_vmcb + + uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa)); uint32_t t0, t1; @@ -513,7 +532,7 @@ void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type, t1 = 0; break; } - if (ldub_phys(addr + t1) & ((1 << param) << t0)) { + if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) { helper_vmexit(env, type, param); } } @@ -535,9 +554,10 @@ void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type, void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param, uint32_t next_eip_addend) { + CPUState *cs = ENV_GET_CPU(env); if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) { /* FIXME: this should be read in at vmrun (faster this way?) 
*/ - uint64_t addr = ldq_phys(env->vm_vmcb + + uint64_t addr = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa)); uint16_t mask = (1 << ((param >> 4) & 7)) - 1; @@ -559,7 +579,7 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n", exit_code, exit_info_1, - ldq_phys(env->vm_vmcb + offsetof(struct vmcb, + ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)), env->eip); @@ -625,29 +645,33 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ; env->tsc_offset = 0; - env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, + env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base)); env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit)); - env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, + env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base)); env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit)); - cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + cpu_x86_update_cr0(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK); - cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + cpu_x86_update_cr4(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr4))); - cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + cpu_x86_update_cr3(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.cr3))); /* we need to set the efer after the crs so the hidden flags get set properly */ - cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.efer))); env->eflags = 0; - cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, + cpu_load_eflags(env, ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.rflags)), ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); CC_OP = CC_OP_EFLAGS; @@ -661,14 +685,17 @@ void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1) svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS); - env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip)); - env->regs[R_ESP] = ldq_phys(env->vm_hsave + + env->eip = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.rip)); + env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rsp)); - env->regs[R_EAX] = ldq_phys(env->vm_hsave + + env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rax)); - env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6)); - env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7)); + env->dr[6] = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr6)); + env->dr[7] = ldq_phys(cs->as, + env->vm_hsave + offsetof(struct vmcb, save.dr7)); /* other setups */ cpu_x86_set_cpl(env, 0); |