diff options
author | Sheng Yang <sheng@linux.intel.com> | 2010-06-17 17:53:07 +0800 |
---|---|---|
committer | Marcelo Tosatti <mtosatti@redhat.com> | 2010-06-28 13:06:03 -0300 |
commit | f1665b21f16c5dc0ac37de60233a4975aff31193 (patch) | |
tree | 0e19f8cd7e54a5aa7dced550c52a296fddb42292 | |
parent | 51e49430c0472c9f609341e3058d47ed93dabe6e (diff) |
kvm: Enable XSAVE live migration support
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r-- | kvm-all.c | 21 | ||||
-rw-r--r-- | kvm.h | 2 | ||||
-rw-r--r-- | target-i386/cpu.h | 7 | ||||
-rw-r--r-- | target-i386/kvm.c | 139 | ||||
-rw-r--r-- | target-i386/machine.c | 20 |
5 files changed, 186 insertions(+), 3 deletions(-)
@@ -71,6 +71,7 @@ struct KVMState
 #endif
     int irqchip_in_kernel;
     int pit_in_kernel;
+    int xsave, xcrs;
 };
 
 static KVMState *kvm_state;
@@ -686,6 +687,16 @@ int kvm_init(int smp_cpus)
     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
 #endif
 
+    s->xsave = 0;
+#ifdef KVM_CAP_XSAVE
+    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
+#endif
+
+    s->xcrs = 0;
+#ifdef KVM_CAP_XCRS
+    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
+#endif
+
     ret = kvm_arch_init(s, smp_cpus);
     if (ret < 0)
         goto err;
@@ -1014,6 +1025,16 @@ int kvm_has_debugregs(void)
     return kvm_state->debugregs;
 }
 
+int kvm_has_xsave(void)
+{
+    return kvm_state->xsave;
+}
+
+int kvm_has_xcrs(void)
+{
+    return kvm_state->xcrs;
+}
+
 void kvm_setup_guest_memory(void *start, size_t size)
 {
     if (!kvm_has_sync_mmu()) {
@@ -40,6 +40,8 @@ int kvm_has_sync_mmu(void);
 int kvm_has_vcpu_events(void);
 int kvm_has_robust_singlestep(void);
 int kvm_has_debugregs(void);
+int kvm_has_xsave(void);
+int kvm_has_xcrs(void);
 
 #ifdef NEED_CPU_H
 int kvm_init_vcpu(CPUState *env);
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index 8dafa0d435..4de486e376 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -718,6 +718,11 @@ typedef struct CPUX86State {
     uint16_t fpus_vmstate;
     uint16_t fptag_vmstate;
     uint16_t fpregs_format_vmstate;
+
+    uint64_t xstate_bv;
+    XMMReg ymmh_regs[CPU_NB_REGS];
+
+    uint64_t xcr0;
 } CPUX86State;
 
 CPUX86State *cpu_x86_init(const char *cpu_model);
@@ -899,7 +904,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define cpu_list_id x86_cpu_list
 #define cpudef_setup x86_cpudef_setup
 
-#define CPU_SAVE_VERSION 11
+#define CPU_SAVE_VERSION 12
 
 /* MMU modes definitions */
 #define MMU_MODE0_SUFFIX _kernel
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 2b14ff558c..436c0c4f29 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -497,6 +497,68 @@ static int kvm_put_fpu(CPUState *env)
     return kvm_vcpu_ioctl(env, KVM_SET_FPU, &fpu);
 }
 
+#ifdef KVM_CAP_XSAVE
+#define XSAVE_CWD_RIP 2
+#define XSAVE_CWD_RDP 4
+#define XSAVE_MXCSR 6
+#define XSAVE_ST_SPACE 8
+#define XSAVE_XMM_SPACE 40
+#define XSAVE_XSTATE_BV 128
+#define XSAVE_YMMH_SPACE 144
+#endif
+
+static int kvm_put_xsave(CPUState *env)
+{
+#ifdef KVM_CAP_XSAVE
+    int i;
+    struct kvm_xsave* xsave;
+    uint16_t cwd, swd, twd, fop;
+
+    if (!kvm_has_xsave())
+        return kvm_put_fpu(env);
+
+    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+    memset(xsave, 0, sizeof(struct kvm_xsave));
+    cwd = swd = twd = fop = 0;
+    swd = env->fpus & ~(7 << 11);
+    swd |= (env->fpstt & 7) << 11;
+    cwd = env->fpuc;
+    for (i = 0; i < 8; ++i)
+        twd |= (!env->fptags[i]) << i;
+    xsave->region[0] = (uint32_t)(swd << 16) + cwd;
+    xsave->region[1] = (uint32_t)(fop << 16) + twd;
+    memcpy(&xsave->region[XSAVE_ST_SPACE], env->fpregs,
+            sizeof env->fpregs);
+    memcpy(&xsave->region[XSAVE_XMM_SPACE], env->xmm_regs,
+            sizeof env->xmm_regs);
+    xsave->region[XSAVE_MXCSR] = env->mxcsr;
+    *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV] = env->xstate_bv;
+    memcpy(&xsave->region[XSAVE_YMMH_SPACE], env->ymmh_regs,
+            sizeof env->ymmh_regs);
+    return kvm_vcpu_ioctl(env, KVM_SET_XSAVE, xsave);
+#else
+    return kvm_put_fpu(env);
+#endif
+}
+
+static int kvm_put_xcrs(CPUState *env)
+{
+#ifdef KVM_CAP_XCRS
+    struct kvm_xcrs xcrs;
+
+    if (!kvm_has_xcrs())
+        return 0;
+
+    xcrs.nr_xcrs = 1;
+    xcrs.flags = 0;
+    xcrs.xcrs[0].xcr = 0;
+    xcrs.xcrs[0].value = env->xcr0;
+    return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
+#else
+    return 0;
+#endif
+}
+
 static int kvm_put_sregs(CPUState *env)
 {
     struct kvm_sregs sregs;
@@ -614,6 +676,69 @@ static int kvm_get_fpu(CPUState *env)
     return 0;
 }
 
+static int kvm_get_xsave(CPUState *env)
+{
+#ifdef KVM_CAP_XSAVE
+    struct kvm_xsave* xsave;
+    int ret, i;
+    uint16_t cwd, swd, twd, fop;
+
+    if (!kvm_has_xsave())
+        return kvm_get_fpu(env);
+
+    xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
+    ret = kvm_vcpu_ioctl(env, KVM_GET_XSAVE, xsave);
+    if (ret < 0)
+        return ret;
+
+    cwd = (uint16_t)xsave->region[0];
+    swd = (uint16_t)(xsave->region[0] >> 16);
+    twd = (uint16_t)xsave->region[1];
+    fop = (uint16_t)(xsave->region[1] >> 16);
+    env->fpstt = (swd >> 11) & 7;
+    env->fpus = swd;
+    env->fpuc = cwd;
+    for (i = 0; i < 8; ++i)
+        env->fptags[i] = !((twd >> i) & 1);
+    env->mxcsr = xsave->region[XSAVE_MXCSR];
+    memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
+            sizeof env->fpregs);
+    memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
+            sizeof env->xmm_regs);
+    env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
+    memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
+            sizeof env->ymmh_regs);
+    return 0;
+#else
+    return kvm_get_fpu(env);
+#endif
+}
+
+static int kvm_get_xcrs(CPUState *env)
+{
+#ifdef KVM_CAP_XCRS
+    int i, ret;
+    struct kvm_xcrs xcrs;
+
+    if (!kvm_has_xcrs())
+        return 0;
+
+    ret = kvm_vcpu_ioctl(env, KVM_GET_XCRS, &xcrs);
+    if (ret < 0)
+        return ret;
+
+    for (i = 0; i < xcrs.nr_xcrs; i++)
+        /* Only support xcr0 now */
+        if (xcrs.xcrs[0].xcr == 0) {
+            env->xcr0 = xcrs.xcrs[0].value;
+            break;
+        }
+    return 0;
+#else
+    return 0;
+#endif
+}
+
 static int kvm_get_sregs(CPUState *env)
 {
     struct kvm_sregs sregs;
@@ -958,7 +1083,11 @@ int kvm_arch_put_registers(CPUState *env, int level)
     if (ret < 0)
         return ret;
 
-    ret = kvm_put_fpu(env);
+    ret = kvm_put_xsave(env);
+    if (ret < 0)
+        return ret;
+
+    ret = kvm_put_xcrs(env);
     if (ret < 0)
         return ret;
 
@@ -1002,7 +1131,11 @@ int kvm_arch_get_registers(CPUState *env)
     if (ret < 0)
         return ret;
 
-    ret = kvm_get_fpu(env);
+    ret = kvm_get_xsave(env);
+    if (ret < 0)
+        return ret;
+
+    ret = kvm_get_xcrs(env);
     if (ret < 0)
         return ret;
 
@@ -1290,6 +1423,8 @@ void kvm_arch_update_guest_debug(CPUState *env, struct kvm_guest_debug *dbg)
                 (len_code[hw_breakpoint[n].len] << (18 + n*4));
         }
     }
+    /* Legal xcr0 for loading */
+    env->xcr0 = 1;
 }
 #endif /* KVM_CAP_SET_GUEST_DEBUG */
diff --git a/target-i386/machine.c b/target-i386/machine.c
index b547e2ac7a..5f8376c37b 100644
--- a/target-i386/machine.c
+++ b/target-i386/machine.c
@@ -47,6 +47,22 @@ static const VMStateDescription vmstate_xmm_reg = {
 #define VMSTATE_XMM_REGS(_field, _state, _n) \
     VMSTATE_STRUCT_ARRAY(_field, _state, _n, 0, vmstate_xmm_reg, XMMReg)
 
+/* YMMH format is the same as XMM */
+static const VMStateDescription vmstate_ymmh_reg = {
+    .name = "ymmh_reg",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .minimum_version_id_old = 1,
+    .fields = (VMStateField []) {
+        VMSTATE_UINT64(XMM_Q(0), XMMReg),
+        VMSTATE_UINT64(XMM_Q(1), XMMReg),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+#define VMSTATE_YMMH_REGS_VARS(_field, _state, _n, _v) \
+    VMSTATE_STRUCT_ARRAY(_field, _state, _n, _v, vmstate_ymmh_reg, XMMReg)
+
 static const VMStateDescription vmstate_mtrr_var = {
     .name = "mtrr_var",
     .version_id = 1,
@@ -453,6 +469,10 @@ static const VMStateDescription vmstate_cpu = {
         /* KVM pvclock msr */
         VMSTATE_UINT64_V(system_time_msr, CPUState, 11),
         VMSTATE_UINT64_V(wall_clock_msr, CPUState, 11),
+        /* XSAVE related fields */
+        VMSTATE_UINT64_V(xcr0, CPUState, 12),
+        VMSTATE_UINT64_V(xstate_bv, CPUState, 12),
+        VMSTATE_YMMH_REGS_VARS(ymmh_regs, CPUState, CPU_NB_REGS, 12),
         VMSTATE_END_OF_LIST()
         /* The above list is not sorted /wrt version numbers, watch out! */
     }