author     Eduardo Habkost <ehabkost@redhat.com>    2015-12-16 17:06:42 -0200
committer  Eduardo Habkost <ehabkost@redhat.com>    2016-05-23 19:47:37 -0300
commit     d71b62a165b6af463f376bd398cfc1aec4e2f213 (patch)
tree       df812ef1df50f2340f2df90094ceebcfbfe8a174 /target-i386
parent     42ecabaae16c0054dde6d8b0fdf90a8c7cce270d (diff)
target-i386: kvm: Allocate kvm_msrs struct once per VCPU
Instead of using 2400 bytes on the stack for 150 MSR entries in
kvm_get_msrs() and kvm_put_msrs(), allocate a buffer once for
each VCPU.
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Eduardo Habkost <ehabkost@redhat.com>
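
For context, the 2400-byte figure follows from the KVM ioctl ABI: each
struct kvm_msr_entry is 16 bytes, so 150 entries occupy 2400 bytes of stack
per call, on top of the small struct kvm_msrs header that carries nmsrs.
The standalone sketch below is illustration only, not QEMU code; it
re-declares the UAPI layouts locally so it compiles on its own, and it shows
the size arithmetic plus the allocate-once/zero-per-reuse pattern the patch
switches to:

/*
 * Illustration only -- not QEMU code. The two structs below are local
 * stand-ins for the Linux UAPI definitions normally taken from
 * <linux/kvm.h>; the field layout matches the kernel headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct kvm_msr_entry {
    uint32_t index;
    uint32_t reserved;
    uint64_t data;                  /* 16 bytes per entry */
};

struct kvm_msrs {
    uint32_t nmsrs;                 /* number of valid entries[] */
    uint32_t pad;
    struct kvm_msr_entry entries[]; /* flexible array member */
};

#define MSR_ENTRIES  150
#define MSR_BUF_SIZE \
    (sizeof(struct kvm_msrs) + MSR_ENTRIES * sizeof(struct kvm_msr_entry))

int main(void)
{
    /* The old code kept this much automatic storage per call. */
    printf("per-entry size : %zu bytes\n", sizeof(struct kvm_msr_entry));
    printf("150 entries    : %zu bytes\n",
           MSR_ENTRIES * sizeof(struct kvm_msr_entry));   /* 2400 */
    printf("header + array : %zu bytes\n", MSR_BUF_SIZE);

    /* New pattern: allocate once per VCPU, zero before each reuse. */
    struct kvm_msrs *buf = calloc(1, MSR_BUF_SIZE);
    if (buf == NULL) {
        return 1;
    }
    memset(buf, 0, MSR_BUF_SIZE);   /* mirrors kvm_msr_buf_reset() below */
    buf->entries[buf->nmsrs++].index = 0x174; /* MSR_IA32_SYSENTER_CS, example */
    free(buf);
    return 0;
}

In the patch itself the buffer comes from g_malloc0(MSR_BUF_SIZE) in
kvm_arch_init_vcpu() and is wiped by kvm_msr_buf_reset() before each
KVM_GET_MSRS/KVM_SET_MSRS call, as the diff below shows.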
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/cpu.h |  4 ++++
-rw-r--r--  target-i386/kvm.c | 37 +++++++++++++++++++------------------
2 files changed, 23 insertions(+), 18 deletions(-)
diff --git a/target-i386/cpu.h b/target-i386/cpu.h
index cd26decaaf..0426459bba 100644
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1124,6 +1124,8 @@ typedef struct CPUX86State {
     TPRAccess tpr_access_type;
 } CPUX86State;
 
+struct kvm_msrs;
+
 /**
  * X86CPU:
  * @env: #CPUX86State
@@ -1176,6 +1178,8 @@ struct X86CPU {
     struct DeviceState *apic_state;
     struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram;
     Notifier machine_done;
+
+    struct kvm_msrs *kvm_msr_buf;
 };
 
 static inline X86CPU *x86_env_get_cpu(CPUX86State *env)
diff --git a/target-i386/kvm.c b/target-i386/kvm.c
index 084183510a..7ad9c32326 100644
--- a/target-i386/kvm.c
+++ b/target-i386/kvm.c
@@ -57,6 +57,9 @@
 #define MSR_KVM_WALL_CLOCK  0x11
 #define MSR_KVM_SYSTEM_TIME 0x12
 
+#define MSR_BUF_SIZE \
+    (sizeof(struct kvm_msrs) + 150 * sizeof(struct kvm_msr_entry))
+
 #ifndef BUS_MCEERR_AR
 #define BUS_MCEERR_AR 4
 #endif
@@ -914,6 +917,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
     if (has_xsave) {
         env->kvm_xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave));
     }
+    cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE);
 
     if (env->features[FEAT_1_EDX] & CPUID_MTRR) {
         has_msr_mtrr = true;
@@ -1462,6 +1466,11 @@ static void kvm_msr_entry_set(struct kvm_msr_entry *entry,
     entry->data = value;
 }
 
+static void kvm_msr_buf_reset(X86CPU *cpu)
+{
+    memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE);
+}
+
 static int kvm_put_tscdeadline_msr(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
@@ -1528,14 +1537,12 @@ static int kvm_put_msr_feature_control(X86CPU *cpu)
 static int kvm_put_msrs(X86CPU *cpu, int level)
 {
     CPUX86State *env = &cpu->env;
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[150];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
+    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
     int n = 0, i;
     int ret;
 
+    kvm_msr_buf_reset(cpu);
+
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
     kvm_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
@@ -1724,11 +1731,9 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
         }
     }
 
-    msr_data.info = (struct kvm_msrs) {
-        .nmsrs = n,
-    };
+    cpu->kvm_msr_buf->nmsrs = n;
 
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, &msr_data);
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf);
     if (ret < 0) {
         return ret;
     }
@@ -1944,13 +1949,11 @@ static int kvm_get_sregs(X86CPU *cpu)
 static int kvm_get_msrs(X86CPU *cpu)
 {
     CPUX86State *env = &cpu->env;
-    struct {
-        struct kvm_msrs info;
-        struct kvm_msr_entry entries[150];
-    } msr_data;
-    struct kvm_msr_entry *msrs = msr_data.entries;
+    struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries;
    int ret, i, n;
 
+    kvm_msr_buf_reset(cpu);
+
     n = 0;
     msrs[n++].index = MSR_IA32_SYSENTER_CS;
     msrs[n++].index = MSR_IA32_SYSENTER_ESP;
@@ -2092,11 +2095,9 @@ static int kvm_get_msrs(X86CPU *cpu)
         }
     }
 
-    msr_data.info = (struct kvm_msrs) {
-        .nmsrs = n,
-    };
+    cpu->kvm_msr_buf->nmsrs = n;
 
-    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data);
+    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf);
     if (ret < 0) {
         return ret;
     }