Diffstat (limited to 'target/i386/kvm/kvm.c')
-rw-r--r--   target/i386/kvm/kvm.c   123
1 file changed, 123 insertions, 0 deletions
diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c
index ec63b5eb10..1d9a50b02b 100644
--- a/target/i386/kvm/kvm.c
+++ b/target/i386/kvm/kvm.c
@@ -141,6 +141,8 @@ static struct kvm_cpuid2 *cpuid_cache;
static struct kvm_cpuid2 *hv_cpuid_cache;
static struct kvm_msr_list *kvm_feature_msrs;
+static KVMMSRHandlers msr_handlers[KVM_MSR_FILTER_MAX_RANGES];
+
#define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */
static RateLimit bus_lock_ratelimit_ctrl;
static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value);
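For reference, the KVMMSRHandlers type and the handler typedefs used by the new code come from kvm_i386.h, which is touched elsewhere in this series and not shown in this file's diff. A rough reconstruction from the call sites below, offered only as a reading aid (the exact typedef and field layout outside this patch is an assumption):

    /* Sketch reconstructed from usage in this file; not part of this diff. */
    typedef bool QEMURDMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t *val);
    typedef bool QEMUWRMSRHandler(X86CPU *cpu, uint32_t msr, uint64_t val);

    typedef struct kvm_msr_handlers {
        uint32_t msr;              /* MSR index claimed by this slot, 0 = free */
        QEMURDMSRHandler *rdmsr;   /* invoked on a filtered guest RDMSR */
        QEMUWRMSRHandler *wrmsr;   /* invoked on a filtered guest WRMSR */
    } KVMMSRHandlers;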
@@ -2610,6 +2612,15 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
return ret;
}
}
+    if (kvm_vm_check_extension(s, KVM_CAP_X86_USER_SPACE_MSR)) {
+        ret = kvm_vm_enable_cap(s, KVM_CAP_X86_USER_SPACE_MSR, 0,
+                                KVM_MSR_EXIT_REASON_FILTER);
+        if (ret) {
+            error_report("Could not enable user space MSRs: %s",
+                         strerror(-ret));
+            exit(1);
+        }
+    }
return 0;
}
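Enabling KVM_CAP_X86_USER_SPACE_MSR with KVM_MSR_EXIT_REASON_FILTER asks KVM to exit to user space only for MSR accesses that a user-installed filter denies; those exits arrive as KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR with the details in the msr block of struct kvm_run. For reading the handlers below, the relevant UAPI fields are roughly as follows (paraphrased from linux/kvm.h; the kernel headers are authoritative):

    /* struct kvm_run, KVM_EXIT_X86_RDMSR / KVM_EXIT_X86_WRMSR exits (sketch) */
    struct {
        __u8  error;   /* user -> kernel: non-zero makes KVM inject #GP */
        __u8  pad[7];
        __u32 reason;  /* kernel -> user: e.g. KVM_MSR_EXIT_REASON_FILTER */
        __u32 index;   /* kernel -> user: MSR number being accessed */
        __u64 data;    /* RDMSR: filled in by user space; WRMSR: provided by KVM */
    } msr;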
@@ -5109,6 +5120,108 @@ void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
}
}
+static bool kvm_install_msr_filters(KVMState *s)
+{
+    uint64_t zero = 0;
+    struct kvm_msr_filter filter = {
+        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
+    };
+    int r, i, j = 0;
+
+    for (i = 0; i < KVM_MSR_FILTER_MAX_RANGES; i++) {
+        KVMMSRHandlers *handler = &msr_handlers[i];
+        if (handler->msr) {
+            struct kvm_msr_filter_range *range = &filter.ranges[j++];
+
+            *range = (struct kvm_msr_filter_range) {
+                .flags = 0,
+                .nmsrs = 1,
+                .base = handler->msr,
+                .bitmap = (__u8 *)&zero,
+            };
+
+            if (handler->rdmsr) {
+                range->flags |= KVM_MSR_FILTER_READ;
+            }
+
+            if (handler->wrmsr) {
+                range->flags |= KVM_MSR_FILTER_WRITE;
+            }
+        }
+    }
+
+    r = kvm_vm_ioctl(s, KVM_X86_SET_MSR_FILTER, &filter);
+    if (r) {
+        return false;
+    }
+
+    return true;
+}
+
+bool kvm_filter_msr(KVMState *s, uint32_t msr, QEMURDMSRHandler *rdmsr,
+                    QEMUWRMSRHandler *wrmsr)
+{
+    int i;
+
+    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
+        if (!msr_handlers[i].msr) {
+            msr_handlers[i] = (KVMMSRHandlers) {
+                .msr = msr,
+                .rdmsr = rdmsr,
+                .wrmsr = wrmsr,
+            };
+
+            if (!kvm_install_msr_filters(s)) {
+                msr_handlers[i] = (KVMMSRHandlers) { };
+                return false;
+            }
+
+            return true;
+        }
+    }
+
+    return false;
+}
+
+static int kvm_handle_rdmsr(X86CPU *cpu, struct kvm_run *run)
+{
+    int i;
+    bool r;
+
+    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
+        KVMMSRHandlers *handler = &msr_handlers[i];
+        if (run->msr.index == handler->msr) {
+            if (handler->rdmsr) {
+                r = handler->rdmsr(cpu, handler->msr,
+                                   (uint64_t *)&run->msr.data);
+                run->msr.error = r ? 0 : 1;
+                return 0;
+            }
+        }
+    }
+
+    assert(false);
+}
+
+static int kvm_handle_wrmsr(X86CPU *cpu, struct kvm_run *run)
+{
+    int i;
+    bool r;
+
+    for (i = 0; i < ARRAY_SIZE(msr_handlers); i++) {
+        KVMMSRHandlers *handler = &msr_handlers[i];
+        if (run->msr.index == handler->msr) {
+            if (handler->wrmsr) {
+                r = handler->wrmsr(cpu, handler->msr, run->msr.data);
+                run->msr.error = r ? 0 : 1;
+                return 0;
+            }
+        }
+    }
+
+    assert(false);
+}
+
static bool has_sgx_provisioning;
static bool __kvm_enable_sgx_provisioning(KVMState *s)
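kvm_filter_msr() is the entry point other i386 KVM code calls to claim an MSR: it records the handlers in a free msr_handlers slot and reinstalls the whole filter set via KVM_X86_SET_MSR_FILTER. A minimal usage sketch, with hypothetical names (MSR_EXAMPLE and the example_* handlers are illustrative and not added by this patch):

    /* Hypothetical caller, e.g. in machine or accelerator setup code. */
    static bool example_rdmsr(X86CPU *cpu, uint32_t msr, uint64_t *val)
    {
        *val = 0;        /* value returned to the guest's RDMSR */
        return true;     /* success: run->msr.error stays 0, no #GP injected */
    }

    static bool example_wrmsr(X86CPU *cpu, uint32_t msr, uint64_t val)
    {
        return true;     /* accept and ignore the guest's WRMSR */
    }

    if (!kvm_filter_msr(kvm_state, MSR_EXAMPLE, example_rdmsr, example_wrmsr)) {
        error_report("Could not install MSR filter for MSR_EXAMPLE");
    }

Either handler may be NULL, in which case the corresponding KVM_MSR_FILTER_READ or KVM_MSR_FILTER_WRITE flag is simply not set and that direction of access is left to KVM's default handling.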
@@ -5226,6 +5339,16 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
ret = 0;
}
break;
+    case KVM_EXIT_X86_RDMSR:
+        /* We only enable MSR filtering, any other exit is bogus */
+        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
+        ret = kvm_handle_rdmsr(cpu, run);
+        break;
+    case KVM_EXIT_X86_WRMSR:
+        /* We only enable MSR filtering, any other exit is bogus */
+        assert(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER);
+        ret = kvm_handle_wrmsr(cpu, run);
+        break;
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;