author    Ankur Arora <ankur.a.arora@oracle.com>  2022-12-06 11:14:07 +0000
committer David Woodhouse <dwmw@amazon.co.uk>     2023-03-01 08:22:50 +0000
commit    105b47fdf2d0ed18d73cebb055f4cbc1c88a8b30
tree      d12e25f84eb311e2537183abda74f5f992fd46b0
parent    3b06f29b2497da8362ee25b729a727cb8463fd62
i386/xen: implement HVMOP_set_evtchn_upcall_vector
The HVMOP_set_evtchn_upcall_vector hypercall sets the per-vCPU upcall
vector, to be delivered to the local APIC just like an MSI (with an EOI).

This takes precedence over the system-wide delivery method set by the
HVMOP_set_param hypercall with HVM_PARAM_CALLBACK_IRQ. It's used by
Windows and Xen (PV shim) guests but normally not by Linux.

Signed-off-by: Ankur Arora <ankur.a.arora@oracle.com>
Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
[dwmw2: Rework for upstream kernel changes and split from HVMOP_set_param]
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Reviewed-by: Paul Durrant <paul@xen.org>
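For context, a guest registers its per-vCPU vector with a call along these
lines. This is a minimal sketch, not part of this patch: HYPERVISOR_hvm_op
stands in for whatever hypercall trampoline the guest OS provides, and the
struct layout comes from the Xen public header hvm/hvm_op.h (its size is
what the qemu_build_assert() in the patch checks).

    /* Guest-side sketch: ask the hypervisor to deliver event-channel
     * upcalls for this vCPU on the given local APIC vector.  Vectors
     * below 0x10 are rejected (see the -EINVAL check in the patch),
     * and the guest must EOI the vector like any other APIC interrupt.
     */
    static int set_evtchn_upcall_vector(uint32_t vcpu_id, uint8_t vector)
    {
        struct xen_hvm_evtchn_upcall_vector up = {
            .vcpu   = vcpu_id,   /* Xen vCPU id; resolved host-side via qemu_get_cpu() */
            .vector = vector,
        };

        /* HYPERVISOR_hvm_op: assumed guest hypercall wrapper */
        return HYPERVISOR_hvm_op(HVMOP_set_evtchn_upcall_vector, &up);
    }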
Diffstat (limited to 'target/i386/kvm')
 target/i386/kvm/trace-events |  1 +
 target/i386/kvm/xen-emu.c    | 84 ++++++++++++++++++++++++++++++++++++++++---
2 files changed, 82 insertions(+), 3 deletions(-)
diff --git a/target/i386/kvm/trace-events b/target/i386/kvm/trace-events
index a840e0333d..b365a8e8e2 100644
--- a/target/i386/kvm/trace-events
+++ b/target/i386/kvm/trace-events
@@ -11,3 +11,4 @@ kvm_xen_hypercall(int cpu, uint8_t cpl, uint64_t input, uint64_t a0, uint64_t a1
kvm_xen_soft_reset(void) ""
kvm_xen_set_shared_info(uint64_t gfn) "shared info at gfn 0x%" PRIx64
kvm_xen_set_vcpu_attr(int cpu, int type, uint64_t gpa) "vcpu attr cpu %d type %d gpa 0x%" PRIx64
+kvm_xen_set_vcpu_callback(int cpu, int vector) "callback vcpu %d vector %d"
diff --git a/target/i386/kvm/xen-emu.c b/target/i386/kvm/xen-emu.c
index ac143c05a4..e9a4422d93 100644
--- a/target/i386/kvm/xen-emu.c
+++ b/target/i386/kvm/xen-emu.c
@@ -27,6 +27,7 @@
#include "hw/xen/interface/sched.h"
#include "hw/xen/interface/memory.h"
#include "hw/xen/interface/hvm/hvm_op.h"
+#include "hw/xen/interface/hvm/params.h"
#include "hw/xen/interface/vcpu.h"
#include "hw/xen/interface/event_channel.h"
@@ -193,7 +194,8 @@ static bool kvm_xen_hcall_xen_version(struct kvm_xen_exit *exit, X86CPU *cpu,
fi.submap |= 1 << XENFEAT_writable_page_tables |
1 << XENFEAT_writable_descriptor_tables |
1 << XENFEAT_auto_translated_physmap |
- 1 << XENFEAT_supervisor_mode_kernel;
+ 1 << XENFEAT_supervisor_mode_kernel |
+ 1 << XENFEAT_hvm_callback_vector;
}
err = kvm_copy_to_gva(CPU(cpu), arg, &fi, sizeof(fi));
@@ -220,6 +222,31 @@ static int kvm_xen_set_vcpu_attr(CPUState *cs, uint16_t type, uint64_t gpa)
return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xhsi);
}
+static int kvm_xen_set_vcpu_callback_vector(CPUState *cs)
+{
+ uint8_t vector = X86_CPU(cs)->env.xen_vcpu_callback_vector;
+ struct kvm_xen_vcpu_attr xva;
+
+ xva.type = KVM_XEN_VCPU_ATTR_TYPE_UPCALL_VECTOR;
+ xva.u.vector = vector;
+
+ trace_kvm_xen_set_vcpu_callback(cs->cpu_index, vector);
+
+    return kvm_vcpu_ioctl(cs, KVM_XEN_VCPU_SET_ATTR, &xva);
+}
+
+static void do_set_vcpu_callback_vector(CPUState *cs, run_on_cpu_data data)
+{
+ X86CPU *cpu = X86_CPU(cs);
+ CPUX86State *env = &cpu->env;
+
+ env->xen_vcpu_callback_vector = data.host_int;
+
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ kvm_xen_set_vcpu_callback_vector(cs);
+ }
+}
+
static void do_set_vcpu_info_default_gpa(CPUState *cs, run_on_cpu_data data)
{
X86CPU *cpu = X86_CPU(cs);
@@ -276,12 +303,16 @@ static void do_vcpu_soft_reset(CPUState *cs, run_on_cpu_data data)
env->xen_vcpu_info_default_gpa = INVALID_GPA;
env->xen_vcpu_time_info_gpa = INVALID_GPA;
env->xen_vcpu_runstate_gpa = INVALID_GPA;
+ env->xen_vcpu_callback_vector = 0;
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO, INVALID_GPA);
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_VCPU_TIME_INFO,
INVALID_GPA);
kvm_xen_set_vcpu_attr(cs, KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADDR,
INVALID_GPA);
+ if (kvm_xen_has_cap(EVTCHN_SEND)) {
+ kvm_xen_set_vcpu_callback_vector(cs);
+ }
}
@@ -458,17 +489,53 @@ static bool kvm_xen_hcall_memory_op(struct kvm_xen_exit *exit, X86CPU *cpu,
return true;
}
+static int kvm_xen_hcall_evtchn_upcall_vector(struct kvm_xen_exit *exit,
+ X86CPU *cpu, uint64_t arg)
+{
+ struct xen_hvm_evtchn_upcall_vector up;
+ CPUState *target_cs;
+
+ /* No need for 32/64 compat handling */
+ qemu_build_assert(sizeof(up) == 8);
+
+ if (kvm_copy_from_gva(CPU(cpu), arg, &up, sizeof(up))) {
+ return -EFAULT;
+ }
+
+ if (up.vector < 0x10) {
+ return -EINVAL;
+ }
+
+ target_cs = qemu_get_cpu(up.vcpu);
+ if (!target_cs) {
+ return -EINVAL;
+ }
+
+ async_run_on_cpu(target_cs, do_set_vcpu_callback_vector,
+ RUN_ON_CPU_HOST_INT(up.vector));
+ return 0;
+}
+
static bool kvm_xen_hcall_hvm_op(struct kvm_xen_exit *exit, X86CPU *cpu,
int cmd, uint64_t arg)
{
+ int ret = -ENOSYS;
switch (cmd) {
+ case HVMOP_set_evtchn_upcall_vector:
+ ret = kvm_xen_hcall_evtchn_upcall_vector(exit, cpu,
+ exit->u.hcall.params[0]);
+ break;
+
case HVMOP_pagetable_dying:
- exit->u.hcall.result = -ENOSYS;
- return true;
+ ret = -ENOSYS;
+ break;
default:
return false;
}
+
+ exit->u.hcall.result = ret;
+ return true;
}
static int vcpuop_register_vcpu_info(CPUState *cs, CPUState *target,
@@ -812,6 +879,17 @@ int kvm_put_xen_state(CPUState *cs)
}
}
+ if (!kvm_xen_has_cap(EVTCHN_SEND)) {
+ return 0;
+ }
+
+ if (env->xen_vcpu_callback_vector) {
+ ret = kvm_xen_set_vcpu_callback_vector(cs);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
return 0;
}
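
Since the per-vCPU vector takes precedence over, rather than replaces, the
system-wide callback, a guest that finds this hypercall unimplemented can
keep using HVM_PARAM_CALLBACK_IRQ. Roughly, with hypothetical helper names
building on the sketch above:

    /* Hypothetical probe order: prefer the per-vCPU upcall vector and
     * fall back to the system-wide HVM_PARAM_CALLBACK_IRQ method set
     * via HVMOP_set_param.  set_callback_via() is a made-up helper. */
    if (set_evtchn_upcall_vector(vcpu_id, vector) != 0) {
        set_callback_via(via);   /* hypothetical HVMOP_set_param wrapper */
    }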