-rw-r--r--  hw/vfio_pci.c  34
-rw-r--r--  main-loop.c    27
2 files changed, 41 insertions(+), 20 deletions(-)
diff --git a/hw/vfio_pci.c b/hw/vfio_pci.c
index 28c83031d0..c51ae6761b 100644
--- a/hw/vfio_pci.c
+++ b/hw/vfio_pci.c
@@ -562,8 +562,8 @@ static int vfio_enable_vectors(VFIODevice *vdev, bool msix)
return ret;
}
-static int vfio_msix_vector_use(PCIDevice *pdev,
- unsigned int nr, MSIMessage msg)
+static int vfio_msix_vector_do_use(PCIDevice *pdev, unsigned int nr,
+ MSIMessage *msg, IOHandler *handler)
{
VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
VFIOMSIVector *vector;
@@ -587,7 +587,7 @@ static int vfio_msix_vector_use(PCIDevice *pdev,
* Attempt to enable route through KVM irqchip,
* default to userspace handling if unavailable.
*/
- vector->virq = kvm_irqchip_add_msi_route(kvm_state, msg);
+ vector->virq = msg ? kvm_irqchip_add_msi_route(kvm_state, *msg) : -1;
if (vector->virq < 0 ||
kvm_irqchip_add_irqfd_notifier(kvm_state, &vector->interrupt,
vector->virq) < 0) {
@@ -596,7 +596,7 @@ static int vfio_msix_vector_use(PCIDevice *pdev,
vector->virq = -1;
}
qemu_set_fd_handler(event_notifier_get_fd(&vector->interrupt),
- vfio_msi_interrupt, NULL, vector);
+ handler, NULL, vector);
}
/*
@@ -639,6 +639,12 @@ static int vfio_msix_vector_use(PCIDevice *pdev,
return 0;
}
+static int vfio_msix_vector_use(PCIDevice *pdev,
+ unsigned int nr, MSIMessage msg)
+{
+ return vfio_msix_vector_do_use(pdev, nr, &msg, vfio_msi_interrupt);
+}
+
static void vfio_msix_vector_release(PCIDevice *pdev, unsigned int nr)
{
VFIODevice *vdev = DO_UPCAST(VFIODevice, pdev, pdev);
@@ -697,6 +703,22 @@ static void vfio_enable_msix(VFIODevice *vdev)
vdev->interrupt = VFIO_INT_MSIX;
+ /*
+ * Some communication channels between VF & PF or PF & fw rely on the
+ * physical state of the device and expect that enabling MSI-X from the
+ * guest enables the same on the host. When our guest is Linux, the
+ * guest driver call to pci_enable_msix() sets the enabling bit in the
+ * MSI-X capability, but leaves the vector table masked. We therefore
+ * can't rely on a vector_use callback (from request_irq() in the guest)
+ * to switch the physical device into MSI-X mode because that may come a
+ * long time after pci_enable_msix(). This code enables vector 0 with
+ * triggering to userspace, then immediately releases the vector, leaving
+ * the physical device with no vectors enabled, but MSI-X enabled, just
+ * like the guest view.
+ */
+ vfio_msix_vector_do_use(&vdev->pdev, 0, NULL, NULL);
+ vfio_msix_vector_release(&vdev->pdev, 0);
+
if (msix_set_vector_notifiers(&vdev->pdev, vfio_msix_vector_use,
vfio_msix_vector_release, NULL)) {
error_report("vfio: msix_set_vector_notifiers failed\n");
@@ -1815,13 +1837,13 @@ static int vfio_get_device(VFIOGroup *group, const char *name, VFIODevice *vdev)
error_report("Warning, device %s does not support reset\n", name);
}
- if (dev_info.num_regions != VFIO_PCI_NUM_REGIONS) {
+ if (dev_info.num_regions < VFIO_PCI_CONFIG_REGION_INDEX + 1) {
error_report("vfio: unexpected number of io regions %u\n",
dev_info.num_regions);
goto error;
}
- if (dev_info.num_irqs != VFIO_PCI_NUM_IRQS) {
+ if (dev_info.num_irqs < VFIO_PCI_MSIX_IRQ_INDEX + 1) {
error_report("vfio: unexpected number of irqs %u\n", dev_info.num_irqs);
goto error;
}
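
For readers outside the QEMU tree, the vfio_pci.c hunks above boil down to two ideas: the real work moves into vfio_msix_vector_do_use(), which treats a NULL MSIMessage as "don't program a KVM route, fall back to userspace triggering", and vfio_enable_msix() uses that by briefly using and then releasing vector 0, so the physical device ends up with MSI-X enabled but no vectors active, matching the guest view. The standalone C sketch below only mirrors that control flow; the types and helpers (msi_message, vector_state, fake_kvm_add_route(), vector_do_use(), etc.) are hypothetical stand-ins, not the actual QEMU/VFIO APIs.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the QEMU/VFIO objects used in the patch. */
typedef struct {
    long addr;          /* stands in for MSIMessage */
} msi_message;

typedef struct {
    int  virq;          /* KVM route number, -1 = handled in userspace */
    bool in_use;
} vector_state;

typedef void io_handler(void *opaque);

static vector_state vectors[8];
static bool msix_enabled_on_host;

/* Pretend KVM refuses the route so the fallback path is visible. */
static int fake_kvm_add_route(long addr)
{
    (void)addr;
    return -1;
}

static void userspace_handler(void *opaque)
{
    (void)opaque;
}

/* Mirrors vfio_msix_vector_do_use(): a NULL message (or a failed route)
 * means the vector is serviced by a userspace handler instead of an
 * irqfd wired into KVM. Touching any vector enables MSI-X on the host. */
static int vector_do_use(unsigned nr, const msi_message *msg, io_handler *handler)
{
    vector_state *v = &vectors[nr];

    v->virq = msg ? fake_kvm_add_route(msg->addr) : -1;
    if (v->virq < 0 && handler) {
        /* the patch would install this via qemu_set_fd_handler() */
        handler(v);
    }
    v->in_use = true;
    msix_enabled_on_host = true;
    return 0;
}

/* Mirrors vfio_msix_vector_use(): the notifier entry point simply
 * forwards to the helper with a concrete message and handler. */
static int vector_use(unsigned nr, msi_message msg)
{
    return vector_do_use(nr, &msg, userspace_handler);
}

static void vector_release(unsigned nr)
{
    vectors[nr].in_use = false;
}

int main(void)
{
    /* The enable-then-release trick from vfio_enable_msix(): use
     * vector 0 once so the host device enters MSI-X mode, then drop
     * the vector again so no interrupt is actually routed yet. */
    vector_do_use(0, NULL, NULL);
    vector_release(0);
    printf("host MSI-X enabled: %d, vector 0 in use: %d\n",
           msix_enabled_on_host, vectors[0].in_use);

    /* Later, a guest request_irq() would land here via the notifier. */
    msi_message msg = { .addr = 0xfee00000 };
    vector_use(0, msg);
    return 0;
}
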
diff --git a/main-loop.c b/main-loop.c
index 54f38ae1ae..6f52ac39bc 100644
--- a/main-loop.c
+++ b/main-loop.c
@@ -330,7 +330,7 @@ void qemu_fd_register(int fd)
static int os_host_main_loop_wait(uint32_t timeout)
{
GMainContext *context = g_main_context_default();
- int ret, i;
+ int select_ret, g_poll_ret, ret, i;
PollingEntry *pe;
WaitObjects *w = &wait_objects;
gint poll_timeout;
@@ -345,13 +345,6 @@ static int os_host_main_loop_wait(uint32_t timeout)
return ret;
}
- if (nfds >= 0) {
- ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
- if (ret != 0) {
- timeout = 0;
- }
- }
-
g_main_context_prepare(context, &max_priority);
n_poll_fds = g_main_context_query(context, max_priority, &poll_timeout,
poll_fds, ARRAY_SIZE(poll_fds));
@@ -367,9 +360,9 @@ static int os_host_main_loop_wait(uint32_t timeout)
}
qemu_mutex_unlock_iothread();
- ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
+ g_poll_ret = g_poll(poll_fds, n_poll_fds + w->num, poll_timeout);
qemu_mutex_lock_iothread();
- if (ret > 0) {
+ if (g_poll_ret > 0) {
for (i = 0; i < w->num; i++) {
w->revents[i] = poll_fds[n_poll_fds + i].revents;
}
@@ -384,12 +377,18 @@ static int os_host_main_loop_wait(uint32_t timeout)
g_main_context_dispatch(context);
}
- /* If an edge-triggered socket event occurred, select will return a
- * positive result on the next iteration. We do not need to do anything
- * here.
+ /* Call select after g_poll to avoid a useless iteration and therefore
+ * improve socket latency.
 */
- return ret;
+ if (nfds >= 0) {
+ select_ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv0);
+ if (select_ret != 0) {
+ timeout = 0;
+ }
+ }
+
+ return select_ret || g_poll_ret;
}
#endif
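
The main-loop.c change reorders the two waits: the old code ran select() before g_poll(), so an edge-triggered socket event seen only by select could not be acted on until the next loop iteration; the new code runs g_poll() first and then a zero-timeout select(), folding both results into the same pass and forcing timeout to 0 when select saw activity so the following iteration does not sleep. The sketch below reproduces that sequencing with plain POSIX poll() and select() on a pipe; poll() stands in for g_poll() and the pipe for QEMU's legacy fd sets, so this is an illustration of the ordering, not QEMU code.

#include <poll.h>
#include <stdio.h>
#include <sys/select.h>
#include <unistd.h>

int main(void)
{
    int pfd[2];
    if (pipe(pfd) < 0) {
        return 1;
    }
    if (write(pfd[1], "x", 1) < 0) {    /* make the read end readable */
        return 1;
    }

    /* First wait: stands in for g_poll() over the glib sources. */
    struct pollfd p = { .fd = pfd[0], .events = POLLIN };
    int poll_ret = poll(&p, 1, 10 /* ms */);

    /* Second wait: a zero-timeout select() over the "legacy" fd set,
     * run after the poll so its results are handled in the same
     * iteration instead of the next one. */
    fd_set rfds;
    FD_ZERO(&rfds);
    FD_SET(pfd[0], &rfds);
    struct timeval tv0 = { 0, 0 };
    int select_ret = select(pfd[0] + 1, &rfds, NULL, NULL, &tv0);

    int timeout = 100;
    if (select_ret != 0) {
        timeout = 0;                    /* don't sleep on the next pass */
    }

    /* Report activity if either mechanism saw any, mirroring
     * "return select_ret || g_poll_ret;" in the patch. */
    printf("poll=%d select=%d -> ret=%d, next timeout=%d\n",
           poll_ret, select_ret, select_ret || poll_ret, timeout);

    close(pfd[0]);
    close(pfd[1]);
    return 0;
}
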