author     Avihai Horon <avihaih@nvidia.com>            2023-02-16 16:36:24 +0200
committer  Alex Williamson <alex.williamson@redhat.com> 2023-02-16 12:13:46 -0700
commit     29d81b71aa2ac0f594d881460e22e291a9417a74 (patch)
tree       1bcd3ff61c989937cf78a04702c20d451b652fc9 /hw
parent     8b942af393a2d9f822aea4e5e0d241e668146bf2 (diff)
vfio/migration: Block multiple devices migration
Currently VFIO migration doesn't implement some kind of intermediate
quiescent state in which P2P DMAs are quiesced before stopping or running
the device. This can cause problems in multi-device migration where the
devices are doing P2P DMAs, since the devices are not stopped together at
the same time.

Until such support is added, block migration of multiple devices.

Signed-off-by: Avihai Horon <avihaih@nvidia.com>
Reviewed-by: Cédric Le Goater <clg@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Link: https://lore.kernel.org/r/20230216143630.25610-6-avihaih@nvidia.com
Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
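For readers unfamiliar with QEMU's migration blockers: the mechanism this patch relies on is the generic blocker API from "migration/blocker.h". A subsystem builds an Error describing why migration cannot proceed, registers it with migrate_add_blocker(), and later removes it with migrate_del_blocker() and frees the Error. The condensed sketch below restates that pattern as the patch applies it; the wrapper names install_blocker()/remove_blocker() are hypothetical, and the real helpers appear in the hw/vfio/common.c hunk further down.

/*
 * Sketch only: the generic migration-blocker pattern this patch uses,
 * assuming QEMU's migrate_add_blocker(Error *, Error **) API of this era.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "migration/blocker.h"

static Error *blocker; /* the patch names this multiple_devices_migration_blocker */

static int install_blocker(Error **errp) /* hypothetical wrapper */
{
    int ret;

    error_setg(&blocker,
               "Migration is currently not supported with multiple "
               "VFIO devices");
    ret = migrate_add_blocker(blocker, errp);
    if (ret < 0) {
        /* Registration can fail, e.g. if a migration is already running. */
        error_free(blocker);
        blocker = NULL;
    }

    return ret;
}

static void remove_blocker(void) /* hypothetical wrapper */
{
    migrate_del_blocker(blocker);
    error_free(blocker);
    blocker = NULL;
}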
Diffstat (limited to 'hw')
-rw-r--r--  hw/vfio/common.c     53
-rw-r--r--  hw/vfio/migration.c   6
2 files changed, 59 insertions, 0 deletions
diff --git a/hw/vfio/common.c b/hw/vfio/common.c
index 3a35f4afad..fe80ccf914 100644
--- a/hw/vfio/common.c
+++ b/hw/vfio/common.c
@@ -41,6 +41,7 @@
#include "qapi/error.h"
#include "migration/migration.h"
#include "migration/misc.h"
+#include "migration/blocker.h"
#include "sysemu/tpm.h"
VFIOGroupList vfio_group_list =
@@ -337,6 +338,58 @@ bool vfio_mig_active(void)
     return true;
 }
 
+static Error *multiple_devices_migration_blocker;
+
+static unsigned int vfio_migratable_device_num(void)
+{
+    VFIOGroup *group;
+    VFIODevice *vbasedev;
+    unsigned int device_num = 0;
+
+    QLIST_FOREACH(group, &vfio_group_list, next) {
+        QLIST_FOREACH(vbasedev, &group->device_list, next) {
+            if (vbasedev->migration) {
+                device_num++;
+            }
+        }
+    }
+
+    return device_num;
+}
+
+int vfio_block_multiple_devices_migration(Error **errp)
+{
+    int ret;
+
+    if (multiple_devices_migration_blocker ||
+        vfio_migratable_device_num() <= 1) {
+        return 0;
+    }
+
+    error_setg(&multiple_devices_migration_blocker,
+               "Migration is currently not supported with multiple "
+               "VFIO devices");
+    ret = migrate_add_blocker(multiple_devices_migration_blocker, errp);
+    if (ret < 0) {
+        error_free(multiple_devices_migration_blocker);
+        multiple_devices_migration_blocker = NULL;
+    }
+
+    return ret;
+}
+
+void vfio_unblock_multiple_devices_migration(void)
+{
+    if (!multiple_devices_migration_blocker ||
+        vfio_migratable_device_num() > 1) {
+        return;
+    }
+
+    migrate_del_blocker(multiple_devices_migration_blocker);
+    error_free(multiple_devices_migration_blocker);
+    multiple_devices_migration_blocker = NULL;
+}
+
 static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
 {
     VFIOGroup *group;
diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c
index e56eef1ee8..8e96999669 100644
--- a/hw/vfio/migration.c
+++ b/hw/vfio/migration.c
@@ -878,6 +878,11 @@ int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
         goto add_blocker;
     }
 
+    ret = vfio_block_multiple_devices_migration(errp);
+    if (ret) {
+        return ret;
+    }
+
     trace_vfio_migration_probe(vbasedev->name, info->index);
     g_free(info);
     return 0;
@@ -904,6 +909,7 @@ void vfio_migration_finalize(VFIODevice *vbasedev)
         qemu_del_vm_change_state_handler(migration->vm_state);
         unregister_savevm(VMSTATE_IF(vbasedev->dev), "vfio", vbasedev);
         vfio_migration_exit(vbasedev);
+        vfio_unblock_multiple_devices_migration();
     }
 
     if (vbasedev->migration_blocker) {
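Taken together, the call flow is: every successful vfio_migration_probe() re-counts the migratable VFIO devices and installs the blocker once a second one appears, and vfio_migration_finalize() removes it again when the count drops back to one or zero. A hedged walk-through of that sequence, with a hypothetical caller function used only for illustration:

/* Hypothetical walk-through of the helpers added by this patch. */
static void example_sequence(Error **errp)
{
    /* Probe of the first migratable device: vfio_migratable_device_num()
     * returns 1, so no blocker is registered and migration stays allowed. */
    vfio_block_multiple_devices_migration(errp);

    /* Probe of a second migratable device: the count is now 2, so the
     * blocker is registered and a "migrate" attempt fails with
     * "Migration is currently not supported with multiple VFIO devices". */
    vfio_block_multiple_devices_migration(errp);

    /* Finalize of one device (after vfio_migration_exit() has cleared
     * vbasedev->migration): the count is back to 1, the blocker is
     * dropped, and single-device migration works again. */
    vfio_unblock_multiple_devices_migration();
}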