lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [day] [month] [year] [list]
Message-ID: <20251120123647.3522082-7-michal.winiarski@intel.com>
Date: Thu, 20 Nov 2025 13:36:47 +0100
From: Michał Winiarski <michal.winiarski@...el.com>
To: Jason Gunthorpe <jgg@...pe.ca>, Alex Williamson <alex@...zbot.org>, "Kevin
 Tian" <kevin.tian@...el.com>, Yishai Hadas <yishaih@...dia.com>, Longfang Liu
	<liulongfang@...wei.com>, Shameer Kolothum <skolothumtho@...dia.com>, "Brett
 Creeley" <brett.creeley@....com>, Giovanni Cabiddu
	<giovanni.cabiddu@...el.com>, <kvm@...r.kernel.org>, <qat-linux@...el.com>,
	<virtualization@...ts.linux.dev>, <linux-kernel@...r.kernel.org>
CC: Michał Winiarski <michal.winiarski@...el.com>
Subject: [PATCH 6/6] vfio/virtio: Use .migration_reset_state() callback

Move the migration device state reset code from .reset_done() to the
dedicated .migration_reset_state() callback.
Remove the deferred reset mechanism, as it is no longer needed.

Signed-off-by: Michał Winiarski <michal.winiarski@...el.com>
---
 drivers/vfio/pci/virtio/common.h  |  3 --
 drivers/vfio/pci/virtio/main.c    |  1 -
 drivers/vfio/pci/virtio/migrate.c | 71 +++++++++----------------------
 3 files changed, 21 insertions(+), 54 deletions(-)

diff --git a/drivers/vfio/pci/virtio/common.h b/drivers/vfio/pci/virtio/common.h
index c7d7e27af386e..cb27d3d2d3bb9 100644
--- a/drivers/vfio/pci/virtio/common.h
+++ b/drivers/vfio/pci/virtio/common.h
@@ -92,12 +92,9 @@ struct virtiovf_pci_core_device {
 
 	/* LM related */
 	u8 migrate_cap:1;
-	u8 deferred_reset:1;
 	/* protect migration state */
 	struct mutex state_mutex;
 	enum vfio_device_mig_state mig_state;
-	/* protect the reset_done flow */
-	spinlock_t reset_lock;
 	struct virtiovf_migration_file *resuming_migf;
 	struct virtiovf_migration_file *saving_migf;
 };
diff --git a/drivers/vfio/pci/virtio/main.c b/drivers/vfio/pci/virtio/main.c
index 8084f3e36a9f7..b80cb740f9a5d 100644
--- a/drivers/vfio/pci/virtio/main.c
+++ b/drivers/vfio/pci/virtio/main.c
@@ -203,7 +203,6 @@ static void virtiovf_pci_aer_reset_done(struct pci_dev *pdev)
 #ifdef CONFIG_VIRTIO_VFIO_PCI_ADMIN_LEGACY
 	virtiovf_legacy_io_reset_done(pdev);
 #endif
-	virtiovf_migration_reset_done(pdev);
 }
 
 static const struct pci_error_handlers virtiovf_err_handlers = {
diff --git a/drivers/vfio/pci/virtio/migrate.c b/drivers/vfio/pci/virtio/migrate.c
index 7dd0ac866461d..5c7f9091d84e8 100644
--- a/drivers/vfio/pci/virtio/migrate.c
+++ b/drivers/vfio/pci/virtio/migrate.c
@@ -247,49 +247,6 @@ static void virtiovf_disable_fds(struct virtiovf_pci_core_device *virtvdev)
 	}
 }
 
-/*
- * This function is called in all state_mutex unlock cases to
- * handle a 'deferred_reset' if exists.
- */
-static void virtiovf_state_mutex_unlock(struct virtiovf_pci_core_device *virtvdev)
-{
-again:
-	spin_lock(&virtvdev->reset_lock);
-	if (virtvdev->deferred_reset) {
-		virtvdev->deferred_reset = false;
-		spin_unlock(&virtvdev->reset_lock);
-		virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
-		virtiovf_disable_fds(virtvdev);
-		goto again;
-	}
-	mutex_unlock(&virtvdev->state_mutex);
-	spin_unlock(&virtvdev->reset_lock);
-}
-
-void virtiovf_migration_reset_done(struct pci_dev *pdev)
-{
-	struct virtiovf_pci_core_device *virtvdev = dev_get_drvdata(&pdev->dev);
-
-	if (!virtvdev->migrate_cap)
-		return;
-
-	/*
-	 * As the higher VFIO layers are holding locks across reset and using
-	 * those same locks with the mm_lock we need to prevent ABBA deadlock
-	 * with the state_mutex and mm_lock.
-	 * In case the state_mutex was taken already we defer the cleanup work
-	 * to the unlock flow of the other running context.
-	 */
-	spin_lock(&virtvdev->reset_lock);
-	virtvdev->deferred_reset = true;
-	if (!mutex_trylock(&virtvdev->state_mutex)) {
-		spin_unlock(&virtvdev->reset_lock);
-		return;
-	}
-	spin_unlock(&virtvdev->reset_lock);
-	virtiovf_state_mutex_unlock(virtvdev);
-}
-
 static int virtiovf_release_file(struct inode *inode, struct file *filp)
 {
 	struct virtiovf_migration_file *migf = filp->private_data;
@@ -513,7 +470,7 @@ static long virtiovf_precopy_ioctl(struct file *filp, unsigned int cmd,
 		goto err_state_unlock;
 
 done:
-	virtiovf_state_mutex_unlock(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
 	if (copy_to_user((void __user *)arg, &info, minsz))
 		return -EFAULT;
 	return 0;
@@ -521,7 +478,7 @@ static long virtiovf_precopy_ioctl(struct file *filp, unsigned int cmd,
 err_migf_unlock:
 	mutex_unlock(&migf->lock);
 err_state_unlock:
-	virtiovf_state_mutex_unlock(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
 	return ret;
 }
 
@@ -1048,7 +1005,7 @@ static ssize_t virtiovf_resume_write(struct file *filp, const char __user *buf,
 	if (ret)
 		migf->state = VIRTIOVF_MIGF_STATE_ERROR;
 	mutex_unlock(&migf->lock);
-	virtiovf_state_mutex_unlock(migf->virtvdev);
+	mutex_unlock(&migf->virtvdev->state_mutex);
 	return ret ? ret : done;
 }
 
@@ -1245,7 +1202,7 @@ virtiovf_pci_set_device_state(struct vfio_device *vdev,
 			break;
 		}
 	}
-	virtiovf_state_mutex_unlock(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
 	return res;
 }
 
@@ -1257,10 +1214,24 @@ static int virtiovf_pci_get_device_state(struct vfio_device *vdev,
 
 	mutex_lock(&virtvdev->state_mutex);
 	*curr_state = virtvdev->mig_state;
-	virtiovf_state_mutex_unlock(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
 	return 0;
 }
 
+static void virtiovf_pci_reset_device_state(struct vfio_device *vdev)
+{
+	struct virtiovf_pci_core_device *virtvdev = container_of(
+		vdev, struct virtiovf_pci_core_device, core_device.vdev);
+
+	if (!virtvdev->migrate_cap)
+		return;
+
+	mutex_lock(&virtvdev->state_mutex);
+	virtvdev->mig_state = VFIO_DEVICE_STATE_RUNNING;
+	virtiovf_disable_fds(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
+}
+
 static int virtiovf_pci_get_data_size(struct vfio_device *vdev,
 				      unsigned long *stop_copy_length)
 {
@@ -1297,13 +1268,14 @@ static int virtiovf_pci_get_data_size(struct vfio_device *vdev,
 	if (!obj_id_exists)
 		virtiovf_pci_free_obj_id(virtvdev, obj_id);
 end:
-	virtiovf_state_mutex_unlock(virtvdev);
+	mutex_unlock(&virtvdev->state_mutex);
 	return ret;
 }
 
 static const struct vfio_migration_ops virtvdev_pci_mig_ops = {
 	.migration_set_state = virtiovf_pci_set_device_state,
 	.migration_get_state = virtiovf_pci_get_device_state,
+	.migration_reset_state = virtiovf_pci_reset_device_state,
 	.migration_get_data_size = virtiovf_pci_get_data_size,
 };
 
@@ -1311,7 +1283,6 @@ void virtiovf_set_migratable(struct virtiovf_pci_core_device *virtvdev)
 {
 	virtvdev->migrate_cap = 1;
 	mutex_init(&virtvdev->state_mutex);
-	spin_lock_init(&virtvdev->reset_lock);
 	virtvdev->core_device.vdev.migration_flags =
 		VFIO_MIGRATION_STOP_COPY |
 		VFIO_MIGRATION_P2P |
-- 
2.51.2


Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ