Message-ID: <20211024083019.232813-14-yishaih@nvidia.com>
Date: Sun, 24 Oct 2021 11:30:19 +0300
From: Yishai Hadas <yishaih@...dia.com>
To: <alex.williamson@...hat.com>, <bhelgaas@...gle.com>,
<jgg@...dia.com>, <saeedm@...dia.com>
CC: <linux-pci@...r.kernel.org>, <kvm@...r.kernel.org>,
<netdev@...r.kernel.org>, <kuba@...nel.org>, <leonro@...dia.com>,
<kwankhede@...dia.com>, <mgurtovoy@...dia.com>,
<yishaih@...dia.com>, <maorg@...dia.com>
Subject: [PATCH V3 mlx5-next 13/13] vfio/mlx5: Use its own PCI reset_done error handler
Register the driver's own handler for pci_error_handlers.reset_done and
update the migration state accordingly.
Since the higher VFIO layers may hold locks across reset, the handler does
not block on state_mutex: it marks 'defered_reset' under the new reset_lock
and completes the cleanup itself only if a trylock succeeds; otherwise the
context currently holding state_mutex applies the reset when it unlocks.
Signed-off-by: Yishai Hadas <yishaih@...dia.com>
Signed-off-by: Leon Romanovsky <leonro@...dia.com>
---
drivers/vfio/pci/mlx5/main.c | 54 ++++++++++++++++++++++++++++++++++--
1 file changed, 52 insertions(+), 2 deletions(-)
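
A note on the locking scheme in this patch: mlx5vf_pci_aer_reset_done() never
blocks on state_mutex. It marks a pending reset under reset_lock and performs
the cleanup itself only when mutex_trylock() succeeds; otherwise whichever
context currently holds state_mutex applies the reset when it reaches
mlx5vf_state_mutex_unlock(). A minimal userspace sketch of the same handshake,
using pthread locks purely as an illustrative analogue (the names and locking
primitives below are assumptions for the demo, not the driver's code):

/*
 * Userspace analogue of the defered_reset handshake; build with
 * "gcc -pthread". Illustration only, not driver code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_spinlock_t reset_lock;
static bool defered_reset;
static int device_state;

/* Every state_mutex unlock goes through here so a pending reset is applied. */
static void state_mutex_unlock(void)
{
again:
	pthread_spin_lock(&reset_lock);
	if (defered_reset) {
		defered_reset = false;
		pthread_spin_unlock(&reset_lock);
		device_state = 0;	/* back to "RUNNING" */
		goto again;
	}
	pthread_spin_unlock(&reset_lock);
	pthread_mutex_unlock(&state_mutex);
}

/* Reset path: never blocks on state_mutex, avoiding lock-order inversion. */
static void reset_done(void)
{
	pthread_spin_lock(&reset_lock);
	defered_reset = true;
	if (pthread_mutex_trylock(&state_mutex) != 0) {
		/* Another context holds it; it will act on defered_reset. */
		pthread_spin_unlock(&reset_lock);
		return;
	}
	pthread_spin_unlock(&reset_lock);
	state_mutex_unlock();
}

int main(void)
{
	pthread_spin_init(&reset_lock, PTHREAD_PROCESS_PRIVATE);
	device_state = 1;	/* pretend a migration left the device stopped */
	reset_done();		/* uncontended: the reset is applied inline */
	printf("state=%d defered=%d\n", device_state, defered_reset);
	return 0;
}

In the uncontended case above the reset is applied inline; if another thread
held state_mutex, it would pick the reset up in state_mutex_unlock() instead.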
diff --git a/drivers/vfio/pci/mlx5/main.c b/drivers/vfio/pci/mlx5/main.c
index 4b21b388dcc5..ca7e5692a7ff 100644
--- a/drivers/vfio/pci/mlx5/main.c
+++ b/drivers/vfio/pci/mlx5/main.c
@@ -55,8 +55,11 @@ struct mlx5vf_pci_migration_info {
 struct mlx5vf_pci_core_device {
 	struct vfio_pci_core_device core_device;
 	u8 migrate_cap:1;
+	u8 defered_reset:1;
 	/* protect migration state */
 	struct mutex state_mutex;
+	/* protect the reset_done flow */
+	spinlock_t reset_lock;
 	struct mlx5vf_pci_migration_info vmig;
 };
@@ -471,6 +474,47 @@ mlx5vf_pci_migration_data_rw(struct mlx5vf_pci_core_device *mvdev,
 	return count;
 }

+/* This function is called in all state_mutex unlock cases to
+ * handle a 'defered_reset' if one exists.
+ */
+static void mlx5vf_state_mutex_unlock(struct mlx5vf_pci_core_device *mvdev)
+{
+again:
+	spin_lock(&mvdev->reset_lock);
+	if (mvdev->defered_reset) {
+		mvdev->defered_reset = false;
+		spin_unlock(&mvdev->reset_lock);
+		mlx5vf_reset_mig_state(mvdev);
+		mvdev->vmig.vfio_dev_state = VFIO_DEVICE_STATE_RUNNING;
+		goto again;
+	}
+	spin_unlock(&mvdev->reset_lock);
+	mutex_unlock(&mvdev->state_mutex);
+}
+
+static void mlx5vf_pci_aer_reset_done(struct pci_dev *pdev)
+{
+	struct mlx5vf_pci_core_device *mvdev = dev_get_drvdata(&pdev->dev);
+
+	if (!mvdev->migrate_cap)
+		return;
+
+	/* As the higher VFIO layers are holding locks across reset and using
+	 * those same locks with the mm_lock, we need to prevent an ABBA
+	 * deadlock with the state_mutex and mm_lock.
+	 * In case the state_mutex was already taken, we defer the cleanup
+	 * work to the unlock flow of the other running context.
+	 */
+	spin_lock(&mvdev->reset_lock);
+	mvdev->defered_reset = true;
+	if (!mutex_trylock(&mvdev->state_mutex)) {
+		spin_unlock(&mvdev->reset_lock);
+		return;
+	}
+	spin_unlock(&mvdev->reset_lock);
+	mlx5vf_state_mutex_unlock(mvdev);
+}
+
 static ssize_t mlx5vf_pci_mig_rw(struct vfio_pci_core_device *vdev,
 				 char __user *buf, size_t count, loff_t *ppos,
 				 bool iswrite)
@@ -539,7 +583,7 @@ static ssize_t mlx5vf_pci_mig_rw(struct vfio_pci_core_device *vdev,
 	}

 end:
-	mutex_unlock(&mvdev->state_mutex);
+	mlx5vf_state_mutex_unlock(mvdev);
 	return ret;
 }
@@ -634,6 +678,7 @@ static int mlx5vf_pci_probe(struct pci_dev *pdev,
 			if (MLX5_CAP_GEN(mdev, migration)) {
 				mvdev->migrate_cap = 1;
 				mutex_init(&mvdev->state_mutex);
+				spin_lock_init(&mvdev->reset_lock);
 			}
 			mlx5_vf_put_core_dev(mdev);
 		}
@@ -668,12 +713,17 @@ static const struct pci_device_id mlx5vf_pci_table[] = {
 MODULE_DEVICE_TABLE(pci, mlx5vf_pci_table);

+const struct pci_error_handlers mlx5vf_err_handlers = {
+	.reset_done = mlx5vf_pci_aer_reset_done,
+	.error_detected = vfio_pci_aer_err_detected,
+};
+
 static struct pci_driver mlx5vf_pci_driver = {
 	.name = KBUILD_MODNAME,
 	.id_table = mlx5vf_pci_table,
 	.probe = mlx5vf_pci_probe,
 	.remove = mlx5vf_pci_remove,
-	.err_handler = &vfio_pci_core_err_handlers,
+	.err_handler = &mlx5vf_err_handlers,
 };

 static void __exit mlx5vf_pci_cleanup(void)
--
2.18.1