Message-Id: <20230213074941.919324-2-baolu.lu@linux.intel.com>
Date: Mon, 13 Feb 2023 15:49:38 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: iommu@...ts.linux.dev
Cc: Joerg Roedel <joro@...tes.org>, Jason Gunthorpe <jgg@...dia.com>,
Christoph Hellwig <hch@...radead.org>,
Kevin Tian <kevin.tian@...el.com>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
linux-kernel@...r.kernel.org, Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH 1/4] iommu: Add dev_iommu->ops_rwsem
Add a RW semaphore to make sure that the iommu_ops of a device remain
valid in any non-driver-oriented path, such as a store operation on the
iommu group sysfs node.
Add a pair of helpers to freeze and unfreeze the iommu ops of all devices
in an iommu group, and use them in iommu_group_store_type().
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
include/linux/iommu.h | 3 +++
drivers/iommu/iommu.c | 53 ++++++++++++++++++++++++++++++++++++++++++-
2 files changed, 55 insertions(+), 1 deletion(-)
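For reference, the intended usage from a non-driver-oriented path looks
roughly like the sketch below; example_sysfs_path() is a hypothetical
caller used only for illustration, while ops_rwsem, iommu_dev and
dev_iommu_ops() are the pieces this patch relies on:

        /*
         * Hypothetical caller in a non-driver-oriented path (e.g. a sysfs
         * store handler). Everything except ops_rwsem, iommu_dev and
         * dev_iommu_ops() is illustrative only.
         */
        static int example_sysfs_path(struct device *dev)
        {
                const struct iommu_ops *ops;

                /* Reader side: keep .release_device from running concurrently. */
                down_read(&dev->iommu->ops_rwsem);

                /* iommu_dev is cleared once .release_device has been called. */
                if (!dev->iommu->iommu_dev) {
                        up_read(&dev->iommu->ops_rwsem);
                        return -ENODEV;
                }

                ops = dev_iommu_ops(dev);
                /* ... dereference ops safely here ... */

                up_read(&dev->iommu->ops_rwsem);
                return 0;
        }

iommu_release_device() takes ops_rwsem for write before calling
.release_device and dropping the module reference, so a reader either
sees a valid iommu_dev or bails out, and concurrent readers do not
serialize against each other.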
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 3589d1b8f922..a4204e1bfef3 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -402,6 +402,8 @@ struct iommu_fault_param {
* @fwspec: IOMMU fwspec data
* @iommu_dev: IOMMU device this device is linked to
* @priv: IOMMU Driver private data
+ * @ops_rwsem: RW semaphore to synchronize the device release
+ * path with the sysfs interfaces.
* @max_pasids: number of PASIDs this device can consume
* @attach_deferred: the dma domain attachment is deferred
*
@@ -415,6 +417,7 @@ struct dev_iommu {
struct iommu_fwspec *fwspec;
struct iommu_device *iommu_dev;
void *priv;
+ struct rw_semaphore ops_rwsem;
u32 max_pasids;
u32 attach_deferred:1;
};
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 5f1dc9aaba52..4f71dcd2621b 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -267,6 +267,7 @@ static struct dev_iommu *dev_iommu_get(struct device *dev)
return NULL;
mutex_init(&param->lock);
+ init_rwsem(&param->ops_rwsem);
dev->iommu = param;
return param;
}
@@ -461,12 +462,19 @@ void iommu_release_device(struct device *dev)
iommu_device_unlink(dev->iommu->iommu_dev, dev);
+ /*
+ * The device's iommu_ops will be released in the .release_device
+ * callback. Hold ops_rwsem to avoid use after release.
+ */
+ down_write(&dev->iommu->ops_rwsem);
ops = dev_iommu_ops(dev);
if (ops->release_device)
ops->release_device(dev);
+ module_put(ops->owner);
+ dev->iommu->iommu_dev = NULL;
+ up_write(&dev->iommu->ops_rwsem);
iommu_group_remove_device(dev);
- module_put(ops->owner);
dev_iommu_free(dev);
}
@@ -2911,6 +2919,46 @@ static int iommu_change_dev_def_domain(struct iommu_group *group,
return ret;
}
+static int iommu_group_freeze_dev_ops(struct iommu_group *group)
+{
+ struct group_device *device;
+ struct device *dev;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list) {
+ dev = device->dev;
+ down_read(&dev->iommu->ops_rwsem);
+ /* .release_device has been called. */
+ if (!dev->iommu->iommu_dev) {
+ up_read(&dev->iommu->ops_rwsem);
+ goto restore_out;
+ }
+ }
+ mutex_unlock(&group->mutex);
+
+ return 0;
+
+restore_out:
+ list_for_each_entry(device, &group->devices, list) {
+ if (device->dev == dev)
+ break;
+ up_read(&device->dev->iommu->ops_rwsem);
+ }
+ mutex_unlock(&group->mutex);
+
+ return -EINVAL;
+}
+
+static void iommu_group_unfreeze_dev_ops(struct iommu_group *group)
+{
+ struct group_device *device;
+
+ mutex_lock(&group->mutex);
+ list_for_each_entry(device, &group->devices, list)
+ up_read(&device->dev->iommu->ops_rwsem);
+ mutex_unlock(&group->mutex);
+}
+
/*
* Changing the default domain through sysfs requires the users to unbind the
* drivers from the devices in the iommu group, except for a DMA -> DMA-FQ
@@ -2988,6 +3036,8 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
*/
mutex_unlock(&group->mutex);
+ iommu_group_freeze_dev_ops(group);
+
/* Check if the device in the group still has a driver bound to it */
device_lock(dev);
if (device_is_bound(dev) && !(req_type == IOMMU_DOMAIN_DMA_FQ &&
@@ -3002,6 +3052,7 @@ static ssize_t iommu_group_store_type(struct iommu_group *group,
out:
device_unlock(dev);
+ iommu_group_unfreeze_dev_ops(group);
put_device(dev);
return ret;
--
2.34.1