[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20240126105341.78086-2-baolu.lu@linux.intel.com>
Date: Fri, 26 Jan 2024 18:53:40 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Jason Gunthorpe <jgg@...pe.ca>,
Kevin Tian <kevin.tian@...el.com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>
Cc: iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH v2 1/2] iommu: Use mutex instead of spinlock for iommu_device_list
The iommu_device_lock spinlock was used to protect the iommu device
list. However, there is no requirement to access the iommu device
list in interrupt context. Therefore, a mutex is sufficient.
This also prepares for the next change, which will iterate the iommu
device list and call the probe callback inside the critical section.
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
drivers/iommu/iommu.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 68e648b55767..0af0b5544072 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -146,7 +146,7 @@ struct iommu_group_attribute iommu_group_attr_##_name = \
container_of(_kobj, struct iommu_group, kobj)
static LIST_HEAD(iommu_device_list);
-static DEFINE_SPINLOCK(iommu_device_lock);
+static DEFINE_MUTEX(iommu_device_lock);
static const struct bus_type * const iommu_buses[] = {
&platform_bus_type,
@@ -262,9 +262,9 @@ int iommu_device_register(struct iommu_device *iommu,
if (hwdev)
iommu->fwnode = dev_fwnode(hwdev);
- spin_lock(&iommu_device_lock);
+ mutex_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
- spin_unlock(&iommu_device_lock);
+ mutex_unlock(&iommu_device_lock);
for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++)
err = bus_iommu_probe(iommu_buses[i]);
@@ -279,9 +279,9 @@ void iommu_device_unregister(struct iommu_device *iommu)
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
- spin_lock(&iommu_device_lock);
+ mutex_lock(&iommu_device_lock);
list_del(&iommu->list);
- spin_unlock(&iommu_device_lock);
+ mutex_unlock(&iommu_device_lock);
/* Pairs with the alloc in generic_single_device_group() */
iommu_group_put(iommu->singleton_group);
@@ -316,9 +316,9 @@ int iommu_device_register_bus(struct iommu_device *iommu,
if (err)
return err;
- spin_lock(&iommu_device_lock);
+ mutex_lock(&iommu_device_lock);
list_add_tail(&iommu->list, &iommu_device_list);
- spin_unlock(&iommu_device_lock);
+ mutex_unlock(&iommu_device_lock);
err = bus_iommu_probe(bus);
if (err) {
@@ -2033,9 +2033,9 @@ bool iommu_present(const struct bus_type *bus)
for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++) {
if (iommu_buses[i] == bus) {
- spin_lock(&iommu_device_lock);
+ mutex_lock(&iommu_device_lock);
ret = !list_empty(&iommu_device_list);
- spin_unlock(&iommu_device_lock);
+ mutex_unlock(&iommu_device_lock);
}
}
return ret;
@@ -2983,13 +2983,13 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
const struct iommu_ops *ops = NULL;
struct iommu_device *iommu;
- spin_lock(&iommu_device_lock);
+ mutex_lock(&iommu_device_lock);
list_for_each_entry(iommu, &iommu_device_list, list)
if (iommu->fwnode == fwnode) {
ops = iommu->ops;
break;
}
- spin_unlock(&iommu_device_lock);
+ mutex_unlock(&iommu_device_lock);
return ops;
}
--
2.34.1
Powered by blists - more mailing lists