Message-Id: <20210828153642.19396-7-sven@svenpeter.dev>
Date: Sat, 28 Aug 2021 17:36:40 +0200
From: Sven Peter <sven@...npeter.dev>
To: iommu@...ts.linux-foundation.org
Cc: Sven Peter <sven@...npeter.dev>, Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Arnd Bergmann <arnd@...nel.org>,
Mohamed Mediouni <mohamed.mediouni@...amail.com>,
Alexander Graf <graf@...zon.com>,
Hector Martin <marcan@...can.st>,
Alyssa Rosenzweig <alyssa@...enzweig.io>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 6/8] iommu: Move IOMMU pagesize check to attach_device
The iova allocator is capable of handling any granularity that is a power
of two. Remove the much stronger condition that the granularity must be
smaller than or equal to the CPU page size from the BUG_ON there.
Instead, check this condition during __iommu_attach_device and fail
gracefully.
Signed-off-by: Sven Peter <sven@...npeter.dev>
---
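For illustration, here is a minimal standalone sketch (plain userspace C, not
part of the patch; CPU_PAGE_SIZE, check_page_size and the example bitmaps are
made up for this note) of the granularity check that now runs at attach time
instead of in init_iova_domain():

#include <stdio.h>

/*
 * Standalone illustration only: the smallest supported IOMMU page size
 * is the lowest set bit of pgsize_bitmap, which is what
 * 1 << __ffs(bitmap) computes in the patch. If that size is larger
 * than the CPU page size, a single CPU page cannot be mapped exactly
 * and the attach is rejected.
 */
#define CPU_PAGE_SIZE 4096UL	/* assume 4K CPU pages for this example */

static int check_page_size(unsigned long pgsize_bitmap)
{
	unsigned long smallest = pgsize_bitmap & -pgsize_bitmap; /* lowest set bit */

	if (smallest > CPU_PAGE_SIZE)
		return -1;	/* the patch returns -EFAULT here */
	return 0;
}

int main(void)
{
	/* IOMMU that only supports 16K pages: rejected on a 4K CPU */
	printf("0x4000     -> %d\n", check_page_size(0x4000));
	/* IOMMU that supports 4K pages as well: accepted */
	printf("0x40201000 -> %d\n", check_page_size(0x40201000));
	return 0;
}
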
drivers/iommu/iommu.c | 34 +++++++++++++++++++++++++++++++---
drivers/iommu/iova.c | 7 ++++---
2 files changed, 35 insertions(+), 6 deletions(-)
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index b4499b1915fa..f02b727d3054 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -79,6 +79,8 @@ static struct iommu_domain *__iommu_domain_alloc(struct bus_type *bus,
unsigned type);
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
+static void __iommu_detach_device(struct iommu_domain *domain,
+ struct device *dev);
static int __iommu_attach_group(struct iommu_domain *domain,
struct iommu_group *group);
static void __iommu_detach_group(struct iommu_domain *domain,
@@ -1974,6 +1976,18 @@ void iommu_domain_free(struct iommu_domain *domain)
}
EXPORT_SYMBOL_GPL(iommu_domain_free);
+static int iommu_check_page_size(struct iommu_domain *domain)
+{
+ if (!(domain->type & __IOMMU_DOMAIN_PAGING))
+ return 0;
+
+ if ((1 << __ffs(domain->pgsize_bitmap)) > PAGE_SIZE) {
+ pr_warn("IOMMU page size cannot represent CPU pages.\n");
+ return -EFAULT;
+ }
+
+ return 0;
+}
static int __iommu_attach_device(struct iommu_domain *domain,
struct device *dev)
{
@@ -1983,9 +1997,23 @@ static int __iommu_attach_device(struct iommu_domain *domain,
return -ENODEV;
ret = domain->ops->attach_dev(domain, dev);
- if (!ret)
- trace_attach_device_to_domain(dev);
- return ret;
+ if (ret)
+ return ret;
+
+ /*
+ * Check that CPU pages can be represented by the IOVA granularity.
+ * This has to be done after ops->attach_dev since many IOMMU drivers
+ * only limit domain->pgsize_bitmap after having attached the first
+ * device.
+ */
+ ret = iommu_check_page_size(domain);
+ if (ret) {
+ __iommu_detach_device(domain, dev);
+ return ret;
+ }
+
+ trace_attach_device_to_domain(dev);
+ return 0;
}
int iommu_attach_device(struct iommu_domain *domain, struct device *dev)
diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c
index 0af42fb93a49..302e6dfa7cdc 100644
--- a/drivers/iommu/iova.c
+++ b/drivers/iommu/iova.c
@@ -50,10 +50,11 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule,
{
/*
* IOVA granularity will normally be equal to the smallest
- * supported IOMMU page size; both *must* be capable of
- * representing individual CPU pages exactly.
+ * supported IOMMU page size; while both usually are capable of
+ * representing individual CPU pages exactly, the IOVA allocator
+ * supports any granularity that is a power of two.
*/
- BUG_ON((granule > PAGE_SIZE) || !is_power_of_2(granule));
+ BUG_ON(!is_power_of_2(granule));
spin_lock_init(&iovad->iova_rbtree_lock);
iovad->rbroot = RB_ROOT;
--
2.25.1