Message-Id: <20210607182541.119756-5-namit@vmware.com>
Date: Mon, 7 Jun 2021 11:25:39 -0700
From: Nadav Amit <nadav.amit@...il.com>
To: Joerg Roedel <joro@...tes.org>
Cc: Nadav Amit <namit@...are.com>, Will Deacon <will@...nel.org>,
Jiajun Cao <caojiajun@...are.com>,
Robin Murphy <robin.murphy@....com>,
Lu Baolu <baolu.lu@...ux.intel.com>,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org
Subject: [PATCH v3 4/6] iommu: Factor iommu_iotlb_gather_is_disjoint() out
From: Nadav Amit <namit@...are.com>
Refactor iommu_iotlb_gather_add_page() and factor out the logic that
detects whether the IOTLB gather range and a new range are disjoint.
The helper will be used by the next patch, which implements different
gathering logic for AMD.
Cc: Joerg Roedel <joro@...tes.org>
Cc: Will Deacon <will@...nel.org>
Cc: Jiajun Cao <caojiajun@...are.com>
Cc: Robin Murphy <robin.murphy@....com>
Cc: Lu Baolu <baolu.lu@...ux.intel.com>
Cc: iommu@...ts.linux-foundation.org
Cc: linux-kernel@...r.kernel.org
Signed-off-by: Nadav Amit <namit@...are.com>
---
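Note (an illustrative sketch added in editing, not part of the applied
diff): the standalone program below models the new helper's semantics in
plain userspace C so the disjointness rule can be checked by hand.
struct gather_range and is_disjoint() are local stand-ins for the kernel
types, not the kernel's own names.

/*
 * Standalone model of the disjointness rule that
 * iommu_iotlb_gather_is_disjoint() implements.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Local stand-in for the start/end fields of struct iommu_iotlb_gather. */
struct gather_range {
	unsigned long start;
	unsigned long end;	/* 0 means nothing gathered yet */
};

/*
 * Disjoint means the ranges neither overlap nor abut: a page that is
 * exactly adjacent (end + 1 == start) is still treated as mergeable.
 */
static bool is_disjoint(const struct gather_range *g,
			unsigned long iova, size_t size)
{
	unsigned long start = iova, end = start + size - 1;

	return g->end != 0 &&
	       (end + 1 < g->start || start > g->end + 1);
}

int main(void)
{
	struct gather_range g = { .start = 0x1000, .end = 0x2fff };

	printf("%d\n", is_disjoint(&g, 0x3000, 0x1000));  /* adjacent -> 0 */
	printf("%d\n", is_disjoint(&g, 0x10000, 0x1000)); /* distant  -> 1 */
	return 0;
}

With these inputs, the adjacent page at 0x3000 is merged into the
gathered range, while the distant page at 0x10000 is reported disjoint,
i.e. the case where flushing first beats merging.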
include/linux/iommu.h | 40 ++++++++++++++++++++++++++++++++--------
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f254c62f3720..b5a2bfc68fb0 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -497,6 +497,27 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
iommu_iotlb_gather_init(iotlb_gather);
}
+/**
+ * iommu_iotlb_gather_is_disjoint - Checks whether a new range is disjoint
+ *
+ * @gather: TLB gather data
+ * @iova: start of page to invalidate
+ * @size: size of page to invalidate
+ *
+ * Helper for IOMMU drivers to check whether a new range and the gathered
+ * range are disjoint. For many IOMMUs, flushing the IOMMU in this case is
+ * better than merging the two, which might lead to unnecessary invalidations.
+ */
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ unsigned long start = iova, end = start + size - 1;
+
+ return gather->end != 0 &&
+ (end + 1 < gather->start || start > gather->end + 1);
+}
+
/**
* iommu_iotlb_gather_add_range - Gather for address-based TLB invalidation
* @gather: TLB gather data
@@ -533,20 +554,16 @@ static inline void iommu_iotlb_gather_add_page(struct iommu_domain *domain,
struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
- unsigned long start = iova, end = start + size - 1;
-
/*
* If the new page is disjoint from the current range or is mapped at
* a different granularity, then sync the TLB so that the gather
* structure can be rewritten.
*/
- if (gather->pgsize != size ||
- end + 1 < gather->start || start > gather->end + 1) {
- if (gather->pgsize)
- iommu_iotlb_sync(domain, gather);
- gather->pgsize = size;
- }
+ if ((gather->pgsize && gather->pgsize != size) ||
+ iommu_iotlb_gather_is_disjoint(gather, iova, size))
+ iommu_iotlb_sync(domain, gather);
+ gather->pgsize = size;
iommu_iotlb_gather_add_range(gather, iova, size);
}
@@ -730,6 +747,13 @@ static inline void iommu_iotlb_sync(struct iommu_domain *domain,
{
}
+static inline
+bool iommu_iotlb_gather_is_disjoint(struct iommu_iotlb_gather *gather,
+ unsigned long iova, size_t size)
+{
+ return false;
+}
+
static inline void iommu_iotlb_gather_add_range(struct iommu_iotlb_gather *gather,
unsigned long iova, size_t size)
{
--
2.25.1