Message-Id: <20240809025431.14605-3-tina.zhang@intel.com>
Date: Fri, 9 Aug 2024 10:54:28 +0800
From: Tina Zhang <tina.zhang@...el.com>
To: Lu Baolu <baolu.lu@...ux.intel.com>,
Kevin Tian <kevin.tian@...el.com>
Cc: iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Tina Zhang <tina.zhang@...el.com>
Subject: [PATCH v2 2/5] iommu/vt-d: Refactor IOTLB and Dev-IOTLB flush logic

Introduce three new helper functions, handle_iotlb_flush(),
handle_dev_tlb_flush() and handle_dev_tlb_flush_all(), to encapsulate the
logic for IOTLB and Dev-IOTLB invalidation commands. This refactoring
improves code readability and maintainability by centralizing the
handling of these flush operations.

handle_iotlb_flush() additionally falls back to a domain-selective flush
when pages == -1, which lets cache_tag_flush_all() share the helper with
the range-based callers without a behavior change.

Signed-off-by: Tina Zhang <tina.zhang@...el.com>
---
drivers/iommu/intel/cache.c | 136 +++++++++++++++++++-----------------
1 file changed, 72 insertions(+), 64 deletions(-)
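
For reviewers: a condensed sketch of how the cache_tag_flush_range() loop
body reads once these helpers are in place. This is illustration only,
assembled from the hunks below; it is not an additional change:

	switch (tag->type) {
	case CACHE_TAG_IOTLB:
	case CACHE_TAG_NESTING_IOTLB:
		handle_iotlb_flush(domain, tag, addr, pages, mask, ih);
		break;
	case CACHE_TAG_NESTING_DEVTLB:
		/* Nested case: widen to a full device-TLB invalidation. */
		addr = 0;
		mask = MAX_AGAW_PFN_WIDTH;
		fallthrough;
	case CACHE_TAG_DEVTLB:
		handle_dev_tlb_flush(tag, addr, mask);
		break;
	}

The same helpers also back cache_tag_flush_all() (via pages == -1 and
handle_dev_tlb_flush_all()) and cache_tag_flush_range_np().
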
diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 44e92638c0cd..3ae84ccfcfa1 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -255,6 +255,72 @@ static unsigned long calculate_psi_aligned_address(unsigned long start,
 	return ALIGN_DOWN(start, VTD_PAGE_SIZE << mask);
 }
 
+static inline void handle_iotlb_flush(struct dmar_domain *domain,
+				      struct cache_tag *tag,
+				      unsigned long addr,
+				      unsigned long pages,
+				      unsigned long mask,
+				      int ih)
+{
+	struct intel_iommu *iommu = tag->iommu;
+
+	if (domain->use_first_level) {
+		qi_flush_piotlb(iommu, tag->domain_id,
+				tag->pasid, addr, pages, ih);
+	} else {
+		/*
+		 * Fallback to domain selective flush if no
+		 * PSI support or the size is too big.
+		 */
+		if (!cap_pgsel_inv(iommu->cap) ||
+		    mask > cap_max_amask_val(iommu->cap) ||
+		    pages == -1)
+			iommu->flush.flush_iotlb(iommu, tag->domain_id,
+						 0, 0, DMA_TLB_DSI_FLUSH);
+		else
+			iommu->flush.flush_iotlb(iommu, tag->domain_id,
+						 addr | ih, mask,
+						 DMA_TLB_PSI_FLUSH);
+	}
+}
+
+static void handle_dev_tlb_flush(struct cache_tag *tag,
+				 unsigned long addr,
+				 unsigned long mask)
+{
+	struct intel_iommu *iommu = tag->iommu;
+	struct device_domain_info *info;
+	u16 sid;
+
+	info = dev_iommu_priv_get(tag->dev);
+	sid = PCI_DEVID(info->bus, info->devfn);
+
+	if (tag->pasid == IOMMU_NO_PASID)
+		qi_flush_dev_iotlb(iommu, sid, info->pfsid,
+				   info->ats_qdep, addr, mask);
+	else
+		qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
+					 tag->pasid, info->ats_qdep,
+					 addr, mask);
+
+	quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
+}
+
+static void handle_dev_tlb_flush_all(struct cache_tag *tag)
+{
+	struct intel_iommu *iommu = tag->iommu;
+	struct device_domain_info *info;
+	u16 sid;
+
+	info = dev_iommu_priv_get(tag->dev);
+	sid = PCI_DEVID(info->bus, info->devfn);
+
+	qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
+			   0, MAX_AGAW_PFN_WIDTH);
+	quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
+				  IOMMU_NO_PASID, info->ats_qdep);
+}
+
 /*
  * Invalidates a range of IOVA from @start (inclusive) to @end (inclusive)
  * when the memory mappings in the target domain have been modified.
@@ -270,30 +336,10 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
-		struct intel_iommu *iommu = tag->iommu;
-		struct device_domain_info *info;
-		u16 sid;
-
 		switch (tag->type) {
 		case CACHE_TAG_IOTLB:
 		case CACHE_TAG_NESTING_IOTLB:
-			if (domain->use_first_level) {
-				qi_flush_piotlb(iommu, tag->domain_id,
-						tag->pasid, addr, pages, ih);
-			} else {
-				/*
-				 * Fallback to domain selective flush if no
-				 * PSI support or the size is too big.
-				 */
-				if (!cap_pgsel_inv(iommu->cap) ||
-				    mask > cap_max_amask_val(iommu->cap))
-					iommu->flush.flush_iotlb(iommu, tag->domain_id,
-								 0, 0, DMA_TLB_DSI_FLUSH);
-				else
-					iommu->flush.flush_iotlb(iommu, tag->domain_id,
-								 addr | ih, mask,
-								 DMA_TLB_PSI_FLUSH);
-			}
+			handle_iotlb_flush(domain, tag, addr, pages, mask, ih);
 			break;
 		case CACHE_TAG_NESTING_DEVTLB:
 			/*
@@ -307,18 +353,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 			mask = MAX_AGAW_PFN_WIDTH;
 			fallthrough;
 		case CACHE_TAG_DEVTLB:
-			info = dev_iommu_priv_get(tag->dev);
-			sid = PCI_DEVID(info->bus, info->devfn);
-
-			if (tag->pasid == IOMMU_NO_PASID)
-				qi_flush_dev_iotlb(iommu, sid, info->pfsid,
-						   info->ats_qdep, addr, mask);
-			else
-				qi_flush_dev_iotlb_pasid(iommu, sid, info->pfsid,
-							 tag->pasid, info->ats_qdep,
-							 addr, mask);
-
-			quirk_extra_dev_tlb_flush(info, addr, mask, tag->pasid, info->ats_qdep);
+			handle_dev_tlb_flush(tag, addr, mask);
 			break;
 		}
 
@@ -338,29 +373,14 @@ void cache_tag_flush_all(struct dmar_domain *domain)
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
-		struct intel_iommu *iommu = tag->iommu;
-		struct device_domain_info *info;
-		u16 sid;
-
 		switch (tag->type) {
 		case CACHE_TAG_IOTLB:
 		case CACHE_TAG_NESTING_IOTLB:
-			if (domain->use_first_level)
-				qi_flush_piotlb(iommu, tag->domain_id,
-						tag->pasid, 0, -1, 0);
-			else
-				iommu->flush.flush_iotlb(iommu, tag->domain_id,
-							 0, 0, DMA_TLB_DSI_FLUSH);
+			handle_iotlb_flush(domain, tag, 0, -1, 0, 0);
 			break;
 		case CACHE_TAG_DEVTLB:
 		case CACHE_TAG_NESTING_DEVTLB:
-			info = dev_iommu_priv_get(tag->dev);
-			sid = PCI_DEVID(info->bus, info->devfn);
-
-			qi_flush_dev_iotlb(iommu, sid, info->pfsid, info->ats_qdep,
-					   0, MAX_AGAW_PFN_WIDTH);
-			quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH,
-						  IOMMU_NO_PASID, info->ats_qdep);
+			handle_dev_tlb_flush_all(tag);
 			break;
 		}
 
@@ -399,20 +419,8 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
 		}
 
 		if (tag->type == CACHE_TAG_IOTLB ||
-		    tag->type == CACHE_TAG_NESTING_IOTLB) {
-			/*
-			 * Fallback to domain selective flush if no
-			 * PSI support or the size is too big.
-			 */
-			if (!cap_pgsel_inv(iommu->cap) ||
-			    mask > cap_max_amask_val(iommu->cap))
-				iommu->flush.flush_iotlb(iommu, tag->domain_id,
-							 0, 0, DMA_TLB_DSI_FLUSH);
-			else
-				iommu->flush.flush_iotlb(iommu, tag->domain_id,
-							 addr, mask,
-							 DMA_TLB_PSI_FLUSH);
-		}
+		    tag->type == CACHE_TAG_NESTING_IOTLB)
+			handle_iotlb_flush(domain, tag, addr, pages, mask, 0);
 
 		trace_cache_tag_flush_range_np(tag, start, end, addr, pages, mask);
 	}
--
2.43.0