Message-ID: <20250714045028.958850-12-baolu.lu@linux.intel.com>
Date: Mon, 14 Jul 2025 12:50:28 +0800
From: Lu Baolu <baolu.lu@linux.intel.com>
To: Joerg Roedel <joro@8bytes.org>
Cc: iommu@lists.linux.dev,
	linux-kernel@vger.kernel.org
Subject: [PATCH 11/11] iommu/vt-d: Deduplicate cache_tag_flush_all by reusing flush_range
From: Ethan Milon <ethan.milon@eviden.com>

The logic in cache_tag_flush_all() that iterates over cache tags and
issues TLB invalidations is largely duplicated in
cache_tag_flush_range(), the only difference being the range
parameters.

Extend cache_tag_flush_range() to handle a full address-space flush
when called with start = 0 and end = ULONG_MAX, allowing
cache_tag_flush_all() to simply delegate to cache_tag_flush_range().
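For illustration only (not part of the patch), the resulting
equivalence can be sketched as follows, assuming a valid
struct dmar_domain pointer:

	/*
	 * Both calls now issue the same full address-space
	 * invalidation; the wrapper merely forwards the sentinel
	 * range (start = 0, end = ULONG_MAX) with ih = 0.
	 */
	cache_tag_flush_all(domain);
	cache_tag_flush_range(domain, 0, ULONG_MAX, 0);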
Signed-off-by: Ethan Milon <ethan.milon@eviden.com>
Link: https://lore.kernel.org/r/20250708214821.30967-2-ethan.milon@eviden.com
Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
---
 drivers/iommu/intel/cache.c | 34 ++++++++--------------------------
 drivers/iommu/intel/trace.h |  5 -----
 2 files changed, 8 insertions(+), 31 deletions(-)

diff --git a/drivers/iommu/intel/cache.c b/drivers/iommu/intel/cache.c
index 071f78e67fcb..265e7290256b 100644
--- a/drivers/iommu/intel/cache.c
+++ b/drivers/iommu/intel/cache.c
@@ -434,7 +434,13 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
 	struct cache_tag *tag;
 	unsigned long flags;
 
-	addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+	if (start == 0 && end == ULONG_MAX) {
+		addr = 0;
+		pages = -1;
+		mask = MAX_AGAW_PFN_WIDTH;
+	} else {
+		addr = calculate_psi_aligned_address(start, end, &pages, &mask);
+	}
 
 	spin_lock_irqsave(&domain->cache_lock, flags);
 	list_for_each_entry(tag, &domain->cache_tags, node) {
@@ -475,31 +481,7 @@ void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
  */
 void cache_tag_flush_all(struct dmar_domain *domain)
 {
-	struct intel_iommu *iommu = NULL;
-	struct cache_tag *tag;
-	unsigned long flags;
-
-	spin_lock_irqsave(&domain->cache_lock, flags);
-	list_for_each_entry(tag, &domain->cache_tags, node) {
-		if (iommu && iommu != tag->iommu)
-			qi_batch_flush_descs(iommu, domain->qi_batch);
-		iommu = tag->iommu;
-
-		switch (tag->type) {
-		case CACHE_TAG_IOTLB:
-		case CACHE_TAG_NESTING_IOTLB:
-			cache_tag_flush_iotlb(domain, tag, 0, -1, 0, 0);
-			break;
-		case CACHE_TAG_DEVTLB:
-		case CACHE_TAG_NESTING_DEVTLB:
-			cache_tag_flush_devtlb_psi(domain, tag, 0, MAX_AGAW_PFN_WIDTH);
-			break;
-		}
-
-		trace_cache_tag_flush_all(tag);
-	}
-	qi_batch_flush_descs(iommu, domain->qi_batch);
-	spin_unlock_irqrestore(&domain->cache_lock, flags);
+	cache_tag_flush_range(domain, 0, ULONG_MAX, 0);
 }
 
 /*
diff --git a/drivers/iommu/intel/trace.h b/drivers/iommu/intel/trace.h
index 9defdae6ebae..6311ba3f1691 100644
--- a/drivers/iommu/intel/trace.h
+++ b/drivers/iommu/intel/trace.h
@@ -130,11 +130,6 @@ DEFINE_EVENT(cache_tag_log, cache_tag_unassign,
 	TP_ARGS(tag)
 );
 
-DEFINE_EVENT(cache_tag_log, cache_tag_flush_all,
-	TP_PROTO(struct cache_tag *tag),
-	TP_ARGS(tag)
-);
-
 DECLARE_EVENT_CLASS(cache_tag_flush,
 	TP_PROTO(struct cache_tag *tag, unsigned long start, unsigned long end,
 		 unsigned long addr, unsigned long pages, unsigned long mask),
--
2.43.0