Message-ID: <20250721051657.1695788-1-baolu.lu@linux.intel.com>
Date: Mon, 21 Jul 2025 13:16:57 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Kevin Tian <kevin.tian@...el.com>,
Jason Gunthorpe <jgg@...dia.com>
Cc: iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH 1/1] iommu/vt-d: Make iotlb_sync_map a static property of dmar_domain

Commit 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for
non-caching/non-RWBF modes") set iotlb_sync_map dynamically, updating
it on every device attach. Because neither the map path nor the attach
path holds a lock that serializes this update, the flag races with
iommufd userspace operations, causing synchronization issues.
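
For illustration only, a minimal userspace sketch of this class of
race (hypothetical, simplified types; not the driver code; build with
gcc -pthread):

  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  /* Simplified stand-in for dmar_domain's flag. */
  struct fake_domain {
          bool iotlb_sync_map;       /* written by attach, read by map */
  };

  static struct fake_domain dom;

  /* Attach path: flips the flag with no lock held. */
  static void *attach_path(void *arg)
  {
          dom.iotlb_sync_map = true; /* unsynchronized write */
          return NULL;
  }

  /* Map path: decides whether to flush based on the flag. */
  static void *map_path(void *arg)
  {
          /* Unsynchronized read: may miss a concurrent update. */
          printf(dom.iotlb_sync_map ? "flush after map\n" : "skip flush\n");
          return NULL;
  }

  int main(void)
  {
          pthread_t a, m;

          pthread_create(&a, NULL, attach_path, NULL);
          pthread_create(&m, NULL, map_path, NULL);
          pthread_join(a, NULL);
          pthread_join(m, NULL);
          return 0;
  }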

Invalidation changes must precede device attachment so that all
flushes complete before the hardware starts walking the page tables;
otherwise coherence problems can result.
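
As a hedged, self-contained sketch of that ordering (every name below
is a hypothetical stand-in, not one of the driver's real helpers):

  #include <stdio.h>

  struct fake_domain { int id; };
  struct fake_device { int id; };

  /* Stand-in for draining all pending IOTLB invalidations. */
  static void flush_iotlb(struct fake_domain *dom)
  {
          printf("domain %d: invalidations complete\n", dom->id);
  }

  /*
   * Stand-in for programming the context entry that lets the device
   * walk the domain's page tables.
   */
  static void program_context_entry(struct fake_device *dev,
                                    struct fake_domain *dom)
  {
          printf("device %d: walking domain %d page tables\n",
                 dev->id, dom->id);
  }

  int main(void)
  {
          struct fake_domain dom = { .id = 1 };
          struct fake_device dev = { .id = 7 };

          /* Flushes complete first, attachment second; never the reverse. */
          flush_iotlb(&dom);
          program_context_entry(&dev, &dom);
          return 0;
  }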

Make domain->iotlb_sync_map a static property that is set once at
domain allocation time. If an IOMMU requires iotlb_sync_map but the
domain was allocated without it, the attach is rejected. This does not
reduce domain sharing in practice: RWBF and shadowing page table
caching are legacy uses on legacy hardware, and mixed configurations
(some IOMMUs in caching mode, others not) are unlikely in real-world
scenarios.
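
The shape of the fix, as a hedged sketch (simplified stand-in types;
the real driver reads intel_iommu capability bits and stores the flag
in dmar_domain):

  #include <stdbool.h>
  #include <errno.h>

  struct fake_iommu {
          bool rwbf;                 /* write-buffer flushing required */
          bool caching_mode;         /* CM=1: shadowing page tables */
  };

  struct fake_domain {
          bool iotlb_sync_map;       /* fixed at allocation */
  };

  /* Allocation: decide the property once, before the domain is shared. */
  static void domain_alloc(struct fake_domain *dom,
                           const struct fake_iommu *iommu)
  {
          dom->iotlb_sync_map = iommu->rwbf || iommu->caching_mode;
  }

  /* Attach: reject an incompatible domain instead of mutating it. */
  static int domain_attach(const struct fake_domain *dom,
                           const struct fake_iommu *iommu)
  {
          if ((iommu->rwbf || iommu->caching_mode) && !dom->iotlb_sync_map)
                  return -EINVAL;
          return 0;
  }

  int main(void)
  {
          struct fake_iommu legacy = { .rwbf = true };
          struct fake_domain dom;

          domain_alloc(&dom, &legacy);
          return domain_attach(&dom, &legacy);    /* 0: compatible */
  }

Because the flag never changes after allocation, the map path can read
it without any serialization against the attach path.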

Fixes: 12724ce3fe1a ("iommu/vt-d: Optimize iotlb_sync_map for non-caching/non-RWBF modes")
Suggested-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
drivers/iommu/intel/iommu.c | 43 +++++++++++++++++++++++++------------
1 file changed, 29 insertions(+), 14 deletions(-)

diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 8db8be9b7e7d..7c64d88ece77 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -57,6 +57,8 @@
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;

+#define rwbf_required(iommu) (rwbf_quirk || cap_rwbf((iommu)->cap))
+
/*
* set to 1 to panic kernel if can't successfully enable VT-d
* (used when kernel is launched w/ TXT)
@@ -1780,18 +1782,6 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
__pa(pgd), flags, old);
}

-static bool domain_need_iotlb_sync_map(struct dmar_domain *domain,
- struct intel_iommu *iommu)
-{
- if (cap_caching_mode(iommu->cap) && intel_domain_is_ss_paging(domain))
- return true;
-
- if (rwbf_quirk || cap_rwbf(iommu->cap))
- return true;
-
- return false;
-}
-
static int dmar_domain_attach_device(struct dmar_domain *domain,
struct device *dev)
{
@@ -1831,8 +1821,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
if (ret)
goto out_block_translation;

- domain->iotlb_sync_map |= domain_need_iotlb_sync_map(domain, iommu);
-
return 0;

out_block_translation:
@@ -3352,6 +3340,14 @@ intel_iommu_domain_alloc_first_stage(struct device *dev,
return ERR_CAST(dmar_domain);

dmar_domain->domain.ops = &intel_fs_paging_domain_ops;
+ /*
+ * iotlb sync for map is only needed for legacy implementations that
+ * explicitly require flushing internal write buffers to ensure memory
+ * coherence.
+ */
+ if (rwbf_required(iommu))
+ dmar_domain->iotlb_sync_map = true;
+
return &dmar_domain->domain;
}
@@ -3386,6 +3382,14 @@ intel_iommu_domain_alloc_second_stage(struct device *dev,
if (flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING)
dmar_domain->domain.dirty_ops = &intel_dirty_ops;

+ /*
+ * Besides the internal write buffer flush, the caching mode used for
+ * legacy nested translation (which utilizes shadowing page tables)
+ * also requires iotlb sync on map.
+ */
+ if (rwbf_required(iommu) || cap_caching_mode(iommu->cap))
+ dmar_domain->iotlb_sync_map = true;
+
return &dmar_domain->domain;
}
@@ -3446,6 +3450,11 @@ static int paging_domain_compatible_first_stage(struct dmar_domain *dmar_domain,
if (!cap_fl1gp_support(iommu->cap) &&
(dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
+
+ /* iotlb sync on map requirement */
+ if (rwbf_required(iommu) && !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
return 0;
}
@@ -3469,6 +3478,12 @@ paging_domain_compatible_second_stage(struct dmar_domain *dmar_domain,
return -EINVAL;
if (!(sslps & BIT(1)) && (dmar_domain->domain.pgsize_bitmap & SZ_1G))
return -EINVAL;
+
+ /* iotlb sync on map requirement */
+ if ((rwbf_required(iommu) || cap_caching_mode(iommu->cap)) &&
+ !dmar_domain->iotlb_sync_map)
+ return -EINVAL;
+
return 0;
}
--
2.43.0