Message-Id: <20240701112317.94022-2-baolu.lu@linux.intel.com>
Date: Mon, 1 Jul 2024 19:23:16 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Jason Gunthorpe <jgg@...pe.ca>,
Kevin Tian <kevin.tian@...el.com>
Cc: iommu@...ts.linux.dev,
linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH v3 1/2] iommu/vt-d: Add helper to flush caches for context change

Add a helper to flush the caches that must be invalidated after a
change to a context table entry that was previously present. The VT-d
specification provides guidance for such invalidations in Section
6.5.3.3 (Guidance to Software for Invalidations).

Use the helper to replace the open-coded flushes in the paths that
tear down a present context entry.
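
After the conversion, a teardown path reduces to the following pattern
(shown here for the scalable-mode PASID table teardown in the pasid.c
hunk below):

	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	spin_unlock(&iommu->lock);
	intel_context_flush_present(info, context, false);
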
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
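
Note: the PASID-table walk performed for flush_domains == true requires
iommu->lock to be held (the helper asserts this). A scalable-mode caller
would look roughly as follows; this is an illustrative sketch only,
since the converted caller in iommu.c that passes true runs in legacy
mode, where the walk is skipped:

	spin_lock(&iommu->lock);
	context_clear_entry(context);
	__iommu_flush_cache(iommu, context, sizeof(*context));
	intel_context_flush_present(info, context, true);
	spin_unlock(&iommu->lock);
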
drivers/iommu/intel/iommu.h | 4 ++
drivers/iommu/intel/iommu.c | 32 +----------
drivers/iommu/intel/pasid.c | 106 +++++++++++++++++++++++++++++-------
3 files changed, 92 insertions(+), 50 deletions(-)
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
index 9a3b064126de..63eb3306c025 100644
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -1143,6 +1143,10 @@ void cache_tag_flush_all(struct dmar_domain *domain);
void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
unsigned long end);
+void intel_context_flush_present(struct device_domain_info *info,
+ struct context_entry *context,
+				 bool flush_domains);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
void intel_svm_check(struct intel_iommu *iommu);
int intel_svm_enable_prq(struct intel_iommu *iommu);
diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
index 1f0d6892a0b6..e84b0fdca107 100644
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -1359,21 +1359,6 @@ static void iommu_disable_pci_caps(struct device_domain_info *info)
}
}
-static void __iommu_flush_dev_iotlb(struct device_domain_info *info,
- u64 addr, unsigned int mask)
-{
- u16 sid, qdep;
-
- if (!info || !info->ats_enabled)
- return;
-
- sid = info->bus << 8 | info->devfn;
- qdep = info->ats_qdep;
- qi_flush_dev_iotlb(info->iommu, sid, info->pfsid,
- qdep, addr, mask);
- quirk_extra_dev_tlb_flush(info, addr, mask, IOMMU_NO_PASID, qdep);
-}
-
static void intel_flush_iotlb_all(struct iommu_domain *domain)
{
cache_tag_flush_all(to_dmar_domain(domain));
@@ -1959,7 +1944,6 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
{
struct intel_iommu *iommu = info->iommu;
struct context_entry *context;
- u16 did_old;
spin_lock(&iommu->lock);
context = iommu_context_addr(iommu, bus, devfn, 0);
@@ -1968,24 +1952,10 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
return;
}
- did_old = context_domain_id(context);
-
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
- iommu->flush.flush_context(iommu,
- did_old,
- (((u16)bus) << 8) | devfn,
- DMA_CCMD_MASK_NOBIT,
- DMA_CCMD_DEVICE_INVL);
-
- iommu->flush.flush_iotlb(iommu,
- did_old,
- 0,
- 0,
- DMA_TLB_DSI_FLUSH);
-
- __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH);
+ intel_context_flush_present(info, context, true);
}
static int domain_setup_first_level(struct intel_iommu *iommu,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
index aabcdf756581..d6623d2c2050 100644
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -691,25 +691,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
context_clear_entry(context);
__iommu_flush_cache(iommu, context, sizeof(*context));
spin_unlock(&iommu->lock);
-
- /*
- * Cache invalidation for changes to a scalable-mode context table
- * entry.
- *
- * Section 6.5.3.3 of the VT-d spec:
- * - Device-selective context-cache invalidation;
- * - Domain-selective PASID-cache invalidation to affected domains
- * (can be skipped if all PASID entries were not-present);
- * - Domain-selective IOTLB invalidation to affected domains;
- * - Global Device-TLB invalidation to affected functions.
- *
- * The iommu has been parked in the blocking state. All domains have
- * been detached from the device or PASID. The PASID and IOTLB caches
- * have been invalidated during the domain detach path.
- */
- iommu->flush.flush_context(iommu, 0, PCI_DEVID(bus, devfn),
- DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
- devtlb_invalidation_with_pasid(iommu, dev, IOMMU_NO_PASID);
+ intel_context_flush_present(info, context, false);
}
static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
@@ -871,3 +853,89 @@ int intel_pasid_setup_sm_context(struct device *dev)
return pci_for_each_dma_alias(to_pci_dev(dev), pci_pasid_table_setup, dev);
}
+
+/*
+ * Global Device-TLB invalidation following a change to a context entry
+ * that was present.
+ */
+static void __context_flush_dev_iotlb(struct device_domain_info *info)
+{
+ if (!info->ats_enabled)
+ return;
+
+ qi_flush_dev_iotlb(info->iommu, PCI_DEVID(info->bus, info->devfn),
+ info->pfsid, info->ats_qdep, 0, MAX_AGAW_PFN_WIDTH);
+
+ /*
+ * There is no guarantee that the device DMA is stopped when it reaches
+ * here. Therefore, always attempt the extra device TLB invalidation
+ * quirk. The impact on performance is acceptable since this is not a
+ * performance-critical path.
+ */
+ quirk_extra_dev_tlb_flush(info, 0, MAX_AGAW_PFN_WIDTH, IOMMU_NO_PASID,
+ info->ats_qdep);
+}
+
+/*
+ * Cache invalidations after a change to a context table entry that was
+ * present, per Section 6.5.3.3 of the VT-d spec (Guidance to Software for
+ * Invalidations). Set @flush_domains to false only if the IOMMU is in
+ * scalable mode and all PASID table entries of the device were non-present.
+ */
+void intel_context_flush_present(struct device_domain_info *info,
+ struct context_entry *context,
+ bool flush_domains)
+{
+ struct intel_iommu *iommu = info->iommu;
+ u16 did = context_domain_id(context);
+ struct pasid_entry *pte;
+ int i;
+
+ /*
+ * Device-selective context-cache invalidation. The Domain-ID field
+ * of the Context-cache Invalidate Descriptor is ignored by hardware
+ * when operating in scalable mode. Therefore the @did value doesn't
+ * matter in scalable mode.
+ */
+ iommu->flush.flush_context(iommu, did, PCI_DEVID(info->bus, info->devfn),
+ DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);
+
+ /*
+ * For legacy mode:
+ * - Domain-selective IOTLB invalidation
+ * - Global Device-TLB invalidation to all affected functions
+ */
+ if (!sm_supported(iommu)) {
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+ __context_flush_dev_iotlb(info);
+
+ return;
+ }
+
+ /*
+ * For scalable mode:
+ * - Domain-selective PASID-cache invalidation to affected domains
+ * - Domain-selective IOTLB invalidation to affected domains
+ * - Global Device-TLB invalidation to affected functions
+ */
+ if (flush_domains) {
+ /*
+ * If the IOMMU is running in scalable mode and PASID
+ * translations might be present, the caller must hold
+ * the lock so that the context change and the cache
+ * flushes are atomic.
+ */
+ assert_spin_locked(&iommu->lock);
+ for (i = 0; i < info->pasid_table->max_pasid; i++) {
+ pte = intel_pasid_get_entry(info->dev, i);
+ if (!pte || !pasid_pte_is_present(pte))
+ continue;
+
+ did = pasid_get_domain_id(pte);
+ qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
+ iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
+ }
+ }
+
+ __context_flush_dev_iotlb(info);
+}
--
2.34.1