Message-ID: <f7d84e3d-a648-4292-a652-408f704411c7@intel.com>
Date: Tue, 4 Mar 2025 16:43:09 +0800
From: Yi Liu <yi.l.liu@...el.com>
To: Lu Baolu <baolu.lu@...ux.intel.com>, Joerg Roedel <joro@...tes.org>, "Will
Deacon" <will@...nel.org>, Robin Murphy <robin.murphy@....com>, "Jason
Gunthorpe" <jgg@...pe.ca>, Kevin Tian <kevin.tian@...el.com>
CC: Dave Jiang <dave.jiang@...el.com>, Vinod Koul <vkoul@...nel.org>, "Fenghua
Yu" <fenghuay@...dia.com>, Zhangfei Gao <zhangfei.gao@...aro.org>, Zhou Wang
<wangzhou1@...ilicon.com>, <iommu@...ts.linux.dev>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 06/12] iommu/vt-d: Cleanup
intel_context_flush_present()
Hi Baolu,
On 2025/2/24 13:16, Lu Baolu wrote:
> The intel_context_flush_present() is called in places where either the
> scalable mode is disabled, or scalable mode is enabled but all PASID
> entries are known to be non-present. In these cases, the flush_domains
> path within intel_context_flush_present() will never execute. This dead
> code is therefore removed.
The reason this path is dead, if I read it correctly, is that the only caller
of intel_context_flush_present() that could reach it is
domain_context_clear_one(), which is only invoked on the legacy mode path.
Is that right?
If so, it seems unnecessary to keep the __context_flush_dev_iotlb(info) call
at the end of the new intel_context_flush_present(). Also, since this helper
is now mostly for legacy mode, it might be good to move it out of pasid.c. :)
A rough sketch of what I mean is below.
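Just to make the idea concrete, something along these lines is what I had in
mind. This is completely untested, the helper name is made up only for
illustration, and it assumes the existing driver internals (the iommu->flush
callbacks, PCI_DEVID(), and the DMA_CCMD_*/DMA_TLB_* flags) stay as they are:

/*
 * Untested sketch: legacy-mode-only flush after a present context
 * entry is torn down, living next to domain_context_clear_one()
 * instead of in pasid.c.
 */
static void context_entry_teardown_flush(struct device_domain_info *info,
					 u16 did)
{
	struct intel_iommu *iommu = info->iommu;

	/* Device-selective context-cache invalidation */
	iommu->flush.flush_context(iommu, did,
				   PCI_DEVID(info->bus, info->devfn),
				   DMA_CCMD_MASK_NOBIT, DMA_CCMD_DEVICE_INVL);

	/* Domain-selective IOTLB invalidation for the legacy-mode domain */
	iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
}

But I may well be missing a reason the device-TLB flush is still needed at
the end of the helper, so please correct me if so.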
> Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
> ---
> drivers/iommu/intel/iommu.c | 2 +-
> drivers/iommu/intel/iommu.h | 3 +--
> drivers/iommu/intel/pasid.c | 39 ++++++-------------------------------
> 3 files changed, 8 insertions(+), 36 deletions(-)
>
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index 91d49e2cea34..1d564240c977 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -1730,7 +1730,7 @@ static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8
> context_clear_entry(context);
> __iommu_flush_cache(iommu, context, sizeof(*context));
> spin_unlock(&iommu->lock);
> - intel_context_flush_present(info, context, did, true);
> + intel_context_flush_present(info, context, did);
> }
>
> int __domain_setup_first_level(struct intel_iommu *iommu,
> diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
> index f7d78cf0778c..754f6d7ade26 100644
> --- a/drivers/iommu/intel/iommu.h
> +++ b/drivers/iommu/intel/iommu.h
> @@ -1306,8 +1306,7 @@ void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
> unsigned long end);
>
> void intel_context_flush_present(struct device_domain_info *info,
> - struct context_entry *context,
> - u16 did, bool affect_domains);
> + struct context_entry *context, u16 did);
>
> int intel_iommu_enable_prq(struct intel_iommu *iommu);
> int intel_iommu_finish_prq(struct intel_iommu *iommu);
> diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
> index c2742e256552..a2c6be624dbf 100644
> --- a/drivers/iommu/intel/pasid.c
> +++ b/drivers/iommu/intel/pasid.c
> @@ -932,7 +932,7 @@ static void device_pasid_table_teardown(struct device *dev, u8 bus, u8 devfn)
> context_clear_entry(context);
> __iommu_flush_cache(iommu, context, sizeof(*context));
> spin_unlock(&iommu->lock);
> - intel_context_flush_present(info, context, did, false);
> + intel_context_flush_present(info, context, did);
> }
>
> static int pci_pasid_table_teardown(struct pci_dev *pdev, u16 alias, void *data)
> @@ -1119,17 +1119,15 @@ static void __context_flush_dev_iotlb(struct device_domain_info *info)
>
> /*
> * Cache invalidations after change in a context table entry that was present
> - * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations). If
> - * IOMMU is in scalable mode and all PASID table entries of the device were
> - * non-present, set flush_domains to false. Otherwise, true.
> + * according to the Spec 6.5.3.3 (Guidance to Software for Invalidations).
> + * This helper can only be used when IOMMU is working in the legacy mode or
> + * IOMMU is in scalable mode but all PASID table entries of the device are
> + * non-present.
> */
> void intel_context_flush_present(struct device_domain_info *info,
> - struct context_entry *context,
> - u16 did, bool flush_domains)
> + struct context_entry *context, u16 did)
> {
> struct intel_iommu *iommu = info->iommu;
> - struct pasid_entry *pte;
> - int i;
>
> /*
> * Device-selective context-cache invalidation. The Domain-ID field
> @@ -1152,30 +1150,5 @@ void intel_context_flush_present(struct device_domain_info *info,
> return;
> }
>
> - /*
> - * For scalable mode:
> - * - Domain-selective PASID-cache invalidation to affected domains
> - * - Domain-selective IOTLB invalidation to affected domains
> - * - Global Device-TLB invalidation to affected functions
> - */
> - if (flush_domains) {
> - /*
> - * If the IOMMU is running in scalable mode and there might
> - * be potential PASID translations, the caller should hold
> - * the lock to ensure that context changes and cache flushes
> - * are atomic.
> - */
> - assert_spin_locked(&iommu->lock);
> - for (i = 0; i < info->pasid_table->max_pasid; i++) {
> - pte = intel_pasid_get_entry(info->dev, i);
> - if (!pte || !pasid_pte_is_present(pte))
> - continue;
> -
> - did = pasid_get_domain_id(pte);
> - qi_flush_pasid_cache(iommu, did, QI_PC_ALL_PASIDS, 0);
> - iommu->flush.flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH);
> - }
> - }
> -
> __context_flush_dev_iotlb(info);
> }
--
Regards,
Yi Liu