[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <0db75998-0e27-4931-9528-b05aeba395b7@intel.com>
Date: Tue, 4 Mar 2025 15:55:15 +0800
From: Yi Liu <yi.l.liu@...el.com>
To: Lu Baolu <baolu.lu@...ux.intel.com>, Joerg Roedel <joro@...tes.org>, "Will
Deacon" <will@...nel.org>, Robin Murphy <robin.murphy@....com>, "Jason
Gunthorpe" <jgg@...pe.ca>, Kevin Tian <kevin.tian@...el.com>
CC: Dave Jiang <dave.jiang@...el.com>, Vinod Koul <vkoul@...nel.org>, "Fenghua
Yu" <fenghuay@...dia.com>, Zhangfei Gao <zhangfei.gao@...aro.org>, Zhou Wang
<wangzhou1@...ilicon.com>, <iommu@...ts.linux.dev>,
<linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v2 04/12] iommu/vt-d: Move scalable mode ATS enablement to
probe path
On 2025/2/24 13:16, Lu Baolu wrote:
> Device ATS is currently enabled when a domain is attached to the device
> and disabled when the domain is detached. This creates a limitation:
> when the IOMMU is operating in scalable mode and IOPF is enabled, the
> device's domain cannot be changed.
>
> Remove this limitation by moving ATS enablement to the device probe path.
>
> Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
> ---
> drivers/iommu/intel/iommu.c | 78 ++++++++++++++++++-------------------
> 1 file changed, 38 insertions(+), 40 deletions(-)
I'm ok with this patch. Just a heads up in case of anyone that is not aware
of a discussion in another thread which intends to enable ATS in domain
attach.
[1]
https://lore.kernel.org/linux-iommu/2c9ef073-fee5-43c6-8932-a8cae677970e@intel.com/
> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
> index 16dd8f0de76d..f52602bde742 100644
> --- a/drivers/iommu/intel/iommu.c
> +++ b/drivers/iommu/intel/iommu.c
> @@ -1172,34 +1172,6 @@ static bool dev_needs_extra_dtlb_flush(struct pci_dev *pdev)
> return true;
> }
>
> -static void iommu_enable_pci_caps(struct device_domain_info *info)
> -{
> - struct pci_dev *pdev;
> -
> - if (!dev_is_pci(info->dev))
> - return;
> -
> - pdev = to_pci_dev(info->dev);
> - if (info->ats_supported && pci_ats_page_aligned(pdev) &&
> - !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
> - info->ats_enabled = 1;
> -}
> -
> -static void iommu_disable_pci_caps(struct device_domain_info *info)
> -{
> - struct pci_dev *pdev;
> -
> - if (!dev_is_pci(info->dev))
> - return;
> -
> - pdev = to_pci_dev(info->dev);
> -
> - if (info->ats_enabled) {
> - pci_disable_ats(pdev);
> - info->ats_enabled = 0;
> - }
> -}
> -
> static void intel_flush_iotlb_all(struct iommu_domain *domain)
> {
> cache_tag_flush_all(to_dmar_domain(domain));
> @@ -1556,12 +1528,22 @@ domain_context_mapping(struct dmar_domain *domain, struct device *dev)
> struct device_domain_info *info = dev_iommu_priv_get(dev);
> struct intel_iommu *iommu = info->iommu;
> u8 bus = info->bus, devfn = info->devfn;
> + struct pci_dev *pdev;
> + int ret;
>
> if (!dev_is_pci(dev))
> return domain_context_mapping_one(domain, iommu, bus, devfn);
>
> - return pci_for_each_dma_alias(to_pci_dev(dev),
> - domain_context_mapping_cb, domain);
> + pdev = to_pci_dev(dev);
> + ret = pci_for_each_dma_alias(pdev, domain_context_mapping_cb, domain);
> + if (ret)
> + return ret;
> +
> + if (info->ats_supported && pci_ats_page_aligned(pdev) &&
> + !pci_enable_ats(pdev, VTD_PAGE_SHIFT))
> + info->ats_enabled = 1;
> +
> + return 0;
> }
>
> /* Return largest possible superpage level for a given mapping */
> @@ -1843,8 +1825,6 @@ static int dmar_domain_attach_device(struct dmar_domain *domain,
> if (ret)
> goto out_block_translation;
>
> - iommu_enable_pci_caps(info);
> -
> ret = cache_tag_assign_domain(domain, dev, IOMMU_NO_PASID);
> if (ret)
> goto out_block_translation;
> @@ -3191,13 +3171,20 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op
> */
> static void domain_context_clear(struct device_domain_info *info)
> {
> + struct pci_dev *pdev;
> +
> if (!dev_is_pci(info->dev)) {
> domain_context_clear_one(info, info->bus, info->devfn);
> return;
> }
>
> - pci_for_each_dma_alias(to_pci_dev(info->dev),
> - &domain_context_clear_one_cb, info);
> + pdev = to_pci_dev(info->dev);
> + pci_for_each_dma_alias(pdev, &domain_context_clear_one_cb, info);
> +
> + if (info->ats_enabled) {
> + pci_disable_ats(pdev);
> + info->ats_enabled = 0;
> + }
> }
>
> /*
> @@ -3214,7 +3201,6 @@ void device_block_translation(struct device *dev)
> if (info->domain)
> cache_tag_unassign_domain(info->domain, dev, IOMMU_NO_PASID);
>
> - iommu_disable_pci_caps(info);
> if (!dev_is_real_dma_subdevice(dev)) {
> if (sm_supported(iommu))
> intel_pasid_tear_down_entry(iommu, dev,
> @@ -3749,6 +3735,16 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
> !pci_enable_pasid(pdev, info->pasid_supported & ~1))
> info->pasid_enabled = 1;
>
> + if (sm_supported(iommu)) {
> + if (info->ats_supported && pci_ats_page_aligned(pdev)) {
> + ret = pci_enable_ats(pdev, VTD_PAGE_SHIFT);
> + if (ret)
> + pci_info(pdev, "Failed to enable ATS on device\n");
> + else
> + info->ats_enabled = 1;
> + }
> + }
> +
> return &iommu->iommu;
> free_table:
> intel_pasid_free_table(dev);
> @@ -3765,6 +3761,11 @@ static void intel_iommu_release_device(struct device *dev)
> struct device_domain_info *info = dev_iommu_priv_get(dev);
> struct intel_iommu *iommu = info->iommu;
>
> + if (info->ats_enabled) {
> + pci_disable_ats(to_pci_dev(dev));
> + info->ats_enabled = 0;
> + }
> +
> if (info->pasid_enabled) {
> pci_disable_pasid(to_pci_dev(dev));
> info->pasid_enabled = 0;
> @@ -4365,13 +4366,10 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device
> if (dev_is_real_dma_subdevice(dev))
> return 0;
>
> - if (sm_supported(iommu)) {
> + if (sm_supported(iommu))
> ret = intel_pasid_setup_pass_through(iommu, dev, IOMMU_NO_PASID);
> - if (!ret)
> - iommu_enable_pci_caps(info);
> - } else {
> + else
> ret = device_setup_pass_through(dev);
> - }
>
> return ret;
> }
--
Regards,
Yi Liu
Powered by blists - more mailing lists