Message-ID: <2f279564-633b-fe93-5ffd-34b3e8f1c6fc@linux.intel.com>
Date: Tue, 10 May 2022 09:16:25 +0800
From: Baolu Lu <baolu.lu@...ux.intel.com>
To: "Tian, Kevin" <kevin.tian@...el.com>,
Joerg Roedel <joro@...tes.org>,
Jason Gunthorpe <jgg@...dia.com>,
Alex Williamson <alex.williamson@...hat.com>
Cc: "Pan, Jacob jun" <jacob.jun.pan@...el.com>,
"Liu, Yi L" <yi.l.liu@...el.com>,
"iommu@...ts.linux-foundation.org" <iommu@...ts.linux-foundation.org>,
"linux-kernel@...r.kernel.org" <linux-kernel@...r.kernel.org>
Subject: Re: [PATCH v4 2/4] iommu/vt-d: Check domain force_snooping against attached devices

On 2022/5/10 08:51, Tian, Kevin wrote:
>> From: Lu Baolu <baolu.lu@...ux.intel.com>
>> Sent: Sunday, May 8, 2022 8:35 PM
>>
>> As domain->force_snooping only impacts the devices attached to the
>> domain, there's no need to check against all IOMMU units. On the other
>> hand, force_snooping can be set on a domain regardless of whether any
>> device is attached, and once set it is an immutable flag. If no device
>> is attached, the operation always succeeds; such an empty domain can
>> then only be attached to devices whose IOMMUs support snoop control.
>>
>> Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
>
> Reviewed-by: Kevin Tian <kevin.tian@...el.com>
Thank you, Kevin. I will queue this series for v5.19.
Best regards,
baolu
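
To illustrate the rule described in the commit message above: below is a
minimal, self-contained sketch (plain user-space C with invented stand-in
types fake_iommu/fake_device/fake_domain, not the VT-d driver code). It only
models the semantics that enforcing snooping on an empty domain always
succeeds, that the flag is then immutable, and that a later attach has to
reject a device whose IOMMU lacks snoop control.

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in types; not the real driver structures. */
struct fake_iommu  { bool snoop_ctl; };		/* models ecap_sc_support() */
struct fake_device { struct fake_iommu *iommu; };
struct fake_domain { bool force_snooping; };

/* With no device attached there is nothing to check, so enforcing
 * always succeeds and the flag becomes immutable. */
static bool enforce_cache_coherency(struct fake_domain *dom)
{
	dom->force_snooping = true;
	return true;
}

/* A later attach must honor the immutable flag and reject devices
 * whose IOMMU cannot enforce snooping. */
static int attach_device(struct fake_domain *dom, struct fake_device *dev)
{
	if (dom->force_snooping && !dev->iommu->snoop_ctl)
		return -1;	/* -EINVAL in kernel terms */
	return 0;
}

int main(void)
{
	struct fake_iommu no_sc = { .snoop_ctl = false };
	struct fake_device dev = { .iommu = &no_sc };
	struct fake_domain dom = { .force_snooping = false };

	enforce_cache_coherency(&dom);	/* empty domain: always succeeds */
	printf("attach -> %d\n", attach_device(&dom, &dev));	/* rejected */
	return 0;
}
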
>
>> ---
>> include/linux/intel-iommu.h | 1 +
>> drivers/iommu/intel/pasid.h | 2 ++
>> drivers/iommu/intel/iommu.c | 53 ++++++++++++++++++++++++++++++++++---
>> drivers/iommu/intel/pasid.c | 42 +++++++++++++++++++++++++++++
>> 4 files changed, 95 insertions(+), 3 deletions(-)
>>
>> diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
>> index 72e5d7900e71..4f29139bbfc3 100644
>> --- a/include/linux/intel-iommu.h
>> +++ b/include/linux/intel-iommu.h
>> @@ -540,6 +540,7 @@ struct dmar_domain {
>> u8 has_iotlb_device: 1;
>> u8 iommu_coherency: 1; /* indicate coherency of iommu access */
>> u8 force_snooping : 1; /* Create IOPTEs with snoop control */
>> + u8 set_pte_snp:1;
>>
>> struct list_head devices; /* all devices' list */
>> struct iova_domain iovad; /* iova's that belong to this domain */
>> diff --git a/drivers/iommu/intel/pasid.h b/drivers/iommu/intel/pasid.h
>> index ab4408c824a5..583ea67fc783 100644
>> --- a/drivers/iommu/intel/pasid.h
>> +++ b/drivers/iommu/intel/pasid.h
>> @@ -123,4 +123,6 @@ void intel_pasid_tear_down_entry(struct intel_iommu *iommu,
>> bool fault_ignore);
>> int vcmd_alloc_pasid(struct intel_iommu *iommu, u32 *pasid);
>> void vcmd_free_pasid(struct intel_iommu *iommu, u32 pasid);
>> +void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
>> + struct device *dev, u32 pasid);
>> #endif /* __INTEL_PASID_H */
>> diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
>> index b4802f4055a0..048ebfbd5fcb 100644
>> --- a/drivers/iommu/intel/iommu.c
>> +++ b/drivers/iommu/intel/iommu.c
>> @@ -2459,7 +2459,7 @@ static int domain_setup_first_level(struct intel_iommu *iommu,
>> if (level == 5)
>> flags |= PASID_FLAG_FL5LP;
>>
>> - if (domain->domain.type == IOMMU_DOMAIN_UNMANAGED)
>> + if (domain->force_snooping)
>> flags |= PASID_FLAG_PAGE_SNOOP;
>>
>> return intel_pasid_setup_first_level(iommu, dev, (pgd_t *)pgd, pasid,
>> @@ -4444,7 +4444,7 @@ static int intel_iommu_map(struct iommu_domain *domain,
>> prot |= DMA_PTE_READ;
>> if (iommu_prot & IOMMU_WRITE)
>> prot |= DMA_PTE_WRITE;
>> - if (dmar_domain->force_snooping)
>> + if (dmar_domain->set_pte_snp)
>> prot |= DMA_PTE_SNP;
>>
>> max_addr = iova + size;
>> @@ -4567,13 +4567,60 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
>> return phys;
>> }
>>
>> +static bool domain_support_force_snooping(struct dmar_domain *domain)
>> +{
>> + struct device_domain_info *info;
>> + bool support = true;
>> +
>> + assert_spin_locked(&device_domain_lock);
>> + list_for_each_entry(info, &domain->devices, link) {
>> + if (!ecap_sc_support(info->iommu->ecap)) {
>> + support = false;
>> + break;
>> + }
>> + }
>> +
>> + return support;
>> +}
>> +
>> +static void domain_set_force_snooping(struct dmar_domain *domain)
>> +{
>> + struct device_domain_info *info;
>> +
>> + assert_spin_locked(&device_domain_lock);
>> +
>> + /*
>> + * Second level page table supports per-PTE snoop control. The
>> + * iommu_map() interface will handle this by setting SNP bit.
>> + */
>> + if (!domain_use_first_level(domain)) {
>> + domain->set_pte_snp = true;
>> + return;
>> + }
>> +
>> + list_for_each_entry(info, &domain->devices, link)
>> + intel_pasid_setup_page_snoop_control(info->iommu, info->dev,
>> + PASID_RID2PASID);
>> +}
>> +
>> static bool intel_iommu_enforce_cache_coherency(struct iommu_domain *domain)
>> {
>> struct dmar_domain *dmar_domain = to_dmar_domain(domain);
>> + unsigned long flags;
>>
>> - if (!domain_update_iommu_snooping(NULL))
>> + if (dmar_domain->force_snooping)
>> + return true;
>> +
>> + spin_lock_irqsave(&device_domain_lock, flags);
>> + if (!domain_support_force_snooping(dmar_domain)) {
>> + spin_unlock_irqrestore(&device_domain_lock, flags);
>> return false;
>> + }
>> +
>> + domain_set_force_snooping(dmar_domain);
>> dmar_domain->force_snooping = true;
>> + spin_unlock_irqrestore(&device_domain_lock, flags);
>> +
>> return true;
>> }
>>
>> diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
>> index f8d215d85695..d19dd66a670c 100644
>> --- a/drivers/iommu/intel/pasid.c
>> +++ b/drivers/iommu/intel/pasid.c
>> @@ -762,3 +762,45 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu,
>>
>> return 0;
>> }
>> +
>> +/*
>> + * Set the page snoop control for a pasid entry which has been set up.
>> + */
>> +void intel_pasid_setup_page_snoop_control(struct intel_iommu *iommu,
>> + struct device *dev, u32 pasid)
>> +{
>> + struct pasid_entry *pte;
>> + u16 did;
>> +
>> + spin_lock(&iommu->lock);
>> + pte = intel_pasid_get_entry(dev, pasid);
>> + if (WARN_ON(!pte || !pasid_pte_is_present(pte))) {
>> + spin_unlock(&iommu->lock);
>> + return;
>> + }
>> +
>> + pasid_set_pgsnp(pte);
>> + did = pasid_get_domain_id(pte);
>> + spin_unlock(&iommu->lock);
>> +
>> + if (!ecap_coherent(iommu->ecap))
>> + clflush_cache_range(pte, sizeof(*pte));
>> +
>> + /*
>> + * VT-d spec 3.4 table23 states guides for cache invalidation:
>> + *
>> + * - PASID-selective-within-Domain PASID-cache invalidation
>> + * - PASID-selective PASID-based IOTLB invalidation
>> + * - If (pasid is RID_PASID)
>> + * - Global Device-TLB invalidation to affected functions
>> + * Else
>> + * - PASID-based Device-TLB invalidation (with S=1 and
>> + * Addr[63:12]=0x7FFFFFFF_FFFFF) to affected functions
>> + */
>> + pasid_cache_invalidation_with_pasid(iommu, did, pasid);
>> + qi_flush_piotlb(iommu, did, pasid, 0, -1, 0);
>> +
>> + /* Device IOTLB doesn't need to be flushed in caching mode. */
>> + if (!cap_caching_mode(iommu->cap))
>> + devtlb_invalidation_with_pasid(iommu, dev, pasid);
>> +}
>> --
>> 2.25.1
>
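
For anyone skimming the diff above, the decision logic can also be read as a
compact stand-alone model (plain C with invented minimal types; not the driver
code, and the device_domain_lock and cache/IOTLB flush details are omitted):
all attached IOMMUs must report snoop-control support, then a second-level
domain merely records set_pte_snp so that iommu_map() adds DMA_PTE_SNP to each
PTE, while a first-level domain sets the PGSNP bit in the PASID entry of every
attached device.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented minimal stand-ins; not the real driver structures. */
struct m_iommu  { bool snoop_ctl; };			/* models ecap_sc_support() */
struct m_device { struct m_iommu *iommu; bool pgsnp; };	/* PASID-entry PGSNP bit    */
struct m_domain {
	bool first_level;		/* models domain_use_first_level() */
	bool force_snooping;
	bool set_pte_snp;
	struct m_device *devs[8];
	size_t ndevs;
};

/* Models domain_support_force_snooping(): every attached IOMMU must support SC. */
static bool support_force_snooping(const struct m_domain *d)
{
	for (size_t i = 0; i < d->ndevs; i++)
		if (!d->devs[i]->iommu->snoop_ctl)
			return false;
	return true;
}

/* Models domain_set_force_snooping(): per-PTE SNP for second level,
 * per-PASID PGSNP for first level (flushes omitted here). */
static void set_force_snooping(struct m_domain *d)
{
	if (!d->first_level) {
		d->set_pte_snp = true;	/* later maps add DMA_PTE_SNP */
		return;
	}
	for (size_t i = 0; i < d->ndevs; i++)
		d->devs[i]->pgsnp = true;
}

/* Models intel_iommu_enforce_cache_coherency(), without the locking. */
static bool enforce_cache_coherency(struct m_domain *d)
{
	if (d->force_snooping)
		return true;		/* already set, immutable */
	if (!support_force_snooping(d))
		return false;
	set_force_snooping(d);
	d->force_snooping = true;
	return true;
}

int main(void)
{
	struct m_iommu iommu = { .snoop_ctl = true };
	struct m_device dev = { .iommu = &iommu };
	struct m_domain dom = { .first_level = false, .devs = { &dev }, .ndevs = 1 };

	printf("enforced=%d set_pte_snp=%d\n",
	       enforce_cache_coherency(&dom), dom.set_pte_snp);
	return 0;
}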