Message-ID: <20231006180607.GA1140589@nvidia.com>
Date:   Fri, 6 Oct 2023 15:06:07 -0300
From:   Jason Gunthorpe <jgg@...dia.com>
To:     Tina Zhang <tina.zhang@...el.com>
Cc:     Kevin Tian <kevin.tian@...el.com>,
        Lu Baolu <baolu.lu@...ux.intel.com>,
        Michael Shavit <mshavit@...gle.com>,
        Vasant Hegde <vasant.hegde@....com>, iommu@...ts.linux.dev,
        linux-kernel@...r.kernel.org
Subject: Re: [PATCH v5 5/6] iommu: Support mm PASID 1:n with sva domains

On Mon, Sep 25, 2023 at 10:38:12AM +0800, Tina Zhang wrote:
> Each mm bound to devices gets a PASID and corresponding sva domains
> allocated in iommu_sva_bind_device(), which are referenced by the
> iommu_mm field of the mm. The PASID is released in __mmdrop(), while an
> sva domain is released when no one is using it (the reference count is
> decremented in iommu_sva_unbind_device()). Although sva domains and
> their PASID are separate objects whose life cycles could be handled
> independently, the enqcmd use case requires the PASID to live as long
> as the mm (once a PASID is allocated for an mm, it is used by that mm
> until the mm is destroyed) and to be dropped only after the sva domains
> are released. To this end, mmgrab() is called in
> iommu_sva_domain_alloc() to increment the mm reference count, and
> mmdrop() is invoked in iommu_domain_free() to decrement it.
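
i.e., roughly the following pairing (sketch only, elided to the relevant
lines; the IOMMU_DOMAIN_SVA check and domain->mm field are shown as they
look in the current tree, just to restate the lifecycle described above):

struct iommu_domain *iommu_sva_domain_alloc(struct device *dev,
					    struct mm_struct *mm)
{
	...
	/* Keep the mm_struct (not the address space) alive for the domain */
	mmgrab(mm);
	domain->mm = mm;
	...
	return domain;
}

void iommu_domain_free(struct iommu_domain *domain)
{
	if (domain->type == IOMMU_DOMAIN_SVA)
		mmdrop(domain->mm);
	...
}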
> 
> Since the PASID and sva domain information is kept in a mm's struct
> iommu_mm_data, use the mm->iommu_mm field instead of the old pasid
> field in struct mm_struct. The sva domain list is protected by
> iommu_sva_lock.
> 
> This patch also removes mm_pasid_init(): with the introduced iommu_mm
> structure, initializing the mm's pasid in mm_init() is unnecessary.
> 
> Reviewed-by: Lu Baolu <baolu.lu@...ux.intel.com>
> Reviewed-by: Vasant Hegde <vasant.hegde@....com>
> Signed-off-by: Tina Zhang <tina.zhang@...el.com>
> ---
> 
> Change in v5:
> - Use smp_store_release() & READ_ONCE() in storing and loading mm's
>   pasid value.
> 
> Change in v4:
> - Rebase to v6.6-rc1.
> 
>  drivers/iommu/iommu-sva.c | 40 +++++++++++++++++++++++++++------------
>  include/linux/iommu.h     | 18 +++++++++++-------
>  kernel/fork.c             |  1 -
>  3 files changed, 39 insertions(+), 20 deletions(-)
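
On the v5 change to use smp_store_release()/READ_ONCE(): for reference,
the pairing being relied on looks roughly like this (the read-side
helper below is only an illustration of a lockless reader, not a
function from the patch):

/* Publish side, as in the patch: fully initialize the iommu_mm before
 * making the pointer visible, so a lockless reader never sees a valid
 * pointer with uninitialized values. */
iommu_mm->pasid = pasid;
INIT_LIST_HEAD(&iommu_mm->sva_domains);
smp_store_release(&mm->iommu_mm, iommu_mm);

/* Read side (illustrative): pair with READ_ONCE() when loading the
 * pointer without holding iommu_sva_lock. */
static inline ioasid_t mm_get_pasid_lockless(struct mm_struct *mm)
{
	struct iommu_mm_data *iommu_mm = READ_ONCE(mm->iommu_mm);

	if (!iommu_mm)
		return IOMMU_PASID_INVALID;
	return iommu_mm->pasid;
}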

I was wondering what Nicolin's issue was, but I didn't see anything.

But I think you should incorporate the diff below into the patch.

And there is a straightforward way to move the global lock into the
iommu_mm_data that we can explore later.
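
Roughly something like this (untested, just to sketch the direction; the
mutex name is made up):

struct iommu_mm_data {
	struct mutex		lock;	/* protects sva_domains */
	ioasid_t		pasid;
	struct list_head	sva_domains;
};

Once the iommu_mm is published, bind/unbind could take iommu_mm->lock
around the per-mm sva_domains list instead of the global iommu_sva_lock,
which would then mostly be left serializing the initial allocation and
publication of the iommu_mm itself.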

diff --git a/drivers/iommu/iommu-sva.c b/drivers/iommu/iommu-sva.c
index b2c1db1ae385b0..e712554ea3656f 100644
--- a/drivers/iommu/iommu-sva.c
+++ b/drivers/iommu/iommu-sva.c
@@ -12,34 +12,33 @@
 static DEFINE_MUTEX(iommu_sva_lock);
 
 /* Allocate a PASID for the mm within range (inclusive) */
-static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
+static struct iommu_mm_data *iommu_alloc_mm_data(struct mm_struct *mm,
+						 struct device *dev)
 {
-	ioasid_t pasid;
 	struct iommu_mm_data *iommu_mm;
-	int ret = 0;
+	ioasid_t pasid;
+
+	lockdep_assert_held(&iommu_sva_lock);
 
 	if (!arch_pgtable_dma_compat(mm))
-		return -EBUSY;
+		return ERR_PTR(-EBUSY);
 
-	mutex_lock(&iommu_sva_lock);
+	iommu_mm = mm->iommu_mm;
 	/* Is a PASID already associated with this mm? */
-	if (mm_valid_pasid(mm)) {
-		if (mm_get_pasid(mm) >= dev->iommu->max_pasids)
-			ret = -EOVERFLOW;
-		goto out;
+	if (iommu_mm) {
+		if (iommu_mm->pasid >= dev->iommu->max_pasids)
+			return ERR_PTR(-EOVERFLOW);
+		return iommu_mm;
 	}
 
 	iommu_mm = kzalloc(sizeof(struct iommu_mm_data), GFP_KERNEL);
-	if (!iommu_mm) {
-		ret = -ENOMEM;
-		goto out;
-	}
+	if (!iommu_mm)
+		return ERR_PTR(-ENOMEM);
 
 	pasid = iommu_alloc_global_pasid(dev);
 	if (pasid == IOMMU_PASID_INVALID) {
 		kfree(iommu_mm);
-		ret = -ENOSPC;
-		goto out;
+		return ERR_PTR(-ENOSPC);
 	}
 	iommu_mm->pasid = pasid;
 	INIT_LIST_HEAD(&iommu_mm->sva_domains);
@@ -49,11 +48,7 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
 	 * valid iommu_mm with uninitialized values.
 	 */
 	smp_store_release(&mm->iommu_mm, iommu_mm);
-
-	ret = 0;
-out:
-	mutex_unlock(&iommu_sva_lock);
-	return ret;
+	return iommu_mm;
 }
 
 /**
@@ -74,23 +69,29 @@ static int iommu_sva_alloc_pasid(struct mm_struct *mm, struct device *dev)
  */
 struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm)
 {
+	struct iommu_mm_data *iommu_mm;
 	struct iommu_domain *domain;
 	struct iommu_sva *handle;
 	int ret;
 
+	mutex_lock(&iommu_sva_lock);
+
 	/* Allocate mm->pasid if necessary. */
-	ret = iommu_sva_alloc_pasid(mm, dev);
-	if (ret)
-		return ERR_PTR(ret);
+	iommu_mm = iommu_alloc_mm_data(mm, dev);
+	if (IS_ERR(iommu_mm)) {
+		ret = PTR_ERR(iommu_mm);
+		goto out_unlock;
+	}
 
 	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (!handle)
-		return ERR_PTR(-ENOMEM);
+	if (!handle) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
-	mutex_lock(&iommu_sva_lock);
 	/* Search for an existing domain. */
 	list_for_each_entry(domain, &mm->iommu_mm->sva_domains, next) {
-		ret = iommu_attach_device_pasid(domain, dev, mm_get_pasid(mm));
+		ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
 		if (!ret) {
 			domain->users++;
 			goto out;
@@ -104,7 +105,7 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 		goto out_unlock;
 	}
 
-	ret = iommu_attach_device_pasid(domain, dev, mm_get_pasid(mm));
+	ret = iommu_attach_device_pasid(domain, dev, iommu_mm->pasid);
 	if (ret)
 		goto out_free_domain;
 	domain->users = 1;
@@ -119,10 +120,9 @@ struct iommu_sva *iommu_sva_bind_device(struct device *dev, struct mm_struct *mm
 
 out_free_domain:
 	iommu_domain_free(domain);
+	kfree(handle);
 out_unlock:
 	mutex_unlock(&iommu_sva_lock);
-	kfree(handle);
-
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL_GPL(iommu_sva_bind_device);
@@ -220,9 +220,11 @@ iommu_sva_handle_iopf(struct iommu_fault *fault, void *data)
 
 void mm_pasid_drop(struct mm_struct *mm)
 {
-	if (likely(!mm_valid_pasid(mm)))
+	struct iommu_mm_data *iommu_mm = mm->iommu_mm;
+
+	if (!iommu_mm)
 		return;
 
-	iommu_free_global_pasid(mm_get_pasid(mm));
-	kfree(mm->iommu_mm);
+	iommu_free_global_pasid(iommu_mm->pasid);
+	kfree(iommu_mm);
 }
