Message-ID: <ad75a1f7f0b4d5b6d35238b4ae7b41db1410110c.1690488745.git.nicolinc@nvidia.com>
Date: Thu, 27 Jul 2023 13:24:34 -0700
From: Nicolin Chen <nicolinc@...dia.com>
To: <jgg@...dia.com>, <kevin.tian@...el.com>
CC: <yi.l.liu@...el.com>, <joro@...tes.org>, <will@...nel.org>,
<robin.murphy@....com>, <alex.williamson@...hat.com>,
<shuah@...nel.org>, <linux-kernel@...r.kernel.org>,
<iommu@...ts.linux.dev>, <kvm@...r.kernel.org>,
<linux-kselftest@...r.kernel.org>, <mjrosato@...ux.ibm.com>,
<farman@...ux.ibm.com>
Subject: [PATCH v10 3/6] iommufd: Add iommufd_access_change_ioas(_id) helpers
The complication of the mutex and the refcount will be amplified once we
introduce replace support for accesses. As a preparatory step, add a
helper, iommufd_access_change_ioas(), and a wrapper around it,
iommufd_access_change_ioas_id(). They take care of the existing
iommufd_access_attach() and iommufd_access_detach() paths with less risk
of race conditions.

Also, update the teardown in iommufd_access_destroy_object() to call the
new iommufd_access_change_ioas() helper.
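
For readability, the two exported entry points reduce to roughly the
following once the helpers are in place (a condensed sketch drawn
directly from the hunks below, no new identifiers introduced):

	int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
	{
		int rc;

		mutex_lock(&access->ioas_lock);
		if (WARN_ON(access->ioas)) {
			mutex_unlock(&access->ioas_lock);
			return -EINVAL;
		}
		/* Look up the IOAS by ID and hand it to iommufd_access_change_ioas() */
		rc = iommufd_access_change_ioas_id(access, ioas_id);
		mutex_unlock(&access->ioas_lock);
		return rc;
	}

	void iommufd_access_detach(struct iommufd_access *access)
	{
		int rc;

		mutex_lock(&access->ioas_lock);
		if (WARN_ON(!access->ioas)) {
			mutex_unlock(&access->ioas_lock);
			return;
		}
		/* A NULL new_ioas detaches from the current IOAS */
		rc = iommufd_access_change_ioas(access, NULL);
		WARN_ON(rc);
		mutex_unlock(&access->ioas_lock);
	}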
Suggested-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
drivers/iommu/iommufd/device.c | 123 +++++++++++++++++++++------------
1 file changed, 80 insertions(+), 43 deletions(-)
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index 7a3e8660b902..e79cbedd8626 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -684,17 +684,82 @@ void iommufd_device_detach(struct iommufd_device *idev)
}
EXPORT_SYMBOL_NS_GPL(iommufd_device_detach, IOMMUFD);
+/*
+ * On success, it will refcount_inc() the valid new_ioas and refcount_dec() the
+ * valid cur_ioas (access->ioas). A caller passing in a valid new_ioas should
+ * still call iommufd_put_object() if it did an iommufd_get_object() for it.
+ */
+static int iommufd_access_change_ioas(struct iommufd_access *access,
+ struct iommufd_ioas *new_ioas)
+{
+ u32 iopt_access_list_id = access->iopt_access_list_id;
+ struct iommufd_ioas *cur_ioas = access->ioas;
+ int rc;
+
+ lockdep_assert_held(&access->ioas_lock);
+
+ /* We are racing with a concurrent detach, bail */
+ if (cur_ioas != access->ioas_unpin)
+ return -EBUSY;
+
+ if (IS_ERR(new_ioas))
+ return PTR_ERR(new_ioas);
+
+ if (cur_ioas == new_ioas)
+ return 0;
+
+ /*
+ * Set ioas to NULL to block any further iommufd_access_pin_pages().
+ * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
+ */
+ access->ioas = NULL;
+
+ if (new_ioas) {
+ rc = iopt_add_access(&new_ioas->iopt, access);
+ if (rc) {
+ access->ioas = cur_ioas;
+ return rc;
+ }
+ refcount_inc(&new_ioas->obj.users);
+ }
+
+ if (cur_ioas) {
+ if (access->ops->unmap) {
+ mutex_unlock(&access->ioas_lock);
+ access->ops->unmap(access->data, 0, ULONG_MAX);
+ mutex_lock(&access->ioas_lock);
+ }
+ iopt_remove_access(&cur_ioas->iopt, access, iopt_access_list_id);
+ refcount_dec(&cur_ioas->obj.users);
+ }
+
+ access->ioas = new_ioas;
+ access->ioas_unpin = new_ioas;
+
+ return 0;
+}
+
+static int iommufd_access_change_ioas_id(struct iommufd_access *access, u32 id)
+{
+ struct iommufd_ioas *ioas = iommufd_get_ioas(access->ictx, id);
+ int rc;
+
+ if (IS_ERR(ioas))
+ return PTR_ERR(ioas);
+ rc = iommufd_access_change_ioas(access, ioas);
+ iommufd_put_object(&ioas->obj);
+ return rc;
+}
+
void iommufd_access_destroy_object(struct iommufd_object *obj)
{
struct iommufd_access *access =
container_of(obj, struct iommufd_access, obj);
- if (access->ioas) {
- iopt_remove_access(&access->ioas->iopt, access,
- access->iopt_access_list_id);
- refcount_dec(&access->ioas->obj.users);
- access->ioas = NULL;
- }
+ mutex_lock(&access->ioas_lock);
+ if (access->ioas)
+ WARN_ON(iommufd_access_change_ioas(access, NULL));
+ mutex_unlock(&access->ioas_lock);
iommufd_ctx_put(access->ictx);
}
@@ -761,60 +826,32 @@ EXPORT_SYMBOL_NS_GPL(iommufd_access_destroy, IOMMUFD);
void iommufd_access_detach(struct iommufd_access *access)
{
- struct iommufd_ioas *cur_ioas = access->ioas;
+ int rc;
mutex_lock(&access->ioas_lock);
- if (WARN_ON(!access->ioas))
- goto out;
- /*
- * Set ioas to NULL to block any further iommufd_access_pin_pages().
- * iommufd_access_unpin_pages() can continue using access->ioas_unpin.
- */
- access->ioas = NULL;
-
- if (access->ops->unmap) {
+ if (WARN_ON(!access->ioas)) {
mutex_unlock(&access->ioas_lock);
- access->ops->unmap(access->data, 0, ULONG_MAX);
- mutex_lock(&access->ioas_lock);
+ return;
}
- iopt_remove_access(&cur_ioas->iopt, access,
- access->iopt_access_list_id);
- refcount_dec(&cur_ioas->obj.users);
-out:
- access->ioas_unpin = NULL;
+ rc = iommufd_access_change_ioas(access, NULL);
+ WARN_ON(rc);
mutex_unlock(&access->ioas_lock);
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_detach, IOMMUFD);
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id)
{
- struct iommufd_ioas *new_ioas;
- int rc = 0;
+ int rc;
mutex_lock(&access->ioas_lock);
- if (WARN_ON(access->ioas || access->ioas_unpin)) {
+ if (WARN_ON(access->ioas)) {
mutex_unlock(&access->ioas_lock);
return -EINVAL;
}
- new_ioas = iommufd_get_ioas(access->ictx, ioas_id);
- if (IS_ERR(new_ioas)) {
- mutex_unlock(&access->ioas_lock);
- return PTR_ERR(new_ioas);
- }
-
- rc = iopt_add_access(&new_ioas->iopt, access);
- if (rc) {
- mutex_unlock(&access->ioas_lock);
- iommufd_put_object(&new_ioas->obj);
- return rc;
- }
- iommufd_ref_to_users(&new_ioas->obj);
-
- access->ioas = new_ioas;
- access->ioas_unpin = new_ioas;
+ rc = iommufd_access_change_ioas_id(access, ioas_id);
mutex_unlock(&access->ioas_lock);
- return 0;
+ return rc;
}
EXPORT_SYMBOL_NS_GPL(iommufd_access_attach, IOMMUFD);
--
2.41.0