Message-Id: <20240122073903.24406-7-baolu.lu@linux.intel.com>
Date: Mon, 22 Jan 2024 15:39:01 +0800
From: Lu Baolu <baolu.lu@...ux.intel.com>
To: Jason Gunthorpe <jgg@...pe.ca>,
Kevin Tian <kevin.tian@...el.com>,
Joerg Roedel <joro@...tes.org>,
Will Deacon <will@...nel.org>,
Robin Murphy <robin.murphy@....com>,
Jean-Philippe Brucker <jean-philippe@...aro.org>,
Nicolin Chen <nicolinc@...dia.com>,
Yi Liu <yi.l.liu@...el.com>,
Jacob Pan <jacob.jun.pan@...ux.intel.com>,
Joel Granados <j.granados@...sung.com>
Cc: iommu@...ts.linux.dev,
virtualization@...ts.linux-foundation.org,
linux-kernel@...r.kernel.org,
Lu Baolu <baolu.lu@...ux.intel.com>
Subject: [PATCH v3 6/8] iommufd: IOPF-capable hw page table attach/detach/replace

The attach/detach/replace paths for an iopf-capable hw page table should use
the iopf-specific iommu interfaces. The pointer to the iommufd_device is
stored in the private field of the attachment cookie so that it can be easily
retrieved in the fault handling paths. References to the iommufd_device and
iommufd_hw_pagetable objects are held until the cookie is released, which
happens after the hw_pagetable has been detached from the device and all
outstanding iopf's have been responded to. This guarantees that both the
device and the hw_pagetable remain valid until the domain has been detached
and all outstanding faults have been handled.
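
For illustration, here is a minimal sketch of how a fault handling path can
get the iommufd_device back out of the cookie. The helper name is
hypothetical; the field access mirrors the fault.c hunk below:

  /*
   * Hypothetical helper, for illustration only. The attach path stores
   * the iommufd_device in cookie->private and holds references on both
   * the device and the hw_pagetable until the cookie's release callback
   * runs, so the returned pointer stays valid while faults for this
   * attachment are outstanding.
   */
  static struct iommufd_device *
  iommufd_device_from_cookie(struct iopf_attach_cookie *cookie)
  {
          return cookie->private;
  }
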
An iopf-capable hw page table can only be attached to a device that supports
the IOMMU_DEV_FEAT_IOPF feature. On the first attachment of an
iopf-capable hw_pagetable to the device, the IOPF feature is enabled on
the device. Similarly, after the last iopf-capable hwpt is detached from
the device, the IOPF feature is disabled on the device.
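
As a condensed sketch of that rule (mirroring iommufd_fault_iopf_enable() in
the fault.c hunk below), the attach path simply lets the feature enable fail
for devices without IOPF support, and the new idev->iopf_enabled flag makes
the enable a no-op once the feature is already on:

  static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
  {
          int ret;

          /* Already enabled by an earlier iopf-capable attach/replace. */
          if (idev->iopf_enabled)
                  return 0;

          /* Fails (e.g. -ENODEV) when the device cannot do IOPF. */
          ret = iommu_dev_enable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
          if (ret)
                  return ret;

          idev->iopf_enabled = true;
          return 0;
  }
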
The current implementation allows replacement between iopf-capable and
non-iopf-capable hw page tables. This matches the nested translation use
case, where a parent domain is attached by default and can then be
replaced with a nested user domain with iopf support.
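
For illustration, a minimal sketch of how the replace path is selected. The
helper name is hypothetical; the fault_capable checks match the device.c hunk
below:

  /* Hypothetical helper, for illustration only. */
  static bool
  iommufd_replace_needs_iopf_path(struct iommufd_hw_pagetable *old_hwpt,
                                  struct iommufd_hw_pagetable *hwpt)
  {
          /*
           * Any transition with an iopf-capable hwpt on either side goes
           * through iopf_domain_replace(), so the attach cookie and the
           * per-device IOPF enable state stay consistent. A plain
           * non-iopf to non-iopf replacement keeps using
           * iommu_group_replace_domain().
           */
          return old_hwpt->fault_capable || hwpt->fault_capable;
  }
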
Signed-off-by: Lu Baolu <baolu.lu@...ux.intel.com>
---
drivers/iommu/iommufd/iommufd_private.h | 7 ++
drivers/iommu/iommufd/device.c | 15 ++-
drivers/iommu/iommufd/fault.c | 121 ++++++++++++++++++++++++
3 files changed, 140 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 2780bed0c6b1..9844a1289c01 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -398,6 +398,7 @@ struct iommufd_device {
/* always the physical device */
struct device *dev;
bool enforce_cache_coherency;
+ bool iopf_enabled;
/* outstanding faults awaiting response indexed by fault group id */
struct xarray faults;
};
@@ -459,6 +460,12 @@ iommufd_get_fault(struct iommufd_ucmd *ucmd, u32 id)
int iommufd_fault_alloc(struct iommufd_ucmd *ucmd);
void iommufd_fault_destroy(struct iommufd_object *obj);
int iommufd_fault_iopf_handler(struct iopf_group *group);
+int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev);
+void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev);
+int iommufd_fault_domain_replace_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev);
#ifdef CONFIG_IOMMUFD_TEST
int iommufd_test(struct iommufd_ucmd *ucmd);
diff --git a/drivers/iommu/iommufd/device.c b/drivers/iommu/iommufd/device.c
index d70913ee8fdf..c4737e876ebc 100644
--- a/drivers/iommu/iommufd/device.c
+++ b/drivers/iommu/iommufd/device.c
@@ -377,7 +377,10 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
* attachment.
*/
if (list_empty(&idev->igroup->device_list)) {
- rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
+ if (hwpt->fault_capable)
+ rc = iommufd_fault_domain_attach_dev(hwpt, idev);
+ else
+ rc = iommu_attach_group(hwpt->domain, idev->igroup->group);
if (rc)
goto err_unresv;
idev->igroup->hwpt = hwpt;
@@ -403,7 +406,10 @@ iommufd_hw_pagetable_detach(struct iommufd_device *idev)
mutex_lock(&idev->igroup->lock);
list_del(&idev->group_item);
if (list_empty(&idev->igroup->device_list)) {
- iommu_detach_group(hwpt->domain, idev->igroup->group);
+ if (hwpt->fault_capable)
+ iommufd_fault_domain_detach_dev(hwpt, idev);
+ else
+ iommu_detach_group(hwpt->domain, idev->igroup->group);
idev->igroup->hwpt = NULL;
}
if (hwpt_is_paging(hwpt))
@@ -498,7 +504,10 @@ iommufd_device_do_replace(struct iommufd_device *idev,
goto err_unlock;
}
- rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
+ if (old_hwpt->fault_capable || hwpt->fault_capable)
+ rc = iommufd_fault_domain_replace_dev(hwpt, idev);
+ else
+ rc = iommu_group_replace_domain(igroup->group, hwpt->domain);
if (rc)
goto err_unresv;
diff --git a/drivers/iommu/iommufd/fault.c b/drivers/iommu/iommufd/fault.c
index e752d1c49dde..a4a49f3cd4c2 100644
--- a/drivers/iommu/iommufd/fault.c
+++ b/drivers/iommu/iommufd/fault.c
@@ -267,3 +267,124 @@ int iommufd_fault_iopf_handler(struct iopf_group *group)
return 0;
}
+
+static void release_attach_cookie(struct iopf_attach_cookie *cookie)
+{
+ struct iommufd_hw_pagetable *hwpt = cookie->domain->fault_data;
+ struct iommufd_device *idev = cookie->private;
+
+ refcount_dec(&idev->obj.users);
+ refcount_dec(&hwpt->obj.users);
+ kfree(cookie);
+}
+
+static int iommufd_fault_iopf_enable(struct iommufd_device *idev)
+{
+ int ret;
+
+ if (idev->iopf_enabled)
+ return 0;
+
+ ret = iommu_dev_enable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
+ if (ret)
+ return ret;
+
+ idev->iopf_enabled = true;
+
+ return 0;
+}
+
+static void iommufd_fault_iopf_disable(struct iommufd_device *idev)
+{
+ if (!idev->iopf_enabled)
+ return;
+
+ iommu_dev_disable_feature(idev->dev, IOMMU_DEV_FEAT_IOPF);
+ idev->iopf_enabled = false;
+}
+
+int iommufd_fault_domain_attach_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ struct iopf_attach_cookie *cookie;
+ int ret;
+
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return -ENOMEM;
+
+ refcount_inc(&hwpt->obj.users);
+ refcount_inc(&idev->obj.users);
+ cookie->release = release_attach_cookie;
+ cookie->private = idev;
+
+ if (!idev->iopf_enabled) {
+ ret = iommufd_fault_iopf_enable(idev);
+ if (ret)
+ goto out_put_cookie;
+ }
+
+ ret = iopf_domain_attach(hwpt->domain, idev->dev, IOMMU_NO_PASID, cookie);
+ if (ret)
+ goto out_disable_iopf;
+
+ return 0;
+out_disable_iopf:
+ iommufd_fault_iopf_disable(idev);
+out_put_cookie:
+ release_attach_cookie(cookie);
+
+ return ret;
+}
+
+void iommufd_fault_domain_detach_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ iopf_domain_detach(hwpt->domain, idev->dev, IOMMU_NO_PASID);
+ iommufd_fault_iopf_disable(idev);
+}
+
+int iommufd_fault_domain_replace_dev(struct iommufd_hw_pagetable *hwpt,
+ struct iommufd_device *idev)
+{
+ bool iopf_enabled_originally = idev->iopf_enabled;
+ struct iopf_attach_cookie *cookie = NULL;
+ int ret;
+
+ if (hwpt->fault_capable) {
+ cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
+ if (!cookie)
+ return -ENOMEM;
+
+ refcount_inc(&hwpt->obj.users);
+ refcount_inc(&idev->obj.users);
+ cookie->release = release_attach_cookie;
+ cookie->private = idev;
+
+ if (!idev->iopf_enabled) {
+ ret = iommufd_fault_iopf_enable(idev);
+ if (ret) {
+ release_attach_cookie(cookie);
+ return ret;
+ }
+ }
+ }
+
+ ret = iopf_domain_replace(hwpt->domain, idev->dev, IOMMU_NO_PASID, cookie);
+ if (ret)
+ goto out_put_cookie;
+
+ if (iopf_enabled_originally && !hwpt->fault_capable)
+ iommufd_fault_iopf_disable(idev);
+
+ return 0;
+out_put_cookie:
+ if (hwpt->fault_capable)
+ release_attach_cookie(cookie);
+ if (iopf_enabled_originally && !idev->iopf_enabled)
+ iommufd_fault_iopf_enable(idev);
+ else if (!iopf_enabled_originally && idev->iopf_enabled)
+ iommufd_fault_iopf_disable(idev);
+
+ return ret;
+}
--
2.34.1