Message-ID: <35c5fa5926be45bda82f5fc87545cd3180ad4c9c.1749882255.git.nicolinc@nvidia.com>
Date: Fri, 13 Jun 2025 23:35:19 -0700
From: Nicolin Chen <nicolinc@...dia.com>
To: <jgg@...dia.com>, <kevin.tian@...el.com>
CC: <will@...nel.org>, <robin.murphy@....com>, <joro@...tes.org>,
<praan@...gle.com>, <yi.l.liu@...el.com>, <peterz@...radead.org>,
<jsnitsel@...hat.com>, <linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux.dev>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>, <baolu.lu@...ux.intel.com>
Subject: [PATCH v2 07/14] iommufd/viommu: Support get_viommu_size and viommu_init ops

To ease the for-driver iommufd APIs, get_viommu_size and viommu_init ops
are introduced to replace the viommu_alloc op.

Let the new viommu_init pathway coexist with the old viommu_alloc one.
Since the viommu_alloc op and its pathway will soon be deprecated, try to
minimize the code difference between them by adding a tentative jump tag.

Note that this now fails a !viommu->ops case with a WARN_ON_ONCE, since a
vIOMMU is expected to support an alloc_domain_nested op for now, or some
other viommu op in the foreseeable future. This WARN_ON_ONCE can be
lifted if, some day, there is a use case that wants !viommu->ops.

(An illustrative driver-side sketch of the two new ops, for reviewers,
follows the "---" line below.)

Suggested-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
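A reviewer note (not for the commit log): below is a rough sketch of the
driver-facing half of the new contract. The signatures are inferred from
the call sites in this patch; struct mock_viommu, the mock_* names and
IOMMU_VIOMMU_TYPE_MOCK are made-up placeholders, so exact types and
names in a real driver may differ.

/*
 * Illustrative sketch only: a driver embeds the core structure at the
 * start of its own vIOMMU object, which is why a viommu_size smaller
 * than sizeof(struct iommufd_viommu) can only be a driver bug.
 */
struct mock_viommu {
	struct iommufd_viommu core;	/* must be the first member */
	u32 vmid;			/* placeholder driver state */
};

static size_t mock_get_viommu_size(struct device *dev,
				   enum iommu_viommu_type viommu_type)
{
	/* Returning 0 makes the core fail the ioctl with -EOPNOTSUPP */
	if (viommu_type != IOMMU_VIOMMU_TYPE_MOCK)
		return 0;
	return sizeof(struct mock_viommu);
}
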
drivers/iommu/iommufd/viommu.c | 42 +++++++++++++++++++++++++++++++---
1 file changed, 39 insertions(+), 3 deletions(-)
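
And the init half of the same sketch, which is where the new
WARN_ON_ONCE on !viommu->ops comes from: viommu_init is the point where
a driver is expected to fill viommu->ops with an iommufd_viommu_ops
providing alloc_domain_nested (and friends). Again purely illustrative;
the mock_* names are placeholders and the ops member list is
abbreviated.

static const struct iommufd_viommu_ops mock_viommu_ops = {
	/* mock_viommu_alloc_domain_nested is assumed, not shown here */
	.alloc_domain_nested = mock_viommu_alloc_domain_nested,
};

static int mock_viommu_init(struct iommufd_viommu *viommu,
			    struct iommu_domain *parent)
{
	/*
	 * The core has already allocated viommu_size bytes through
	 * _iommufd_object_alloc(), so driver state is reachable via
	 * container_of(viommu, struct mock_viommu, core).
	 *
	 * Leaving viommu->ops NULL here would trip the WARN_ON_ONCE
	 * added by this patch and fail the ioctl with -EOPNOTSUPP.
	 */
	viommu->ops = &mock_viommu_ops;
	return 0;
}

The old viommu_alloc pathway is kept working in parallel, so existing
drivers continue to function until they are converted to this pair of
ops.
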
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 01df2b985f02..27a39f524840 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -21,6 +21,7 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
struct iommufd_viommu *viommu;
struct iommufd_device *idev;
const struct iommu_ops *ops;
+ size_t viommu_size;
int rc;
if (cmd->flags || cmd->type == IOMMU_VIOMMU_TYPE_DEFAULT)
@@ -31,11 +32,29 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
return PTR_ERR(idev);
ops = dev_iommu_ops(idev->dev);
- if (!ops->viommu_alloc) {
+ if (!ops->get_viommu_size || !ops->viommu_init) {
+ if (ops->viommu_alloc)
+ goto get_hwpt_paging;
+ rc = -EOPNOTSUPP;
+ goto out_put_idev;
+ }
+
+ viommu_size = ops->get_viommu_size(idev->dev, cmd->type);
+ if (!viommu_size) {
+ rc = -EOPNOTSUPP;
+ goto out_put_idev;
+ }
+
+ /*
+ * It is a driver bug for providing a viommu_size smaller than the core
+ * vIOMMU structure size
+ */
+ if (WARN_ON_ONCE(viommu_size < sizeof(*viommu))) {
rc = -EOPNOTSUPP;
goto out_put_idev;
}
+get_hwpt_paging:
hwpt_paging = iommufd_get_hwpt_paging(ucmd, cmd->hwpt_id);
if (IS_ERR(hwpt_paging)) {
rc = PTR_ERR(hwpt_paging);
@@ -47,8 +66,13 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
goto out_put_hwpt;
}
- viommu = ops->viommu_alloc(idev->dev, hwpt_paging->common.domain,
- ucmd->ictx, cmd->type);
+ if (ops->viommu_alloc)
+ viommu = ops->viommu_alloc(idev->dev,
+ hwpt_paging->common.domain,
+ ucmd->ictx, cmd->type);
+ else
+ viommu = (struct iommufd_viommu *)_iommufd_object_alloc(
+ ucmd->ictx, viommu_size, IOMMUFD_OBJ_VIOMMU);
if (IS_ERR(viommu)) {
rc = PTR_ERR(viommu);
goto out_put_hwpt;
@@ -68,6 +92,18 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
*/
viommu->iommu_dev = __iommu_get_iommu_dev(idev->dev);
+ if (!ops->viommu_alloc) {
+ rc = ops->viommu_init(viommu, hwpt_paging->common.domain);
+ if (rc)
+ goto out_abort;
+ }
+
+ /* It is a driver bug that viommu->ops isn't filled */
+ if (WARN_ON_ONCE(!viommu->ops)) {
+ rc = -EOPNOTSUPP;
+ goto out_abort;
+ }
+
cmd->out_viommu_id = viommu->obj.id;
rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
if (rc)
--
2.43.0