Message-ID: <aa6f1ef7f9fd456e62f422a4ad0057af30b11cbe.1724777091.git.nicolinc@nvidia.com>
Date: Tue, 27 Aug 2024 10:02:05 -0700
From: Nicolin Chen <nicolinc@...dia.com>
To: <jgg@...dia.com>, <kevin.tian@...el.com>, <will@...nel.org>
CC: <joro@...tes.org>, <suravee.suthikulpanit@....com>,
	<robin.murphy@....com>, <dwmw2@...radead.org>, <baolu.lu@...ux.intel.com>,
	<shuah@...nel.org>, <linux-kernel@...r.kernel.org>, <iommu@...ts.linux.dev>,
	<linux-arm-kernel@...ts.infradead.org>, <linux-kselftest@...r.kernel.org>,
	<eric.auger@...hat.com>, <jean-philippe@...aro.org>, <mdf@...nel.org>,
	<mshavit@...gle.com>, <shameerali.kolothum.thodi@...wei.com>,
	<smostafa@...gle.com>, <yi.l.liu@...el.com>
Subject: [PATCH v1 03/10] iommufd: Add IOMMUFD_OBJ_EVENT_VIRQ and IOMMUFD_CMD_VIRQ_ALLOC

Allow a VIOMMU object to allocate VIRQ events. A VIOMMU may have multiple
VIRQ events, but each must be of a distinct type.
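
For reference, a rough userspace sketch of the new ioctl. This is only
illustrative: it assumes a viommu_id previously returned by
IOMMU_VIOMMU_ALLOC and a hypothetical non-NONE IRQ type (this patch only
defines IOMMU_VIRQ_TYPE_NONE); the record layout read back from the fd is
type-specific and not defined here.

	#include <sys/ioctl.h>
	#include <linux/iommufd.h>

	/* Hypothetical type; a real value would come from a later patch */
	#define MY_VIRQ_TYPE 1

	static int virq_fd_alloc(int iommufd, __u32 viommu_id)
	{
		struct iommu_virq_alloc cmd = {
			.size = sizeof(cmd),
			.flags = 0,
			.viommu_id = viommu_id,
			.type = MY_VIRQ_TYPE,
		};

		if (ioctl(iommufd, IOMMU_VIRQ_ALLOC, &cmd))
			return -1;
		/*
		 * cmd.out_virq_id names the new object; read()s on
		 * cmd.out_virq_fd return whole type-specific records.
		 */
		return cmd.out_virq_fd;
	}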

Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
 drivers/iommu/iommufd/event.c           | 136 ++++++++++++++++++++++++
 drivers/iommu/iommufd/iommufd_private.h |  54 ++++++++++
 drivers/iommu/iommufd/main.c            |   5 +
 drivers/iommu/iommufd/viommu.c          |   2 +
 include/uapi/linux/iommufd.h            |  32 ++++++
 5 files changed, 229 insertions(+)

diff --git a/drivers/iommu/iommufd/event.c b/drivers/iommu/iommufd/event.c
index 8fea142e1ac2..f10827ce9cbd 100644
--- a/drivers/iommu/iommufd/event.c
+++ b/drivers/iommu/iommufd/event.c
@@ -339,6 +339,67 @@ static const struct iommufd_event_ops iommufd_event_iopf_ops = {
 	.write = &iommufd_event_iopf_fops_write,
 };
 
+/* IOMMUFD_OBJ_EVENT_VIRQ Functions */
+
+void iommufd_event_virq_destroy(struct iommufd_object *obj)
+{
+	struct iommufd_event *event =
+		container_of(obj, struct iommufd_event, obj);
+	struct iommufd_event_virq *event_virq = to_event_virq(event);
+	struct iommufd_viommu_irq *virq, *next;
+
+	/*
+	 * The iommufd object's reference count is zero at this point,
+	 * so no other thread can still be reading from or adding to
+	 * the deliver list. Acquiring the mutex here is therefore
+	 * unnecessary.
+	 */
+	list_for_each_entry_safe(virq, next, &event->deliver, node) {
+		list_del(&virq->node);
+		kfree(virq);
+	}
+	destroy_workqueue(event_virq->irq_wq);
+	list_del(&event_virq->node);
+	refcount_dec(&event_virq->viommu->obj.users);
+}
+
+static ssize_t
+iommufd_event_virq_fops_read(struct iommufd_event *event,
+			     char __user *buf, size_t count, loff_t *ppos)
+{
+	size_t done = 0;
+	int rc = 0;
+
+	if (*ppos)
+		return -ESPIPE;
+
+	mutex_lock(&event->mutex);
+	while (!list_empty(&event->deliver) && count > done) {
+		struct iommufd_viommu_irq *virq =
+			list_first_entry(&event->deliver,
+					 struct iommufd_viommu_irq, node);
+		void *virq_data = (void *)virq + sizeof(*virq);
+
+		if (virq->irq_len > count - done)
+			break;
+
+		if (copy_to_user(buf + done, virq_data, virq->irq_len)) {
+			rc = -EFAULT;
+			break;
+		}
+		done += virq->irq_len;
+		list_del(&virq->node);
+		kfree(virq);
+	}
+	mutex_unlock(&event->mutex);
+
+	return done == 0 ? rc : done;
+}
+
+static const struct iommufd_event_ops iommufd_event_virq_ops = {
+	.read = &iommufd_event_virq_fops_read,
+};
+
 /* Common Event Functions */
 
 static ssize_t iommufd_event_fops_read(struct file *filep, char __user *buf,
@@ -475,3 +536,78 @@ int iommufd_event_iopf_alloc(struct iommufd_ucmd *ucmd)
 
 	return rc;
 }
+
+int iommufd_event_virq_alloc(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_virq_alloc *cmd = ucmd->cmd;
+	struct iommufd_event_virq *event_virq;
+	struct workqueue_struct *irq_wq;
+	struct iommufd_viommu *viommu;
+	int fdno;
+	int rc;
+
+	if (cmd->flags)
+		return -EOPNOTSUPP;
+	if (cmd->type == IOMMU_VIRQ_TYPE_NONE)
+		return -EINVAL;
+
+	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
+	if (IS_ERR(viommu))
+		return PTR_ERR(viommu);
+	down_write(&viommu->virqs_rwsem);
+
+	if (iommufd_viommu_find_event_virq(viommu, cmd->type)) {
+		rc = -EEXIST;
+		goto out_unlock_virqs;
+	}
+
+	event_virq = __iommufd_object_alloc(ucmd->ictx, event_virq,
+					    IOMMUFD_OBJ_EVENT_VIRQ, common.obj);
+	if (IS_ERR(event_virq)) {
+		rc = PTR_ERR(event_virq);
+		goto out_unlock_virqs;
+	}
+
+	irq_wq = alloc_workqueue("viommu_irq/%d", WQ_UNBOUND, 0,
+				 event_virq->common.obj.id);
+	if (!irq_wq) {
+		rc = -ENOMEM;
+		goto out_abort;
+	}
+
+	rc = iommufd_event_init(&event_virq->common, "[iommufd-viommu-irq]",
+				ucmd->ictx, &fdno, &iommufd_event_virq_ops);
+	if (rc)
+		goto out_irq_wq;
+
+	event_virq->irq_wq = irq_wq;
+	event_virq->viommu = viommu;
+	event_virq->type = cmd->type;
+	cmd->out_virq_id = event_virq->common.obj.id;
+	cmd->out_virq_fd = fdno;
+
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	if (rc)
+		goto out_put_fdno;
+	iommufd_object_finalize(ucmd->ictx, &event_virq->common.obj);
+
+	fd_install(fdno, event_virq->common.filep);
+
+	list_add_tail(&event_virq->node, &viommu->virqs);
+	refcount_inc(&viommu->obj.users);
+
+	goto out_unlock_virqs;
+out_put_fdno:
+	put_unused_fd(fdno);
+	fput(event_virq->common.filep);
+	iommufd_event_deinit(&event_virq->common);
+out_irq_wq:
+	destroy_workqueue(irq_wq);
+out_abort:
+	iommufd_object_abort_and_destroy(ucmd->ictx, &event_virq->common.obj);
+out_unlock_virqs:
+	up_write(&viommu->virqs_rwsem);
+	iommufd_put_object(ucmd->ictx, &viommu->obj);
+
+	return rc;
+}
diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index c22d72c981c7..be1f1813672e 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -133,6 +133,7 @@ enum iommufd_object_type {
 	IOMMUFD_OBJ_IOAS,
 	IOMMUFD_OBJ_ACCESS,
 	IOMMUFD_OBJ_EVENT_IOPF,
+	IOMMUFD_OBJ_EVENT_VIRQ,
 	IOMMUFD_OBJ_VIOMMU,
 #ifdef CONFIG_IOMMUFD_TEST
 	IOMMUFD_OBJ_SELFTEST,
@@ -567,6 +568,43 @@ static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
 	return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
 }
 
+struct iommufd_event_virq {
+	struct iommufd_event common;
+	struct iommufd_viommu *viommu;
+	struct workqueue_struct *irq_wq;
+	struct list_head node;
+
+	unsigned int type;
+};
+
+static inline struct iommufd_event_virq *
+to_event_virq(struct iommufd_event *event)
+{
+	return container_of(event, struct iommufd_event_virq, common);
+}
+
+static inline struct iommufd_event_virq *
+iommufd_get_event_virq(struct iommufd_ucmd *ucmd, u32 id)
+{
+	return container_of(iommufd_get_object(ucmd->ictx, id,
+					       IOMMUFD_OBJ_EVENT_VIRQ),
+			    struct iommufd_event_virq, common.obj);
+}
+
+int iommufd_event_virq_alloc(struct iommufd_ucmd *ucmd);
+void iommufd_event_virq_destroy(struct iommufd_object *obj);
+
+struct iommufd_viommu_irq {
+	struct iommufd_event_virq *event_virq;
+	struct list_head node;
+	ssize_t irq_len;
+};
+
+static inline int iommufd_event_virq_handler(struct iommufd_viommu_irq *virq)
+{
+	return iommufd_event_notify(&virq->event_virq->common, &virq->node);
+}
+
 struct iommufd_viommu {
 	struct iommufd_object obj;
 	struct iommufd_ctx *ictx;
@@ -575,6 +613,8 @@ struct iommufd_viommu {
 	/* The locking order is vdev_ids_rwsem -> igroup::lock */
 	struct rw_semaphore vdev_ids_rwsem;
 	struct xarray vdev_ids;
+	struct rw_semaphore virqs_rwsem;
+	struct list_head virqs;
 
 	const struct iommufd_viommu_ops *ops;
 
@@ -595,6 +635,20 @@ iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
 			    struct iommufd_viommu, obj);
 }
 
+static inline struct iommufd_event_virq *
+iommufd_viommu_find_event_virq(struct iommufd_viommu *viommu, u32 type)
+{
+	struct iommufd_event_virq *event_virq, *next;
+
+	lockdep_assert_held(&viommu->virqs_rwsem);
+
+	list_for_each_entry_safe(event_virq, next, &viommu->virqs, node) {
+		if (event_virq->type == type)
+			return event_virq;
+	}
+	return NULL;
+}
+
 int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
 void iommufd_viommu_destroy(struct iommufd_object *obj);
 int iommufd_viommu_set_vdev_id(struct iommufd_ucmd *ucmd);
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 015f492afab1..22381ba031b5 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -361,6 +361,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
 	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_event_iopf_alloc,
 		 struct iommu_fault_alloc, out_fault_fd),
+	IOCTL_OP(IOMMU_VIRQ_ALLOC, iommufd_event_virq_alloc,
+		 struct iommu_virq_alloc, out_virq_fd),
 	IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
 		 __reserved),
 	IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
@@ -528,6 +530,9 @@ static const struct iommufd_object_ops iommufd_object_ops[] = {
 	[IOMMUFD_OBJ_EVENT_IOPF] = {
 		.destroy = iommufd_event_iopf_destroy,
 	},
+	[IOMMUFD_OBJ_EVENT_VIRQ] = {
+		.destroy = iommufd_event_virq_destroy,
+	},
 	[IOMMUFD_OBJ_VIOMMU] = {
 		.destroy = iommufd_viommu_destroy,
 	},
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index a4ba8bff4a26..9adc9c62ada9 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -67,6 +67,8 @@ int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
 
 	xa_init(&viommu->vdev_ids);
 	init_rwsem(&viommu->vdev_ids_rwsem);
+	INIT_LIST_HEAD(&viommu->virqs);
+	init_rwsem(&viommu->virqs_rwsem);
 
 	refcount_inc(&viommu->hwpt->common.obj.users);
 
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 0d973486b604..f9ec07efed8d 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -54,6 +54,7 @@ enum {
 	IOMMUFD_CMD_VIOMMU_ALLOC = 0x8f,
 	IOMMUFD_CMD_VIOMMU_SET_VDEV_ID = 0x90,
 	IOMMUFD_CMD_VIOMMU_UNSET_VDEV_ID = 0x91,
+	IOMMUFD_CMD_VIRQ_ALLOC = 0x92,
 };
 
 /**
@@ -951,4 +952,35 @@ struct iommu_viommu_unset_vdev_id {
 	__aligned_u64 vdev_id;
 };
 #define IOMMU_VIOMMU_UNSET_VDEV_ID _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_UNSET_VDEV_ID)
+
+/**
+ * enum iommu_virq_type - Virtual IRQ Type
+ * @IOMMU_VIRQ_TYPE_NONE: INVALID type
+ */
+enum iommu_virq_type {
+	IOMMU_VIRQ_TYPE_NONE = 0,
+};
+
+/**
+ * struct iommu_virq_alloc - ioctl(IOMMU_VIRQ_ALLOC)
+ * @size: sizeof(struct iommu_virq_alloc)
+ * @flags: Must be 0
+ * @viommu_id: Virtual IOMMU ID to associate the virtual IRQ with
+ * @type: Type of the virtual IRQ. Must be defined in enum iommu_virq_type
+ * @out_virq_id: The ID of the new VIRQ
+ * @out_virq_fd: The fd of the new VIRQ
+ *
+ * Explicitly allocate a virtual IRQ handler for a VIOMMU. A VIOMMU can have
+ * multiple FDs, one for each distinct @type, but no more than one FD per
+ * @type.
+ */
+struct iommu_virq_alloc {
+	__u32 size;
+	__u32 flags;
+	__u32 viommu_id;
+	__u32 type;
+	__u32 out_virq_id;
+	__u32 out_virq_fd;
+};
+#define IOMMU_VIRQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIRQ_ALLOC)
 #endif
-- 
2.43.0

