[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <729dfd0808f85d88fd3ef8bcea0168cc1d2c0d59.1723061378.git.nicolinc@nvidia.com>
Date: Wed, 7 Aug 2024 13:10:56 -0700
From: Nicolin Chen <nicolinc@...dia.com>
To: <jgg@...dia.com>, <kevin.tian@...el.com>, <will@...nel.org>
CC: <joro@...tes.org>, <suravee.suthikulpanit@....com>,
<robin.murphy@....com>, <dwmw2@...radead.org>, <baolu.lu@...ux.intel.com>,
<shuah@...nel.org>, <linux-kernel@...r.kernel.org>, <iommu@...ts.linux.dev>,
<linux-arm-kernel@...ts.infradead.org>, <linux-kselftest@...r.kernel.org>
Subject: [PATCH v1 15/16] iommu/arm-smmu-v3: Add viommu cache invalidation support
Add an arm_smmu_viommu_cache_invalidate() function for user space to issue
cache invalidation commands via viommu.
The viommu invalidation takes the same native format — a 128-bit command —
as the hwpt invalidation. Thus, reuse the same driver data structure, but
make it wider to accept CMDQ_OP_ATC_INV and CMDQ_OP_CFGI_CD{_ALL}.
Scan the commands against the supported list and fix the VMIDs and SIDs.
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 54 +++++++++++++++++++--
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 +
include/uapi/linux/iommufd.h | 20 ++++++++
3 files changed, 70 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index ec76377d505c..be4f849f1a48 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3219,15 +3219,32 @@ static void arm_smmu_domain_nested_free(struct iommu_domain *domain)
kfree(container_of(domain, struct arm_smmu_nested_domain, domain));
}
+static int arm_smmu_convert_viommu_vdev_id(struct iommufd_viommu *viommu,
+ u32 vdev_id, u32 *sid)
+{
+ struct arm_smmu_master *master;
+ struct device *dev;
+
+ dev = iommufd_viommu_find_device(viommu, vdev_id);
+ if (!dev)
+ return -EIO;
+ master = dev_iommu_priv_get(dev);
+
+ if (sid)
+ *sid = master->streams[0].id;
+ return 0;
+}
+
/*
* Convert, in place, the raw invalidation command into an internal format that
* can be passed to arm_smmu_cmdq_issue_cmdlist(). Internally commands are
* stored in CPU endian.
*
- * Enforce the VMID on the command.
+ * Enforce the VMID or the SID on the command.
*/
static int
arm_smmu_convert_user_cmd(struct arm_smmu_domain *s2_parent,
+ struct iommufd_viommu *viommu,
struct iommu_hwpt_arm_smmuv3_invalidate *cmd)
{
u16 vmid = s2_parent->s2_cfg.vmid;
@@ -3249,6 +3266,19 @@ arm_smmu_convert_user_cmd(struct arm_smmu_domain *s2_parent,
cmd->cmd[0] &= ~CMDQ_TLBI_0_VMID;
cmd->cmd[0] |= FIELD_PREP(CMDQ_TLBI_0_VMID, vmid);
break;
+ case CMDQ_OP_ATC_INV:
+ case CMDQ_OP_CFGI_CD:
+ case CMDQ_OP_CFGI_CD_ALL:
+ if (viommu) {
+ u32 sid, vsid = FIELD_GET(CMDQ_CFGI_0_SID, cmd->cmd[0]);
+
+ if (arm_smmu_convert_viommu_vdev_id(viommu, vsid, &sid))
+ return -EIO;
+ cmd->cmd[0] &= ~CMDQ_CFGI_0_SID;
+ cmd->cmd[0] |= FIELD_PREP(CMDQ_CFGI_0_SID, sid);
+ break;
+ }
+ fallthrough;
default:
return -EIO;
}
@@ -3256,8 +3286,11 @@ arm_smmu_convert_user_cmd(struct arm_smmu_domain *s2_parent,
}
static int __arm_smmu_cache_invalidate_user(struct arm_smmu_domain *s2_parent,
+ struct iommufd_viommu *viommu,
struct iommu_user_data_array *array)
{
+ unsigned int type = viommu ? IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3 :
+ IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3;
struct arm_smmu_device *smmu = s2_parent->smmu;
struct iommu_hwpt_arm_smmuv3_invalidate *last_batch;
struct iommu_hwpt_arm_smmuv3_invalidate *cmds;
@@ -3273,14 +3306,13 @@ static int __arm_smmu_cache_invalidate_user(struct arm_smmu_domain *s2_parent,
static_assert(sizeof(*cmds) == 2 * sizeof(u64));
ret = iommu_copy_struct_from_full_user_array(
- cmds, sizeof(*cmds), array,
- IOMMU_HWPT_INVALIDATE_DATA_ARM_SMMUV3);
+ cmds, sizeof(*cmds), array, type);
if (ret)
goto out;
last_batch = cmds;
while (cur != end) {
- ret = arm_smmu_convert_user_cmd(s2_parent, cur);
+ ret = arm_smmu_convert_user_cmd(s2_parent, viommu, cur);
if (ret)
goto out;
@@ -3310,7 +3342,7 @@ static int arm_smmu_cache_invalidate_user(struct iommu_domain *domain,
container_of(domain, struct arm_smmu_nested_domain, domain);
return __arm_smmu_cache_invalidate_user(
- nested_domain->s2_parent, array);
+ nested_domain->s2_parent, NULL, array);
}
static struct iommu_domain *
@@ -3812,6 +3844,15 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0;
}
+static int arm_smmu_viommu_cache_invalidate(struct iommufd_viommu *viommu,
+ struct iommu_user_data_array *array)
+{
+ struct iommu_domain *domain = iommufd_viommu_to_parent_domain(viommu);
+
+ return __arm_smmu_cache_invalidate_user(
+ to_smmu_domain(domain), viommu, array);
+}
+
static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain,
@@ -3842,6 +3883,9 @@ static struct iommu_ops arm_smmu_ops = {
.iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys,
.free = arm_smmu_domain_free_paging,
+ .default_viommu_ops = &(const struct iommufd_viommu_ops) {
+ .cache_invalidate = arm_smmu_viommu_cache_invalidate,
+ }
}
};
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 3f7442f0167e..a3fb08e0a195 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -10,6 +10,7 @@
#include <linux/bitfield.h>
#include <linux/iommu.h>
+#include <linux/iommufd.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/sizes.h>
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 998b3f2cd2b5..416b9a18e6bb 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -956,6 +956,26 @@ enum iommu_viommu_invalidate_data_type {
IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3,
};
+/**
+ * struct iommu_viommu_arm_smmuv3_invalidate - ARM SMMUv3 cache invalidation
+ * (IOMMU_VIOMMU_INVALIDATE_DATA_ARM_SMMUV3)
+ * @cmd: 128-bit cache invalidation command that runs in SMMU CMDQ.
+ * Must be little-endian.
+ *
+ * Supported command list:
+ * CMDQ_OP_TLBI_NSNH_ALL
+ * CMDQ_OP_TLBI_NH_VA
+ * CMDQ_OP_TLBI_NH_VAA
+ * CMDQ_OP_TLBI_NH_ALL
+ * CMDQ_OP_TLBI_NH_ASID
+ * CMDQ_OP_ATC_INV
+ * CMDQ_OP_CFGI_CD
+ * CMDQ_OP_CFGI_CD_ALL
+ *
+ * -EIO will be returned if the command is not supported.
+ */
+#define iommu_viommu_arm_smmuv3_invalidate iommu_hwpt_arm_smmuv3_invalidate
+
/**
* struct iommu_viommu_invalidate - ioctl(IOMMU_VIOMMU_INVALIDATE)
* @size: sizeof(struct iommu_viommu_invalidate)
--
2.43.0
Powered by blists - more mailing lists