Message-Id: <20210411111228.14386-8-eric.auger@redhat.com>
Date: Sun, 11 Apr 2021 13:12:23 +0200
From: Eric Auger <eric.auger@...hat.com>
To: eric.auger.pro@...il.com, eric.auger@...hat.com,
iommu@...ts.linux-foundation.org, linux-kernel@...r.kernel.org,
kvm@...r.kernel.org, kvmarm@...ts.cs.columbia.edu, will@...nel.org,
maz@...nel.org, robin.murphy@....com, joro@...tes.org,
alex.williamson@...hat.com, tn@...ihalf.com, zhukeqian1@...wei.com
Cc: jacob.jun.pan@...ux.intel.com, yi.l.liu@...el.com,
wangxingang5@...wei.com, jean-philippe@...aro.org,
zhangfei.gao@...aro.org, zhangfei.gao@...il.com,
vivek.gautam@....com, shameerali.kolothum.thodi@...wei.com,
yuzenghui@...wei.com, nicoleotsuka@...il.com,
lushenming@...wei.com, vsethi@...dia.com,
chenxiang66@...ilicon.com, vdumpa@...dia.com,
jiangkunkun@...wei.com
Subject: [PATCH v15 07/12] iommu/smmuv3: Implement cache_invalidate
Implement domain-selective, pasid-selective and page-selective
IOTLB invalidations.
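
For reference, here is a minimal userspace-side sketch of how the three
granularities map onto the invalidation uapi. The structures and flags
come from the series' uapi (include/uapi/linux/iommu.h); the ASID,
address and sizes are made-up example values, and each block simply
overwrites the previous one (a real caller, e.g. VFIO forwarding guest
TLB invalidations, would fill exactly one of them):

/* Illustration only: example values, not part of the kernel change. */
#include <linux/iommu.h>
#include <string.h>

static void fill_inv_info_examples(struct iommu_cache_invalidate_info *inv)
{
	memset(inv, 0, sizeof(*inv));
	inv->version = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
	inv->cache = IOMMU_CACHE_INV_TYPE_IOTLB;

	/* 1) domain-selective: invalidate all stage-1 entries */
	inv->granularity = IOMMU_INV_GRANU_DOMAIN;

	/* 2) pasid-selective, by ASID (archid): all entries for ASID 1 */
	inv->granularity = IOMMU_INV_GRANU_PASID;
	inv->granu.pasid_info.flags = IOMMU_INV_PASID_FLAGS_ARCHID;
	inv->granu.pasid_info.archid = 1;

	/* 3) page-selective: 16 x 4kB leaf entries of ASID 1 at 1GB */
	inv->granularity = IOMMU_INV_GRANU_ADDR;
	inv->granu.addr_info.flags = IOMMU_INV_ADDR_FLAGS_ARCHID |
				     IOMMU_INV_ADDR_FLAGS_LEAF;
	inv->granu.addr_info.archid = 1;
	inv->granu.addr_info.addr = 0x40000000;
	inv->granu.addr_info.granule_size = 0x1000;
	inv->granu.addr_info.nb_granules = 16;
}
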
Signed-off-by: Eric Auger <eric.auger@...hat.com>
---
v14 -> v15:
- remove the redundant arm_smmu_cmdq_issue_sync(smmu)
  in IOMMU_INV_GRANU_ADDR case (Zenghui)
- if RIL is not supported by the host, make sure the granule_size
  that is passed by the userspace is supported or fix it
  (Chenxiang); see the sketch after this changelog
v13 -> v14:
- Add domain invalidation
- do global inval when asid is not provided with addr
  granularity
v7 -> v8:
- ASID based invalidation using iommu_inv_pasid_info
- check ARCHID/PASID flags in addr based invalidation
- use __arm_smmu_tlb_inv_context and __arm_smmu_tlb_inv_range_nosync
v6 -> v7:
- check the uapi version
v3 -> v4:
- adapt to changes in the uapi
- add support for leaf parameter
- do not use arm_smmu_tlb_inv_range_nosync or arm_smmu_tlb_inv_context
  anymore
v2 -> v3:
- replace __arm_smmu_tlb_sync by arm_smmu_cmdq_issue_sync
v1 -> v2:
- properly pass the asid
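
Below is a standalone sketch of the non-RIL granule_size check/fixup
mentioned in the v14 -> v15 notes. It only illustrates the arithmetic:
__ffs is modeled with __builtin_ctzl, and the bitmap/granule values are
examples (a guest asking for 16kB granules on a host whose
pgsize_bitmap contains 4kB/2MB/1GB only):

/* Illustration only, not kernel code. */
#include <stdio.h>

int main(void)
{
	unsigned long pgsize_bitmap = 0x40201000UL; /* 4kB|2MB|1GB, example */
	unsigned long granule_size = 0x4000;	    /* requested: 16kB */
	int tg = __builtin_ctzl(granule_size);	    /* tg = 14 */

	/* reject a granule that is not a power of two */
	if (granule_size & ~(1UL << tg))
		return 1;

	/* not a supported page size: fall back to the smallest one */
	if (!(granule_size & pgsize_bitmap)) {
		tg = __builtin_ctzl(pgsize_bitmap); /* tg = 12 */
		granule_size = 1UL << tg;	    /* 4kB */
	}
	printf("tg=%d granule=%#lx\n", tg, granule_size);
	return 0;
}
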
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 89 +++++++++++++++++++++
1 file changed, 89 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 56a301fbe75a..bfc112cc0d38 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2961,6 +2961,94 @@ static void arm_smmu_detach_pasid_table(struct iommu_domain *domain)
 	mutex_unlock(&smmu_domain->init_mutex);
 }
 
+static int
+arm_smmu_cache_invalidate(struct iommu_domain *domain, struct device *dev,
+			  struct iommu_cache_invalidate_info *inv_info)
+{
+	struct arm_smmu_cmdq_ent cmd = {.opcode = CMDQ_OP_TLBI_NSNH_ALL};
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+	if (smmu_domain->stage != ARM_SMMU_DOMAIN_NESTED)
+		return -EINVAL;
+
+	if (!smmu)
+		return -EINVAL;
+
+	if (inv_info->version != IOMMU_CACHE_INVALIDATE_INFO_VERSION_1)
+		return -EINVAL;
+
+	if (inv_info->cache & IOMMU_CACHE_INV_TYPE_PASID ||
+	    inv_info->cache & IOMMU_CACHE_INV_TYPE_DEV_IOTLB) {
+		return -ENOENT;
+	}
+
+	if (!(inv_info->cache & IOMMU_CACHE_INV_TYPE_IOTLB))
+		return -EINVAL;
+
+	/* IOTLB invalidation */
+
+	switch (inv_info->granularity) {
+	case IOMMU_INV_GRANU_PASID:
+	{
+		struct iommu_inv_pasid_info *info =
+			&inv_info->granu.pasid_info;
+
+		if (info->flags & IOMMU_INV_ADDR_FLAGS_PASID)
+			return -ENOENT;
+		if (!(info->flags & IOMMU_INV_PASID_FLAGS_ARCHID))
+			return -EINVAL;
+
+		__arm_smmu_tlb_inv_context(smmu_domain, info->archid);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_ADDR:
+	{
+		struct iommu_inv_addr_info *info = &inv_info->granu.addr_info;
+		size_t granule_size = info->granule_size;
+		size_t size = info->nb_granules * info->granule_size;
+		bool leaf = info->flags & IOMMU_INV_ADDR_FLAGS_LEAF;
+		int tg;
+
+		if (info->flags & IOMMU_INV_ADDR_FLAGS_PASID)
+			return -ENOENT;
+
+		if (!(info->flags & IOMMU_INV_ADDR_FLAGS_ARCHID))
+			break;
+
+		tg = __ffs(granule_size);
+		if (granule_size & ~(1 << tg))
+			return -EINVAL;
+		/*
+		 * When RIL is not supported, make sure the granule size that is
+		 * passed is supported. In RIL mode, this is enforced in
+		 * __arm_smmu_tlb_inv_range()
+		 */
+		if (!(smmu->features & ARM_SMMU_FEAT_RANGE_INV) &&
+		    !(granule_size & smmu_domain->domain.pgsize_bitmap)) {
+			tg = __ffs(smmu_domain->domain.pgsize_bitmap);
+			granule_size = 1 << tg;
+			size = size >> tg;
+		}
+
+		arm_smmu_tlb_inv_range_domain(info->addr, size,
+					      granule_size, leaf,
+					      info->archid, smmu_domain);
+		return 0;
+	}
+	case IOMMU_INV_GRANU_DOMAIN:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Global S1 invalidation */
+	cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
+	arm_smmu_cmdq_issue_cmd(smmu, &cmd);
+	arm_smmu_cmdq_issue_sync(smmu);
+	return 0;
+}
+
 static bool arm_smmu_dev_has_feature(struct device *dev,
 				     enum iommu_dev_features feat)
 {
@@ -3060,6 +3148,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.put_resv_regions	= generic_iommu_put_resv_regions,
 	.attach_pasid_table	= arm_smmu_attach_pasid_table,
 	.detach_pasid_table	= arm_smmu_detach_pasid_table,
+	.cache_invalidate	= arm_smmu_cache_invalidate,
 	.dev_has_feat		= arm_smmu_dev_has_feature,
 	.dev_feat_enabled	= arm_smmu_dev_feature_enabled,
 	.dev_enable_feat	= arm_smmu_dev_enable_feature,
--
2.26.3