[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <1541ea4a9e9c935c2f907c0c5f13a4af2bdc2114.1766088962.git.nicolinc@nvidia.com>
Date: Thu, 18 Dec 2025 12:26:48 -0800
From: Nicolin Chen <nicolinc@...dia.com>
To: <will@...nel.org>, <robin.murphy@....com>, <jgg@...dia.com>
CC: <joro@...tes.org>, <jpb@...nel.org>, <praan@...gle.com>,
<miko.lenczewski@....com>, <linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux.dev>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>
Subject: [PATCH v1 2/9] iommu/arm-smmu-v3: Add alloc_id/free_id functions to arm_smmu_invs
An iotlb tag (ASID/VMID) will not be used:
1) Before being installed to CD/STE during a device attachment
2) After being removed from CD/STE during a device detachment
Both (1) and (2) exactly align with the lifecycle of the domain->invs. So,
it becomes very natural to use domain->invs to allocate/free an ASID/VMID.
Add a pair of function ops in struct arm_smmu_invs to manage iotlb tags.
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 6 ++
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 93 +++++++++++++++++++++
2 files changed, 99 insertions(+)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 0a5aead300b6..b275673c03ce 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -691,6 +691,9 @@ static inline bool arm_smmu_inv_is_ats(const struct arm_smmu_inv *inv)
* @rwlock: optional rwlock to fench ATS operations
* @has_ats: flag if the array contains an INV_TYPE_ATS or INV_TYPE_ATS_FULL
* @rcu: rcu head for kfree_rcu()
+ * @smmu_domain: owner domain of the array
+ * @alloc_id: a callback to allocate a new iotlb tag
+ * @free_id: a callback to free an iotlb tag when its user counter reaches 0
* @inv: flexible invalidation array
*
* The arm_smmu_invs is an RCU data structure. During a ->attach_dev callback,
@@ -720,6 +723,9 @@ struct arm_smmu_invs {
rwlock_t rwlock;
bool has_ats;
struct rcu_head rcu;
+ struct arm_smmu_domain *smmu_domain;
+ int (*alloc_id)(struct arm_smmu_inv *inv, void *data);
+ void (*free_id)(struct arm_smmu_inv *inv, bool flush);
struct arm_smmu_inv inv[] __counted_by(max_invs);
};
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index bf0df16cec45..8a2b7064d29b 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -3117,6 +3117,94 @@ static void arm_smmu_disable_iopf(struct arm_smmu_master *master,
iopf_queue_remove_device(master->smmu->evtq.iopf, master->dev);
}
+/*
+ * When an array entry's users count reaches zero, it means the ASID/VMID is no
+ * longer being invalidated by map/unmap and must be cleaned. The rule is that
+ * all ASIDs/VMIDs not in an invalidation array are left cleared in the IOTLB.
+ */
+static void arm_smmu_inv_free_asid(struct arm_smmu_inv *inv, bool flush)
+{
+ lockdep_assert_held(&arm_smmu_asid_lock);
+
+ if (inv->type != INV_TYPE_S1_ASID)
+ return;
+ if (refcount_read(&inv->users))
+ return;
+
+ if (flush) {
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = inv->nsize_opcode,
+ .tlbi.asid = inv->id,
+ };
+
+ arm_smmu_cmdq_issue_cmd_with_sync(inv->smmu, &cmd);
+ }
+
+ /* Lastly, free the ASID as the last user detached */
+ xa_erase(&arm_smmu_asid_xa, inv->id);
+}
+
+static void arm_smmu_inv_free_vmid(struct arm_smmu_inv *inv, bool flush)
+{
+ lockdep_assert_held(&arm_smmu_asid_lock);
+
+ /* Note S2_VMID using nsize_opcode covers S2_VMID_S1_CLEAR already */
+ if (inv->type != INV_TYPE_S2_VMID)
+ return;
+ if (refcount_read(&inv->users))
+ return;
+
+ if (flush) {
+ struct arm_smmu_cmdq_ent cmd = {
+ .opcode = inv->nsize_opcode,
+ .tlbi.vmid = inv->id,
+ };
+
+ arm_smmu_cmdq_issue_cmd_with_sync(inv->smmu, &cmd);
+ }
+
+ /* Lastly, free the VMID as the last user detached */
+ ida_free(&inv->smmu->vmid_map, inv->id);
+}
+
+static int arm_smmu_inv_alloc_asid(struct arm_smmu_inv *inv, void *data)
+{
+ struct arm_smmu_domain *smmu_domain = data;
+ struct arm_smmu_device *smmu = inv->smmu;
+ u32 asid;
+ int ret;
+
+ lockdep_assert_held(&arm_smmu_asid_lock);
+
+ /* Allocate a new iotlb_tag.id */
+ WARN_ON(inv->type != INV_TYPE_S1_ASID);
+
+ ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
+ XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+ if (ret)
+ return ret;
+ inv->id = asid;
+ return 0;
+}
+
+static int arm_smmu_inv_alloc_vmid(struct arm_smmu_inv *inv, void *data)
+{
+ struct arm_smmu_device *smmu = inv->smmu;
+ int vmid;
+
+ lockdep_assert_held(&arm_smmu_asid_lock);
+
+ WARN_ON(inv->type != INV_TYPE_S2_VMID);
+
+ /* Reserve VMID 0 for stage-2 bypass STEs */
+ vmid = ida_alloc_range(&smmu->vmid_map, 1, (1 << smmu->vmid_bits) - 1,
+ GFP_KERNEL);
+ if (vmid < 0)
+ return vmid;
+ inv->id = vmid;
+ return 0;
+}
+
static struct arm_smmu_inv *
arm_smmu_master_build_inv(struct arm_smmu_master *master,
enum arm_smmu_inv_type type, u32 id, ioasid_t ssid,
@@ -3191,12 +3279,17 @@ arm_smmu_master_build_invs(struct arm_smmu_master *master, bool ats_enabled,
smmu_domain->cd.asid,
IOMMU_NO_PASID, pgsize))
return NULL;
+ master->build_invs->alloc_id = arm_smmu_inv_alloc_asid;
+ master->build_invs->free_id = arm_smmu_inv_free_asid;
+ master->build_invs->smmu_domain = smmu_domain;
break;
case ARM_SMMU_DOMAIN_S2:
if (!arm_smmu_master_build_inv(master, INV_TYPE_S2_VMID,
smmu_domain->s2_cfg.vmid,
IOMMU_NO_PASID, pgsize))
return NULL;
+ master->build_invs->alloc_id = arm_smmu_inv_alloc_vmid;
+ master->build_invs->free_id = arm_smmu_inv_free_vmid;
break;
default:
WARN_ON(true);
--
2.43.0
Powered by blists - more mailing lists