Message-ID: <85f3361fa07ca6884500ccc917da6f6b84e13a6c.1769044718.git.nicolinc@nvidia.com>
Date: Wed, 21 Jan 2026 17:24:22 -0800
From: Nicolin Chen <nicolinc@...dia.com>
To: <will@...nel.org>, <robin.murphy@....com>, <jgg@...dia.com>
CC: <joro@...tes.org>, <jpb@...nel.org>, <praan@...gle.com>,
<miko.lenczewski@....com>, <linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux.dev>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>
Subject: [PATCH v2 04/10] iommu/arm-smmu-v3: Allocate IOTLB cache tag if no id to reuse
An IOTLB tag is now forwarded from arm_smmu_domain_get_iotlb_tag() to its
final destination (a CD or STE entry).
Thus, arm_smmu_domain_get_iotlb_tag() can safely drop its references to
cd->asid and s2_cfg->vmid in the smmu_domain. Instead, allocate a new
IOTLB cache tag from the ASID xarray or the VMID ida.
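
[Editor's note: a minimal standalone sketch of that allocation pattern.
The example_* names are hypothetical stand-ins; the real code uses the
global arm_smmu_asid_xa and the per-SMMU vmid_map, as in the diff
below.]

	#include <linux/idr.h>
	#include <linux/xarray.h>

	static DEFINE_XARRAY_ALLOC1(example_asid_xa);	/* IDs start at 1 */
	static DEFINE_IDA(example_vmid_map);

	static int example_alloc_tag(void *owner, u32 *id, bool s1_asid,
				     unsigned int id_bits)
	{
		int ret;

		if (s1_asid)	/* xa_alloc() writes the new ID to *id */
			return xa_alloc(&example_asid_xa, id, owner,
					XA_LIMIT(1, (1 << id_bits) - 1),
					GFP_KERNEL);

		/* ida_alloc_range() returns the new ID or -errno */
		ret = ida_alloc_range(&example_vmid_map, 1,
				      (1 << id_bits) - 1, GFP_KERNEL);
		if (ret < 0)
			return ret;
		*id = ret;
		return 0;
	}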
The old asid and vmid in the smmu_domain will be deprecated once VMID is
decoupled from the vSMMU use case.
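
[Editor's note: with this, a tag's lifetime is tracked by its users
refcounter: arm_smmu_master_build_inv() starts it at 1,
arm_smmu_invs_unref() zeroes the caller's copy once the last user goes
away, and the caller frees the ID when it reads zero. Below is a
condensed, hypothetical sketch of that hand-off; the example_* names
are stand-ins, and the real code walks the sorted invalidation
arrays.]

	#include <linux/refcount.h>
	#include <linux/types.h>

	struct example_inv {
		refcount_t users;
		u32 id;
	};

	/* would xa_erase()/ida_free() the ID, as in the diff below */
	static void example_free_id(u32 id)
	{
	}

	static void example_build(struct example_inv *inv)
	{
		/* a freshly built tag starts with one user */
		refcount_set(&inv->users, 1);
	}

	static void example_unref(struct example_inv *live,
				  struct example_inv *copy)
	{
		/* on the last put, notify the caller via its copy */
		if (refcount_dec_and_test(&live->users))
			refcount_set(&copy->users, 0);
	}

	static void example_maybe_free(struct example_inv *copy)
	{
		if (!refcount_read(&copy->users))
			example_free_id(copy->id);
	}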
Suggested-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 38 ++++++++++++++++++---
1 file changed, 33 insertions(+), 5 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1927eb794db9..d10593823353 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1210,6 +1210,8 @@ void arm_smmu_invs_unref(struct arm_smmu_invs *invs,
 			/* KUNIT test doesn't pass in a free_fn */
 			if (free_fn)
 				free_fn(&invs->inv[i]);
+			/* Notify the caller to free the iotlb tag */
+			refcount_set(&to_unref->inv[j].users, 0);
 			invs->num_trashes++;
 		} else {
 			/* item in to_unref is not in invs or already a trash */
@@ -3165,12 +3167,31 @@ int arm_smmu_domain_get_iotlb_tag(struct arm_smmu_domain *smmu_domain,
 	if (!ret || !alloc)
 		return ret;
 
-	if (tag->type == INV_TYPE_S1_ASID)
-		tag->id = smmu_domain->cd.asid;
-	else
-		tag->id = smmu_domain->s2_cfg.vmid;
+	/* Allocate a new IOTLB cache tag (users counter == 0) */
+	lockdep_assert_held(&arm_smmu_asid_lock);
 
-	return 0;
+	if (tag->type == INV_TYPE_S1_ASID) {
+		ret = xa_alloc(&arm_smmu_asid_xa, &tag->id, smmu_domain,
+			       XA_LIMIT(1, (1 << smmu->asid_bits) - 1),
+			       GFP_KERNEL);
+	} else {
+		ret = ida_alloc_range(&smmu->vmid_map, 1,
+				      (1 << smmu->vmid_bits) - 1, GFP_KERNEL);
+		if (ret > 0) {
+			tag->id = ret; /* int is good for 16-bit VMID */
+			ret = 0;
+		}
+	}
+
+	return ret;
+}
+
+static void arm_smmu_iotlb_tag_free(struct arm_smmu_inv *tag)
+{
+	if (tag->type == INV_TYPE_S1_ASID)
+		xa_erase(&arm_smmu_asid_xa, tag->id);
+	else if (tag->type == INV_TYPE_S2_VMID)
+		ida_free(&tag->smmu->vmid_map, tag->id);
 }
 
 static struct arm_smmu_inv *
@@ -3220,6 +3241,9 @@ arm_smmu_master_build_inv(struct arm_smmu_master *master,
 		break;
 	}
 
+	/* Set a default users counter */
+	refcount_set(&cur->users, 1);
+
 	return cur;
 }
 
@@ -3453,6 +3477,8 @@ arm_smmu_install_old_domain_invs(struct arm_smmu_attach_state *state)
 
 	arm_smmu_invs_unref(old_invs, invst->new_invs,
 			    arm_smmu_inv_flush_iotlb_tag);
+	if (!refcount_read(&invst->new_invs->inv[0].users))
+		arm_smmu_iotlb_tag_free(&invst->tag);
 
 	new_invs = arm_smmu_invs_purge(old_invs);
 	if (!new_invs)
@@ -3615,6 +3641,8 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
 err_free_vmaster:
 	kfree(state->vmaster);
 err_unprepare_invs:
+	if (!refcount_read(&state->new_domain_invst.tag.users))
+		arm_smmu_iotlb_tag_free(&state->new_domain_invst.tag);
 	kfree(state->new_domain_invst.new_invs);
 	return ret;
 }
--
2.43.0