Message-ID: <54c98cd7092a406d84df0abc9a6cb013aadb619a.1766088962.git.nicolinc@nvidia.com>
Date: Thu, 18 Dec 2025 12:26:50 -0800
From: Nicolin Chen <nicolinc@...dia.com>
To: <will@...nel.org>, <robin.murphy@....com>, <jgg@...dia.com>
CC: <joro@...tes.org>, <jpb@...nel.org>, <praan@...gle.com>,
<miko.lenczewski@....com>, <linux-arm-kernel@...ts.infradead.org>,
<iommu@...ts.linux.dev>, <linux-kernel@...r.kernel.org>,
<patches@...ts.linux.dev>
Subject: [PATCH v1 4/9] iommu/arm-smmu-v3: Use alloc_id/free_id ops in arm_smmu_invs_merge/unref
If a domain->invs has an iotlb tag (ASID/VMID) for an SMMU, all the devices
behind that SMMU can reuse the same iotlb tag. This exactly matches the
existing case where multiple devices attached to the same SMMU domain share
an iotlb tag stored in the domain (cd->asid or s2_cfg->vmid).
If a domain->invs doesn't have an iotlb tag for another SMMU, there can be
two cases:
1) This is a new domain that is not yet attached to any device
2) This is a shareable domain that is attached to a device behind one SMMU
but not yet to the other SMMU.
In either case, a new iotlb tag is required. Call the ->alloc_id op in
arm_smmu_invs_merge(). The domain->invs array will keep the new tag, which
is also returned to the caller via the to_merge array.
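A rough sketch of the intended attach-side usage, once the bypass noted in
the merge code below is lifted (the local variables, the direct use of
smmu_domain->invs, and assigning arm_smmu_inv_alloc_asid as the ->alloc_id
op are illustrative assumptions, not part of this patch):

	struct arm_smmu_invs *new_invs;
	struct arm_smmu_inv *cur = &to_merge->inv[0];

	cur->smmu = smmu;		/* the SMMU being attached */
	cur->type = INV_TYPE_S1_ASID;
	cur->id = 0;			/* no tag yet: ask merge to allocate one */
	to_merge->alloc_id = arm_smmu_inv_alloc_asid;

	new_invs = arm_smmu_invs_merge(smmu_domain->invs, to_merge);
	if (IS_ERR(new_invs))
		return PTR_ERR(new_invs);
	/* cur->id now carries the ASID shared by devices behind this SMMU */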
Relax arm_smmu_inv_cmp() to allow sharing an iotlb tag across devices
behind the same SMMU instance.
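With the relaxed comparison, two entries that target the same SMMU with the
same iotlb-tag type compare equal regardless of their id fields, e.g.
(illustrative values only):

	struct arm_smmu_inv a = { .smmu = smmu, .type = INV_TYPE_S1_ASID, .id = 5 };
	struct arm_smmu_inv b = { .smmu = smmu, .type = INV_TYPE_S1_ASID, .id = 0 };

	/* cmp == 0: the existing ASID (5) gets shared instead of allocating */
	WARN_ON(arm_smmu_inv_cmp(&a, &b) != 0);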
Similarly, call the ->free_id op in arm_smmu_invs_unref() when no device is
using the iotlb tag any more.
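The ->free_id op is assumed to have the same shape as the existing
arm_smmu_inv_free_asid/vmid() helpers, i.e. the inv entry plus a flush flag,
matching the to_unref->free_id(&invs->inv[i], true) call in the hunk below.
A minimal sketch of such an op (the function name is a placeholder):

	static void example_free_id(struct arm_smmu_inv *inv, bool flush)
	{
		/* the last user of this iotlb tag is gone, so return it */
		if (inv->type == INV_TYPE_S1_ASID)
			arm_smmu_inv_free_asid(inv, flush);
		else
			arm_smmu_inv_free_vmid(inv, flush);
	}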
Lastly, add a free helper for the revert path of arm_smmu_attach_prepare()
to use.
Signed-off-by: Nicolin Chen <nicolinc@...dia.com>
---
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 3 +
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 70 +++++++++++++++++++--
2 files changed, 69 insertions(+), 4 deletions(-)
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index e21e95936b05..230ab902a9b6 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -680,6 +680,9 @@ static inline bool arm_smmu_inv_is_ats(const struct arm_smmu_inv *inv)
return inv->type == INV_TYPE_ATS || inv->type == INV_TYPE_ATS_FULL;
}
+/* S1_ASID/S2_VMID(S1_CLEAR) types */
+#define arm_smmu_inv_is_iotlb_tag(inv) (!arm_smmu_inv_is_ats(inv))
+
/**
* struct arm_smmu_invs - Per-domain invalidation array
* @max_invs: maximum capacity of the flexible array
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1bf7b7233109..ec370e54b1bc 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1051,6 +1051,9 @@ static int arm_smmu_inv_cmp(const struct arm_smmu_inv *inv_l,
return cmp_int((uintptr_t)inv_l->smmu, (uintptr_t)inv_r->smmu);
if (inv_l->type != inv_r->type)
return cmp_int(inv_l->type, inv_r->type);
+ /* Each SMMU shares a single iotlb tag on a domain, so it is a match */
+ if (arm_smmu_inv_is_iotlb_tag(inv_l))
+ return 0;
return cmp_int(inv_l->id, inv_r->id);
}
@@ -1127,9 +1130,52 @@ struct arm_smmu_invs *arm_smmu_invs_merge(struct arm_smmu_invs *invs,
size_t i, j;
int cmp;
- arm_smmu_invs_for_each_cmp(invs, i, to_merge, j, cmp)
+ arm_smmu_invs_for_each_cmp(invs, i, to_merge, j, cmp) {
+ struct arm_smmu_inv *cur = &to_merge->inv[j];
+
num_invs++;
+ if (!arm_smmu_inv_is_iotlb_tag(cur))
+ continue;
+
+ /* A matching iotlb tag owned by the same SMMU can be shared */
+ if (cmp == 0) {
+ *cur = invs->inv[i];
+ continue;
+ }
+
+ /* Keep iterating the base invs array to see if the next entry matches */
+ if (cmp < 0 && i < invs->num_invs)
+ continue;
+
+ /*
+ * Currently the @to_merge array always carries an id (> 0) that
+ * is also installed in the CD/STE, so a new ID cannot be allocated
+ * here without diverging from what the CD/STE holds. To keep the
+ * existing flow working, bypass the new ID allocation code. This
+ * bypass will be lifted once the rework is done.
+ */
+ if (cur->id)
+ continue;
+
+ /* Not found. Allocate a new one */
+ if (j == 0) {
+ /* KUNIT test doesn't pass in an alloc_id function */
+ if (to_merge->alloc_id) {
+ int ret;
+
+ ret = to_merge->alloc_id(cur,
+ invs->smmu_domain);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+ } else {
+ /* Copy the allocated iotlb tag from the previous inv */
+ cur->id = cur[-1].id;
+ }
+ }
+
new_invs = arm_smmu_invs_alloc(num_invs);
if (!new_invs)
return ERR_PTR(-ENOMEM);
@@ -1207,9 +1253,9 @@ void arm_smmu_invs_unref(struct arm_smmu_invs *invs,
continue;
}
- /* KUNIT test doesn't pass in a free_fn */
- if (free_fn)
- free_fn(&invs->inv[i]);
+ /* KUNIT test doesn't pass in a free_id function */
+ if (to_unref->free_id)
+ to_unref->free_id(&invs->inv[i], true);
invs->num_trashes++;
} else {
/* item in to_unref is not in invs or already a trash */
@@ -3167,6 +3213,21 @@ static void arm_smmu_inv_free_vmid(struct arm_smmu_inv *inv, bool flush)
ida_free(&inv->smmu->vmid_map, inv->id);
}
+static void arm_smmu_inv_free_iotlb_tag(struct arm_smmu_inv *inv)
+{
+ switch (inv->type) {
+ case INV_TYPE_S1_ASID:
+ arm_smmu_inv_free_asid(inv, false);
+ return;
+ case INV_TYPE_S2_VMID:
+ arm_smmu_inv_free_vmid(inv, false);
+ return;
+ default:
+ WARN_ON(true);
+ return;
+ }
+}
+
static int arm_smmu_inv_alloc_asid(struct arm_smmu_inv *inv, void *data)
{
struct arm_smmu_domain *smmu_domain = data;
@@ -3662,6 +3723,7 @@ int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
err_free_vmaster:
kfree(state->vmaster);
err_unprepare_invs:
+ arm_smmu_inv_free_iotlb_tag(&state->new_domain_invst.iotlb_tag);
kfree(state->new_domain_invst.new_invs);
return ret;
}
--
2.43.0