Message-ID: <20251117184815.1027271-7-smostafa@google.com>
Date: Mon, 17 Nov 2025 18:47:53 +0000
From: Mostafa Saleh <smostafa@...gle.com>
To: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
kvmarm@...ts.linux.dev, iommu@...ts.linux.dev
Cc: catalin.marinas@....com, will@...nel.org, maz@...nel.org,
oliver.upton@...ux.dev, joey.gouly@....com, suzuki.poulose@....com,
yuzenghui@...wei.com, joro@...tes.org, jean-philippe@...aro.org, jgg@...pe.ca,
praan@...gle.com, danielmentz@...gle.com, mark.rutland@....com,
qperret@...gle.com, tabba@...gle.com, Mostafa Saleh <smostafa@...gle.com>
Subject: [PATCH v5 06/27] iommu/arm-smmu-v3: Move TLB range invalidation into
common code

Range TLB invalidation follows a very specific algorithm. Instead of
re-writing it for the hypervisor, move it into a common helper
(arm_smmu_tlb_inv_build()) in arm-smmu-v3.h so the hypervisor driver can
reuse it.

Signed-off-by: Mostafa Saleh <smostafa@...gle.com>
---
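
Note (illustration only, not part of this patch): a minimal sketch of how a
hypervisor-side caller could reuse the new helper with its own add_cmd
callback that issues commands directly instead of batching. The names
struct hyp_arm_smmu_v3_device and hyp_smmu_send_cmd() are assumptions made
for this sketch, and the sketch assumes the hypothetical device struct
mirrors the driver's feature bits; only arm_smmu_tlb_inv_build(), the cmdq
types and ARM_SMMU_FEAT_RANGE_INV come from the driver.

/* Hypothetical add_cmd callback: no batching, send each command directly. */
static void hyp_smmu_add_cmd(void *opaque, struct arm_smmu_cmdq_batch *cmds,
			     struct arm_smmu_cmdq_ent *cmd)
{
	struct hyp_arm_smmu_v3_device *smmu = opaque;

	hyp_smmu_send_cmd(smmu, cmd);	/* assumed EL2 command submission */
}

static void hyp_smmu_tlb_inv_range(struct hyp_arm_smmu_v3_device *smmu,
				   struct arm_smmu_cmdq_ent *cmd,
				   unsigned long iova, size_t size,
				   size_t granule, unsigned long pgsize_bitmap)
{
	/* The callback above ignores the batch pointer, so pass NULL. */
	arm_smmu_tlb_inv_build(cmd, iova, size, granule, pgsize_bitmap,
			       smmu->features & ARM_SMMU_FEAT_RANGE_INV,
			       smmu, hyp_smmu_add_cmd, NULL);
}
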
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 69 ++++--------------
drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 79 +++++++++++++++++++++
2 files changed, 92 insertions(+), 56 deletions(-)
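
As an illustrative walk-through of the (unchanged) range algorithm, assume
4K leaf pages (tg = 12) and an invalidation of 35 pages: num_pages = 35, so
the first loop iteration takes scale = __ffs(35) = 0 and num = 35 & 0x1f = 3,
emitting one command covering 3 pages (12K) and leaving num_pages = 32; the
second iteration takes scale = 5 and num = 1, emitting one command covering
32 pages (128K), which completes the range.
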
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 1497ffcd4555..f6c3eeb4ecea 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -2105,74 +2105,31 @@ static void arm_smmu_tlb_inv_context(void *cookie)
arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
}
+static void __arm_smmu_cmdq_batch_add(void *__opaque,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd)
+{
+ struct arm_smmu_device *smmu = (struct arm_smmu_device *)__opaque;
+
+ arm_smmu_cmdq_batch_add(smmu, cmds, cmd);
+}
+
static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
unsigned long iova, size_t size,
size_t granule,
struct arm_smmu_domain *smmu_domain)
{
struct arm_smmu_device *smmu = smmu_domain->smmu;
- unsigned long end = iova + size, num_pages = 0, tg = 0;
- size_t inv_range = granule;
struct arm_smmu_cmdq_batch cmds;
if (!size)
return;
- if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
- /* Get the leaf page size */
- tg = __ffs(smmu_domain->domain.pgsize_bitmap);
-
- num_pages = size >> tg;
-
- /* Convert page size of 12,14,16 (log2) to 1,2,3 */
- cmd->tlbi.tg = (tg - 10) / 2;
-
- /*
- * Determine what level the granule is at. For non-leaf, both
- * io-pgtable and SVA pass a nominal last-level granule because
- * they don't know what level(s) actually apply, so ignore that
- * and leave TTL=0. However for various errata reasons we still
- * want to use a range command, so avoid the SVA corner case
- * where both scale and num could be 0 as well.
- */
- if (cmd->tlbi.leaf)
- cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
- else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
- num_pages++;
- }
-
arm_smmu_cmdq_batch_init(smmu, &cmds, cmd);
-
- while (iova < end) {
- if (smmu->features & ARM_SMMU_FEAT_RANGE_INV) {
- /*
- * On each iteration of the loop, the range is 5 bits
- * worth of the aligned size remaining.
- * The range in pages is:
- *
- * range = (num_pages & (0x1f << __ffs(num_pages)))
- */
- unsigned long scale, num;
-
- /* Determine the power of 2 multiple number of pages */
- scale = __ffs(num_pages);
- cmd->tlbi.scale = scale;
-
- /* Determine how many chunks of 2^scale size we have */
- num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
- cmd->tlbi.num = num - 1;
-
- /* range is num * 2^scale * pgsize */
- inv_range = num << (scale + tg);
-
- /* Clear out the lower order bits for the next iteration */
- num_pages -= num << scale;
- }
-
- cmd->tlbi.addr = iova;
- arm_smmu_cmdq_batch_add(smmu, &cmds, cmd);
- iova += inv_range;
- }
+ arm_smmu_tlb_inv_build(cmd, iova, size, granule,
+ smmu_domain->domain.pgsize_bitmap,
+ smmu->features & ARM_SMMU_FEAT_RANGE_INV,
+ smmu, __arm_smmu_cmdq_batch_add, &cmds);
arm_smmu_cmdq_batch_submit(smmu, &cmds);
}
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index 4aaf93945ee3..4a59b4d39c4f 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -1072,6 +1072,85 @@ static inline void arm_smmu_write_strtab_l1_desc(struct arm_smmu_strtab_l1 *dst,
WRITE_ONCE(dst->l2ptr, cpu_to_le64(val));
}
+/**
+ * arm_smmu_tlb_inv_build - Create a range invalidation command
+ * @cmd: Base command initialized with the opcode (S1, S2, ...), VMID and ASID.
+ * @iova: Start IOVA to invalidate
+ * @size: Size of range
+ * @granule: Granule of invalidation
+ * @pgsize_bitmap: Page size bitmap of the page table.
+ * @is_range: Use range invalidation commands.
+ * @opaque: Pointer to pass to add_cmd
+ * @add_cmd: Function to send/batch the invalidation command
+ * @cmds: In case of batching, points to the command batch
+ */
+static inline void arm_smmu_tlb_inv_build(struct arm_smmu_cmdq_ent *cmd,
+ unsigned long iova, size_t size,
+ size_t granule, unsigned long pgsize_bitmap,
+ bool is_range, void *opaque,
+ void (*add_cmd)(void *_opaque,
+ struct arm_smmu_cmdq_batch *cmds,
+ struct arm_smmu_cmdq_ent *cmd),
+ struct arm_smmu_cmdq_batch *cmds)
+{
+ unsigned long end = iova + size, num_pages = 0, tg = 0;
+ size_t inv_range = granule;
+
+ if (is_range) {
+ /* Get the leaf page size */
+ tg = __ffs(pgsize_bitmap);
+
+ num_pages = size >> tg;
+
+ /* Convert page size of 12,14,16 (log2) to 1,2,3 */
+ cmd->tlbi.tg = (tg - 10) / 2;
+
+ /*
+ * Determine what level the granule is at. For non-leaf, both
+ * io-pgtable and SVA pass a nominal last-level granule because
+ * they don't know what level(s) actually apply, so ignore that
+ * and leave TTL=0. However for various errata reasons we still
+ * want to use a range command, so avoid the SVA corner case
+ * where both scale and num could be 0 as well.
+ */
+ if (cmd->tlbi.leaf)
+ cmd->tlbi.ttl = 4 - ((ilog2(granule) - 3) / (tg - 3));
+ else if ((num_pages & CMDQ_TLBI_RANGE_NUM_MAX) == 1)
+ num_pages++;
+ }
+
+ while (iova < end) {
+ if (is_range) {
+ /*
+ * On each iteration of the loop, the range is 5 bits
+ * worth of the aligned size remaining.
+ * The range in pages is:
+ *
+ * range = (num_pages & (0x1f << __ffs(num_pages)))
+ */
+ unsigned long scale, num;
+
+ /* Determine the power of 2 multiple number of pages */
+ scale = __ffs(num_pages);
+ cmd->tlbi.scale = scale;
+
+ /* Determine how many chunks of 2^scale size we have */
+ num = (num_pages >> scale) & CMDQ_TLBI_RANGE_NUM_MAX;
+ cmd->tlbi.num = num - 1;
+
+ /* range is num * 2^scale * pgsize */
+ inv_range = num << (scale + tg);
+
+ /* Clear out the lower order bits for the next iteration */
+ num_pages -= num << scale;
+ }
+
+ cmd->tlbi.addr = iova;
+ add_cmd(opaque, cmds, cmd);
+ iova += inv_range;
+ }
+}
+
#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
void arm_smmu_sva_notifier_synchronize(void);
--
2.52.0.rc1.455.g30608eb744-goog