Message-ID: <20251003173229.1533640-4-smostafa@google.com>
Date: Fri, 3 Oct 2025 17:32:28 +0000
From: Mostafa Saleh <smostafa@...gle.com>
To: linux-mm@...ck.org, iommu@...ts.linux.dev, linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org
Cc: corbet@....net, joro@...tes.org, will@...nel.org, robin.murphy@....com,
akpm@...ux-foundation.org, vbabka@...e.cz, surenb@...gle.com, mhocko@...e.com,
jackmanb@...gle.com, hannes@...xchg.org, ziy@...dia.com, david@...hat.com,
lorenzo.stoakes@...cle.com, Liam.Howlett@...cle.com, rppt@...nel.org,
Mostafa Saleh <smostafa@...gle.com>
Subject: [RFC PATCH 3/4] drivers/iommu-debug: Track IOMMU pages
Using the newly added hooks, keep an atomic refcount that tracks how
many times a page is mapped in any of the IOMMUs.

For unmap we need iommu_iova_to_phys() to get the physical address of
the pages, as unmap only receives the IOVA and the size.

We use the smallest page size supported by the domain as the tracking
granularity. This is important as pages can be mapped with one size and
unmapped with a different one (as in the map_sg() case), so map and
unmap must account at the same granularity to avoid miscounting.
Signed-off-by: Mostafa Saleh <smostafa@...gle.com>
---
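Not part of the patch: a minimal, standalone sketch (userspace C, so it
builds on its own) of the tracking granularity described above. The
pgsize_bitmap value below is a hypothetical example (4K | 2M | 1G), and
__builtin_ctzl() stands in for the kernel's __ffs():

/*
 * With a pgsize_bitmap of 4K | 2M | 1G the smallest supported page size
 * is 4K, so a single 2M block mapping and the matching 512 4K unmaps
 * touch the same 512 per-page refcounts.
 */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	/* Hypothetical pgsize_bitmap: 4K, 2M and 1G pages supported. */
	unsigned long pgsize_bitmap = (1UL << 12) | (1UL << 21) | (1UL << 30);
	/* Equivalent of "1UL << __ffs(domain->pgsize_bitmap)" in the patch. */
	size_t page_size = 1UL << __builtin_ctzl(pgsize_bitmap);
	size_t map_size = 2UL << 20;	/* one 2M block mapping */
	size_t off, refs = 0;

	/* Same walk as iommu_debug_map()/iommu_debug_unmap() below. */
	for (off = 0; off < map_size; off += page_size)
		refs++;

	printf("tracking page size: %zu, refcounts touched: %zu\n",
	       page_size, refs);
	return 0;
}

Whatever size the original mapping used, the unmap path walks the same
4K-sized steps, so each struct page's refcount balances out.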
drivers/iommu/iommu-debug.c | 83 +++++++++++++++++++++++++++++++++++++
1 file changed, 83 insertions(+)
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 607f1fcf2235..cec8f594c7fa 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -27,16 +27,99 @@ struct page_ext_operations page_iommu_debug_ops = {
.need = need_iommu_debug,
};
+static struct page_ext *get_iommu_page_ext(phys_addr_t phys)
+{
+ struct page *page = phys_to_page(phys);
+ struct page_ext *page_ext = page_ext_get(page);
+
+ return page_ext;
+}
+
+static struct iommu_debug_metadate *get_iommu_data(struct page_ext *page_ext)
+{
+ return page_ext_data(page_ext, &page_iommu_debug_ops);
+}
+
+static void iommu_debug_inc_page(phys_addr_t phys)
+{
+ struct page_ext *page_ext = get_iommu_page_ext(phys);
+ struct iommu_debug_metadate *d = get_iommu_data(page_ext);
+
+ WARN_ON(atomic_inc_return(&d->ref) <= 0);
+ page_ext_put(page_ext);
+}
+
+static void iommu_debug_dec_page(phys_addr_t phys)
+{
+ struct page_ext *page_ext = get_iommu_page_ext(phys);
+ struct iommu_debug_metadate *d = get_iommu_data(page_ext);
+
+ WARN_ON(atomic_dec_return(&d->ref) < 0);
+ page_ext_put(page_ext);
+}
+
+/*
+ * The IOMMU page size might not match the CPU page size; in that case, we use
+ * the smallest supported IOMMU page size to refcount the pages in the vmemmap.
+ * That is important as both map and unmap have to use the same page size
+ * to update the refcount, to avoid double counting the same page.
+ * And as we can't know from iommu_unmap() which page size was originally
+ * used for the map, we just use the minimum supported one for both.
+ */
+static size_t iommu_debug_page_size(struct iommu_domain *domain)
+{
+ return 1UL << __ffs(domain->pgsize_bitmap);
+}
+
void iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
{
+ size_t off;
+ size_t page_size = iommu_debug_page_size(domain);
+
+ if (!static_branch_likely(&iommu_debug_initialized))
+ return;
+
+ for (off = 0; off < size; off += page_size) {
+ if (!pfn_valid(__phys_to_pfn(phys + off)))
+ continue;
+ iommu_debug_inc_page(phys + off);
+ }
}
void iommu_debug_unmap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
+ size_t off;
+ size_t page_size = iommu_debug_page_size(domain);
+
+ if (!static_branch_likely(&iommu_debug_initialized))
+ return;
+
+ for (off = 0; off < size; off += page_size) {
+ phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
+
+ if (!phys || !pfn_valid(__phys_to_pfn(phys)))
+ continue;
+
+ iommu_debug_dec_page(phys);
+ }
}
void iommu_debug_remap(struct iommu_domain *domain, unsigned long iova, size_t size)
{
+ size_t off;
+ size_t page_size = iommu_debug_page_size(domain);
+
+ if (!static_branch_likely(&iommu_debug_initialized))
+ return;
+
+ for (off = 0; off < size; off += page_size) {
+ phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
+
+ if (!phys || !pfn_valid(__phys_to_pfn(phys)))
+ continue;
+
+ iommu_debug_inc_page(phys);
+ }
}
void iommu_debug_init(void)
--
2.51.0.618.g983fd99d29-goog