lists.openwall.net   lists  /  announce  owl-users  owl-dev  john-users  john-dev  passwdqc-users  yescrypt  popa3d-users  /  oss-security  kernel-hardening  musl  sabotage  tlsify  passwords  /  crypt-dev  xvendor  /  Bugtraq  Full-Disclosure  linux-kernel  linux-netdev  linux-ext4  linux-hardening  linux-cve-announce  PHC 
Open Source and information security mailing list archives
 
Hash Suite: Windows password security audit tool. GUI, reports in PDF.
[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <CAAywjhRceyE7WRaFY+Kyy=hGbMjzT9MJDC=+Y4kbXwvOxuDRSA@mail.gmail.com>
Date: Tue, 6 Jan 2026 13:18:29 -0800
From: Samiullah Khawaja <skhawaja@...gle.com>
To: Mostafa Saleh <smostafa@...gle.com>
Cc: linux-mm@...ck.org, iommu@...ts.linux.dev, linux-kernel@...r.kernel.org, 
	linux-doc@...r.kernel.org, corbet@....net, joro@...tes.org, will@...nel.org, 
	robin.murphy@....com, akpm@...ux-foundation.org, vbabka@...e.cz, 
	surenb@...gle.com, mhocko@...e.com, jackmanb@...gle.com, hannes@...xchg.org, 
	ziy@...dia.com, david@...hat.com, lorenzo.stoakes@...cle.com, 
	Liam.Howlett@...cle.com, rppt@...nel.org, xiaqinxin@...wei.com, 
	baolu.lu@...ux.intel.com, rdunlap@...radead.org
Subject: Re: [PATCH v5 3/4] iommu: debug-pagealloc: Track IOMMU pages

On Tue, Jan 6, 2026 at 8:22 AM Mostafa Saleh <smostafa@...gle.com> wrote:
>
> Using the new calls, use an atomic refcount to track how many times
> a page is mapped in any of the IOMMUs.
>
> For unmap we need to use iova_to_phys() to get the physical address
> of the pages.
>
> We use the smallest supported page size as the granularity of tracking
> per domain.
> This is important as it is possible to map pages and unmap them with
> larger sizes (as in map_sg() cases).
>
> Reviewed-by: Lu Baolu <baolu.lu@...ux.intel.com>
> Signed-off-by: Mostafa Saleh <smostafa@...gle.com>
> ---
>  drivers/iommu/iommu-debug-pagealloc.c | 91 +++++++++++++++++++++++++++
>  1 file changed, 91 insertions(+)
>
> diff --git a/drivers/iommu/iommu-debug-pagealloc.c b/drivers/iommu/iommu-debug-pagealloc.c
> index 1d343421da98..86ccb310a4a8 100644
> --- a/drivers/iommu/iommu-debug-pagealloc.c
> +++ b/drivers/iommu/iommu-debug-pagealloc.c
> @@ -29,19 +29,110 @@ struct page_ext_operations page_iommu_debug_ops = {
>         .need = need_iommu_debug,
>  };
>
> +static struct page_ext *get_iommu_page_ext(phys_addr_t phys)
> +{
> +       struct page *page = phys_to_page(phys);
> +       struct page_ext *page_ext = page_ext_get(page);
> +
> +       return page_ext;
> +}
> +
> +static struct iommu_debug_metadata *get_iommu_data(struct page_ext *page_ext)
> +{
> +       return page_ext_data(page_ext, &page_iommu_debug_ops);
> +}
> +
> +static void iommu_debug_inc_page(phys_addr_t phys)
> +{
> +       struct page_ext *page_ext = get_iommu_page_ext(phys);
> +       struct iommu_debug_metadata *d = get_iommu_data(page_ext);
> +
> +       WARN_ON(atomic_inc_return_relaxed(&d->ref) <= 0);
> +       page_ext_put(page_ext);
> +}
> +
> +static void iommu_debug_dec_page(phys_addr_t phys)
> +{
> +       struct page_ext *page_ext = get_iommu_page_ext(phys);
> +       struct iommu_debug_metadata *d = get_iommu_data(page_ext);
> +
> +       WARN_ON(atomic_dec_return_relaxed(&d->ref) < 0);
> +       page_ext_put(page_ext);
> +}
> +
> +/*
> + * IOMMU page size doesn't have to match the CPU page size. So, we use
> + * the smallest IOMMU page size to refcount the pages in the vmemmap.
> + * That is important as both map and unmap have to use the same page size
> + * to update the refcount to avoid double counting the same page.
> + * And as we can't know from iommu_unmap() what page size was originally
> + * used for map, we just use the minimum supported one for both.
> + */
> +static size_t iommu_debug_page_size(struct iommu_domain *domain)
> +{
> +       return 1UL << __ffs(domain->pgsize_bitmap);
> +}
> +
>  void __iommu_debug_map(struct iommu_domain *domain, phys_addr_t phys, size_t size)
>  {
> +       size_t off, end;
> +       size_t page_size = iommu_debug_page_size(domain);
> +
> +       if (WARN_ON(!phys || check_add_overflow(phys, size, &end)))
> +               return;
> +
> +       for (off = 0 ; off < size ; off += page_size) {
> +               if (!pfn_valid(__phys_to_pfn(phys + off)))
> +                       continue;
> +               iommu_debug_inc_page(phys + off);
> +       }
> +}
> +
> +static void __iommu_debug_update_iova(struct iommu_domain *domain,
> +                                     unsigned long iova, size_t size, bool inc)
> +{
> +       size_t off, end;
> +       size_t page_size = iommu_debug_page_size(domain);
> +
> +       if (WARN_ON(check_add_overflow(iova, size, &end)))
> +               return;
> +
> +       for (off = 0 ; off < size ; off += page_size) {
> +               phys_addr_t phys = iommu_iova_to_phys(domain, iova + off);
> +
> +               if (!phys || !pfn_valid(__phys_to_pfn(phys)))
> +                       continue;
> +
> +               if (inc)
> +                       iommu_debug_inc_page(phys);
> +               else
> +                       iommu_debug_dec_page(phys);
> +       }
>  }
>
>  void __iommu_debug_unmap_begin(struct iommu_domain *domain,
>                                unsigned long iova, size_t size)
>  {
> +       __iommu_debug_update_iova(domain, iova, size, false);
>  }
>
>  void __iommu_debug_unmap_end(struct iommu_domain *domain,
>                              unsigned long iova, size_t size,
>                              size_t unmapped)
>  {
> +       if (unmapped == size)
> +               return;
> +
> +       /*
> +        * If unmap failed, re-increment the refcount, but if it unmapped
> +        * a larger size, decrement the extra part.
> +        */
> +       if (unmapped < size)
> +               __iommu_debug_update_iova(domain, iova + unmapped,
> +                                         size - unmapped, true);
> +       else
> +               __iommu_debug_update_iova(domain, iova + size,
> +                                         unmapped - size, false);
>  }
>
>  void iommu_debug_init(void)
> --
> 2.52.0.351.gbe84eed79e-goog
>
>

Reviewed-by: Samiullah Khawaja <skhawaja@...gle.com>

Powered by blists - more mailing lists

Powered by Openwall GNU/*/Linux Powered by OpenVZ