Message-ID: <aIvoMbici26ekErC@Yueyangs-MacBook-Pro.local>
Date: Thu, 31 Jul 2025 23:03:29 +0100
From: YUEYANG PAN <pyyjason@...il.com>
To: SeongJae Park <sj@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Usama Arif <usamaarif642@...il.com>, damon@...ts.linux.dev,
linux-mm@...ck.org, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 2/2] mm/damon: Add damos_stat support for vaddr
On Wed, Jul 30, 2025 at 10:56:52AM -0700, SeongJae Park wrote:
> On Wed, 30 Jul 2025 10:19:56 -0700 Yueyang Pan <pyyjason@...il.com> wrote:
>
> > This patch adds support for damos_stat in virtual address space.
> > It leverages the walk_page_range to walk the page table and gets
> > the folio from page table. The last folio scanned is stored in
> > damos->last_applied to prevent double counting.
> >
> > Signed-off-by: Yueyang Pan <pyyjason@...il.com>
> > ---
> > mm/damon/vaddr.c | 105 ++++++++++++++++++++++++++++++++++++++++++++++-
> > 1 file changed, 104 insertions(+), 1 deletion(-)
> >
> > diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
> > index 87e825349bdf..6ed919e817e2 100644
> > --- a/mm/damon/vaddr.c
> > +++ b/mm/damon/vaddr.c
> > @@ -890,6 +890,109 @@ static unsigned long damos_va_migrate(struct damon_target *target,
> > return applied * PAGE_SIZE;
> > }
> >
> > +struct damos_va_stat_private {
> > + struct damos *scheme;
> > + unsigned long *sz_filter_passed;
> > +};
> > +
> > +static inline bool damon_va_invalid_damos_folio(struct folio *folio, struct damos *s)
>
> Weirdly, DAMON code usually keeps the 80-column limit. Could you please
> break this line down?
>
> Also, the name feels long to me. What about damos_va_invalid_folio()?
>
Thanks. I will fix both in the next version.
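Something like the below, with the shorter name you suggested and the
parameters wrapped to fit 80 columns (untested sketch, callers updated to
match):

static inline bool damos_va_invalid_folio(struct folio *folio,
		struct damos *s)
{
	return !folio || folio == s->last_applied;
}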
> > +{
> > + return !folio || folio == s->last_applied;
> > +}
> > +
> > +static int damos_va_stat_pmd_entry(pmd_t *pmd, unsigned long addr,
> > + unsigned long next, struct mm_walk *walk)
> > +{
> > + struct damos_va_stat_private *priv = walk->private;
> > + struct damos *s = priv->scheme;
> > + unsigned long *sz_filter_passed = priv->sz_filter_passed;
> > + struct vm_area_struct *vma = walk->vma;
> > + struct folio *folio;
> > + spinlock_t *ptl;
> > + pte_t *start_pte, *pte, ptent;
> > + int nr;
> > +
> > +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> > + if (pmd_trans_huge(*pmd)) {
> > + pmd_t pmde;
> > +
> > + ptl = pmd_trans_huge_lock(pmd, vma);
> > + if (!ptl)
> > + return 0;
> > + pmde = pmdp_get(pmd);
> > + if (!pmd_present(pmde))
> > + goto huge_unlock;
> > +
> > + folio = vm_normal_folio_pmd(vma, addr, pmde);
> > +
> > + if (damon_va_invalid_damos_folio(folio, s))
> > + goto huge_unlock;
> > +
> > + if (!damos_va_filter_out(s, folio, vma, addr, NULL, pmd))
> > + *sz_filter_passed += folio_size(folio);
> > + s->last_applied = folio;
> > +
> > +huge_unlock:
> > + spin_unlock(ptl);
> > + return 0;
> > + }
> > +#endif
> > + start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> > + if (!start_pte)
> > + return 0;
> > +
> > + for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
> > + nr = 1;
> > + ptent = ptep_get(pte);
> > +
> > + if (pte_none(ptent) || !pte_present(ptent))
> > + continue;
> > +
> > + folio = vm_normal_folio(vma, addr, ptent);
> > +
> > + if (damon_va_invalid_damos_folio(folio, s))
> > + continue;
> > +
> > + if (!damos_va_filter_out(s, folio, vma, addr, pte, NULL))
> > + *sz_filter_passed += folio_size(folio);
> > + nr = folio_nr_pages(folio);
> > + s->last_applied = folio;
> > + }
> > +
> > + pte_unmap_unlock(start_pte, ptl);
> > +
>
> No strong opinion, but I'd like to drop the above two blank lines (one after
> the for loop, and one after pte_unmap_unlock()) if you don't mind.
>
Will remove them in the next version.
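That is, the tail of damos_va_stat_pmd_entry() would then read:

		nr = folio_nr_pages(folio);
		s->last_applied = folio;
	}
	pte_unmap_unlock(start_pte, ptl);
	return 0;
}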
> > + return 0;
> > +}
> > +
> > +static unsigned long damos_va_stat(struct damon_target *target,
> > + struct damon_region *r, struct damos *s,
> > + unsigned long *sz_filter_passed)
> > +{
> > +
>
> Seems this is unnecessary blank line. Let's remove it.
Will remove it in the next version.
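i.e. the function would open directly with the declarations:

static unsigned long damos_va_stat(struct damon_target *target,
		struct damon_region *r, struct damos *s,
		unsigned long *sz_filter_passed)
{
	struct damos_va_stat_private priv;
	struct mm_struct *mm;
	...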
>
> > + struct damos_va_stat_private priv;
> > + struct mm_struct *mm;
> > + struct mm_walk_ops walk_ops = {
> > + .pmd_entry = damos_va_stat_pmd_entry,
> > + .walk_lock = PGWALK_RDLOCK,
> > + };
> > +
> > + priv.scheme = s;
> > + priv.sz_filter_passed = sz_filter_passed;
> > +
> > + if (!damon_scheme_has_filter(s))
> > + return 0;
> > +
> > + mm = damon_get_mm(target);
> > + if (!mm)
> > + return 0;
> > +
> > + mmap_read_lock(mm);
> > + walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
> > + mmap_read_unlock(mm);
> > + mmput(mm);
> > + return 0;
> > +}
> > +
> > static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
> > struct damon_target *t, struct damon_region *r,
> > struct damos *scheme, unsigned long *sz_filter_passed)
> > @@ -916,7 +1019,7 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
> > case DAMOS_MIGRATE_COLD:
> > return damos_va_migrate(t, r, scheme, sz_filter_passed);
> > case DAMOS_STAT:
> > - return 0;
> > + return damos_va_stat(t, r, scheme, sz_filter_passed);
> > default:
> > /*
> > * DAMOS actions that are not yet supported by 'vaddr'.
> > --
> > 2.47.3
>
>
> Thanks,
> SJ
Best Wishes,
Pan