Message-Id: <20250801233914.1530-1-sj@kernel.org>
Date: Fri,  1 Aug 2025 16:39:14 -0700
From: SeongJae Park <sj@...nel.org>
To: pyyjason@...il.com
Cc: SeongJae Park <sj@...nel.org>,
	Andrew Morton <akpm@...ux-foundation.org>,
	Usama Arif <usamaarif642@...il.com>,
	damon@...ts.linux.dev,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org,
	kernel-team@...a.com
Subject: Re: [PATCH v3 2/2] mm/damon: Add damos_stat support for vaddr

On Fri,  1 Aug 2025 22:59:51 +0000 pyyjason@...il.com wrote:

> From: Yueyang Pan <pyyjason@...il.com>
> 
> This patch adds support for damos_stat in virtual address space.

As mentioned on the cover letter, this is not technically accurate.  Could
you please update the subject and the changelog above as suggested there?

> It leverages walk_page_range() to walk the page table and gets the folio
> from the page table. The last folio scanned is stored in
> damos->last_applied to prevent double counting.
> 
> Signed-off-by: Yueyang Pan <pyyjason@...il.com>
> ---
>  mm/damon/vaddr.c | 103 ++++++++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 102 insertions(+), 1 deletion(-)
> 
> diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
> index 87e825349bdf..5960d5d36123 100644
> --- a/mm/damon/vaddr.c
> +++ b/mm/damon/vaddr.c
> @@ -890,6 +890,107 @@ static unsigned long damos_va_migrate(struct damon_target *target,
>  	return applied * PAGE_SIZE;
>  }
>  
> +struct damos_va_stat_private {
> +	struct damos *scheme;
> +	unsigned long *sz_filter_passed;
> +};
> +
> +static inline bool damos_va_invalid_folio(struct folio *folio,
> +		struct damos *s)
> +{
> +	return !folio || folio == s->last_applied;
> +}
> +
> +static int damos_va_stat_pmd_entry(pmd_t *pmd, unsigned long addr,
> +		unsigned long next, struct mm_walk *walk)
> +{
> +	struct damos_va_stat_private *priv = walk->private;
> +	struct damos *s = priv->scheme;
> +	unsigned long *sz_filter_passed = priv->sz_filter_passed;
> +	struct vm_area_struct *vma = walk->vma;
> +	struct folio *folio;
> +	spinlock_t *ptl;
> +	pte_t *start_pte, *pte, ptent;
> +	int nr;
> +
> +#ifdef CONFIG_TRANSPARENT_HUGEPAGE
> +	if (pmd_trans_huge(*pmd)) {
> +		pmd_t pmde;
> +
> +		ptl = pmd_trans_huge_lock(pmd, vma);
> +		if (!ptl)
> +			return 0;
> +		pmde = pmdp_get(pmd);
> +		if (!pmd_present(pmde))
> +			goto huge_unlock;
> +
> +		folio = vm_normal_folio_pmd(vma, addr, pmde);
> +
> +		if (damos_va_invalid_folio(folio, s))
> +			goto huge_unlock;
> +
> +		if (!damos_va_filter_out(s, folio, vma, addr, NULL, pmd))
> +			*sz_filter_passed += folio_size(folio);
> +		s->last_applied = folio;
> +
> +huge_unlock:
> +		spin_unlock(ptl);
> +		return 0;
> +	}
> +#endif
> +	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
> +	if (!start_pte)
> +		return 0;
> +
> +	for (; addr < next; pte += nr, addr += nr * PAGE_SIZE) {
> +		nr = 1;
> +		ptent = ptep_get(pte);
> +
> +		if (pte_none(ptent) || !pte_present(ptent))
> +			continue;
> +
> +		folio = vm_normal_folio(vma, addr, ptent);
> +
> +		if (damos_va_invalid_folio(folio, s))
> +			continue;
> +
> +		if (!damos_va_filter_out(s, folio, vma, addr, pte, NULL))
> +			*sz_filter_passed += folio_size(folio);
> +		nr = folio_nr_pages(folio);
> +		s->last_applied = folio;
> +	}
> +	pte_unmap_unlock(start_pte, ptl);
> +	return 0;
> +}
> +
> +static unsigned long damos_va_stat(struct damon_target *target,
> +		struct damon_region *r, struct damos *s,
> +		unsigned long *sz_filter_passed)
> +{
> +	struct damos_va_stat_private priv;
> +	struct mm_struct *mm;
> +	struct mm_walk_ops walk_ops = {
> +		.pmd_entry = damos_va_stat_pmd_entry,
> +		.walk_lock = PGWALK_RDLOCK,
> +	};
> +
> +	priv.scheme = s;
> +	priv.sz_filter_passed = sz_filter_passed;
> +
> +	if (!damon_ops_has_filter(s))

I suggested renaming this function to damos_ops_has_filter() on the previous
patch of this series.  If that rename is accepted, this call should be updated
as well.
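
For example (just a sketch, assuming the rename lands in the previous patch),
the check here would simply become:

	if (!damos_ops_has_filter(s))
		return 0;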

> +		return 0;
> +
> +	mm = damon_get_mm(target);
> +	if (!mm)
> +		return 0;
> +
> +	mmap_read_lock(mm);
> +	walk_page_range(mm, r->ar.start, r->ar.end, &walk_ops, &priv);
> +	mmap_read_unlock(mm);
> +	mmput(mm);
> +	return 0;
> +}
> +
>  static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
>  		struct damon_target *t, struct damon_region *r,
>  		struct damos *scheme, unsigned long *sz_filter_passed)
> @@ -916,7 +1017,7 @@ static unsigned long damon_va_apply_scheme(struct damon_ctx *ctx,
>  	case DAMOS_MIGRATE_COLD:
>  		return damos_va_migrate(t, r, scheme, sz_filter_passed);
>  	case DAMOS_STAT:
> -		return 0;
> +		return damos_va_stat(t, r, scheme, sz_filter_passed);
>  	default:
>  		/*
>  		 * DAMOS actions that are not yet supported by 'vaddr'.
> -- 
> 2.43.0


Thanks,
SJ
