[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20250729182001.56172-1-sj@kernel.org>
Date: Tue, 29 Jul 2025 11:20:01 -0700
From: SeongJae Park <sj@...nel.org>
To: Yueyang Pan <pyyjason@...il.com>
Cc: SeongJae Park <sj@...nel.org>,
Andrew Morton <akpm@...ux-foundation.org>,
Usama Arif <usamaarif642@...il.com>,
damon@...ts.linux.dev,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH v1 1/2] mm/damon: Move invalid folio and has filter to ops-common
On Tue, 29 Jul 2025 06:53:29 -0700 Yueyang Pan <pyyjason@...il.com> wrote:
> From: PanJason <pyyjason@...il.com>
>
> This patch moves damon_pa_invalid_damos_folio and
> damon_pa_scheme_has_filter to ops-common, renaming them
> to damon_invalid_damos_folio and damon_scheme_has_filter.
> Doing so allows us to reuse their logic in the vaddr version
> of DAMOS_STAT.
You forgot to add your Signed-off-by: tag.
> ---
> mm/damon/ops-common.c | 19 +++++++++++++++++++
> mm/damon/ops-common.h | 3 +++
> mm/damon/paddr.c | 29 +++++------------------------
> 3 files changed, 27 insertions(+), 24 deletions(-)
>
> diff --git a/mm/damon/ops-common.c b/mm/damon/ops-common.c
> index 99321ff5cb92..7d3b48cc0f86 100644
> --- a/mm/damon/ops-common.c
> +++ b/mm/damon/ops-common.c
> @@ -412,3 +412,22 @@ unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid)
>
> return nr_migrated;
> }
> +
> +bool damon_scheme_has_filter(struct damos *s)
> +{
> + struct damos_filter *f;
> + damos_for_each_ops_filter(f, s)
> + return true;
> + return false;
> +}
> +
> +bool damon_invalid_damos_folio(struct folio *folio, struct damos *s)
> +{
> + if (!folio)
> + return true;
> + if (folio == s->last_applied) {
> + folio_put(folio);
> + return true;
> + }
> + return false;
> +}
Unless you have a different opinion about this function from what I mentioned
in the reply to the next patch, let's not move this function.
> diff --git a/mm/damon/ops-common.h b/mm/damon/ops-common.h
> index 61ad54aaf256..4e905477fdce 100644
> --- a/mm/damon/ops-common.h
> +++ b/mm/damon/ops-common.h
> @@ -21,3 +21,6 @@ int damon_hot_score(struct damon_ctx *c, struct damon_region *r,
>
> bool damos_folio_filter_match(struct damos_filter *filter, struct folio *folio);
> unsigned long damon_migrate_pages(struct list_head *folio_list, int target_nid);
> +
> +bool damon_scheme_has_filter(struct damos *s);
> +bool damon_invalid_damos_folio(struct folio *folio, struct damos *s);
> diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c
> index 53a55c5114fb..a8b7048e871e 100644
> --- a/mm/damon/paddr.c
> +++ b/mm/damon/paddr.c
> @@ -114,16 +114,6 @@ static bool damos_pa_filter_out(struct damos *scheme, struct folio *folio)
> return scheme->ops_filters_default_reject;
> }
>
> -static bool damon_pa_invalid_damos_folio(struct folio *folio, struct damos *s)
> -{
> - if (!folio)
> - return true;
> - if (folio == s->last_applied) {
> - folio_put(folio);
> - return true;
> - }
> - return false;
> -}
>
> static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
> unsigned long *sz_filter_passed)
> @@ -152,7 +142,7 @@ static unsigned long damon_pa_pageout(struct damon_region *r, struct damos *s,
> addr = r->ar.start;
> while (addr < r->ar.end) {
> folio = damon_get_folio(PHYS_PFN(addr));
> - if (damon_pa_invalid_damos_folio(folio, s)) {
> + if (damon_invalid_damos_folio(folio, s)) {
> addr += PAGE_SIZE;
> continue;
> }
> @@ -192,7 +182,7 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate(
> addr = r->ar.start;
> while (addr < r->ar.end) {
> folio = damon_get_folio(PHYS_PFN(addr));
> - if (damon_pa_invalid_damos_folio(folio, s)) {
> + if (damon_invalid_damos_folio(folio, s)) {
> addr += PAGE_SIZE;
> continue;
> }
> @@ -239,7 +229,7 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
> addr = r->ar.start;
> while (addr < r->ar.end) {
> folio = damon_get_folio(PHYS_PFN(addr));
> - if (damon_pa_invalid_damos_folio(folio, s)) {
> + if (damon_invalid_damos_folio(folio, s)) {
> addr += PAGE_SIZE;
> continue;
> }
> @@ -262,28 +252,19 @@ static unsigned long damon_pa_migrate(struct damon_region *r, struct damos *s,
> return applied * PAGE_SIZE;
> }
>
> -static bool damon_pa_scheme_has_filter(struct damos *s)
> -{
> - struct damos_filter *f;
> -
> - damos_for_each_ops_filter(f, s)
> - return true;
> - return false;
> -}
> -
> static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
> unsigned long *sz_filter_passed)
> {
> unsigned long addr;
> struct folio *folio;
>
> - if (!damon_pa_scheme_has_filter(s))
> + if (!damon_scheme_has_filter(s))
> return 0;
>
> addr = r->ar.start;
> while (addr < r->ar.end) {
> folio = damon_get_folio(PHYS_PFN(addr));
> - if (damon_pa_invalid_damos_folio(folio, s)) {
> + if (damon_invalid_damos_folio(folio, s)) {
> addr += PAGE_SIZE;
> continue;
> }
> --
> 2.47.3
Other than the above, this looks good to me.
Thanks,
SJ
Powered by blists - more mailing lists