Message-ID: <b79c4a9b-2d19-442c-969a-7479a8ed9dc7@linux.dev>
Date: Sun, 4 Jan 2026 19:42:16 +0800
From: Lance Yang <lance.yang@...ux.dev>
To: Vernon Yang <vernon2gm@...il.com>, baolin.wang@...ux.alibaba.com
Cc: lorenzo.stoakes@...cle.com, ziy@...dia.com, dev.jain@....com,
baohua@...nel.org, richard.weiyang@...il.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org, Vernon Yang <yanglincheng@...inos.cn>,
akpm@...ux-foundation.org, david@...nel.org
Subject: Re: [PATCH v3 4/6] mm: add folio_is_lazyfree helper
On 2026/1/4 13:41, Vernon Yang wrote:
> Add a folio_is_lazyfree() helper to identify lazy-free folios and
> improve code readability.
>
> Signed-off-by: Vernon Yang <yanglincheng@...inos.cn>
> ---
>  include/linux/mm_inline.h | 5 +++++
>  mm/rmap.c                 | 4 ++--
>  mm/vmscan.c               | 5 ++---
>  3 files changed, 9 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
> index fa2d6ba811b5..65a4ae52d915 100644
> --- a/include/linux/mm_inline.h
> +++ b/include/linux/mm_inline.h
> @@ -35,6 +35,11 @@ static inline int page_is_file_lru(struct page *page)
>          return folio_is_file_lru(page_folio(page));
>  }
>  
> +static inline int folio_is_lazyfree(const struct folio *folio)
> +{
> +        return folio_test_anon(folio) && !folio_test_swapbacked(folio);
> +}
> +
>  static __always_inline void __update_lru_size(struct lruvec *lruvec,
>                                                enum lru_list lru, enum zone_type zid,
>                                                long nr_pages)
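Nit: since this introduces a new helper, a short comment on what
"lazyfree" means here might help future readers. Untested wording
sketch (the MADV_FREE reference is my reading of the intended
semantics, so adjust as you see fit):

+/*
+ * A lazy-free folio is an anon folio whose swapbacked flag was
+ * cleared by MADV_FREE; if it is still clean at reclaim time, it
+ * can be discarded without any swap I/O.
+ */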
> diff --git a/mm/rmap.c b/mm/rmap.c
> index f955f02d570e..7241a3fa8574 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -1838,7 +1838,7 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
>          max_nr = (end_addr - addr) >> PAGE_SHIFT;
>  
>          /* We only support lazyfree batching for now ... */
> -        if (!folio_test_anon(folio) || folio_test_swapbacked(folio))
> +        if (!folio_is_lazyfree(folio))
Please rebase against mm-new. Commit [1] already supports batching of
file folios in folio_unmap_pte_batch():

+        /* We only support lazyfree or file folios batching for now ... */
+        if (folio_test_anon(folio) && folio_test_swapbacked(folio))

[1] https://lore.kernel.org/all/142919ac14d3cf70cba370808d85debe089df7b4.1766631066.git.baolin.wang@linux.alibaba.com/
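
FWIW, an untested sketch of how the helper could still be used on top
of [1], since "anon && swapbacked" is exactly "anon but not lazyfree"
given the helper's definition (anon && !swapbacked):

        /* Sketch only, not tested against mm-new: skip batching for
         * swap-backed anon folios, i.e. anything that is neither a
         * file folio nor lazyfree.
         */
        if (folio_test_anon(folio) && !folio_is_lazyfree(folio))
                return 1;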
Thanks,
Lance
>                  return 1;
>          if (pte_unused(pte))
>                  return 1;
> @@ -1934,7 +1934,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
>                  }
>  
>                  if (!pvmw.pte) {
> -                        if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
> +                        if (folio_is_lazyfree(folio)) {
>                                  if (unmap_huge_pmd_locked(vma, pvmw.address, pvmw.pmd, folio))
>                                          goto walk_done;
>                                  /*
> diff --git a/mm/vmscan.c b/mm/vmscan.c
> index 670fe9fae5ba..f357f74b5a35 100644
> --- a/mm/vmscan.c
> +++ b/mm/vmscan.c
> @@ -963,8 +963,7 @@ static void folio_check_dirty_writeback(struct folio *folio,
>           * They could be mistakenly treated as file lru. So further anon
>           * test is needed.
>           */
> -        if (!folio_is_file_lru(folio) ||
> -            (folio_test_anon(folio) && !folio_test_swapbacked(folio))) {
> +        if (!folio_is_file_lru(folio) || folio_is_lazyfree(folio)) {
>                  *dirty = false;
>                  *writeback = false;
>                  return;
> @@ -1501,7 +1500,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
>                          }
>                  }
>  
> -                if (folio_test_anon(folio) && !folio_test_swapbacked(folio)) {
> +                if (folio_is_lazyfree(folio)) {
>                          /* follow __remove_mapping for reference */
>                          if (!folio_ref_freeze(folio, 1))
>                                  goto keep_locked;