Message-Id: <20251024-swap-clean-after-swap-table-p1-v2-3-a709469052e7@tencent.com>
Date: Fri, 24 Oct 2025 02:00:41 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Kemeng Shi <shikemeng@...weicloud.com>, Kairui Song <kasong@...cent.com>,
Nhat Pham <nphamcs@...il.com>, Baoquan He <bhe@...hat.com>,
Barry Song <baohua@...nel.org>, Chris Li <chrisl@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Ying Huang <ying.huang@...ux.alibaba.com>,
YoungJun Park <youngjun.park@....com>, Kairui Song <ryncsn@...il.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH v2 3/5] mm, swap: cleanup swap entry allocation parameter
From: Kairui Song <kasong@...cent.com>
The GFP parameter is no longer needed after commit 8578e0c00dcf ("mm, swap:
use the swap table for the swap cache and switch API"). Even before that
commit the GFP value was almost identical across all callers, so nothing
was changed by it; the swap table conversion simply moved the GFP decision
into a lower layer, where it is better defined and depends on whether the
allocation is atomic or may sleep.

Now that the parameter is unused, remove it. No behavior change.
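
To illustrate the last point: after the swap table conversion, the GFP flags
are picked inside the swap cache layer based on whether the allocation context
may sleep, so callers have nothing left to pass. A minimal sketch of that idea
(hypothetical helper name, not the actual swap table code):

	#include <linux/gfp.h>

	/*
	 * Sketch only: pick GFP flags for the swap table allocation based
	 * on the calling context, roughly mirroring the flags the old
	 * callers used to pass in explicitly.
	 */
	static gfp_t swap_table_gfp(bool can_sleep)
	{
		/* Sleepable context: a normal kernel allocation is fine. */
		if (can_sleep)
			return GFP_KERNEL;
		/*
		 * Atomic context: don't sleep, don't dip into emergency
		 * reserves, and stay quiet on failure.
		 */
		return GFP_ATOMIC | __GFP_NOMEMALLOC | __GFP_NOWARN;
	}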
Acked-by: Chris Li <chrisl@...nel.org>
Acked-by: Nhat Pham <nphamcs@...il.com>
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
Reviewed-by: David Hildenbrand <david@...hat.com>
Signed-off-by: Kairui Song <kasong@...cent.com>
---
include/linux/swap.h | 4 ++--
mm/shmem.c | 2 +-
mm/swapfile.c | 3 +--
mm/vmscan.c | 4 ++--
4 files changed, 6 insertions(+), 7 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e818fbade1e2..a4b264817735 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -462,7 +462,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
+int folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
@@ -560,7 +560,7 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
+static inline int folio_alloc_swap(struct folio *folio)
{
return -EINVAL;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 10682328d54c..7559773ebb30 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1617,7 +1617,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
folio_mark_uptodate(folio);
}
- if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ if (!folio_alloc_swap(folio)) {
bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
int error;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 781a70dfcff1..42e2b2759240 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1417,7 +1417,6 @@ static bool swap_sync_discard(void)
/**
* folio_alloc_swap - allocate swap space for a folio
* @folio: folio we want to move to swap
- * @gfp: gfp mask for shadow nodes
*
* Allocate swap space for the folio and add the folio to the
* swap cache.
@@ -1425,7 +1424,7 @@ static bool swap_sync_discard(void)
* Context: Caller needs to hold the folio lock.
* Return: Whether the folio was added to the swap cache.
*/
-int folio_alloc_swap(struct folio *folio, gfp_t gfp)
+int folio_alloc_swap(struct folio *folio)
{
unsigned int order = folio_order(folio);
unsigned int size = 1 << order;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 92f4ca99b73c..c922bad2b8fd 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1296,7 +1296,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
+ if (folio_alloc_swap(folio)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
@@ -1312,7 +1312,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
+ if (folio_alloc_swap(folio))
goto activate_locked_split;
}
/*
--
2.51.0