[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <20251007-swap-clean-after-swap-table-p1-v1-3-74860ef8ba74@tencent.com>
Date: Tue, 07 Oct 2025 04:02:35 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Kemeng Shi <shikemeng@...weicloud.com>, Kairui Song <kasong@...cent.com>,
Nhat Pham <nphamcs@...il.com>, Baoquan He <bhe@...hat.com>,
Barry Song <baohua@...nel.org>, Chris Li <chrisl@...nel.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
David Hildenbrand <david@...hat.com>,
"Matthew Wilcox (Oracle)" <willy@...radead.org>,
Ying Huang <ying.huang@...ux.alibaba.com>, Kairui Song <ryncsn@...il.com>,
linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] mm, swap: cleanup swap entry allocation parameter
From: Kairui Song <kasong@...cent.com>
We no longer need this GFP parameter after commit 8578e0c00dcf ("mm, swap:
use the swap table for the swap cache and switch API"). Before that
commit, the GFP parameter was already almost identical for all callers, so
nothing was changed by that commit. The swap table series just moved the GFP
usage into a lower layer and made it better defined: the allocation flags now
depend on whether an atomic or a sleeping allocation is required.
Now that this parameter is no longer used, just remove it. No behavior
change.
Signed-off-by: Kairui Song <kasong@...cent.com>
---
include/linux/swap.h | 4 ++--
mm/shmem.c | 2 +-
mm/swapfile.c | 2 +-
mm/vmscan.c | 4 ++--
4 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e818fbade1e2..a4b264817735 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -462,7 +462,7 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask);
+int folio_alloc_swap(struct folio *folio);
bool folio_free_swap(struct folio *folio);
void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
@@ -560,7 +560,7 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-static inline int folio_alloc_swap(struct folio *folio, gfp_t gfp_mask)
+static inline int folio_alloc_swap(struct folio *folio)
{
return -EINVAL;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 45f51745ad88..63092cc0b141 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1617,7 +1617,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
folio_mark_uptodate(folio);
}
- if (!folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ if (!folio_alloc_swap(folio)) {
bool first_swapped = shmem_recalc_inode(inode, 0, nr_pages);
int error;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 732e07c70ce9..534b21aeef5a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1425,7 +1425,7 @@ static bool swap_sync_discard(void)
* Context: Caller needs to hold the folio lock.
* Return: Whether the folio was added to the swap cache.
*/
-int folio_alloc_swap(struct folio *folio, gfp_t gfp)
+int folio_alloc_swap(struct folio *folio)
{
unsigned int order = folio_order(folio);
unsigned int size = 1 << order;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index aadbee50a851..c99f7d6d5dd9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1296,7 +1296,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
split_folio_to_list(folio, folio_list))
goto activate_locked;
}
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN)) {
+ if (folio_alloc_swap(folio)) {
int __maybe_unused order = folio_order(folio);
if (!folio_test_large(folio))
@@ -1312,7 +1312,7 @@ static unsigned int shrink_folio_list(struct list_head *folio_list,
}
#endif
count_mthp_stat(order, MTHP_STAT_SWPOUT_FALLBACK);
- if (folio_alloc_swap(folio, __GFP_HIGH | __GFP_NOWARN))
+ if (folio_alloc_swap(folio))
goto activate_locked_split;
}
/*
--
2.51.0
Powered by blists - more mailing lists