Message-ID: <20250728075306.12704-5-ryncsn@gmail.com>
Date: Mon, 28 Jul 2025 15:53:02 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Matthew Wilcox <willy@...radead.org>,
Kemeng Shi <shikemeng@...weicloud.com>,
Chris Li <chrisl@...nel.org>,
Nhat Pham <nphamcs@...il.com>,
Baoquan He <bhe@...hat.com>,
Barry Song <baohua@...nel.org>,
linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>
Subject: [PATCH v6 4/8] mm/shmem, swap: tidy up swap entry splitting
From: Kairui Song <kasong@...cent.com>
Instead of keeping separate paths that split the entry before the swapin
starts, move the entry splitting to after the swapin has put the folio in
the swap cache (or set the SWAP_HAS_CACHE bit). This way we only need one
place and one unified way to split the large entry. Whenever swapin
brings in a folio smaller than the shmem swap entry, split the entry and
recalculate the entry and index for verification.
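
As a rough illustration, the recalculation is plain offset arithmetic:
the distance between the faulting index and the entry-aligned index is
added to the large entry's swap offset. Below is a minimal user-space
sketch of just that arithmetic (round_down here is a simplified
power-of-two-only stand-in for the kernel macro, and all values are
made up):

  #include <stdio.h>

  /* Simplified stand-in for the kernel's round_down() (power-of-2 only). */
  #define round_down(x, y) ((x) & ~((y) - 1UL))

  int main(void)
  {
          unsigned long index = 35;        /* faulting page index */
          unsigned long entry_off = 1024;  /* swap offset of the large entry */
          unsigned int order = 4;          /* entry covers 1 << 4 = 16 pages */

          /* Distance from the start of the large entry ... */
          unsigned long offset = index - round_down(index, 1UL << order);
          /* ... is also the distance into the entry's swap cluster. */
          unsigned long swap_off = entry_off + offset;

          /* round_down(35, 16) == 32, so offset == 3 and swap_off == 1027 */
          printf("offset=%lu swap_off=%lu\n", offset, swap_off);
          return 0;
  }

This is the same calculation the patch performs with swp_entry() and
swp_offset() after a successful swapin, now in a single place.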
This removes duplicated code and function calls, reduces LOC, and makes
the split less racy, as it is now guarded by the swap cache, so there is
a lower chance of repeated faults due to a raced split. The compiler is
also able to optimize the code further:
bloat-o-meter results with GCC 14:
With DEBUG_SECTION_MISMATCH (-fno-inline-functions-called-once):
./scripts/bloat-o-meter mm/shmem.o.old mm/shmem.o
add/remove: 0/0 grow/shrink: 0/1 up/down: 0/-143 (-143)
Function                       old    new  delta
shmem_swapin_folio            2358   2215   -143
Total: Before=32933, After=32790, chg -0.43%
With !DEBUG_SECTION_MISMATCH:
add/remove: 0/1 grow/shrink: 1/0 up/down: 1069/-749 (320)
Function                       old    new  delta
shmem_swapin_folio            2871   3940  +1069
shmem_split_large_entry.isra   749      -   -749
Total: Before=32806, After=33126, chg +0.98%
Since shmem_split_large_entry is only called in one place now, the
compiler will either generate more compact code or inline it for better
performance.
Signed-off-by: Kairui Song <kasong@...cent.com>
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
Tested-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/shmem.c | 56 ++++++++++++++++++++++--------------------------------
1 file changed, 23 insertions(+), 33 deletions(-)
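
The resulting control flow in shmem_swapin_folio(), condensed from the
hunks below (a sketch only; error handling and locking details elided):

  	folio = swap_cache_get_folio(...);
  	if (!folio) {
  		/* Cache miss: large direct swapin (skip_swapcache) or
  		 * order-0 cluster swapin with a recalculated entry. */
  	}
  alloced:
  	/* Folio is locked, in swap cache (or SWAP_HAS_CACHE is set). */
  	if (order > folio_order(folio)) {
  		/* Swapin brought a smaller folio: split the large entry,
  		 * then recalculate swap from index_entry plus offset. */
  	} else if (order < folio_order(folio)) {
  		/* Align swap.val and index down to the folio's order. */
  	}
  	/* Verify the entry, add the folio to the page cache, ... */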
diff --git a/mm/shmem.c b/mm/shmem.c
index 881d440eeebb..e089de25cf6a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,14 +2303,16 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
struct address_space *mapping = inode->i_mapping;
struct mm_struct *fault_mm = vma ? vma->vm_mm : NULL;
struct shmem_inode_info *info = SHMEM_I(inode);
+ swp_entry_t swap, index_entry;
struct swap_info_struct *si;
struct folio *folio = NULL;
bool skip_swapcache = false;
- swp_entry_t swap;
int error, nr_pages, order, split_order;
+ pgoff_t offset;
VM_BUG_ON(!*foliop || !xa_is_value(*foliop));
- swap = radix_to_swp_entry(*foliop);
+ index_entry = radix_to_swp_entry(*foliop);
+ swap = index_entry;
*foliop = NULL;
if (is_poisoned_swp_entry(swap))
@@ -2358,46 +2360,35 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
}
/*
- * Now swap device can only swap in order 0 folio, then we
- * should split the large swap entry stored in the pagecache
- * if necessary.
- */
- split_order = shmem_split_large_entry(inode, index, swap, gfp);
- if (split_order < 0) {
- error = split_order;
- goto failed;
- }
-
- /*
- * If the large swap entry has already been split, it is
+ * Now the swap device can only swap in order 0 folios, so it is
* necessary to recalculate the new swap entry based on
- * the old order alignment.
+ * the offset, as the swapin index might be unaligned.
*/
- if (split_order > 0) {
- pgoff_t offset = index - round_down(index, 1 << split_order);
-
+ if (order) {
+ offset = index - round_down(index, 1 << order);
swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
}
- /* Here we actually start the io */
folio = shmem_swapin_cluster(swap, gfp, info, index);
if (!folio) {
error = -ENOMEM;
goto failed;
}
- } else if (order > folio_order(folio)) {
+ }
+alloced:
+ if (order > folio_order(folio)) {
/*
- * Swap readahead may swap in order 0 folios into swapcache
+ * Swapin may get smaller folios for various reasons:
+ * it may fall back to order 0 due to memory pressure or a race,
+ * swap readahead may swap in order 0 folios into swapcache
* asynchronously, while the shmem mapping can still stores
* large swap entries. In such cases, we should split the
* large swap entry to prevent possible data corruption.
*/
- split_order = shmem_split_large_entry(inode, index, swap, gfp);
+ split_order = shmem_split_large_entry(inode, index, index_entry, gfp);
if (split_order < 0) {
- folio_put(folio);
- folio = NULL;
error = split_order;
- goto failed;
+ goto failed_nolock;
}
/*
@@ -2406,16 +2397,14 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
* the old order alignment.
*/
if (split_order > 0) {
- pgoff_t offset = index - round_down(index, 1 << split_order);
-
- swap = swp_entry(swp_type(swap), swp_offset(swap) + offset);
+ offset = index - round_down(index, 1 << split_order);
+ swap = swp_entry(swp_type(swap), swp_offset(index_entry) + offset);
}
} else if (order < folio_order(folio)) {
swap.val = round_down(swap.val, 1 << folio_order(folio));
index = round_down(index, 1 << folio_order(folio));
}
-alloced:
/*
* We have to do this with the folio locked to prevent races.
* The shmem_confirm_swap below only checks if the first swap
@@ -2479,12 +2468,13 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
shmem_set_folio_swapin_error(inode, index, folio, swap,
skip_swapcache);
unlock:
- if (skip_swapcache)
- swapcache_clear(si, swap, folio_nr_pages(folio));
- if (folio) {
+ if (folio)
folio_unlock(folio);
+failed_nolock:
+ if (skip_swapcache)
+ swapcache_clear(si, folio->swap, folio_nr_pages(folio));
+ if (folio)
folio_put(folio);
- }
put_swap_device(si);
return error;
--
2.50.1