Message-ID: <20250811172018.48901-3-ryncsn@gmail.com>
Date: Tue, 12 Aug 2025 01:20:18 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Vlastimil Babka <vbabka@...e.cz>,
Jann Horn <jannh@...gle.com>,
Pedro Falcato <pfalcato@...e.de>,
Matthew Wilcox <willy@...radead.org>,
Hugh Dickins <hughd@...gle.com>,
David Hildenbrand <david@...hat.com>,
Chris Li <chrisl@...nel.org>,
Barry Song <baohua@...nel.org>,
Baoquan He <bhe@...hat.com>,
Nhat Pham <nphamcs@...il.com>,
Kemeng Shi <shikemeng@...weicloud.com>,
linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>
Subject: [PATCH 2/2] mm/mincore: use a helper for checking the swap cache

From: Kairui Song <kasong@...cent.com>

Introduce a mincore_swap() helper for checking swap entries. Move
all swap-related logic and the sanity debug check into it, and
separate them from the page cache checking.

The performance is better after this commit: mincore_page() is no
longer called on a swap cache address space, so its logic can be
simpler.
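
For reference, both callers now funnel into the same helper (as seen
in the diff below): the page table walk passes shmem == false since
the PTL keeps the swap device stable, while the lockless shmem
mapping lookup passes shmem == true so the helper grabs the device
itself:

    /* PTE walk, under the PTL: */
    *vec = mincore_swap(pte_to_swp_entry(pte), false);

    /* Lockless shmem mapping lookup: */
    return mincore_swap(radix_to_swp_entry(folio), true);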

The sanity check also covers more potential cases now. Previously,
the WARN_ON only caught a potentially corrupted page table; now a
WARN is also triggered if a shmem mapping contains a swap entry
with !CONFIG_SWAP. This changes the reported mincore value when the
WARN is triggered, but that shouldn't matter: the WARN_ON means the
data is already corrupted or something is very wrong, so it really
should not happen.

Before this series:
mincore on a swapped out 16G anon mmap range:
Took 488220 us
mincore on a 16G shmem mmap range:
Took 530272 us

After this commit:
mincore on a swapped out 16G anon mmap range:
Took 446763 us
mincore on a 16G shmem mmap range:
Took 460496 us

Roughly 10% faster.
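
For context, a minimal user-space sketch of how such a measurement
could be taken. The actual benchmark is not part of this patch, so
the MADV_PAGEOUT-based swap-out and the timing details below are
assumptions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 16UL << 30;	/* 16G, matching the numbers above */
	long page = sysconf(_SC_PAGESIZE);
	unsigned char *vec = malloc(len / page);
	struct timespec a, b;
	void *map;

	map = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (map == MAP_FAILED || !vec)
		return 1;

	memset(map, 1, len);			/* populate the range */
	madvise(map, len, MADV_PAGEOUT);	/* push it towards swap */

	clock_gettime(CLOCK_MONOTONIC, &a);
	mincore(map, len, vec);
	clock_gettime(CLOCK_MONOTONIC, &b);
	printf("Took %ld us\n",
	       (b.tv_sec - a.tv_sec) * 1000000L +
	       (b.tv_nsec - a.tv_nsec) / 1000);

	free(vec);
	munmap(map, len);
	return 0;
}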
Signed-off-by: Kairui Song <kasong@...cent.com>
---
mm/mincore.c | 90 ++++++++++++++++++++++++++++------------------------
1 file changed, 49 insertions(+), 41 deletions(-)

diff --git a/mm/mincore.c b/mm/mincore.c
index 20fd0967d3cb..2f3e1816a30d 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -47,6 +47,48 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr,
return 0;
}
+static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
+{
+ struct swap_info_struct *si;
+ struct folio *folio = NULL;
+ unsigned char present = 0;
+
+ if (!IS_ENABLED(CONFIG_SWAP)) {
+ WARN_ON(1);
+ return 0;
+ }
+
+ /*
+ * A shmem mapping may contain swapin error entries, which are
+ * absent. A page table may contain migration or hwpoison
+ * entries, which are always uptodate.
+ */
+ if (non_swap_entry(entry))
+ return !shmem;
+
+ /*
+ * The shmem mapping lookup is lockless, so we need to grab the swap
+ * device. The mincore page table walk holds the PTL, so the swap
+ * device is stable; avoid touching the si for better performance.
+ */
+ if (shmem) {
+ si = get_swap_device(entry);
+ if (!si)
+ return 0;
+ }
+ folio = filemap_get_entry(swap_address_space(entry),
+ swap_cache_index(entry));
+ if (shmem)
+ put_swap_device(si);
+ /* The swap cache space contains either a folio, a shadow entry, or NULL */
+ if (folio && !xa_is_value(folio)) {
+ present = folio_test_uptodate(folio);
+ folio_put(folio);
+ }
+
+ return present;
+}
+
/*
* Later we can get more picky about what "in core" means precisely.
* For now, simply check to see if the page is in the page cache,
@@ -64,33 +106,15 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t index)
* any other file mapping (ie. marked !present and faulted in with
* tmpfs's .fault). So swapped out tmpfs mappings are tested here.
*/
- if (IS_ENABLED(CONFIG_SWAP) && shmem_mapping(mapping)) {
- folio = filemap_get_entry(mapping, index);
- /*
- * shmem/tmpfs may return swap: account for swapcache
- * page too.
- */
+ folio = filemap_get_entry(mapping, index);
+ if (folio) {
if (xa_is_value(folio)) {
- struct swap_info_struct *si;
- swp_entry_t swp = radix_to_swp_entry(folio);
- /* There might be swapin error entries in shmem mapping. */
- if (non_swap_entry(swp))
- return 0;
- /* Prevent swap device to being swapoff under us */
- si = get_swap_device(swp);
- if (si) {
- folio = filemap_get_folio(swap_address_space(swp),
- swap_cache_index(swp));
- put_swap_device(si);
- } else {
+ if (shmem_mapping(mapping))
+ return mincore_swap(radix_to_swp_entry(folio),
+ true);
+ else
return 0;
- }
}
- } else {
- folio = filemap_get_folio(mapping, index);
- }
-
- if (!IS_ERR_OR_NULL(folio)) {
present = folio_test_uptodate(folio);
folio_put(folio);
}
@@ -168,23 +192,7 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
for (i = 0; i < step; i++)
vec[i] = 1;
} else { /* pte is a swap entry */
- swp_entry_t entry = pte_to_swp_entry(pte);
-
- if (non_swap_entry(entry)) {
- /*
- * migration or hwpoison entries are always
- * uptodate
- */
- *vec = 1;
- } else {
-#ifdef CONFIG_SWAP
- *vec = mincore_page(swap_address_space(entry),
- swap_cache_index(entry));
-#else
- WARN_ON(1);
- *vec = 1;
-#endif
- }
+ *vec = mincore_swap(pte_to_swp_entry(pte), false);
}
vec += step;
}
--
2.50.1