Message-ID: <20250728075306.12704-3-ryncsn@gmail.com>
Date: Mon, 28 Jul 2025 15:53:00 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Hugh Dickins <hughd@...gle.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Matthew Wilcox <willy@...radead.org>,
Kemeng Shi <shikemeng@...weicloud.com>,
Chris Li <chrisl@...nel.org>,
Nhat Pham <nphamcs@...il.com>,
Baoquan He <bhe@...hat.com>,
Barry Song <baohua@...nel.org>,
linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>,
Dev Jain <dev.jain@....com>
Subject: [PATCH v6 2/8] mm/shmem, swap: avoid redundant Xarray lookup during swapin

From: Kairui Song <kasong@...cent.com>

Currently shmem calls xa_get_order() to get the order of the swap radix
entry, which requires a full tree walk. This can easily be combined
with the swap entry value check (shmem_confirm_swap) to avoid the
duplicated lookup, and to abort early if the entry is already gone,
which should improve performance.

Signed-off-by: Kairui Song <kasong@...cent.com>
Reviewed-by: Kemeng Shi <shikemeng@...weicloud.com>
Reviewed-by: Dev Jain <dev.jain@....com>
Reviewed-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
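
For reference, the idea is to fold the order lookup into the same RCU
walk that validates the swap entry, rather than walking the tree twice.
A minimal standalone sketch of the pattern follows (hypothetical helper
name; kernel context and <linux/xarray.h> assumed):

	/* One walk: check the entry and fetch its order together. */
	static int lookup_order_if_match(struct xarray *xa,
					 unsigned long index, void *expected)
	{
		XA_STATE(xas, xa, index);
		int order = -1;
		void *entry;

		rcu_read_lock();
		do {
			entry = xas_load(&xas);
			if (entry == expected)
				order = xas_get_order(&xas);
			/* Re-walk if we loaded an XA_RETRY_ENTRY. */
		} while (xas_retry(&xas, entry));
		rcu_read_unlock();

		return order;	/* -1 means the entry is gone or changed */
	}
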
mm/shmem.c | 34 +++++++++++++++++++++++++---------
1 file changed, 25 insertions(+), 9 deletions(-)
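
On the caller side, the single lookup then doubles as the existence
check. A condensed illustration of the resulting flow in
shmem_swapin_folio() (not the literal code; see the hunks below):

	si = get_swap_device(swap);
	order = shmem_confirm_swap(mapping, index, swap);
	if (unlikely(!si))
		return order < 0 ? -EEXIST : -EINVAL;
	if (unlikely(order < 0)) {
		put_swap_device(si);
		return -EEXIST;	/* entry was raced away or split */
	}
	/* "order" is now known without a second tree walk. */
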
diff --git a/mm/shmem.c b/mm/shmem.c
index 1d0fd266c29b..da8edb363c75 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -512,15 +512,27 @@ static int shmem_replace_entry(struct address_space *mapping,
 
 /*
  * Sometimes, before we decide whether to proceed or to fail, we must check
- * that an entry was not already brought back from swap by a racing thread.
+ * that an entry was not already brought back or split by a racing thread.
  *
  * Checking folio is not enough: by the time a swapcache folio is locked, it
  * might be reused, and again be swapcache, using the same swap as before.
+ * Returns the swap entry's order if it is still present, else returns -1.
  */
-static bool shmem_confirm_swap(struct address_space *mapping,
-			       pgoff_t index, swp_entry_t swap)
+static int shmem_confirm_swap(struct address_space *mapping, pgoff_t index,
+			      swp_entry_t swap)
 {
-	return xa_load(&mapping->i_pages, index) == swp_to_radix_entry(swap);
+	XA_STATE(xas, &mapping->i_pages, index);
+	int ret = -1;
+	void *entry;
+
+	rcu_read_lock();
+	do {
+		entry = xas_load(&xas);
+		if (entry == swp_to_radix_entry(swap))
+			ret = xas_get_order(&xas);
+	} while (xas_retry(&xas, entry));
+	rcu_read_unlock();
+	return ret;
 }
 
 /*
@@ -2293,16 +2305,20 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 		return -EIO;
 
 	si = get_swap_device(swap);
-	if (!si) {
-		if (!shmem_confirm_swap(mapping, index, swap))
+	order = shmem_confirm_swap(mapping, index, swap);
+	if (unlikely(!si)) {
+		if (order < 0)
 			return -EEXIST;
 		else
 			return -EINVAL;
 	}
+	if (unlikely(order < 0)) {
+		put_swap_device(si);
+		return -EEXIST;
+	}
 
 	/* Look it up and read it in.. */
 	folio = swap_cache_get_folio(swap, NULL, 0);
-	order = xa_get_order(&mapping->i_pages, index);
 	if (!folio) {
 		int nr_pages = 1 << order;
 		bool fallback_order0 = false;
@@ -2412,7 +2428,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	 */
 	folio_lock(folio);
 	if ((!skip_swapcache && !folio_test_swapcache(folio)) ||
-	    !shmem_confirm_swap(mapping, index, swap) ||
+	    shmem_confirm_swap(mapping, index, swap) < 0 ||
 	    folio->swap.val != swap.val) {
 		error = -EEXIST;
 		goto unlock;
@@ -2460,7 +2476,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	*foliop = folio;
 	return 0;
 failed:
-	if (!shmem_confirm_swap(mapping, index, swap))
+	if (shmem_confirm_swap(mapping, index, swap) < 0)
 		error = -EEXIST;
 	if (error == -EIO)
 		shmem_set_folio_swapin_error(inode, index, folio, swap,
--
2.50.1