Message-ID: <20250514201729.48420-11-ryncsn@gmail.com>
Date: Thu, 15 May 2025 04:17:10 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>,
Hugh Dickins <hughd@...gle.com>,
Chris Li <chrisl@...nel.org>,
David Hildenbrand <david@...hat.com>,
Yosry Ahmed <yosryahmed@...gle.com>,
"Huang, Ying" <ying.huang@...ux.alibaba.com>,
Nhat Pham <nphamcs@...il.com>,
Johannes Weiner <hannes@...xchg.org>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Baoquan He <bhe@...hat.com>,
Barry Song <baohua@...nel.org>,
Kalesh Singh <kaleshsingh@...gle.com>,
Kemeng Shi <shikemeng@...weicloud.com>,
Tim Chen <tim.c.chen@...ux.intel.com>,
Ryan Roberts <ryan.roberts@....com>,
linux-kernel@...r.kernel.org,
Kairui Song <kasong@...cent.com>
Subject: [PATCH 10/28] mm, swap: add a swap helper for bypassing only readahead
From: Kairui Song <kasong@...cent.com>
The swap cache now has very low overhead, so bypassing it is no longer
helpful. To prepare for unifying the swap-in path, introduce a new
helper that bypasses only readahead and does not bypass the swap cache.
Signed-off-by: Kairui Song <kasong@...cent.com>
---
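Usage sketch (illustration only, not part of this patch): how a swap-in
fault path might call the new helper. alloc_swap_folio() here is a
made-up stand-in for whatever allocates and memcg-charges the folio.

	static struct folio *swapin_no_readahead(swp_entry_t entry, gfp_t gfp,
						 struct vm_fault *vmf)
	{
		struct folio *folio, *swapcache;

		/* Hypothetical helper: allocate and charge a new folio. */
		folio = alloc_swap_folio(gfp, vmf);
		if (!folio)
			return NULL;

		/*
		 * swapin_entry() tries to add @folio to the swap cache and,
		 * on success, starts the read into it. If it lost the race,
		 * the winner's folio (or NULL) is returned and our folio is
		 * unused, so drop it.
		 */
		swapcache = swapin_entry(entry, folio);
		if (swapcache != folio)
			folio_put(folio);

		return swapcache;
	}

Note that only our own folio comes back locked with the read initiated;
a folio found in the swap cache is returned unlocked, so the caller
still has to lock and verify it, as with __swapin_cache_alloc().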
mm/swap.h | 6 ++
mm/swap_state.c | 158 ++++++++++++++++++++++++++++++------------------
2 files changed, 105 insertions(+), 59 deletions(-)
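For the large folio case documented at swapin_entry() below, the entry
is aligned down to the folio boundary before the swap cache add; a
worked example (again illustration only, values made up):

	/*
	 * A 16-page (order-4) folio faulting on offset 37 of swap
	 * device type 1:
	 *
	 *	nr_pages = folio_nr_pages(folio);	// 16
	 *	offset   = ALIGN_DOWN(37, 16);		// 32
	 *	entry    = swp_entry(1, 32);
	 *
	 * swapin_entry() then reads swap offsets 32..47 into the
	 * folio, no matter which subpage the fault actually hit.
	 */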
diff --git a/mm/swap.h b/mm/swap.h
index fec7d6e751ae..aab6bf9c3a8a 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -217,6 +217,7 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
struct mempolicy *mpol, pgoff_t ilx);
struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
struct vm_fault *vmf);
+struct folio *swapin_entry(swp_entry_t entry, struct folio *folio);
void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr);
@@ -303,6 +304,11 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
return NULL;
}
+static inline struct folio *swapin_entry(swp_entry_t ent, struct folio *folio)
+{
+ return NULL;
+}
+
static inline void swap_update_readahead(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr)
{
diff --git a/mm/swap_state.c b/mm/swap_state.c
index fe71706e29d9..d68687295f52 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -353,54 +353,26 @@ void swap_update_readahead(struct folio *folio,
}
}
-struct folio *__swapin_cache_alloc(swp_entry_t entry, gfp_t gfp_mask,
- struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
- bool skip_if_exists)
+static struct folio *__swapin_cache_add_prepare(swp_entry_t entry,
+ struct folio *folio,
+ bool skip_if_exists)
{
- struct swap_info_struct *si = swp_info(entry);
- struct folio *folio;
- struct folio *new_folio = NULL;
- struct folio *result = NULL;
+ int nr_pages = folio_nr_pages(folio);
+ struct folio *exist;
void *shadow = NULL;
+ int err;
- *new_page_allocated = false;
for (;;) {
- int err;
-
/*
- * Check the swap cache first, if a cached folio is found,
- * return it unlocked. The caller will lock and check it.
+ * The caller should have checked the swap cache and swap count
+ * already, so try to prepare the swap map directly; it will still
+ * fail with -ENOENT or -EEXIST if the entry is gone or raced.
*/
- folio = swap_cache_get_folio(entry);
- if (folio)
- goto got_folio;
-
- /*
- * Just skip read ahead for unused swap slot.
- */
- if (!swap_entry_swapped(si, entry))
- goto put_and_return;
-
- /*
- * Get a new folio to read into from swap. Allocate it now if
- * new_folio not exist, before marking swap_map SWAP_HAS_CACHE,
- * when -EEXIST will cause any racers to loop around until we
- * add it to cache.
- */
- if (!new_folio) {
- new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
- if (!new_folio)
- goto put_and_return;
- }
-
- /*
- * Swap entry may have been freed since our caller observed it.
- */
- err = swapcache_prepare(entry, 1);
+ err = swapcache_prepare(entry, nr_pages);
if (!err)
break;
else if (err != -EEXIST)
- goto put_and_return;
+ return NULL;
/*
* Protect against a recursive call to __swapin_cache_alloc()
@@ -411,7 +383,11 @@ struct folio *__swapin_cache_alloc(swp_entry_t entry, gfp_t gfp_mask,
* __swapin_cache_alloc() in the writeback path.
*/
if (skip_if_exists)
- goto put_and_return;
+ return NULL;
+
+ exist = swap_cache_get_folio(entry);
+ if (exist)
+ return exist;
/*
* We might race against __swap_cache_del_folio(), and
@@ -426,35 +402,99 @@ struct folio *__swapin_cache_alloc(swp_entry_t entry, gfp_t gfp_mask,
/*
* The swap entry is ours to swap in. Prepare the new folio.
*/
- __folio_set_locked(new_folio);
- __folio_set_swapbacked(new_folio);
+ __folio_set_locked(folio);
+ __folio_set_swapbacked(folio);
- if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
- goto fail_unlock;
-
- if (swap_cache_add_folio(entry, new_folio, &shadow))
+ if (swap_cache_add_folio(entry, folio, &shadow))
goto fail_unlock;
memcg1_swapin(entry, 1);
if (shadow)
- workingset_refault(new_folio, shadow);
+ workingset_refault(folio, shadow);
/* Caller will initiate read into locked new_folio */
- folio_add_lru(new_folio);
- *new_page_allocated = true;
- folio = new_folio;
-got_folio:
- result = folio;
- goto put_and_return;
+ folio_add_lru(folio);
+ return folio;
fail_unlock:
- put_swap_folio(new_folio, entry);
- folio_unlock(new_folio);
-put_and_return:
- if (!(*new_page_allocated) && new_folio)
- folio_put(new_folio);
- return result;
+ put_swap_folio(folio, entry);
+ folio_unlock(folio);
+ return NULL;
+}
+
+struct folio *__swapin_cache_alloc(swp_entry_t entry, gfp_t gfp_mask,
+ struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
+ bool skip_if_exists)
+{
+ struct swap_info_struct *si = swp_info(entry);
+ struct folio *swapcache = NULL, *folio = NULL;
+
+ /*
+ * Check the swap cache first, if a cached folio is found,
+ * return it unlocked. The caller will lock and check it.
+ */
+ swapcache = swap_cache_get_folio(entry);
+ if (swapcache)
+ goto out;
+
+ /*
+ * Just skip readahead for an unused swap slot.
+ */
+ if (!swap_entry_swapped(si, entry))
+ goto out;
+
+ /*
+ * Get a new folio to read into from swap. Allocate it now,
+ * before marking swap_map SWAP_HAS_CACHE, so that -EEXIST
+ * will cause any racers to loop around until we add it to
+ * the cache.
+ */
+ folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
+ if (!folio)
+ goto out;
+
+ if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
+ goto out;
+
+ swapcache = __swapin_cache_add_prepare(entry, folio, skip_if_exists);
+out:
+ if (swapcache && swapcache == folio) {
+ *new_page_allocated = true;
+ } else {
+ if (folio)
+ folio_put(folio);
+ *new_page_allocated = false;
+ }
+
+ return swapcache;
+}
+
+/**
+ * swapin_entry - swap in one or multiple entries, skipping readahead
+ *
+ * @entry: swap entry to swap in
+ * @folio: preallocated folio
+ *
+ * Reads @entry into @folio. @folio will be added to the swap cache first; if
+ * this races with other users, only one user will successfully add its
+ * folio to the swap cache, and that folio will be returned to all readers.
+ *
+ * If @folio is a large folio, the entry will be rounded down to match
+ * the folio start and the whole folio will be read in.
+ */
+struct folio *swapin_entry(swp_entry_t entry, struct folio *folio)
+{
+ struct folio *swapcache;
+ pgoff_t offset = swp_offset(entry);
+ unsigned long nr_pages = folio_nr_pages(folio);
+ VM_WARN_ON_ONCE(nr_pages > SWAPFILE_CLUSTER);
+
+ entry = swp_entry(swp_type(entry), ALIGN_DOWN(offset, nr_pages));
+ swapcache = __swapin_cache_add_prepare(entry, folio, false);
+ if (swapcache == folio)
+ swap_read_folio(folio, NULL);
+ return swapcache;
}
/*
--
2.49.0