Message-ID: <CAMgjq7AwDH0+s__AYj5os0UzRjjAW6Q8snMA-FcW1NOuD8zWEg@mail.gmail.com>
Date: Thu, 30 Oct 2025 00:52:04 +0800
From: Kairui Song <ryncsn@...il.com>
To: linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>, Baoquan He <bhe@...hat.com>, 
	Barry Song <baohua@...nel.org>, Chris Li <chrisl@...nel.org>, Nhat Pham <nphamcs@...il.com>, 
	Johannes Weiner <hannes@...xchg.org>, Yosry Ahmed <yosry.ahmed@...ux.dev>, 
	David Hildenbrand <david@...hat.com>, Youngjun Park <youngjun.park@....com>, 
	Hugh Dickins <hughd@...gle.com>, Baolin Wang <baolin.wang@...ux.alibaba.com>, 
	"Huang, Ying" <ying.huang@...ux.alibaba.com>, Kemeng Shi <shikemeng@...weicloud.com>, 
	Lorenzo Stoakes <lorenzo.stoakes@...cle.com>, 
	"Matthew Wilcox (Oracle)" <willy@...radead.org>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH 15/19] mm, swap: add folio to swap cache directly on allocation
On Thu, Oct 30, 2025 at 12:00 AM Kairui Song <ryncsn@...il.com> wrote:
>
> From: Kairui Song <kasong@...cent.com>
>
> The allocator uses SWAP_HAS_CACHE to pin a swap slot upon allocation.
> SWAP_HAS_CACHE is being deprecated as it caused a lot of confusion.
> This pinning usage can be dropped by adding the folio to the swap
> cache directly on allocation.
>
> All swap allocations are folio-based now (except for hibernation), so
> the swap allocator can always take the folio as a parameter. And since
> both the swap cache (swap table) and the swap map are now protected by
> the cluster lock, scanning the map and inserting the folio can be done
> in the same critical section. This eliminates the time window in which
> a slot is pinned by SWAP_HAS_CACHE but has nothing in the swap cache,
> and avoids taking the lock multiple times.
>
> This is both a cleanup and an optimization.
>
> Signed-off-by: Kairui Song <kasong@...cent.com>
> ---
>  include/linux/swap.h |   5 --
>  mm/swap.h            |   8 +--
>  mm/swap_state.c      |  56 +++++++++++-------
>  mm/swapfile.c        | 161 +++++++++++++++++++++------------------------------
>  4 files changed, 105 insertions(+), 125 deletions(-)
>
> diff --git a/include/linux/swap.h b/include/linux/swap.h
> index ac3caa4c6999..4b4b81fbc6a3 100644
> --- a/include/linux/swap.h
> +++ b/include/linux/swap.h
> @@ -452,7 +452,6 @@ static inline long get_nr_swap_pages(void)
>  }
>
>  extern void si_swapinfo(struct sysinfo *);
> -void put_swap_folio(struct folio *folio, swp_entry_t entry);
>  extern int add_swap_count_continuation(swp_entry_t, gfp_t);
>  int swap_type_of(dev_t device, sector_t offset);
>  int find_first_swap(dev_t *device);
> @@ -534,10 +533,6 @@ static inline void swap_put_entries_direct(swp_entry_t ent, int nr)
>  {
>  }
>
> -static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
> -{
> -}
> -
>  static inline int __swap_count(swp_entry_t entry)
>  {
>         return 0;
> diff --git a/mm/swap.h b/mm/swap.h
> index 74c61129d7b7..03694ffa662f 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -277,13 +277,13 @@ void __swapcache_clear_cached(struct swap_info_struct *si,
>   */
>  struct folio *swap_cache_get_folio(swp_entry_t entry);
>  void *swap_cache_get_shadow(swp_entry_t entry);
> -int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
> -                        void **shadow, bool alloc);
>  void swap_cache_del_folio(struct folio *folio);
>  struct folio *swap_cache_alloc_folio(swp_entry_t entry, gfp_t gfp_flags,
>                                      struct mempolicy *mpol, pgoff_t ilx,
>                                      bool *alloced);
>  /* Below helpers require the caller to lock and pass in the swap cluster. */
> +void __swap_cache_add_folio(struct swap_cluster_info *ci,
> +                           struct folio *folio, swp_entry_t entry);
>  void __swap_cache_del_folio(struct swap_cluster_info *ci,
>                             struct folio *folio, swp_entry_t entry, void *shadow);
>  void __swap_cache_replace_folio(struct swap_cluster_info *ci,
> @@ -459,8 +459,8 @@ static inline void *swap_cache_get_shadow(swp_entry_t entry)
>         return NULL;
>  }
>
> -static inline int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
> -                                      void **shadow, bool alloc)
> +static inline void __swap_cache_add_folio(struct swap_cluster_info *ci,
> +               struct folio *folio, swp_entry_t entry)
>  {
>  }
>
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index d2bcca92b6e0..85d9f99c384f 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -122,6 +122,34 @@ void *swap_cache_get_shadow(swp_entry_t entry)
>         return NULL;
>  }
>
> +void __swap_cache_add_folio(struct swap_cluster_info *ci,
> +                           struct folio *folio, swp_entry_t entry)
> +{
> +       unsigned long new_tb;
> +       unsigned int ci_start, ci_off, ci_end;
> +       unsigned long nr_pages = folio_nr_pages(folio);
> +
> +       VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
> +       VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
> +       VM_WARN_ON_ONCE_FOLIO(!folio_test_swapbacked(folio), folio);
> +
> +       new_tb = folio_to_swp_tb(folio);
> +       ci_start = swp_cluster_offset(entry);
> +       ci_off = ci_start;
> +       ci_end = ci_start + nr_pages;
> +       do {
> +               VM_WARN_ON_ONCE(swp_tb_is_folio(__swap_table_get(ci, ci_off)));
> +               __swap_table_set(ci, ci_off, new_tb);
> +       } while (++ci_off < ci_end);
> +
> +       folio_ref_add(folio, nr_pages);
> +       folio_set_swapcache(folio);
> +       folio->swap = entry;
> +
> +       node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
> +       lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
> +}
> +
>  /**
>   * swap_cache_add_folio - Add a folio into the swap cache.
>   * @folio: The folio to be added.
> @@ -136,23 +164,18 @@ void *swap_cache_get_shadow(swp_entry_t entry)
>   * The caller also needs to update the corresponding swap_map slots with
>   * SWAP_HAS_CACHE bit to avoid race or conflict.
>   */
> -int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
> -                        void **shadowp, bool alloc)
> +static int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
> +                               void **shadowp)
>  {
>         int err;
>         void *shadow = NULL;
> +       unsigned long old_tb;
>         struct swap_info_struct *si;
> -       unsigned long old_tb, new_tb;
>         struct swap_cluster_info *ci;
>         unsigned int ci_start, ci_off, ci_end, offset;
>         unsigned long nr_pages = folio_nr_pages(folio);
>
> -       VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio);
> -       VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);
> -       VM_WARN_ON_ONCE_FOLIO(!folio_test_swapbacked(folio), folio);
> -
>         si = __swap_entry_to_info(entry);
> -       new_tb = folio_to_swp_tb(folio);
>         ci_start = swp_cluster_offset(entry);
>         ci_end = ci_start + nr_pages;
>         ci_off = ci_start;
> @@ -168,7 +191,7 @@ int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
>                         err = -EEXIST;
>                         goto failed;
>                 }
> -               if (!alloc && unlikely(!__swap_count(swp_entry(swp_type(entry), offset)))) {
> +               if (unlikely(!__swap_count(swp_entry(swp_type(entry), offset)))) {
>                         err = -ENOENT;
>                         goto failed;
>                 }
> @@ -184,20 +207,11 @@ int swap_cache_add_folio(struct folio *folio, swp_entry_t entry,
>                  * Still need to pin the slots with SWAP_HAS_CACHE since
>                  * swap allocator depends on that.
>                  */
> -               if (!alloc)
> -                       __swapcache_set_cached(si, ci, swp_entry(swp_type(entry), offset));
> -               __swap_table_set(ci, ci_off, new_tb);
> +               __swapcache_set_cached(si, ci, swp_entry(swp_type(entry), offset));
>                 offset++;
>         } while (++ci_off < ci_end);
> -
> -       folio_ref_add(folio, nr_pages);
> -       folio_set_swapcache(folio);
> -       folio->swap = entry;
> +       __swap_cache_add_folio(ci, folio, entry);
>         swap_cluster_unlock(ci);
> -
> -       node_stat_mod_folio(folio, NR_FILE_PAGES, nr_pages);
> -       lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr_pages);
> -
>         if (shadowp)
>                 *shadowp = shadow;
>         return 0;
> @@ -466,7 +480,7 @@ static struct folio *__swap_cache_prepare_and_add(swp_entry_t entry,
>         __folio_set_locked(folio);
>         __folio_set_swapbacked(folio);
>         for (;;) {
> -               ret = swap_cache_add_folio(folio, entry, &shadow, false);
> +               ret = swap_cache_add_folio(folio, entry, &shadow);
>                 if (!ret)
>                         break;
>
> diff --git a/mm/swapfile.c b/mm/swapfile.c
> index 426b0b6d583f..8d98f28907bc 100644
> --- a/mm/swapfile.c
> +++ b/mm/swapfile.c
> @@ -875,28 +875,53 @@ static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci,
>         }
>  }
>
> -static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
> -                               unsigned int start, unsigned char usage,
> -                               unsigned int order)
> +static bool cluster_alloc_range(struct swap_info_struct *si,
> +                               struct swap_cluster_info *ci,
> +                               struct folio *folio,
> +                               unsigned int offset)
>  {
> -       unsigned int nr_pages = 1 << order;
> +       unsigned long nr_pages;
> +       unsigned int order;
>
>         lockdep_assert_held(&ci->lock);
>
>         if (!(si->flags & SWP_WRITEOK))
>                 return false;
>
> +       /*
> +        * All mm swap allocations start with a folio (folio_alloc_swap),
> +        * and this is also the only allocation path for large order
> +        * allocations. Such swap slots start with count == 0 and the
> +        * count is increased upon folio unmap.
> +        *
> +        * Otherwise, it's an exclusive order 0 allocation for hibernation.
> +        * The slot starts with count == 1 and never increases.
> +        */
> +       if (likely(folio)) {
> +               order = folio_order(folio);
> +               nr_pages = 1 << order;
> +               /*
> +                * Pin the slot with SWAP_HAS_CACHE to satisfy swap_dup_entries.
> +                * This is the legacy allocation behavior and will be dropped soon.
> +                */
> +               memset(si->swap_map + offset, SWAP_HAS_CACHE, nr_pages);
> +               __swap_cache_add_folio(ci, folio, swp_entry(si->type, offset));
> +       } else {
> +               order = 0;
> +               nr_pages = 1;
> +               WARN_ON_ONCE(si->swap_map[offset]);
> +               si->swap_map[offset] = 1;
> +               swap_cluster_assert_table_empty(ci, offset, 1);
> +       }
> +
>         /*
>          * The first allocation in a cluster makes the
>          * cluster exclusive to this order
>          */
>         if (cluster_is_empty(ci))
>                 ci->order = order;
> -
> -       memset(si->swap_map + start, usage, nr_pages);
> -       swap_cluster_assert_table_empty(ci, start, nr_pages);
> -       swap_range_alloc(si, nr_pages);
>         ci->count += nr_pages;
> +       swap_range_alloc(si, nr_pages);
>
>         return true;
>  }
> @@ -904,13 +929,12 @@ static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster
>  /* Try use a new cluster for current CPU and allocate from it. */
>  static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
>                                             struct swap_cluster_info *ci,
> -                                           unsigned long offset,
> -                                           unsigned int order,
> -                                           unsigned char usage)
> +                                           struct folio *folio, unsigned long offset)
>  {
>         unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
>         unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
>         unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
> +       unsigned int order = likely(folio) ? folio_order(folio) : 0;
>         unsigned int nr_pages = 1 << order;
>         bool need_reclaim;
>
> @@ -930,7 +954,7 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
>                                 continue;
>                         offset = found;
>                 }
> -               if (!cluster_alloc_range(si, ci, offset, usage, order))
> +               if (!cluster_alloc_range(si, ci, folio, offset))
>                         break;
>                 found = offset;
>                 offset += nr_pages;
> @@ -952,8 +976,7 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
>
>  static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
>                                          struct list_head *list,
> -                                        unsigned int order,
> -                                        unsigned char usage,
> +                                        struct folio *folio,
>                                          bool scan_all)
>  {
>         unsigned int found = SWAP_ENTRY_INVALID;
> @@ -965,7 +988,7 @@ static unsigned int alloc_swap_scan_list(struct swap_info_struct *si,
>                 if (!ci)
>                         break;
>                 offset = cluster_offset(si, ci);
> -               found = alloc_swap_scan_cluster(si, ci, offset, order, usage);
> +               found = alloc_swap_scan_cluster(si, ci, folio, offset);
>                 if (found)
>                         break;
>         } while (scan_all);
> @@ -1026,10 +1049,11 @@ static void swap_reclaim_work(struct work_struct *work)
>   * Try to allocate swap entries with specified order and try set a new
>   * cluster for current CPU too.
>   */
> -static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
> -                                             unsigned char usage)
> +static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si,
> +                                             struct folio *folio)
>  {
>         struct swap_cluster_info *ci;
> +       unsigned int order = likely(folio) ? folio_order(folio) : 0;
>         unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
>
>         /*
> @@ -1051,8 +1075,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>                 if (cluster_is_usable(ci, order)) {
>                         if (cluster_is_empty(ci))
>                                 offset = cluster_offset(si, ci);
> -                       found = alloc_swap_scan_cluster(si, ci, offset,
> -                                                       order, usage);
> +                       found = alloc_swap_scan_cluster(si, ci, folio, offset);
>                 } else {
>                         swap_cluster_unlock(ci);
>                 }
> @@ -1066,22 +1089,19 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>          * to spread out the writes.
>          */
>         if (si->flags & SWP_PAGE_DISCARD) {
> -               found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
> -                                            false);
> +               found = alloc_swap_scan_list(si, &si->free_clusters, folio, false);
>                 if (found)
>                         goto done;
>         }
>
>         if (order < PMD_ORDER) {
> -               found = alloc_swap_scan_list(si, &si->nonfull_clusters[order],
> -                                            order, usage, true);
> +               found = alloc_swap_scan_list(si, &si->nonfull_clusters[order], folio, true);
>                 if (found)
>                         goto done;
>         }
>
>         if (!(si->flags & SWP_PAGE_DISCARD)) {
> -               found = alloc_swap_scan_list(si, &si->free_clusters, order, usage,
> -                                            false);
> +               found = alloc_swap_scan_list(si, &si->free_clusters, folio, false);
>                 if (found)
>                         goto done;
>         }
> @@ -1097,8 +1117,7 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>                  * failure is not critical. Scanning one cluster still
>                  * keeps the list rotated and reclaimed (for HAS_CACHE).
>                  */
> -               found = alloc_swap_scan_list(si, &si->frag_clusters[order], order,
> -                                            usage, false);
> +               found = alloc_swap_scan_list(si, &si->frag_clusters[order], folio, false);
>                 if (found)
>                         goto done;
>         }
> @@ -1112,13 +1131,11 @@ static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int o
>                  * Clusters here have at least one usable slots and can't fail order 0
>                  * allocation, but reclaim may drop si->lock and race with another user.
>                  */
> -               found = alloc_swap_scan_list(si, &si->frag_clusters[o],
> -                                            0, usage, true);
> +               found = alloc_swap_scan_list(si, &si->frag_clusters[o], folio, true);
>                 if (found)
>                         goto done;
>
> -               found = alloc_swap_scan_list(si, &si->nonfull_clusters[o],
> -                                            0, usage, true);
> +               found = alloc_swap_scan_list(si, &si->nonfull_clusters[o], folio, true);
>                 if (found)
>                         goto done;
>         }
> @@ -1309,12 +1326,12 @@ static bool get_swap_device_info(struct swap_info_struct *si)
>   * Fast path try to get swap entries with specified order from current
>   * CPU's swap entry pool (a cluster).
>   */
> -static bool swap_alloc_fast(swp_entry_t *entry,
> -                           int order)
> +static bool swap_alloc_fast(struct folio *folio)
>  {
> +       unsigned int order = folio_order(folio);
>         struct swap_cluster_info *ci;
>         struct swap_info_struct *si;
> -       unsigned int offset, found = SWAP_ENTRY_INVALID;
> +       unsigned int offset;
>
>         /*
>          * Once allocated, swap_info_struct will never be completely freed,
> @@ -1329,22 +1346,18 @@ static bool swap_alloc_fast(swp_entry_t *entry,
>         if (cluster_is_usable(ci, order)) {
>                 if (cluster_is_empty(ci))
>                         offset = cluster_offset(si, ci);
> -               found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE);
> -               if (found)
> -                       *entry = swp_entry(si->type, found);
> +               alloc_swap_scan_cluster(si, ci, folio, offset);
>         } else {
>                 swap_cluster_unlock(ci);
>         }
>
>         put_swap_device(si);
> -       return !!found;
> +       return folio_test_swapcache(folio);
>  }
>
>  /* Rotate the device and switch to a new cluster */
> -static bool swap_alloc_slow(swp_entry_t *entry,
> -                           int order)
> +static void swap_alloc_slow(struct folio *folio)
>  {
> -       unsigned long offset;
>         struct swap_info_struct *si, *next;
>
>         spin_lock(&swap_avail_lock);
> @@ -1354,14 +1367,12 @@ static bool swap_alloc_slow(swp_entry_t *entry,
>                 plist_requeue(&si->avail_list, &swap_avail_head);
>                 spin_unlock(&swap_avail_lock);
>                 if (get_swap_device_info(si)) {
> -                       offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
> +                       cluster_alloc_swap_entry(si, folio);
>                         put_swap_device(si);
> -                       if (offset) {
> -                               *entry = swp_entry(si->type, offset);
> -                               return true;
> -                       }
> -                       if (order)
> -                               return false;
> +                       if (folio_test_swapcache(folio))
> +                               return;
> +                       if (folio_test_large(folio))
> +                               return;
>                 }
>
>                 spin_lock(&swap_avail_lock);
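
To make the effect of the hunks above easier to see at a glance, the
allocation step boils down to roughly the following. This is only a
condensed sketch (the helper name is made up, and the SWP_WRITEOK check,
cluster scanning, reclaim and ci->order handling are stripped out), not
the literal cluster_alloc_range():

	static bool alloc_one_range_sketch(struct swap_info_struct *si,
					   struct swap_cluster_info *ci,
					   struct folio *folio,
					   unsigned int offset)
	{
		unsigned long nr_pages = folio ? folio_nr_pages(folio) : 1;

		/* The caller already holds the cluster lock. */
		lockdep_assert_held(&ci->lock);

		if (folio) {
			/* Transitional pin, only to keep swap_dup_entries happy. */
			memset(si->swap_map + offset, SWAP_HAS_CACHE, nr_pages);
			/* The folio enters the swap cache in the same critical section. */
			__swap_cache_add_folio(ci, folio, swp_entry(si->type, offset));
		} else {
			/* Hibernation: bare order 0 slot, count starts at 1. */
			si->swap_map[offset] = 1;
		}

		ci->count += nr_pages;
		swap_range_alloc(si, nr_pages);
		return true;
	}
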
My bad, the following diff was lost during the rebase to mm-new;
swap_alloc_slow should return void now:
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 8d98f28907bc..0bc734eb32c4 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1391,7 +1391,6 @@ static void swap_alloc_slow(struct folio *folio)
                        goto start_over;
        }
        spin_unlock(&swap_avail_lock);
-       return false;
 }
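
With that fixup, neither swap_alloc_fast() nor swap_alloc_slow() hands
back an entry any more; the caller simply checks whether the folio made
it into the swap cache, and folio->swap has already been set by
__swap_cache_add_folio(). A stripped-down, hypothetical caller would
look roughly like this (not the actual folio_alloc_swap() in the series;
locking, memcg charging and the exact error code are omitted or just
illustrative):

	static int folio_alloc_swap_sketch(struct folio *folio)
	{
		VM_WARN_ON_ONCE_FOLIO(folio_test_swapcache(folio), folio);

		/* Per-CPU fast path first, then rotate devices and clusters. */
		if (!swap_alloc_fast(folio))
			swap_alloc_slow(folio);

		/* Success is visible only through the swap cache state. */
		if (!folio_test_swapcache(folio))
			return -ENOMEM;

		return 0;
	}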