Message-ID: <c4a729b8-be9e-4005-aab6-91723fcf0080@redhat.com>
Date: Tue, 2 Sep 2025 12:06:53 +0200
From: David Hildenbrand <david@...hat.com>
To: Kairui Song <kasong@...cent.com>, linux-mm@...ck.org
Cc: Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>, Hugh Dickins <hughd@...gle.com>,
Chris Li <chrisl@...nel.org>, Barry Song <baohua@...nel.org>,
Baoquan He <bhe@...hat.com>, Nhat Pham <nphamcs@...il.com>,
Kemeng Shi <shikemeng@...weicloud.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ying Huang <ying.huang@...ux.alibaba.com>,
Johannes Weiner <hannes@...xchg.org>, Yosry Ahmed <yosryahmed@...gle.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>, Zi Yan <ziy@...dia.com>,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 1/9] mm, swap: use unified helper for swap cache look up
On 22.08.25 21:20, Kairui Song wrote:
> From: Kairui Song <kasong@...cent.com>
>
> Always use swap_cache_get_folio for swap cache folio lookup. The reason
> we are not using it everywhere is that it also updates the readahead
> info, and some callsites want to avoid that.
>
> So decouple the readahead update from the swap cache lookup into a
> standalone helper, and let callers invoke the readahead update helper
> when needed. Convert all swap cache lookups to use swap_cache_get_folio.
>
> After this commit, only three special cases access the swap cache
> space directly: huge memory splitting, migration, and shmem replacing,
> because they need to lock the XArray. Following commits will wrap
> their swap cache accesses with special helpers too.
>
> Signed-off-by: Kairui Song <kasong@...cent.com>
> ---
> mm/memory.c | 6 ++-
> mm/mincore.c | 3 +-
> mm/shmem.c | 4 +-
> mm/swap.h | 13 +++++--
> mm/swap_state.c | 99 +++++++++++++++++++++++-------------------------
> mm/swapfile.c | 11 +++---
> mm/userfaultfd.c | 5 +--
> 7 files changed, 72 insertions(+), 69 deletions(-)
>
> diff --git a/mm/memory.c b/mm/memory.c
> index d9de6c056179..10ef528a5f44 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -4660,9 +4660,11 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> if (unlikely(!si))
> goto out;
>
> - folio = swap_cache_get_folio(entry, vma, vmf->address);
> - if (folio)
> + folio = swap_cache_get_folio(entry);
> + if (folio) {
> + swap_update_readahead(folio, vma, vmf->address);
> page = folio_file_page(folio, swp_offset(entry));
> + }
> swapcache = folio;
>
> if (!folio) {
> diff --git a/mm/mincore.c b/mm/mincore.c
> index 2f3e1816a30d..8ec4719370e1 100644
> --- a/mm/mincore.c
> +++ b/mm/mincore.c
> @@ -76,8 +76,7 @@ static unsigned char mincore_swap(swp_entry_t entry, bool shmem)
> if (!si)
> return 0;
> }
> - folio = filemap_get_entry(swap_address_space(entry),
> - swap_cache_index(entry));
> + folio = swap_cache_get_folio(entry);
> if (shmem)
> put_swap_device(si);
> /* The swap cache space contains either folio, shadow or NULL */
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 13cc51df3893..e9d0d2784cd5 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -2354,7 +2354,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> }
>
> /* Look it up and read it in.. */
> - folio = swap_cache_get_folio(swap, NULL, 0);
> + folio = swap_cache_get_folio(swap);
> if (!folio) {
> if (data_race(si->flags & SWP_SYNCHRONOUS_IO)) {
> /* Direct swapin skipping swap cache & readahead */
> @@ -2379,6 +2379,8 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
> count_vm_event(PGMAJFAULT);
> count_memcg_event_mm(fault_mm, PGMAJFAULT);
> }
> + } else {
> + swap_update_readahead(folio, NULL, 0);
> }
>
> if (order > folio_order(folio)) {
> diff --git a/mm/swap.h b/mm/swap.h
> index 1ae44d4193b1..efb6d7ff9f30 100644
> --- a/mm/swap.h
> +++ b/mm/swap.h
> @@ -62,8 +62,7 @@ void delete_from_swap_cache(struct folio *folio);
> void clear_shadow_from_swap_cache(int type, unsigned long begin,
> unsigned long end);
> void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr);
> -struct folio *swap_cache_get_folio(swp_entry_t entry,
> - struct vm_area_struct *vma, unsigned long addr);
> +struct folio *swap_cache_get_folio(swp_entry_t entry);
> struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
> struct vm_area_struct *vma, unsigned long addr,
> struct swap_iocb **plug);
> @@ -74,6 +73,8 @@ struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> struct mempolicy *mpol, pgoff_t ilx);
> struct folio *swapin_readahead(swp_entry_t entry, gfp_t flag,
> struct vm_fault *vmf);
> +void swap_update_readahead(struct folio *folio, struct vm_area_struct *vma,
> + unsigned long addr);
>
> static inline unsigned int folio_swap_flags(struct folio *folio)
> {
> @@ -159,6 +160,11 @@ static inline struct folio *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
> return NULL;
> }
>
> +static inline void swap_update_readahead(struct folio *folio,
> + struct vm_area_struct *vma, unsigned long addr)
> +{
> +}
> +
> static inline int swap_writeout(struct folio *folio,
> struct swap_iocb **swap_plug)
> {
> @@ -169,8 +175,7 @@ static inline void swapcache_clear(struct swap_info_struct *si, swp_entry_t entr
> {
> }
>
> -static inline struct folio *swap_cache_get_folio(swp_entry_t entry,
> - struct vm_area_struct *vma, unsigned long addr)
> +static inline struct folio *swap_cache_get_folio(swp_entry_t entry)
> {
> return NULL;
> }
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 99513b74b5d8..ff9eb761a103 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -69,6 +69,21 @@ void show_swap_cache_info(void)
> printk("Total swap = %lukB\n", K(total_swap_pages));
> }
>
> +/*
While at it, proper kerneldoc ("/**" etc.)? Also document that it will
only return a valid folio pointer or NULL.
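
A completely untested sketch of what I have in mind, wording to taste:

/**
 * swap_cache_get_folio - Look up a folio in the swap cache.
 * @entry: swap entry to look up.
 *
 * A found folio is returned unlocked, with its refcount incremented.
 *
 * Context: Caller must lock the swap device or hold a reference to
 * keep it valid.
 * Return: The found folio, or NULL if none was found; never an error
 * pointer or a shadow entry.
 */
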
> + * Lookup a swap entry in the swap cache. A found folio will be returned
> + * unlocked and with its refcount incremented.
> + *
> + * Caller must lock the swap device or hold a reference to keep it valid.
> + */
> +struct folio *swap_cache_get_folio(swp_entry_t entry)
> +{
> + struct folio *folio = filemap_get_folio(swap_address_space(entry),
> + swap_cache_index(entry));
> + if (!IS_ERR(folio))
> + return folio;
> + return NULL;
Maybe better as (avoids one "!"):

	if (IS_ERR(folio))
		return NULL;
	return folio;

or simply:

	return IS_ERR(folio) ? NULL : folio;
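
i.e., the whole helper would then read (untested):

struct folio *swap_cache_get_folio(swp_entry_t entry)
{
	struct folio *folio = filemap_get_folio(swap_address_space(entry),
						swap_cache_index(entry));

	return IS_ERR(folio) ? NULL : folio;
}
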
> +}
> +
> void *get_shadow_from_swap_cache(swp_entry_t entry)
> {
> struct address_space *address_space = swap_address_space(entry);
> @@ -273,54 +288,40 @@ static inline bool swap_use_vma_readahead(void)
> }
>
> /*
> - * Lookup a swap entry in the swap cache. A found folio will be returned
> - * unlocked and with its refcount incremented - we rely on the kernel
> - * lock getting page table operations atomic even if we drop the folio
> - * lock before returning.
> - *
> - * Caller must lock the swap device or hold a reference to keep it valid.
> + * Update the readahead statistics of a vma or globally.
> */
> -struct folio *swap_cache_get_folio(swp_entry_t entry,
> - struct vm_area_struct *vma, unsigned long addr)
This also sounds like a good kerneldoc candidate :) In particular,
document that it is valid to pass in vma == NULL (in which case addr
is ignored).
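
Maybe something like the below (again untested, and the @addr behavior
with vma == NULL is just my reading of the code):

/**
 * swap_update_readahead - Update the readahead statistics of a VMA or
 *			   globally.
 * @folio: the swap cache folio that was just looked up.
 * @vma: the VMA the lookup was done for, or NULL to update the global
 *	 readahead statistics instead.
 * @addr: the address the swap entry was accessed at; ignored if @vma
 *	 is NULL.
 */
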
> +void swap_update_readahead(struct folio *folio,
> + struct vm_area_struct *vma,
> + unsigned long addr)
> {
Apart from that LGTM.
--
Cheers
David / dhildenb