Message-ID: <CAMgjq7Asotx_mWV-1aLJck_iwcOYi7=P22CT5ZKzrnAi10=nwQ@mail.gmail.com>
Date: Wed, 10 Jan 2024 10:42:34 +0800
From: Kairui Song <ryncsn@...il.com>
To: "Huang, Ying" <ying.huang@...el.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Chris Li <chrisl@...nel.org>, Hugh Dickins <hughd@...gle.com>,
Johannes Weiner <hannes@...xchg.org>, Matthew Wilcox <willy@...radead.org>, Michal Hocko <mhocko@...e.com>,
Yosry Ahmed <yosryahmed@...gle.com>, David Hildenbrand <david@...hat.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 5/9] mm/swap: introduce swapin_entry for unified readahead policy
Huang, Ying <ying.huang@...el.com> wrote on Fri, Jan 5, 2024 at 15:30:
>
> Kairui Song <ryncsn@...il.com> writes:
>
> > From: Kairui Song <kasong@...cent.com>
> >
> > Introduce swapin_entry, which merges swapin_readahead and swapin_direct,
> > making it the main entry point for swapping in pages, with a unified
> > swapin policy.
> >
> > This commit makes swapoff use the new helper; swapping off a 10G ZRAM
> > (lzo-rle) device is now faster since readahead is skipped.
> >
> > Before:
> > time swapoff /dev/zram0
> > real 0m12.337s
> > user 0m0.001s
> > sys 0m12.329s
> >
> > After:
> > time swapoff /dev/zram0
> > real 0m9.728s
> > user 0m0.001s
> > sys 0m9.719s
> >
> > Signed-off-by: Kairui Song <kasong@...cent.com>
> > ---
> > mm/memory.c | 21 +++++++--------------
> > mm/swap.h | 16 ++++------------
> > mm/swap_state.c | 49 +++++++++++++++++++++++++++++++++----------------
> > mm/swapfile.c | 7 ++-----
> > 4 files changed, 46 insertions(+), 47 deletions(-)
> >
> > diff --git a/mm/memory.c b/mm/memory.c
> > index 0165c8cad489..b56254a875f8 100644
> > --- a/mm/memory.c
> > +++ b/mm/memory.c
> > @@ -3801,6 +3801,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > rmap_t rmap_flags = RMAP_NONE;
> > bool exclusive = false;
> > swp_entry_t entry;
> > + bool swapcached;
> > pte_t pte;
> > vm_fault_t ret = 0;
> >
> > @@ -3864,21 +3865,13 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> > swapcache = folio;
> >
> > if (!folio) {
> > - if (data_race(si->flags & SWP_SYNCHRONOUS_IO) &&
> > - __swap_count(entry) == 1) {
> > - /* skip swapcache and readahead */
> > - folio = swapin_direct(entry, GFP_HIGHUSER_MOVABLE, vmf);
> > - if (folio)
> > - page = &folio->page;
> > + folio = swapin_entry(entry, GFP_HIGHUSER_MOVABLE,
> > + vmf, &swapcached);
> > + if (folio) {
> > + page = folio_file_page(folio, swp_offset(entry));
> > + if (swapcached)
> > + swapcache = folio;
> > } else {
> > - page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
> > - vmf);
> > - if (page)
> > - folio = page_folio(page);
> > - swapcache = folio;
> > - }
> > -
> > - if (!folio) {
> > /*
> > * Back out if somebody else faulted in this pte
> > * while we released the pte lock.
> > diff --git a/mm/swap.h b/mm/swap.h
> > index 83eab7b67e77..502a2801f817 100644
> > --- a/mm/swap.h
> > +++ b/mm/swap.h
> > @@ -54,10 +54,8 @@ struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
> > bool skip_if_exists);
> > struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
> > struct mempolicy *mpol, pgoff_t ilx);
> > -struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf);
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf);
> > +struct folio *swapin_entry(swp_entry_t entry, gfp_t flag,
> > + struct vm_fault *vmf, bool *swapcached);
> >
> > static inline unsigned int folio_swap_flags(struct folio *folio)
> > {
> > @@ -88,14 +86,8 @@ static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
> > return NULL;
> > }
> >
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t flag,
> > - struct vm_fault *vmf)
> > -{
> > - return NULL;
> > -}
> > -
> > -static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +static inline struct folio *swapin_entry(swp_entry_t swp, gfp_t gfp_mask,
> > + struct vm_fault *vmf, bool *swapcached)
> > {
> > return NULL;
> > }
> > diff --git a/mm/swap_state.c b/mm/swap_state.c
> > index d39c5369da21..66ff187aa5d3 100644
> > --- a/mm/swap_state.c
> > +++ b/mm/swap_state.c
> > @@ -316,6 +316,11 @@ void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
> > release_pages(pages, nr);
> > }
> >
> > +static inline bool swap_use_no_readahead(struct swap_info_struct *si, swp_entry_t entry)
> > +{
> > + return data_race(si->flags & SWP_SYNCHRONOUS_IO) && __swap_count(entry) == 1;
> > +}
> > +
Hi Ying,
Thanks for the review.
>
> It appears that there's only one caller of the function in the same
> file? Why add a function?
A later patch will extend this checker function.
I can defer this change so it doesn't cause confusion for reviewers.
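
For reference, here is a rough sketch of how the helper is meant to be used
in swapin_entry. This is illustrative only, not the exact hunk from this
patch; the readahead calls follow the declarations quoted above and their
signatures may differ slightly:

        /* Illustrative sketch only -- not the exact body added by this patch. */
        struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_fault *vmf, bool *swapcached)
        {
                struct swap_info_struct *si = swp_swap_info(entry);
                struct mempolicy *mpol;
                struct folio *folio;
                bool cached;
                pgoff_t ilx;

                if (swap_use_no_readahead(si, entry)) {
                        /* SWP_SYNCHRONOUS_IO device, single reference: skip swapcache */
                        folio = swapin_direct(entry, gfp_mask, vmf);
                        cached = false;
                } else {
                        /* cluster- or VMA-based readahead through the swapcache */
                        mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
                        folio = swap_use_vma_readahead() ?
                                swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
                                swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
                        mpol_cond_put(mpol);
                        cached = true;
                }

                if (swapcached)
                        *swapcached = cached;
                return folio;
        }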
>
> > static inline bool swap_use_vma_readahead(void)
> > {
> > return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
> > @@ -870,8 +875,8 @@ static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
> > * Returns the struct folio for entry and addr after the swap entry is read
> > * in.
> > */
> > -struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +static struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > + struct vm_fault *vmf)
> > {
> > struct vm_area_struct *vma = vmf->vma;
> > struct folio *folio;
> > @@ -908,33 +913,45 @@ struct folio *swapin_direct(swp_entry_t entry, gfp_t gfp_mask,
> > }
> >
> > /**
> > - * swapin_readahead - swap in pages in hope we need them soon
> > + * swapin_entry - swap in a page from swap entry
> > * @entry: swap entry of this memory
> > * @gfp_mask: memory allocation flags
> > * @vmf: fault information
> > + * @swapcached: pointer to a bool used to indicate whether the
> > + * page is swapped in through the swapcache.
> > *
> > * Returns the struct page for entry and addr, after queueing swapin.
> > *
> > - * It's a main entry function for swap readahead. By the configuration,
> > + * It's the main entry function for swap-in. Depending on the configuration,
> > * it will read ahead blocks by cluster-based(ie, physical disk based)
> > - * or vma-based(ie, virtual address based on faulty address) readahead.
> > + * or vma-based(ie, virtual address based on faulty address) readahead,
> > + * or skip the readahead (ie, ramdisk based swap device).
> > */
> > -struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
> > - struct vm_fault *vmf)
> > +struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
> > + struct vm_fault *vmf, bool *swapcached)
>
> May be better to use
>
> struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
> struct vm_fault *vmf, struct folio **swapcache)
>
> In this way, we can reduce the number of source lines in the caller.
A following commit will rewrite this part to return an enum instead of a
bool, so this is just an intermediate change. And do_swap_page is the only
caller that can benefit from this; it's not helpful for swapoff/shmem.
I think we can just keep it this way here.
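
To illustrate the direction, a hypothetical sketch of the follow-up
interface (the names here are made up and may differ in the actual series):

        /* Hypothetical sketch of the follow-up interface -- names may differ. */
        enum swapin_type {
                SWAPIN_DIRECT,          /* swapped in directly, bypassing the swapcache */
                SWAPIN_SWAPCACHE,       /* swapped in (or found) through the swapcache */
        };

        struct folio *swapin_entry(swp_entry_t entry, gfp_t gfp_mask,
                                   struct vm_fault *vmf, enum swapin_type *type);

do_swap_page would then set swapcache only when the reported type says the
swapcache was used, while swapoff/shmem callers can ignore it.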