Message-ID: <CAMgjq7DwOe_OJ4qyfY-2=kmaY4tfvQGaedie+6NvPPX7YD0YGA@mail.gmail.com>
Date: Mon, 8 Sep 2025 23:14:33 +0800
From: Kairui Song <ryncsn@...il.com>
To: David Hildenbrand <david@...hat.com>
Cc: linux-mm@...ck.org, Andrew Morton <akpm@...ux-foundation.org>,
Matthew Wilcox <willy@...radead.org>, Hugh Dickins <hughd@...gle.com>, Chris Li <chrisl@...nel.org>,
Barry Song <baohua@...nel.org>, Baoquan He <bhe@...hat.com>, Nhat Pham <nphamcs@...il.com>,
Kemeng Shi <shikemeng@...weicloud.com>, Baolin Wang <baolin.wang@...ux.alibaba.com>,
Ying Huang <ying.huang@...ux.alibaba.com>, Johannes Weiner <hannes@...xchg.org>,
Yosry Ahmed <yosryahmed@...gle.com>, Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Zi Yan <ziy@...dia.com>, linux-kernel@...r.kernel.org
Subject: Re: [PATCH v2 11/15] mm, swap: use the swap table for the swap cache
and switch API

On Mon, Sep 8, 2025 at 11:09 PM David Hildenbrand <david@...hat.com> wrote:
> > +static inline struct swap_cluster_info *swap_cluster_lock(
> > + struct swap_info_struct *si, pgoff_t offset, bool irq)
> > +{
> > + return NULL;
> > +}
> > +
> > +static inline struct swap_cluster_info *swap_cluster_lock_by_folio(
> > + struct folio *folio)
>
> I would probably call that "swap_cluster_get_and_lock" or sth like that ...
>
> > +{
> > + return NULL;
> > +}
> > +
> > +static inline struct swap_cluster_info *swap_cluster_lock_by_folio_irq(
> > + struct folio *folio)
> > +{
>
> ... then this would become "swap_cluster_get_and_lock_irq"
>
>
> Alternatively, separate the lookup from the locking of the cluster.
>
> swap_cluster_from_folio() / folio_get_swap_cluster()
> swap_cluster_lock()
> swap_cluster_lock_irq()
>
> Which might look cleaner in the end.
That's a very good suggestion.
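Separating the lookup from the locking would roughly look like this at the
call sites (just a sketch to confirm I got the idea right; the helper names
and signatures below are placeholders, not the final API):

/*
 * Sketch only: split the folio -> cluster lookup from the locking.
 * swap_cluster_from_folio(), offset_to_cluster() and lock/unlock
 * helpers taking a cluster pointer are placeholder assumptions here.
 */
static inline struct swap_cluster_info *swap_cluster_from_folio(
		struct folio *folio)
{
	struct swap_info_struct *si = swp_swap_info(folio->swap);

	return offset_to_cluster(si, swp_offset(folio->swap));
}

	/* Callers then do the lookup and the locking as two explicit steps: */
	ci = swap_cluster_from_folio(folio);
	swap_cluster_lock(ci);		/* or swap_cluster_lock_irq(ci) */
	/* ... update swap cache entries covered by this cluster ... */
	swap_cluster_unlock(ci);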
>
> [...]
>
> > -struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
> > -static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
> > +struct address_space swap_space __read_mostly = {
> > + .a_ops = &swap_aops,
> > +};
> > +
> > static bool enable_vma_readahead __read_mostly = true;
> >
> > #define SWAP_RA_ORDER_CEILING 5
> > @@ -83,11 +86,21 @@ void show_swap_cache_info(void)
> > */
> > struct folio *swap_cache_get_folio(swp_entry_t entry)
> > {
> > - struct folio *folio = filemap_get_folio(swap_address_space(entry),
> > - swap_cache_index(entry));
> > - if (IS_ERR(folio))
> > - return NULL;
> > - return folio;
> > +
>
> ^ superfluous empty line.
>
> [...]
>
> >
> > @@ -420,6 +421,34 @@ static inline unsigned int cluster_offset(struct swap_info_struct *si,
> > return cluster_index(si, ci) * SWAPFILE_CLUSTER;
> > }
> >
> > +static int swap_table_alloc_table(struct swap_cluster_info *ci)
>
> swap_cluster_alloc_table ?
Good idea.
>
> > +{
> > + WARN_ON(ci->table);
> > + ci->table = kzalloc(sizeof(unsigned long) * SWAPFILE_CLUSTER, GFP_KERNEL);
> > + if (!ci->table)
> > + return -ENOMEM;
> > + return 0;
> > +}
> > +
> > +static void swap_cluster_free_table(struct swap_cluster_info *ci)
> > +{
> > + unsigned int ci_off;
> > + unsigned long swp_tb;
> > +
> > + if (!ci->table)
> > + return;
> > +
> > + for (ci_off = 0; ci_off < SWAPFILE_CLUSTER; ci_off++) {
> > + swp_tb = __swap_table_get(ci, ci_off);
> > + if (!swp_tb_is_null(swp_tb))
> > + pr_err_once("swap: unclean swap space on swapoff: 0x%lx",
> > + swp_tb);
> > + }
> > +
> > + kfree(ci->table);
> > + ci->table = NULL;
> > +}
> > +
> > static void move_cluster(struct swap_info_struct *si,
> > struct swap_cluster_info *ci, struct list_head *list,
> > enum swap_cluster_flags new_flags)
> > @@ -702,6 +731,26 @@ static bool cluster_scan_range(struct swap_info_struct *si,
> > return true;
> > }
> >
> > +/*
> > + * Currently, the swap table is not used for count tracking, just
> > + * do a sanity check here to ensure nothing leaked, so the swap
> > + * table should be empty upon freeing.
> > + */
> > +static void cluster_table_check(struct swap_cluster_info *ci,
> > + unsigned int start, unsigned int nr)
>
> "swap_cluster_assert_table_empty()"
>
> or sth like that that makes it clearer what you are checking for.
Agree.
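Will rename it in the next version. Roughly something like the following,
reusing the same check as in swap_cluster_free_table() above (just a sketch,
the exact form may differ):

static void swap_cluster_assert_table_empty(struct swap_cluster_info *ci,
					    unsigned int start, unsigned int nr)
{
	unsigned int ci_off;

	/* Nothing should be left in the swap table range being freed */
	for (ci_off = start; ci_off < start + nr; ci_off++)
		VM_WARN_ON_ONCE(!swp_tb_is_null(__swap_table_get(ci, ci_off)));
}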
>
> --
> Cheers
>
Thanks!