Message-ID: <0b36595c-db50-4e8b-96db-2d4ceeaa1908@redhat.com>
Date: Mon, 26 Aug 2024 08:53:15 +0200
From: David Hildenbrand <david@...hat.com>
To: Hugh Dickins <hughd@...gle.com>, Andrew Morton <akpm@...ux-foundation.org>
Cc: Baolin Wang <baolin.wang@...ux.alibaba.com>,
"Kirill A. Shutemov" <kirill.shutemov@...ux.intel.com>,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH] mm: shmem: extend shmem_unused_huge_shrink() to all sizes
On 26.08.24 01:25, Hugh Dickins wrote:
> Although shmem_get_folio_gfp() is correctly putting inodes on the
> shrinklist according to the folio size, shmem_unused_huge_shrink()
> was still dealing with that shrinklist in terms of HPAGE_PMD_SIZE.
>
> Generalize that; and to handle the mixture of sizes more sensibly,
> have shmem_alloc_and_add_folio() give it a number of pages to be freed
> (approximate: no need to minimize that with an exact calculation)
> instead of a number of inodes to split.
That might be worth a comment in the code.
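Something along these lines maybe (just a rough sketch, reword as you see
fit):

	/*
	 * nr_to_free is only an approximation of the pages we might
	 * reclaim by splitting: no need to minimize it with an exact
	 * calculation.
	 */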
>
> Signed-off-by: Hugh Dickins <hughd@...gle.com>
> ---
> This patch would most naturally go into mm-unstable as 10/9 over
> Baolin's "support large folio swap-out and swap-in for shmem" series.
>
> mm/shmem.c | 45 ++++++++++++++++++++-------------------------
> 1 file changed, 20 insertions(+), 25 deletions(-)
>
> diff --git a/mm/shmem.c b/mm/shmem.c
> index 4dd0570962fa..4c9921c234b7 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -636,15 +636,14 @@ static const char *shmem_format_huge(int huge)
> #endif
>
> static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> - struct shrink_control *sc, unsigned long nr_to_split)
> + struct shrink_control *sc, unsigned long nr_to_free)
> {
> LIST_HEAD(list), *pos, *next;
> - LIST_HEAD(to_remove);
> struct inode *inode;
> struct shmem_inode_info *info;
> struct folio *folio;
> unsigned long batch = sc ? sc->nr_to_scan : 128;
> - int split = 0;
> + unsigned long split = 0, freed = 0;
>
> if (list_empty(&sbinfo->shrinklist))
> return SHRINK_STOP;
> @@ -662,13 +661,6 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> goto next;
> }
>
> - /* Check if there's anything to gain */
> - if (round_up(inode->i_size, PAGE_SIZE) ==
> - round_up(inode->i_size, HPAGE_PMD_SIZE)) {
> - list_move(&info->shrinklist, &to_remove);
> - goto next;
> - }
> -
> list_move(&info->shrinklist, &list);
> next:
> sbinfo->shrinklist_len--;
> @@ -677,34 +669,36 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
> }
> spin_unlock(&sbinfo->shrinklist_lock);
>
> - list_for_each_safe(pos, next, &to_remove) {
> - info = list_entry(pos, struct shmem_inode_info, shrinklist);
> - inode = &info->vfs_inode;
> - list_del_init(&info->shrinklist);
> - iput(inode);
> - }
> -
> list_for_each_safe(pos, next, &list) {
> + pgoff_t next, end;
> + loff_t i_size;
> int ret;
> - pgoff_t index;
>
> info = list_entry(pos, struct shmem_inode_info, shrinklist);
> inode = &info->vfs_inode;
>
> - if (nr_to_split && split >= nr_to_split)
> + if (nr_to_free && freed >= nr_to_free)
> goto move_back;
>
> - index = (inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT;
> - folio = filemap_get_folio(inode->i_mapping, index);
> - if (IS_ERR(folio))
> + i_size = i_size_read(inode);
> + folio = filemap_get_entry(inode->i_mapping, i_size / PAGE_SIZE);
> + if (!folio || xa_is_value(folio))
> goto drop;
>
> - /* No huge page at the end of the file: nothing to split */
> + /* No large page at the end of the file: nothing to split */
s/large page/large folio/
Or simply "Nothing to split."
> if (!folio_test_large(folio)) {
> folio_put(folio);
> goto drop;
> }
>
> + /* Check if there is anything to gain from splitting */
> + next = folio_next_index(folio);
> + end = shmem_fallocend(inode, DIV_ROUND_UP(i_size, PAGE_SIZE));
> + if (end <= folio->index || end >= next) {
> + folio_put(folio);
> + goto drop;
> + }
> +
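IIUC, the split only buys us something when "end" falls strictly inside
the folio (folio->index < end < next): e.g. with a 512-page folio at
index 0, i_size covering 5 pages and no fallocate keeping pages beyond
that, end = 5 and the pages past it become reclaimable once the folio
is split.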
Looks sensible to me
Reviewed-by: David Hildenbrand <david@...hat.com>
--
Cheers,
David / dhildenb