Message-ID: <Y7h4jsv6jl0XSIsk@casper.infradead.org>
Date: Fri, 6 Jan 2023 19:37:50 +0000
From: Matthew Wilcox <willy@...radead.org>
To: SeongJae Park <sj@...nel.org>
Cc: Andrew Morton <akpm@...ux-foundation.org>, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: Re: [PATCH 2/3] mm: use folio_headpage() instead of folio_page()
On Fri, Jan 06, 2023 at 05:40:27PM +0000, SeongJae Park wrote:
> diff --git a/mm/shmem.c b/mm/shmem.c
> index bc5c156ef470..8ae73973a7fc 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -3211,7 +3211,7 @@ static const char *shmem_get_link(struct dentry *dentry,
> folio = filemap_get_folio(inode->i_mapping, 0);
> if (!folio)
> return ERR_PTR(-ECHILD);
> - if (PageHWPoison(folio_page(folio, 0)) ||
> + if (PageHWPoison(folio_headpage(folio)) ||
This is actually incorrect. We don't want the head page here; we want
the page at index 0. The difference is subtle today, but it becomes
important later on.
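To spell out the difference (folio_page() as it exists today in
include/linux/page-flags.h; folio_headpage() as I understand the
helper this series proposes, i.e. just the head page):

	/* The page at index n within the folio. */
	#define folio_page(folio, n)	nth_page(&(folio)->page, n)

	/* Proposed by this series: always the folio's head page. */
	static inline struct page *folio_headpage(struct folio *folio)
	{
		return &folio->page;
	}

For n == 0 the two are interchangeable today, but once folios are
split from struct page, "the page at index 0" remains the page
holding the symlink data, so this check needs to keep asking for
index 0.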
> @@ -3222,7 +3222,7 @@ static const char *shmem_get_link(struct dentry *dentry,
> return ERR_PTR(error);
> if (!folio)
> return ERR_PTR(-ECHILD);
> - if (PageHWPoison(folio_page(folio, 0))) {
> + if (PageHWPoison(folio_headpage(folio))) {
Same here.
> +++ b/mm/slab.c
> @@ -1373,7 +1373,7 @@ static struct slab *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
> /* Make the flag visible before any changes to folio->mapping */
> smp_wmb();
> /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
> - if (sk_memalloc_socks() && page_is_pfmemalloc(folio_page(folio, 0)))
> + if (sk_memalloc_socks() && page_is_pfmemalloc(folio_headpage(folio)))
We should have a folio_is_pfmemalloc().
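Something like this, mirroring page_is_pfmemalloc() in
include/linux/mm.h (untested sketch):

	static inline bool folio_is_pfmemalloc(const struct folio *folio)
	{
		/*
		 * lru.next has bit 1 set if the folio was allocated from
		 * the pfmemalloc reserves.  Callers may simply overwrite
		 * it if they do not need to preserve that information.
		 */
		return (uintptr_t)folio->lru.next & BIT(1);
	}

Then this caller (and the one in slub.c below) can pass the folio
directly instead of digging a page back out of it.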
> @@ -1389,7 +1389,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
>
> BUG_ON(!folio_test_slab(folio));
> __slab_clear_pfmemalloc(slab);
> - page_mapcount_reset(folio_page(folio, 0));
> + page_mapcount_reset(folio_headpage(folio));
This one should be &folio->page.
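That is, open-code the head page (the same applies to the
__free_pages() calls flagged below):

	page_mapcount_reset(&folio->page);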
> @@ -1398,7 +1398,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct slab *slab)
> if (current->reclaim_state)
> current->reclaim_state->reclaimed_slab += 1 << order;
> unaccount_slab(slab, order, cachep);
> - __free_pages(folio_page(folio, 0), order);
> + __free_pages(folio_headpage(folio), order);
&folio->page.
> @@ -939,9 +939,9 @@ void free_large_kmalloc(struct folio *folio, void *object)
> kasan_kfree_large(object);
> kmsan_kfree_large(object);
>
> - mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
> + mod_lruvec_page_state(folio_headpage(folio), NR_SLAB_UNRECLAIMABLE_B,
> -(PAGE_SIZE << order));
lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B, ...
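That is, keeping the third argument from the hunk above:

	lruvec_stat_mod_folio(folio, NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));

which stays in folio terms instead of converting back to a page.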
> - __free_pages(folio_page(folio, 0), order);
> + __free_pages(folio_headpage(folio), order);
&folio->page.
> +++ b/mm/slub.c
> @@ -1859,7 +1859,7 @@ static inline struct slab *alloc_slab_page(gfp_t flags, int node,
> __folio_set_slab(folio);
> /* Make the flag visible before any changes to folio->mapping */
> smp_wmb();
> - if (page_is_pfmemalloc(folio_page(folio, 0)))
> + if (page_is_pfmemalloc(folio_headpage(folio)))
folio_is_pfmemalloc()
> @@ -2066,7 +2066,7 @@ static void __free_slab(struct kmem_cache *s, struct slab *slab)
> if (current->reclaim_state)
> current->reclaim_state->reclaimed_slab += pages;
> unaccount_slab(slab, order, s);
> - __free_pages(folio_page(folio, 0), order);
> + __free_pages(folio_headpage(folio), order);
&folio->page.