Message-ID: <1d9f9188-42b1-4f83-87e7-e02a22b67caa@suse.cz>
Date: Thu, 14 Nov 2024 09:23:58 +0100
From: Vlastimil Babka <vbabka@...e.cz>
To: Ryan Roberts <ryan.roberts@....com>,
Andrew Morton <akpm@...ux-foundation.org>,
Anshuman Khandual <anshuman.khandual@....com>,
Ard Biesheuvel <ardb@...nel.org>, Catalin Marinas <catalin.marinas@....com>,
David Hildenbrand <david@...hat.com>, Greg Marsden
<greg.marsden@...cle.com>, Ivan Ivanov <ivan.ivanov@...e.com>,
Kalesh Singh <kaleshsingh@...gle.com>, Marc Zyngier <maz@...nel.org>,
Mark Rutland <mark.rutland@....com>, Matthias Brugger <mbrugger@...e.com>,
Miroslav Benes <mbenes@...e.cz>, Will Deacon <will@...nel.org>
Cc: linux-arm-kernel@...ts.infradead.org, linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: Re: [RFC PATCH v1 04/57] mm/page_alloc: Make page_frag_cache
boot-time page size compatible
On 10/14/24 12:58, Ryan Roberts wrote:
> "struct page_frag_cache" has some optimizations that depend on page
> size. Let's refactor it a bit so that those optimizations can be
> determined at run-time for the case where page size is a boot-time
> parameter. For compile-time page size, the compiler should dead code
> strip and the result is very similar to before.
>
> One wrinkle is that we don't know if we need the size member until
> runtime. So remove the ifdeffery and always define offset as u32 (needed
> if PAGE_SIZE is >= 64K) and size as u16 (only used when PAGE_SIZE <=
> 32K). We move the members around a bit so that the overall size of the
> struct remains the same: 24 bytes on 64-bit and 16 bytes on 32-bit.
>
> Signed-off-by: Ryan Roberts <ryan.roberts@....com>
Looks ok, but ideally the PAGE_FRAG_CACHE_MAX_ORDER #define should also be
replaced by some variable that's populated just once. It can be static and
local to page_alloc.c, as nothing else seems to use it.
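
Roughly what I have in mind -- an untested sketch; the init hook's name and
where it gets called from are placeholders:

static unsigned int page_frag_cache_max_order __ro_after_init;

static void __init page_frag_cache_init(void)
{
	/* Populate once, after the boot-time page size is known. */
	page_frag_cache_max_order = get_order(PAGE_FRAG_CACHE_MAX_SIZE);
}

__page_frag_cache_refill() would then pass page_frag_cache_max_order instead
of the #define.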
>
> ---
>
> ***NOTE***
> Any confused maintainers may want to read the cover note here for context:
> https://lore.kernel.org/all/20241014105514.3206191-1-ryan.roberts@arm.com/
>
> include/linux/mm_types.h | 13 ++++++-------
> mm/page_alloc.c | 31 ++++++++++++++++++-------------
> 2 files changed, 24 insertions(+), 20 deletions(-)
>
> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index 4854249792545..0844ed7cfaa53 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -544,16 +544,15 @@ static inline void *folio_get_private(struct folio *folio)
>
> struct page_frag_cache {
> void * va;
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> - __u16 offset;
> - __u16 size;
> -#else
> - __u32 offset;
> -#endif
> /* we maintain a pagecount bias, so that we dont dirty cache line
> * containing page->_refcount every time we allocate a fragment.
> */
> - unsigned int pagecnt_bias;
> + unsigned int pagecnt_bias;
> + __u32 offset;
> + /* size only used when PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE, in which
> + * case PAGE_FRAG_CACHE_MAX_SIZE is 32K and 16 bits is sufficient.
> + */
> + __u16 size;
> bool pfmemalloc;
> };
>
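Nit: since the commit message promises the overall struct size stays the
same, it might be worth pinning that down next to the struct definition,
e.g. (sketch only, assuming no arch sneaks in extra padding):

#ifdef CONFIG_64BIT
static_assert(sizeof(struct page_frag_cache) == 24);
#else
static_assert(sizeof(struct page_frag_cache) == 16);
#endif
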
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 91ace8ca97e21..8678103b1b396 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -4822,13 +4822,18 @@ static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
> struct page *page = NULL;
> gfp_t gfp = gfp_mask;
>
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> - gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
> - __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
> - page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
> - PAGE_FRAG_CACHE_MAX_ORDER);
> - nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
> -#endif
> + if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE) {
> + gfp_mask = (gfp_mask & ~__GFP_DIRECT_RECLAIM) | __GFP_COMP |
> + __GFP_NOWARN | __GFP_NORETRY | __GFP_NOMEMALLOC;
> + page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
> + PAGE_FRAG_CACHE_MAX_ORDER);
> + /*
> + * Cast to silence a truncation warning on the 16-bit nc->size.
> + * It is not a real truncation: PAGE_SIZE is only less than
> + * PAGE_FRAG_CACHE_MAX_SIZE when PAGE_FRAG_CACHE_MAX_SIZE is 32K.
> + */
> + nc->size = (__u16)(page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE);
> + }
> if (unlikely(!page))
> page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);
>
> @@ -4870,10 +4875,10 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> if (!page)
> return NULL;
>
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> /* if size can vary use size else just use PAGE_SIZE */
> - size = nc->size;
> -#endif
> + if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> + size = nc->size;
> +
> /* Even if we own the page, we do not use atomic_set().
> * This would break get_page_unless_zero() users.
> */
> @@ -4897,10 +4902,10 @@ void *__page_frag_alloc_align(struct page_frag_cache *nc,
> goto refill;
> }
>
> -#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> /* if size can vary use size else just use PAGE_SIZE */
> - size = nc->size;
> -#endif
> + if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
> + size = nc->size;
> +
> /* OK, page count is 0, we can safely set it */
> set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);
>
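FWIW the "dead code strip" expectation from the commit message should hold:
with a compile-time PAGE_SIZE both sides of the comparison are integer
constant expressions, so any optimizing build folds the branch. A
stand-alone illustration (made-up names, not kernel code):

/* build with -O2; only one of the two returns survives in the object code */
#define MY_PAGE_SIZE		(64u * 1024)
#define MY_FRAG_CACHE_MAX	(32u * 1024)

unsigned int pick_size(unsigned int cached_size)
{
	if (MY_PAGE_SIZE < MY_FRAG_CACHE_MAX)
		return cached_size;	/* constant-false here: folded away */
	return MY_PAGE_SIZE;
}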