Message-ID: <ZBQjz9vzFaLjW0MM@kernel.org>
Date: Fri, 17 Mar 2023 10:24:47 +0200
From: Mike Rapoport <rppt@...nel.org>
To: Lorenzo Stoakes <lstoakes@...il.com>
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
Andrew Morton <akpm@...ux-foundation.org>,
Arnd Bergmann <arnd@...db.de>,
David Hildenbrand <david@...hat.com>,
Nick Piggin <npiggin@...il.com>,
Peter Zijlstra <peterz@...radead.org>,
Vlastimil Babka <vbabka@...e.cz>,
Mel Gorman <mgorman@...hsingularity.net>,
Uladzislau Rezki <urezki@...il.com>,
Christoph Hellwig <hch@...radead.org>
Subject: Re: [PATCH] mm: prefer xxx_page() alloc/free functions for order-0 pages
On Mon, Mar 13, 2023 at 12:27:14PM +0000, Lorenzo Stoakes wrote:
> Update instances of alloc_pages(..., 0), __get_free_pages(..., 0) and
> __free_pages(..., 0) to use alloc_page(), __get_free_page() and
> __free_page() respectively in core code.
>
> Signed-off-by: Lorenzo Stoakes <lstoakes@...il.com>
Reviewed-by: Mike Rapoport (IBM) <rppt@...nel.org>
But why limit this only to mm?
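
For anyone wondering whether this changes behaviour: it doesn't. The
single-page helpers are plain convenience macros over the order-0
variants (paraphrasing include/linux/gfp.h):

	#define alloc_page(gfp_mask)		alloc_pages(gfp_mask, 0)
	#define __get_free_page(gfp_mask)	__get_free_pages((gfp_mask), 0)
	#define __free_page(page)		__free_pages((page), 0)

So the conversion is purely cosmetic; readers just no longer need to
check the order argument to see that a single page is meant.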
> ---
> include/asm-generic/pgalloc.h | 4 ++--
> mm/debug_vm_pgtable.c | 4 ++--
> mm/hugetlb_vmemmap.c | 2 +-
> mm/mmu_gather.c | 2 +-
> mm/page_alloc.c | 2 +-
> mm/vmalloc.c | 2 +-
> 6 files changed, 8 insertions(+), 8 deletions(-)
>
> diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
> index 977bea16cf1b..a7cf825befae 100644
> --- a/include/asm-generic/pgalloc.h
> +++ b/include/asm-generic/pgalloc.h
> @@ -123,11 +123,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
>
> if (mm == &init_mm)
> gfp = GFP_PGTABLE_KERNEL;
> - page = alloc_pages(gfp, 0);
> + page = alloc_page(gfp);
> if (!page)
> return NULL;
> if (!pgtable_pmd_page_ctor(page)) {
> - __free_pages(page, 0);
> + __free_page(page);
> return NULL;
> }
> return (pmd_t *)page_address(page);
> diff --git a/mm/debug_vm_pgtable.c b/mm/debug_vm_pgtable.c
> index 7887cc2b75bf..4362021b1ce7 100644
> --- a/mm/debug_vm_pgtable.c
> +++ b/mm/debug_vm_pgtable.c
> @@ -1048,7 +1048,7 @@ static void __init destroy_args(struct pgtable_debug_args *args)
>
> if (args->pte_pfn != ULONG_MAX) {
> page = pfn_to_page(args->pte_pfn);
> - __free_pages(page, 0);
> + __free_page(page);
>
> args->pte_pfn = ULONG_MAX;
> }
> @@ -1290,7 +1290,7 @@ static int __init init_args(struct pgtable_debug_args *args)
> }
> }
>
> - page = alloc_pages(GFP_KERNEL, 0);
> + page = alloc_page(GFP_KERNEL);
> if (page)
> args->pte_pfn = page_to_pfn(page);
>
> diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
> index a15cc56cf70a..1198064f80eb 100644
> --- a/mm/hugetlb_vmemmap.c
> +++ b/mm/hugetlb_vmemmap.c
> @@ -400,7 +400,7 @@ static int alloc_vmemmap_page_list(unsigned long start, unsigned long end,
> return 0;
> out:
> list_for_each_entry_safe(page, next, list, lru)
> - __free_pages(page, 0);
> + __free_page(page);
> return -ENOMEM;
> }
>
> diff --git a/mm/mmu_gather.c b/mm/mmu_gather.c
> index 2b93cf6ac9ae..ea9683e12936 100644
> --- a/mm/mmu_gather.c
> +++ b/mm/mmu_gather.c
> @@ -32,7 +32,7 @@ static bool tlb_next_batch(struct mmu_gather *tlb)
> if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
> return false;
>
> - batch = (void *)__get_free_pages(GFP_NOWAIT | __GFP_NOWARN, 0);
> + batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
> if (!batch)
> return false;
>
> diff --git a/mm/page_alloc.c b/mm/page_alloc.c
> index 680a4d76460e..256e8d3c8742 100644
> --- a/mm/page_alloc.c
> +++ b/mm/page_alloc.c
> @@ -5538,7 +5538,7 @@ EXPORT_SYMBOL(__get_free_pages);
>
> unsigned long get_zeroed_page(gfp_t gfp_mask)
> {
> - return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
> + return __get_free_page(gfp_mask | __GFP_ZERO);
> }
> EXPORT_SYMBOL(get_zeroed_page);
>
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 5e60e9792cbf..978194dc2bb8 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -2739,7 +2739,7 @@ void vfree(const void *addr)
> * High-order allocs for huge vmallocs are split, so
> * can be freed as an array of order-0 allocations
> */
> - __free_pages(page, 0);
> + __free_page(page);
> cond_resched();
> }
> atomic_long_sub(vm->nr_pages, &nr_vmalloc_pages);
> --
> 2.39.2
>
--
Sincerely yours,
Mike.