[<prev] [next>] [thread-next>] [day] [month] [year] [list]
Message-ID: <1562935747.8510.26.camel@lca.pw>
Date: Fri, 12 Jul 2019 08:49:07 -0400
From: Qian Cai <cai@....pw>
To: linux-kernel@...r.kernel.org, akpm@...ux-foundation.org,
anshuman.khandual@....com, anton.ivanov@...bridgegreys.com,
aou@...s.berkeley.edu, arnd@...db.de, catalin.marinas@....com,
deanbo422@...il.com, deller@....de, geert@...ux-m68k.org,
green.hu@...il.com, guoren@...nel.org, gxt@....edu.cn,
lftan@...era.com, linux@...linux.org.uk, mattst88@...il.com,
mhocko@...e.com, mm-commits@...r.kernel.org, mpe@...erman.id.au,
palmer@...ive.com, paul.burton@...s.com, ralf@...ux-mips.org,
ren_guo@...ky.com, richard@....at, rkuo@...eaurora.org,
rppt@...ux.ibm.com, sammy@...my.net, torvalds@...ux-foundation.org,
willy@...radead.org
Subject: Re: [patch 105/147] arm64: switch to generic version of pte
allocation
Actually, this patch is slightly off. There is one delta that still needs to be
applied (ignore the part in pgtable.h, which is already in mainline via commit
615c48ad8f42 "arm64/mm: don't initialize pgd_cache twice"):
https://lore.kernel.org/linux-mm/20190617151252.GF16810@rapoport-lnx/
On Thu, 2019-07-11 at 20:58 -0700, akpm@...ux-foundation.org wrote:
> From: Mike Rapoport <rppt@...ux.ibm.com>
> Subject: arm64: switch to generic version of pte allocation
>
> The PTE allocations in arm64 are identical to the generic ones modulo the
> GFP flags.
>
> Using the generic pte_alloc_one() functions ensures that the user page
> tables are allocated with __GFP_ACCOUNT set.
>
> The arm64 definition of PGALLOC_GFP is removed and replaced with
> GFP_PGTABLE_USER for p[gum]d_alloc_one() for the user page tables and
>
> GFP_PGTABLE_KERNEL for the kernel page tables. The KVM memory cache is now
> using GFP_PGTABLE_USER.
>
> The mappings created with create_pgd_mapping() are now using
> GFP_PGTABLE_KERNEL.
>
> The conversion to the generic version of pte_free_kernel() removes the NULL
> check for pte.
>
> The pte_free() version on arm64 is identical to the generic one and
> can be simply dropped.
>
> [cai@....pw: fix a bogus GFP flag in pgd_alloc()]
> Link: http://lkml.kernel.org/r/1559656836-24940-1-git-send-email-cai@lca.pw
> Link: http://lkml.kernel.org/r/1557296232-15361-5-git-send-email-rppt@linux.ibm.com
> Signed-off-by: Mike Rapoport <rppt@...ux.ibm.com>
> Cc: Albert Ou <aou@...s.berkeley.edu>
> Cc: Anshuman Khandual <anshuman.khandual@....com>
> Cc: Anton Ivanov <anton.ivanov@...bridgegreys.com>
> Cc: Arnd Bergmann <arnd@...db.de>
> Cc: Catalin Marinas <catalin.marinas@....com>
> Cc: Geert Uytterhoeven <geert@...ux-m68k.org>
> Cc: Greentime Hu <green.hu@...il.com>
> Cc: Guan Xuetao <gxt@....edu.cn>
> Cc: Guo Ren <guoren@...nel.org>
> Cc: Guo Ren <ren_guo@...ky.com>
> Cc: Helge Deller <deller@....de>
> Cc: Ley Foon Tan <lftan@...era.com>
> Cc: Matthew Wilcox <willy@...radead.org>
> Cc: Matt Turner <mattst88@...il.com>
> Cc: Michael Ellerman <mpe@...erman.id.au>
> Cc: Michal Hocko <mhocko@...e.com>
> Cc: Palmer Dabbelt <palmer@...ive.com>
> Cc: Paul Burton <paul.burton@...s.com>
> Cc: Ralf Baechle <ralf@...ux-mips.org>
> Cc: Richard Kuo <rkuo@...eaurora.org>
> Cc: Richard Weinberger <richard@....at>
> Cc: Russell King <linux@...linux.org.uk>
> Cc: Sam Creasey <sammy@...my.net>
> Cc: Vincent Chen <deanbo422@...il.com>
> Signed-off-by: Andrew Morton <akpm@...ux-foundation.org>
> ---
>
> arch/arm64/include/asm/pgalloc.h | 47 ++++-------------------------
> arch/arm64/mm/mmu.c | 2 -
> arch/arm64/mm/pgd.c | 9 ++++-
> virt/kvm/arm/mmu.c | 2 -
> 4 files changed, 17 insertions(+), 43 deletions(-)
>
> --- a/arch/arm64/include/asm/pgalloc.h~arm64-switch-to-generic-version-of-pte-
> allocation
> +++ a/arch/arm64/include/asm/pgalloc.h
> @@ -13,18 +13,23 @@
> #include <asm/cacheflush.h>
> #include <asm/tlbflush.h>
>
> +#include <asm-generic/pgalloc.h> /* for pte_{alloc,free}_one */
> +
> #define check_pgt_cache() do { } while (0)
>
> -#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
> #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
>
> #if CONFIG_PGTABLE_LEVELS > 2
>
> static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
> {
> + gfp_t gfp = GFP_PGTABLE_USER;
> struct page *page;
>
> - page = alloc_page(PGALLOC_GFP);
> + if (mm == &init_mm)
> + gfp = GFP_PGTABLE_KERNEL;
> +
> + page = alloc_page(gfp);
> if (!page)
> return NULL;
> if (!pgtable_pmd_page_ctor(page)) {
> @@ -61,7 +66,7 @@ static inline void __pud_populate(pud_t
>
> static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
> {
> - return (pud_t *)__get_free_page(PGALLOC_GFP);
> + return (pud_t *)__get_free_page(GFP_PGTABLE_USER);
> }
>
> static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
> @@ -89,42 +94,6 @@ static inline void __pgd_populate(pgd_t
> extern pgd_t *pgd_alloc(struct mm_struct *mm);
> extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
>
> -static inline pte_t *
> -pte_alloc_one_kernel(struct mm_struct *mm)
> -{
> - return (pte_t *)__get_free_page(PGALLOC_GFP);
> -}
> -
> -static inline pgtable_t
> -pte_alloc_one(struct mm_struct *mm)
> -{
> - struct page *pte;
> -
> - pte = alloc_pages(PGALLOC_GFP, 0);
> - if (!pte)
> - return NULL;
> - if (!pgtable_page_ctor(pte)) {
> - __free_page(pte);
> - return NULL;
> - }
> - return pte;
> -}
> -
> -/*
> - * Free a PTE table.
> - */
> -static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
> -{
> - if (ptep)
> - free_page((unsigned long)ptep);
> -}
> -
> -static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
> -{
> - pgtable_page_dtor(pte);
> - __free_page(pte);
> -}
> -
> static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
> pmdval_t prot)
> {
> --- a/arch/arm64/mm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/mmu.c
> @@ -362,7 +362,7 @@ static void __create_pgd_mapping(pgd_t *
>
> static phys_addr_t __pgd_pgtable_alloc(int shift)
> {
> - void *ptr = (void *)__get_free_page(PGALLOC_GFP);
> + void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL);
> BUG_ON(!ptr);
>
> /* Ensure the zeroed page is visible to the page table walker */
> --- a/arch/arm64/mm/pgd.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/arch/arm64/mm/pgd.c
> @@ -19,10 +19,15 @@ static struct kmem_cache *pgd_cache __ro
>
> pgd_t *pgd_alloc(struct mm_struct *mm)
> {
> + gfp_t gfp = GFP_PGTABLE_USER;
> +
> + if (unlikely(mm == &init_mm))
> + gfp = GFP_PGTABLE_KERNEL;
> +
> if (PGD_SIZE == PAGE_SIZE)
> - return (pgd_t *)__get_free_page(PGALLOC_GFP);
> + return (pgd_t *)__get_free_page(gfp);
> else
> - return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
> + return kmem_cache_alloc(pgd_cache, GFP_PGTABLE_KERNEL);
> }
>
> void pgd_free(struct mm_struct *mm, pgd_t *pgd)
> --- a/virt/kvm/arm/mmu.c~arm64-switch-to-generic-version-of-pte-allocation
> +++ a/virt/kvm/arm/mmu.c
> @@ -129,7 +129,7 @@ static int mmu_topup_memory_cache(struct
> if (cache->nobjs >= min)
> return 0;
> while (cache->nobjs < max) {
> - page = (void *)__get_free_page(PGALLOC_GFP);
> + page = (void *)__get_free_page(GFP_PGTABLE_USER);
> if (!page)
> return -ENOMEM;
> cache->objects[cache->nobjs++] = page;
> _
Powered by blists - more mailing lists