Message-ID: <w4ijtqxe5xnbp4hexccs7xwlqeiazzbix3rmlrskcwsizzityy@67kmfpp5pjp7>
Date: Thu, 18 Dec 2025 11:27:23 -0500
From: "Liam R. Howlett" <Liam.Howlett@...cle.com>
To: chengkaitao <pilgrimtao@...il.com>
Cc: davem@...emloft.net, andreas@...sler.com, akpm@...ux-foundation.org,
david@...nel.org, lorenzo.stoakes@...cle.com, vbabka@...e.cz,
rppt@...nel.org, surenb@...gle.com, mhocko@...e.com,
kevin.brodsky@....com, dave.hansen@...ux.intel.com, ziy@...dia.com,
chengkaitao@...inos.cn, willy@...radead.org,
zhengqi.arch@...edance.com, sparclinux@...r.kernel.org,
linux-kernel@...r.kernel.org, linux-mm@...ck.org
Subject: Re: [PATCH v2] sparc: Use vmemmap_populate_hugepages for vmemmap_populate

* chengkaitao <pilgrimtao@...il.com> [251218 08:10]:
> From: Chengkaitao <chengkaitao@...inos.cn>
>
> 1. Add the vmemmap_pte_fallback_allowed() hook to accommodate
> architectures that cannot fall back to base pages.
> 2. On SPARC, reimplement vmemmap_populate() using
> vmemmap_populate_hugepages().
>
> Signed-off-by: Chengkaitao <chengkaitao@...inos.cn>
Can you please fix your email client? Your SoB does not match the
sender, and your responses do not match the From: of the patch.

I assume a v3 will include Mike's suggestions on v1, so this version is
already out of date?

Please rewrite the change log to include the information that you
discussed with Mike.
> ---
> v2:
> 1. Revert the whitespace deletions
> 2. Change vmemmap_false_pmd to vmemmap_pte_fallback_allowed
>
> Link to V1:
> https://lore.kernel.org/all/20251217120858.18713-1-pilgrimtao@gmail.com/
>
> arch/sparc/mm/init_64.c | 50 +++++++++++++++--------------------------
> include/linux/mm.h | 1 +
> mm/sparse-vmemmap.c | 7 +++++-
> 3 files changed, 25 insertions(+), 33 deletions(-)
>
> diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
> index df9f7c444c39..86b11150e701 100644
> --- a/arch/sparc/mm/init_64.c
> +++ b/arch/sparc/mm/init_64.c
> @@ -2581,8 +2581,8 @@ unsigned long _PAGE_CACHE __read_mostly;
> EXPORT_SYMBOL(_PAGE_CACHE);
>
> #ifdef CONFIG_SPARSEMEM_VMEMMAP
> -int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
> - int node, struct vmem_altmap *altmap)
> +void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
> + unsigned long addr, unsigned long next)
> {
> unsigned long pte_base;
>
> @@ -2595,39 +2595,25 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
>
> pte_base |= _PAGE_PMD_HUGE;
>
> - vstart = vstart & PMD_MASK;
> - vend = ALIGN(vend, PMD_SIZE);
> - for (; vstart < vend; vstart += PMD_SIZE) {
> - pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
> - unsigned long pte;
> - p4d_t *p4d;
> - pud_t *pud;
> - pmd_t *pmd;
> -
> - if (!pgd)
> - return -ENOMEM;
> -
> - p4d = vmemmap_p4d_populate(pgd, vstart, node);
> - if (!p4d)
> - return -ENOMEM;
> -
> - pud = vmemmap_pud_populate(p4d, vstart, node);
> - if (!pud)
> - return -ENOMEM;
> -
> - pmd = pmd_offset(pud, vstart);
> - pte = pmd_val(*pmd);
> - if (!(pte & _PAGE_VALID)) {
> - void *block = vmemmap_alloc_block(PMD_SIZE, node);
> + pmd_val(*pmd) = pte_base | __pa(p);
> +}
>
> - if (!block)
> - return -ENOMEM;
> +bool __meminit vmemmap_pte_fallback_allowed(void)
> +{
> + return false;
> +}
>
> - pmd_val(*pmd) = pte_base | __pa(block);
> - }
> - }
> +int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
> + unsigned long addr, unsigned long next)
> +{
> + vmemmap_verify((pte_t *)pmdp, node, addr, next);
> + return 1;
> +}
>
> - return 0;
> +int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
> + int node, struct vmem_altmap *altmap)
> +{
> + return vmemmap_populate_hugepages(vstart, vend, node, altmap);
> }
> #endif /* CONFIG_SPARSEMEM_VMEMMAP */
>
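On the sparc side: since vmemmap_check_pmd() now unconditionally
returns 1, any already-populated PMD is accepted as a huge mapping and
the generic loop never falls through to the base-page path here. That
is correct for sparc64, but it is subtle enough to deserve a comment;
roughly (my wording, just a suggestion):

	int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
			unsigned long addr, unsigned long next)
	{
		vmemmap_verify((pte_t *)pmdp, node, addr, next);
		/*
		 * sparc64 only ever maps the vmemmap with PMD-sized
		 * pages, so an existing entry is always a huge mapping
		 * we can reuse as-is.
		 */
		return 1;
	}
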
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 15076261d0c2..ca159b029a5d 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -4248,6 +4248,7 @@ void *vmemmap_alloc_block_buf(unsigned long size, int node,
> void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
> void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
> unsigned long addr, unsigned long next);
> +bool vmemmap_pte_fallback_allowed(void);
> int vmemmap_check_pmd(pmd_t *pmd, int node,
> unsigned long addr, unsigned long next);
> int vmemmap_populate_basepages(unsigned long start, unsigned long end,
> diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
> index 37522d6cb398..45eb38048949 100644
> --- a/mm/sparse-vmemmap.c
> +++ b/mm/sparse-vmemmap.c
> @@ -407,6 +407,11 @@ void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
> {
> }
>
> +bool __weak __meminit vmemmap_pte_fallback_allowed(void)
> +{
> + return true;
> +}
> +
> int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
> unsigned long addr, unsigned long next)
> {
> @@ -446,7 +451,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
> if (p) {
> vmemmap_set_pmd(pmd, p, node, addr, next);
> continue;
> - } else if (altmap) {
> + } else if (altmap || !vmemmap_pte_fallback_allowed()) {
> /*
> * No fallback: In any case we care about, the
> * altmap should be reasonably sized and aligned
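
The new condition reads fine. Spelling out the failure path for other
readers (a condensed restatement under the v2 naming, not the exact
code): when the PMD-sized allocation fails, populating now bails out
either because an altmap is in use or because the architecture has
opted out of the PTE fallback:

	p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
	if (!p) {
		if (altmap)			/* altmap must not fall back */
			return -ENOMEM;
		if (!vmemmap_pte_fallback_allowed())	/* e.g. sparc */
			return -ENOMEM;
		/* otherwise fall through to vmemmap_populate_basepages() */
	}
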
> --
> 2.50.1 (Apple Git-155)
>