Message-ID: <20251217120858.18713-1-pilgrimtao@gmail.com>
Date: Wed, 17 Dec 2025 20:08:58 +0800
From: chengkaitao <pilgrimtao@...il.com>
To: davem@...emloft.net,
andreas@...sler.com,
akpm@...ux-foundation.org,
david@...nel.org,
lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
vbabka@...e.cz,
rppt@...nel.org,
surenb@...gle.com,
mhocko@...e.com
Cc: kevin.brodsky@....com,
dave.hansen@...ux.intel.com,
ziy@...dia.com,
chengkaitao@...inos.cn,
willy@...radead.org,
zhengqi.arch@...edance.com,
sparclinux@...r.kernel.org,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org
Subject: [PATCH] sparc: Use vmemmap_populate_hugepages for vmemmap_populate

From: Chengkaitao <chengkaitao@...inos.cn>

1. Add a vmemmap_false_pmd() hook so that architectures which cannot map
   the vmemmap with base pages can opt out of the base page fallback in
   vmemmap_populate_hugepages().
2. On sparc, reimplement vmemmap_populate() in terms of
   vmemmap_populate_hugepages().

Signed-off-by: Chengkaitao <chengkaitao@...inos.cn>
---
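Note for reviewers: below is a rough, standalone sketch of the control flow
this patch gives vmemmap_populate_hugepages(). It is not kernel code;
populate_range(), alloc_pmd_block() and arch_forbids_basepage_fallback()
are made-up stand-ins for the real helpers. In the kernel the hook is
vmemmap_false_pmd(), the weak default returns false, and sparc's
implementation returns true so that a failed PMD-sized allocation becomes
-ENOMEM instead of a base page fallback.

/* Standalone sketch of the fallback decision; builds with plain gcc. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define PMD_SIZE (2UL << 20)	/* illustrative value only */

/* Stand-in for vmemmap_alloc_block(PMD_SIZE, node); may return NULL. */
static void *alloc_pmd_block(void)
{
	return malloc(PMD_SIZE);
}

/* Stand-in for vmemmap_false_pmd(); sparc returns true because its
 * vmemmap is mapped with PMD-sized pages only. */
static bool arch_forbids_basepage_fallback(void)
{
	return true;
}

static int populate_range(unsigned long start, unsigned long end,
			  bool have_altmap)
{
	unsigned long addr;

	for (addr = start & ~(PMD_SIZE - 1); addr < end; addr += PMD_SIZE) {
		void *p = alloc_pmd_block();

		if (p) {
			/* Kernel: vmemmap_set_pmd(pmd, p, node, addr, next) */
			free(p);	/* the sketch only models control flow */
			continue;
		}
		if (have_altmap || arch_forbids_basepage_fallback())
			return -ENOMEM;	/* no base page fallback possible */
		/* Kernel: vmemmap_populate_basepages(addr, next, node, NULL) */
	}
	return 0;
}

int main(void)
{
	/* Populate four PMD-sized steps without an altmap. */
	printf("populate_range() returned %d\n",
	       populate_range(0, 4 * PMD_SIZE, false));
	return 0;
}
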
arch/sparc/mm/init_64.c | 56 ++++++++++++++++-------------------------
include/linux/mm.h | 1 +
mm/sparse-vmemmap.c | 7 +++++-
3 files changed, 28 insertions(+), 36 deletions(-)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index df9f7c444c39..a80cdfa6ba98 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -5,7 +5,7 @@
* Copyright (C) 1996-1999 David S. Miller (davem@...p.rutgers.edu)
* Copyright (C) 1997-1999 Jakub Jelinek (jj@...site.mff.cuni.cz)
*/
-
+
#include <linux/extable.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -2397,11 +2397,11 @@ void __init paging_init(void)
* work.
*/
init_mm.pgd += ((shift) / (sizeof(pgd_t)));
-
+
memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
inherit_prom_mappings();
-
+
/* Ok, we can use our TLB miss and window trap handlers safely. */
setup_tba();
@@ -2581,8 +2581,8 @@ unsigned long _PAGE_CACHE __read_mostly;
EXPORT_SYMBOL(_PAGE_CACHE);
#ifdef CONFIG_SPARSEMEM_VMEMMAP
-int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
- int node, struct vmem_altmap *altmap)
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next)
{
unsigned long pte_base;
@@ -2595,39 +2595,25 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
pte_base |= _PAGE_PMD_HUGE;
- vstart = vstart & PMD_MASK;
- vend = ALIGN(vend, PMD_SIZE);
- for (; vstart < vend; vstart += PMD_SIZE) {
- pgd_t *pgd = vmemmap_pgd_populate(vstart, node);
- unsigned long pte;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
-
- if (!pgd)
- return -ENOMEM;
-
- p4d = vmemmap_p4d_populate(pgd, vstart, node);
- if (!p4d)
- return -ENOMEM;
-
- pud = vmemmap_pud_populate(p4d, vstart, node);
- if (!pud)
- return -ENOMEM;
-
- pmd = pmd_offset(pud, vstart);
- pte = pmd_val(*pmd);
- if (!(pte & _PAGE_VALID)) {
- void *block = vmemmap_alloc_block(PMD_SIZE, node);
+ pmd_val(*pmd) = pte_base | __pa(p);
+}
- if (!block)
- return -ENOMEM;
+bool __meminit vmemmap_false_pmd(pmd_t *pmd, int node)
+{
+ return true;
+}
- pmd_val(*pmd) = pte_base | __pa(block);
- }
- }
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ unsigned long addr, unsigned long next)
+{
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
+ return 1;
+}
- return 0;
+int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
+ int node, struct vmem_altmap *altmap)
+{
+ return vmemmap_populate_hugepages(vstart, vend, node, altmap);
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 15076261d0c2..5e005b0f947d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -4248,6 +4248,7 @@ void *vmemmap_alloc_block_buf(unsigned long size, int node,
void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
void vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
unsigned long addr, unsigned long next);
+bool vmemmap_false_pmd(pmd_t *pmd, int node);
int vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next);
int vmemmap_populate_basepages(unsigned long start, unsigned long end,
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 37522d6cb398..bd54b8c6f56e 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -407,6 +407,11 @@ void __weak __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
{
}
+bool __weak __meminit vmemmap_false_pmd(pmd_t *pmd, int node)
+{
+	return false;
+}
+
int __weak __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
unsigned long addr, unsigned long next)
{
@@ -446,7 +451,7 @@ int __meminit vmemmap_populate_hugepages(unsigned long start, unsigned long end,
if (p) {
vmemmap_set_pmd(pmd, p, node, addr, next);
continue;
- } else if (altmap) {
+ } else if (altmap || vmemmap_false_pmd(pmd, node)) {
/*
* No fallback: In any case we care about, the
* altmap should be reasonably sized and aligned
--
2.50.1 (Apple Git-155)