Message-ID: <20250617154345.2494405-14-david@redhat.com>
Date: Tue, 17 Jun 2025 17:43:44 +0200
From: David Hildenbrand <david@...hat.com>
To: linux-kernel@...r.kernel.org
Cc: linux-fsdevel@...r.kernel.org,
linux-mm@...ck.org,
nvdimm@...ts.linux.dev,
David Hildenbrand <david@...hat.com>,
Andrew Morton <akpm@...ux-foundation.org>,
Juergen Gross <jgross@...e.com>,
Stefano Stabellini <sstabellini@...nel.org>,
Oleksandr Tyshchenko <oleksandr_tyshchenko@...m.com>,
Dan Williams <dan.j.williams@...el.com>,
Alistair Popple <apopple@...dia.com>,
Matthew Wilcox <willy@...radead.org>,
Jan Kara <jack@...e.cz>,
Alexander Viro <viro@...iv.linux.org.uk>,
Christian Brauner <brauner@...nel.org>,
Zi Yan <ziy@...dia.com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
"Liam R. Howlett" <Liam.Howlett@...cle.com>,
Nico Pache <npache@...hat.com>,
Ryan Roberts <ryan.roberts@....com>,
Dev Jain <dev.jain@....com>,
Barry Song <baohua@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
Mike Rapoport <rppt@...nel.org>,
Suren Baghdasaryan <surenb@...gle.com>,
Michal Hocko <mhocko@...e.com>,
Jann Horn <jannh@...gle.com>,
Pedro Falcato <pfalcato@...e.de>
Subject: [PATCH RFC 13/14] mm: introduce and use vm_normal_page_pud()

Let's introduce vm_normal_page_pud(), which ends up being fairly simple
thanks to our new common helpers and the fact that there is no PUD-sized
zero folio.
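
For contrast, here is a rough sketch of what the PMD counterpart looks
like on top of the same common helper (vm_normal_page_pfn() comes from
an earlier patch in this series; the exact shape of the warning below is
an illustrative assumption, not a quote from the tree):

	struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd)
	{
		unsigned long pfn = pmd_pfn(pmd);

		if (unlikely(pmd_special(pmd))) {
			/*
			 * Unlike the PUD case, a special PMD might still map
			 * the PMD-sized huge zero folio, which has to be
			 * filtered out in addition to PFNMAP/MIXEDMAP
			 * mappings.
			 */
			VM_WARN_ON_ONCE(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)) &&
					!is_huge_zero_pmd(pmd));
			return NULL;
		}
		return vm_normal_page_pfn(vma, pfn);
	}

With no PUD-sized zero folio around, the PUD variant can simply reject
any special entry.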

Use vm_normal_page_pud() in folio_walk_start() to resolve a TODO,
structuring the code like the other (pmd/pte) cases. Defer introducing
vm_normal_folio_pud() until it is actually needed.
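
As a minimal caller sketch (assuming the existing folio_walk API; "vma"
and "addr" are placeholders for a VMA whose mm is suitably locked and an
address within it), the PUD case is now resolved through
vm_normal_page_pud() just like the pmd/pte cases:

	struct folio_walk fw;
	struct folio *folio;

	/* The caller must hold the mmap lock of vma->vm_mm. */
	folio = folio_walk_start(&fw, vma, addr, 0);
	if (folio) {
		if (fw.level == FW_LEVEL_PUD)
			pr_info("folio %p is mapped by a PUD\n", folio);
		/* Drops the page table lock taken by folio_walk_start(). */
		folio_walk_end(&fw, vma);
	}
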
Signed-off-by: David Hildenbrand <david@...hat.com>
---
 include/linux/mm.h |  1 +
 mm/memory.c        | 11 +++++++++++
 mm/pagewalk.c      | 20 ++++++++++----------
 3 files changed, 22 insertions(+), 10 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index ef709457c7076..022e8ef2c78ef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2361,6 +2361,7 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
 			    pte_t pte);
 struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd);
 struct page *vm_normal_page_pmd(struct vm_area_struct *vma, pmd_t pmd);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, pud_t pud);
 
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		  unsigned long size);

diff --git a/mm/memory.c b/mm/memory.c
index 34f961024e8e6..6c65f51248250 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -683,6 +683,17 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma, pmd_t pmd)
 		return page_folio(page);
 	return NULL;
 }
+
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, pud_t pud)
+{
+	unsigned long pfn = pud_pfn(pud);
+
+	if (unlikely(pud_special(pud))) {
+		VM_WARN_ON_ONCE(!(vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP)));
+		return NULL;
+	}
+	return vm_normal_page_pfn(vma, pfn);
+}
 #endif
 
 /**

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 0edb7240d090c..8bd95cf326872 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -902,23 +902,23 @@ struct folio *folio_walk_start(struct folio_walk *fw,
 		fw->pudp = pudp;
 		fw->pud = pud;
 
-		/*
-		 * TODO: FW_MIGRATION support for PUD migration entries
-		 * once there are relevant users.
-		 */
-		if (!pud_present(pud) || pud_special(pud)) {
+		if (pud_none(pud)) {
 			spin_unlock(ptl);
 			goto not_found;
-		} else if (!pud_leaf(pud)) {
+		} else if (pud_present(pud) && !pud_leaf(pud)) {
 			spin_unlock(ptl);
 			goto pmd_table;
+		} else if (pud_present(pud)) {
+			page = vm_normal_page_pud(vma, pud);
+			if (page)
+				goto found;
 		}
 		/*
-		 * TODO: vm_normal_page_pud() will be handy once we want to
-		 * support PUD mappings in VM_PFNMAP|VM_MIXEDMAP VMAs.
+		 * TODO: FW_MIGRATION support for PUD migration entries
+		 * once there are relevant users.
 		 */
-		page = pud_page(pud);
-		goto found;
+		spin_unlock(ptl);
+		goto not_found;
 	}
 
 pmd_table:
--
2.49.0