Message-ID: <20240704043132.28501-10-osalvador@suse.de>
Date: Thu, 4 Jul 2024 06:30:56 +0200
From: Oscar Salvador <osalvador@...e.de>
To: Andrew Morton <akpm@...ux-foundation.org>
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
Peter Xu <peterx@...hat.com>,
Muchun Song <muchun.song@...ux.dev>,
David Hildenbrand <david@...hat.com>,
SeongJae Park <sj@...nel.org>,
Miaohe Lin <linmiaohe@...wei.com>,
Michal Hocko <mhocko@...e.com>,
Matthew Wilcox <willy@...radead.org>,
Christophe Leroy <christophe.leroy@...roup.eu>,
Oscar Salvador <osalvador@...e.de>
Subject: [PATCH 09/45] mm: Implement pud-version functions for swap and vm_normal_page_pud

HugeTLB pages will be handled at pud level as well, so we need to
implement pud versions of vm_normal_page()/vm_normal_folio() and of the
swap helpers (is_swap_pud(), pud_to_swp_entry()).
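
As a rough sketch of how these helpers are meant to be used (just an
illustration, not part of the diff; hugetlb_pud_folio() is a made-up
name), a pud-level walker could mirror the existing pte/pmd handling
along these lines:

  static struct folio *hugetlb_pud_folio(struct vm_area_struct *vma,
                                         unsigned long addr, pud_t pud)
  {
          if (pud_none(pud))
                  return NULL;

          if (is_swap_pud(pud)) {
                  /*
                   * Non-present entry (e.g. migration or hwpoison):
                   * there is no folio to return, the caller has to
                   * decode the swp_entry_t and act on it.
                   */
                  swp_entry_t entry = pud_to_swp_entry(pud);

                  WARN_ON_ONCE(!is_migration_entry(entry) &&
                               !is_hwpoison_entry(entry));
                  return NULL;
          }

          /* Present pud: grab the backing folio, if it is a normal one. */
          return vm_normal_folio_pud(vma, addr, pud);
  }
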
Signed-off-by: Oscar Salvador <osalvador@...e.de>
---
arch/powerpc/include/asm/book3s/64/pgtable.h | 1 +
include/linux/mm.h | 4 ++
include/linux/pgtable.h | 6 +++
include/linux/swapops.h | 15 ++++++
mm/memory.c | 51 ++++++++++++++++++++
5 files changed, 77 insertions(+)

diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 519b1743a0f4..fa4bb8d6356f 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -687,6 +687,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) & ~_PAGE_PTE })
#define __swp_entry_to_pte(x) __pte((x).val | _PAGE_PTE)
#define __pmd_to_swp_entry(pmd) (__pte_to_swp_entry(pmd_pte(pmd)))
+#define __pud_to_swp_entry(pud) (__pte_to_swp_entry(pud_pte(pud)))
#define __swp_entry_to_pmd(x) (pte_pmd(__swp_entry_to_pte(x)))

#ifdef CONFIG_MEM_SOFT_DIRTY
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5f1075d19600..baade06b159b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2371,6 +2371,10 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
+struct folio *vm_normal_folio_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t pud);
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud);

void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 3a7b8751747e..a9edeb86b7fe 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1990,4 +1990,10 @@ pgprot_t vm_get_page_prot(unsigned long vm_flags) \
} \
EXPORT_SYMBOL(vm_get_page_prot);
+#ifdef CONFIG_HUGETLB_PAGE
+#ifndef __pud_to_swp_entry
+#define __pud_to_swp_entry(pud) ((swp_entry_t) { pud_val(pud) })
+#endif
+#endif
+
#endif /* _LINUX_PGTABLE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index cb468e418ea1..182957f0d013 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -126,6 +126,21 @@ static inline int is_swap_pte(pte_t pte)
return !pte_none(pte) && !pte_present(pte);
}

+#ifdef CONFIG_HUGETLB_PAGE
+static inline int is_swap_pud(pud_t pud)
+{
+ return !pud_none(pud) && !pud_present(pud);
+}
+
+static inline swp_entry_t pud_to_swp_entry(pud_t pud)
+{
+ swp_entry_t arch_entry;
+
+ arch_entry = __pud_to_swp_entry(pud);
+ return swp_entry(__swp_type(arch_entry), __swp_offset(arch_entry));
+}
+#endif
+
/*
* Convert the arch-dependent pte representation of a swp_entry_t into an
* arch-independent swp_entry_t.
diff --git a/mm/memory.c b/mm/memory.c
index 0a769f34bbb2..90c5dfac35c6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -718,6 +718,57 @@ struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
}
#endif

+#ifdef CONFIG_HUGETLB_PAGE
+struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
+ pud_t pud)
+{
+ unsigned long pfn = pud_pfn(pud);
+
+ /*
+ * There is no pud_special() but there may be special puds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !CONFIG_ARCH_HAS_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (pud_devmap(pud))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+
+struct folio *vm_normal_folio_pud(struct vm_area_struct *vma,
+ unsigned long addr, pud_t pud)
+{
+ struct page *page = vm_normal_page_pud(vma, addr, pud);
+
+ if (page)
+ return page_folio(page);
+ return NULL;
+}
+#endif
+
static void restore_exclusive_pte(struct vm_area_struct *vma,
struct page *page, unsigned long address,
pte_t *ptep)
--
2.26.2