Message-ID: <20240327152332.950956-6-peterx@redhat.com>
Date: Wed, 27 Mar 2024 11:23:24 -0400
From: peterx@...hat.com
To: linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Cc: Yang Shi <shy828301@...il.com>,
"Kirill A . Shutemov" <kirill@...temov.name>,
Mike Kravetz <mike.kravetz@...cle.com>,
John Hubbard <jhubbard@...dia.com>,
Michael Ellerman <mpe@...erman.id.au>,
peterx@...hat.com,
Andrew Jones <andrew.jones@...ux.dev>,
Muchun Song <muchun.song@...ux.dev>,
linux-riscv@...ts.infradead.org,
linuxppc-dev@...ts.ozlabs.org,
Christophe Leroy <christophe.leroy@...roup.eu>,
Andrew Morton <akpm@...ux-foundation.org>,
Christoph Hellwig <hch@...radead.org>,
Lorenzo Stoakes <lstoakes@...il.com>,
Matthew Wilcox <willy@...radead.org>,
Rik van Riel <riel@...riel.com>,
linux-arm-kernel@...ts.infradead.org,
Andrea Arcangeli <aarcange@...hat.com>,
David Hildenbrand <david@...hat.com>,
"Aneesh Kumar K . V" <aneesh.kumar@...nel.org>,
Vlastimil Babka <vbabka@...e.cz>,
James Houghton <jthoughton@...gle.com>,
Jason Gunthorpe <jgg@...dia.com>,
Mike Rapoport <rppt@...nel.org>,
Axel Rasmussen <axelrasmussen@...gle.com>
Subject: [PATCH v4 05/13] mm/arch: Provide pud_pfn() fallback
From: Peter Xu <peterx@...hat.com>
The comment in the code explains the reasons. We took a different approach
compared to pmd_pfn() by providing a fallback function.

Another option is to provide some lower-level config options (comparable to
HUGETLB_PAGE or THP) to identify which page table levels an arch can support
for such huge mappings. However, that can be overkill.
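
For context, the BUILD_BUG() fallback only works because generic callers
reach pud_pfn() behind a condition the compiler can prove false on
architectures without huge pud mappings, so the call is eliminated as dead
code. A minimal, hypothetical caller sketch of that pattern (the names
follow_huge_pud_example and CONFIG_HAVE_HUGE_PUD_EXAMPLE are illustrative
only, not part of this patch):

	#include <linux/pgtable.h>

	/*
	 * Hypothetical sketch: pud_pfn() sits behind a compile-time
	 * constant check, so when the arch/config has no huge pud
	 * support the branch is dead code and the BUILD_BUG() fallback
	 * is never emitted.
	 */
	static unsigned long follow_huge_pud_example(pud_t pud)
	{
		if (!IS_ENABLED(CONFIG_HAVE_HUGE_PUD_EXAMPLE) || !pud_leaf(pud))
			return 0;
		/* Only reached when the arch provides a real pud_pfn(). */
		return pud_pfn(pud);
	}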
Cc: Mike Rapoport (IBM) <rppt@...nel.org>
Cc: Matthew Wilcox <willy@...radead.org>
Reviewed-by: Jason Gunthorpe <jgg@...dia.com>
Signed-off-by: Peter Xu <peterx@...hat.com>
---
arch/riscv/include/asm/pgtable.h | 1 +
arch/s390/include/asm/pgtable.h | 1 +
arch/sparc/include/asm/pgtable_64.h | 1 +
arch/x86/include/asm/pgtable.h | 1 +
include/linux/pgtable.h | 10 ++++++++++
5 files changed, 14 insertions(+)
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 20242402fc11..0ca28cc8e3fa 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -646,6 +646,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 1a71cb19c089..6cbbe473f680 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -1414,6 +1414,7 @@ static inline unsigned long pud_deref(pud_t pud)
return (unsigned long)__va(pud_val(pud) & origin_mask);
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
return __pa(pud_deref(pud)) >> PAGE_SHIFT;
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 4d1bafaba942..26efc9bb644a 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -875,6 +875,7 @@ static inline bool pud_leaf(pud_t pud)
return pte_val(pte) & _PAGE_PMD_HUGE;
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
pte_t pte = __pte(pud_val(pud));
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index cefc7a84f7a4..273f7557218c 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -234,6 +234,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
return (pfn & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
}
+#define pud_pfn pud_pfn
static inline unsigned long pud_pfn(pud_t pud)
{
phys_addr_t pfn = pud_val(pud);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 600e17d03659..75fe309a4e10 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -1817,6 +1817,16 @@ typedef unsigned int pgtbl_mod_mask;
#define pte_leaf_size(x) PAGE_SIZE
#endif
+/*
+ * We always define pmd_pfn for all archs as it's used in lots of generic
+ * code. Now it happens too for pud_pfn (and can happen for larger
+ * mappings too in the future; we're not there yet). Instead of defining
+ * it for all archs (like pmd_pfn), provide a fallback.
+ */
+#ifndef pud_pfn
+#define pud_pfn(x) ({ BUILD_BUG(); 0; })
+#endif
+
/*
* Some architectures have MMUs that are configurable or selectable at boot
* time. These lead to variable PTRS_PER_x. For statically allocated arrays it
--
2.44.0