[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-ID: <3b2862e9ab3c90e03b4b0ac1a4d0cd4d0b783737.1765833318.git.luizcap@redhat.com>
Date: Mon, 15 Dec 2025 16:16:51 -0500
From: Luiz Capitulino <luizcap@...hat.com>
To: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
david@...nel.org
Cc: ryan.roberts@....com,
akpm@...ux-foundation.org,
lorenzo.stoakes@...cle.com
Subject: [PATCH 09/11] mm: replace thp_disabled_by_hw() with pgtable_has_pmd_leaves()
Despite its name, thp_disabled_by_hw() does not consult any hardware or
firmware state at runtime; it merely reports whether the architecture
lacks support for PMD-sized pages. It returns true when
TRANSPARENT_HUGEPAGE_UNSUPPORTED is set, which occurs if the
architecture implements arch_has_pmd_leaves() and that function
returns false.
Since pgtable_has_pmd_leaves() conveys the same information with
inverted polarity (it returns true when PMD-sized leaves are
supported), replace each thp_disabled_by_hw() call with
!pgtable_has_pmd_leaves() and drop the now-unused flag.
Signed-off-by: Luiz Capitulino <luizcap@...hat.com>
---
include/linux/huge_mm.h | 7 -------
mm/huge_memory.c | 6 ++----
mm/memory.c | 2 +-
mm/shmem.c | 2 +-
4 files changed, 4 insertions(+), 13 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..e291a650b10f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -47,7 +47,6 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
bool write);
enum transparent_hugepage_flag {
- TRANSPARENT_HUGEPAGE_UNSUPPORTED,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
@@ -352,12 +351,6 @@ static inline bool vma_thp_disabled(struct vm_area_struct *vma,
return mm_flags_test(MMF_DISABLE_THP_EXCEPT_ADVISED, vma->vm_mm);
}
-static inline bool thp_disabled_by_hw(void)
-{
- /* If the hardware/firmware marked hugepage support disabled. */
- return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
-}
-
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b80a897b9b6f..1e5ea2e47f79 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -122,7 +122,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
if (!vma->vm_mm) /* vdso */
return 0;
- if (thp_disabled_by_hw() || vma_thp_disabled(vma, vm_flags, forced_collapse))
+ if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vm_flags, forced_collapse))
return 0;
/* khugepaged doesn't collapse DAX vma, but page fault is fine. */
@@ -905,10 +905,8 @@ static int __init hugepage_init(void)
int err;
struct kobject *hugepage_kobj;
- if (!arch_has_pmd_leaves()) {
- transparent_hugepage_flags = 1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED;
+ if (!pgtable_has_pmd_leaves())
return -EINVAL;
- }
/*
* hugepages can't be allocated by the buddy allocator
diff --git a/mm/memory.c b/mm/memory.c
index e816d4b53bc0..c35df4c477c1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5383,7 +5383,7 @@ vm_fault_t do_set_pmd(struct vm_fault *vmf, struct folio *folio, struct page *pa
* PMD mappings if THPs are disabled. As we already have a THP,
* behave as if we are forcing a collapse.
*/
- if (thp_disabled_by_hw() || vma_thp_disabled(vma, vma->vm_flags,
+ if (!pgtable_has_pmd_leaves() || vma_thp_disabled(vma, vma->vm_flags,
/* forced_collapse=*/ true))
return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index ad5825667b49..6b350e336f8c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1800,7 +1800,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
vm_flags_t vm_flags = vma ? vma->vm_flags : 0;
unsigned int global_orders;
- if (thp_disabled_by_hw() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
+ if (!pgtable_has_pmd_leaves() || (vma && vma_thp_disabled(vma, vm_flags, shmem_huge_force)))
return 0;
global_orders = shmem_huge_global_enabled(inode, index, write_end,
--
2.52.0
Powered by blists - more mailing lists