Message-ID: <7c8ee99eb146a9e8abd20d110cb591d33fa1ebae.1755677674.git.baolin.wang@linux.alibaba.com>
Date: Wed, 20 Aug 2025 17:07:16 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com,
david@...hat.com,
lorenzo.stoakes@...cle.com
Cc: ziy@...dia.com,
Liam.Howlett@...cle.com,
npache@...hat.com,
ryan.roberts@....com,
dev.jain@....com,
baohua@...nel.org,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH 05/11] mm: shmem: kick khugepaged for enabling non-PMD-sized shmem mTHPs

When only non-PMD-sized mTHP is enabled (for example, when only 64K mTHP
is enabled), we should also allow kicking khugepaged to attempt scanning
and collapsing 64K shmem mTHP. Modify shmem_hpage_pmd_enabled() to support
shmem mTHP collapse, and while we are at it, rename it to make the function
name clearer.
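
A minimal userspace sketch of the semantic change (the helper names, the
enabled_orders variable and the order-4/order-9 values below are
illustrative assumptions, not kernel code): the old check only asked
whether the PMD-sized order was enabled, while the new check treats any
enabled mTHP order as sufficient to kick khugepaged.

#include <stdbool.h>
#include <stdio.h>

#define PMD_ORDER	9	/* assumed: 2M PMD with 4K base pages */

/* Pretend only the 64K (order-4) mTHP size is enabled. */
static unsigned long enabled_orders = 1UL << 4;

/* Old behaviour: only the PMD-sized order counts. */
static bool check_pmd_only(void)
{
	return enabled_orders & (1UL << PMD_ORDER);
}

/* New behaviour: any enabled order counts. */
static bool check_any_order(void)
{
	return enabled_orders != 0;
}

int main(void)
{
	/* Prints "old: 0, new: 1": khugepaged now gets kicked. */
	printf("old: %d, new: %d\n", check_pmd_only(), check_any_order());
	return 0;
}

This mirrors why the patch below replaces the per-order
test_bit(HPAGE_PMD_ORDER, ...) checks with READ_ONCE() of the whole
orders bitmask: a non-zero mask, whichever order it carries, is enough.
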
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
 include/linux/shmem_fs.h |  4 ++--
 mm/khugepaged.c          |  2 +-
 mm/shmem.c               | 10 +++++-----
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6d0f9c599ff7..cbe46e0c8bce 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -118,7 +118,7 @@ int shmem_unuse(unsigned int type);
 unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
 				loff_t write_end, bool shmem_huge_force);
-bool shmem_hpage_pmd_enabled(void);
+bool shmem_hpage_enabled(void);
 #else
 static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
@@ -127,7 +127,7 @@ static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	return 0;
 }
 
-static inline bool shmem_hpage_pmd_enabled(void)
+static inline bool shmem_hpage_enabled(void)
 {
 	return false;
 }
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 53ca7bb72fbc..eb0b433d6ccb 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -453,7 +453,7 @@ static bool hugepage_enabled(void)
 	if (READ_ONCE(huge_anon_orders_inherit) &&
 	    hugepage_global_enabled())
 		return true;
-	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_pmd_enabled())
+	if (IS_ENABLED(CONFIG_SHMEM) && shmem_hpage_enabled())
 		return true;
 	return false;
 }
diff --git a/mm/shmem.c b/mm/shmem.c
index 13cc51df3893..a360738ab732 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1791,17 +1791,17 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-bool shmem_hpage_pmd_enabled(void)
+bool shmem_hpage_enabled(void)
 {
 	if (shmem_huge == SHMEM_HUGE_DENY)
 		return false;
-	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_always))
+	if (READ_ONCE(huge_shmem_orders_always))
 		return true;
-	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_madvise))
+	if (READ_ONCE(huge_shmem_orders_madvise))
 		return true;
-	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_within_size))
+	if (READ_ONCE(huge_shmem_orders_within_size))
 		return true;
-	if (test_bit(HPAGE_PMD_ORDER, &huge_shmem_orders_inherit) &&
+	if (READ_ONCE(huge_shmem_orders_inherit) &&
 	    shmem_huge != SHMEM_HUGE_NEVER)
 		return true;
 
--
2.43.5