Message-ID: <c1a6fe55f668cfe87ad113faa49120f049ba9cb5.1748506520.git.baolin.wang@linux.alibaba.com>
Date: Thu, 29 May 2025 16:23:55 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com,
david@...hat.com
Cc: lorenzo.stoakes@...cle.com,
Liam.Howlett@...cle.com,
npache@...hat.com,
ryan.roberts@....com,
dev.jain@....com,
ziy@...dia.com,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-fsdevel@...r.kernel.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 2/2] mm: shmem: disallow hugepages if the system-wide shmem THP sysfs settings are disabled

MADV_COLLAPSE ignores the system-wide shmem THP sysfs settings, which means
that even though we have disabled the shmem THP configuration, MADV_COLLAPSE
will still attempt to collapse into a shmem THP. This violates the rule we
have agreed upon: never means never.
With this patch, the strategy is:

For shmem, if none of the always, madvise, within_size and inherit options
has enabled PMD-sized mTHP, then MADV_COLLAPSE is prohibited from collapsing
into a PMD-sized mTHP.

For tmpfs, if the mount option is set with the 'huge=never' parameter, then
MADV_COLLAPSE is prohibited from collapsing into a PMD-sized mTHP.
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
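Not for the commit log, just an illustration of the intended behaviour: a
minimal userspace sketch (the file name and constants are hypothetical, and
it assumes a kernel that defines MADV_COLLAPSE and a 2MB PMD size). With
/sys/kernel/mm/transparent_hugepage/shmem_enabled set to 'never' and no
per-size knob enabling PMD-sized THP, the madvise() call below is expected
to fail after this patch instead of collapsing the shmem range anyway:

/* madv_collapse_shmem.c - hypothetical demo, not part of this patch */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/mman.h>

#ifndef MADV_COLLAPSE
#define MADV_COLLAPSE	25	/* uapi value on kernels that support it */
#endif

#define PMD_SIZE	(2UL << 20)	/* assumes a 2MB PMD (e.g. x86_64) */

int main(void)
{
	/*
	 * Reserve extra space, then MAP_FIXED a shared anonymous (shmem)
	 * mapping at a PMD-aligned address so the range is eligible for
	 * a PMD-sized collapse.
	 */
	char *hint = mmap(NULL, PMD_SIZE * 2, PROT_NONE,
			  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (hint == MAP_FAILED)
		return 1;

	char *aligned = (char *)(((unsigned long)hint + PMD_SIZE - 1) &
				 ~(PMD_SIZE - 1));
	char *p = mmap(aligned, PMD_SIZE, PROT_READ | PROT_WRITE,
		       MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Fault the range in with small pages first. */
	memset(p, 1, PMD_SIZE);

	/*
	 * With shmem THP disabled system-wide, this is expected to fail
	 * after this patch rather than collapsing to a huge page anyway.
	 */
	if (madvise(p, PMD_SIZE, MADV_COLLAPSE))
		printf("MADV_COLLAPSE failed: %s\n", strerror(errno));
	else
		printf("MADV_COLLAPSE succeeded\n");

	return 0;
}

Before this patch, the same call could still collapse the range into a
PMD-sized THP because MADV_COLLAPSE bypassed the sysfs settings.
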
mm/huge_memory.c | 2 +-
mm/shmem.c | 12 ++++++------
2 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3e66136e41a..a8cfa37cae72 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -166,7 +166,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
* own flags.
*/
if (!in_pf && shmem_file(vma->vm_file))
- return shmem_allowable_huge_orders(file_inode(vma->vm_file),
+ return orders & shmem_allowable_huge_orders(file_inode(vma->vm_file),
vma, vma->vm_pgoff, 0,
!enforce_sysfs);
diff --git a/mm/shmem.c b/mm/shmem.c
index 4b42419ce6b2..4dbb28d85cd9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -613,7 +613,7 @@ static unsigned int shmem_get_orders_within_size(struct inode *inode,
}
static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
+ loff_t write_end,
struct vm_area_struct *vma,
unsigned long vm_flags)
{
@@ -625,7 +625,7 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
return 0;
- if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
+ if (shmem_huge == SHMEM_HUGE_FORCE)
return maybe_pmd_order;
/*
@@ -860,7 +860,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
}
static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
- loff_t write_end, bool shmem_huge_force,
+ loff_t write_end,
struct vm_area_struct *vma,
unsigned long vm_flags)
{
@@ -1261,7 +1261,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
STATX_ATTR_NODUMP);
generic_fillattr(idmap, request_mask, inode, stat);
- if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0))
+ if (shmem_huge_global_enabled(inode, 0, 0, NULL, 0))
stat->blksize = HPAGE_PMD_SIZE;
if (request_mask & STATX_BTIME) {
@@ -1768,7 +1768,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
return 0;
global_orders = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vma, vm_flags);
+ vma, vm_flags);
/* Tmpfs huge pages allocation */
if (!vma || !vma_is_anon_shmem(vma))
return global_orders;
@@ -1790,7 +1790,7 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
/* Allow mTHP that will be fully within i_size. */
mask |= shmem_get_orders_within_size(inode, within_size_orders, index, 0);
- if (vm_flags & VM_HUGEPAGE)
+ if (shmem_huge_force || (vm_flags & VM_HUGEPAGE))
mask |= READ_ONCE(huge_shmem_orders_madvise);
if (global_orders > 0)
--
2.43.5