Message-Id: <dfa8356b04fb2c9dfd908a7bbb353b942b0b113c.1731477422.git.baolin.wang@linux.alibaba.com>
Date: Wed, 13 Nov 2024 14:53:28 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: baolin.wang@...ux.alibaba.com
Cc: 21cnbao@...il.com,
akpm@...ux-foundation.org,
da.gomez@...sung.com,
david@...hat.com,
hughd@...gle.com,
ioworker0@...il.com,
linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
ryan.roberts@....com,
wangkefeng.wang@...wei.com,
willy@...radead.org
Subject: [PATCH] mm: shmem: add large folio support for tmpfs fix

As David suggested: "We can allow all orders up to MAX_PAGECACHE_ORDER,
since shmem_mapping_size_orders() handles it properly". Therefore, drop the
early 'HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER' bail-out and instead fall back
to order 0 wherever a PMD-sized order would exceed MAX_PAGECACHE_ORDER.
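
For illustration only (not part of the patch): a minimal standalone sketch of
the resulting pattern, with made-up values for HPAGE_PMD_ORDER and
MAX_PAGECACHE_ORDER, since the real ones depend on the architecture and config:

#include <stdio.h>

/* Illustrative values only; in the kernel these macros are arch/config dependent. */
#define HPAGE_PMD_ORDER		9
#define MAX_PAGECACHE_ORDER	8
#define BIT(nr)			(1U << (nr))

int main(void)
{
	/*
	 * Same pattern as the patch: instead of returning early, compute the
	 * PMD order mask once and let it degrade to 0 when a PMD-sized folio
	 * would exceed MAX_PAGECACHE_ORDER.
	 */
	unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
				       0 : BIT(HPAGE_PMD_ORDER);

	printf("maybe_pmd_order mask: %#x\n", maybe_pmd_order);
	return 0;
}
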
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/shmem.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index a3203cf8860f..d54b24d65193 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -590,19 +590,19 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
struct vm_area_struct *vma,
unsigned long vm_flags)
{
+ unsigned int maybe_pmd_order = HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER ?
+ 0 : BIT(HPAGE_PMD_ORDER);
unsigned long within_size_orders;
unsigned int order;
pgoff_t aligned_index;
loff_t i_size;
- if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
- return 0;
if (!S_ISREG(inode->i_mode))
return 0;
if (shmem_huge == SHMEM_HUGE_DENY)
return 0;
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
- return BIT(HPAGE_PMD_ORDER);
+ return maybe_pmd_order;
/*
* The huge order allocation for anon shmem is controlled through
@@ -619,12 +619,12 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
if (vma)
- return BIT(HPAGE_PMD_ORDER);
+ return maybe_pmd_order;
return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
case SHMEM_HUGE_WITHIN_SIZE:
if (vma)
- within_size_orders = BIT(HPAGE_PMD_ORDER);
+ within_size_orders = maybe_pmd_order;
else
within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
index, write_end);
@@ -642,7 +642,7 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
- return BIT(HPAGE_PMD_ORDER);
+ return maybe_pmd_order;
fallthrough;
default:
return 0;
--
2.39.3