Message-Id: <e2f4e483f75e54be0654fafb2147822faacac16d.1731038280.git.baolin.wang@linux.alibaba.com>
Date: Fri, 8 Nov 2024 12:12:57 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com
Cc: willy@...radead.org,
david@...hat.com,
wangkefeng.wang@...wei.com,
21cnbao@...il.com,
ryan.roberts@....com,
ioworker0@...il.com,
da.gomez@...sung.com,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] mm: shmem: add large folio support for tmpfs
Add large folio support for the tmpfs write and fallocate paths, matching
the high-order preference mechanism that the iomap buffered IO path uses
via __filemap_get_folio().
Add shmem_mapping_size_orders() to get a hint for the allowable folio orders
based on the file size, taking the mapping's alignment requirements into
account.
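To make the arithmetic concrete, here is a minimal userspace sketch of the
calculation that shmem_mapping_size_orders() performs; it is not kernel code.
PAGE_SHIFT == 12 and MAX_PAGECACHE_ORDER == 9 are assumed (both depend on the
architecture and configuration), and get_order_for_size() is a hypothetical
stand-in for the kernel's filemap_get_order():

    #include <stdio.h>

    #define PAGE_SHIFT          12
    #define MAX_PAGECACHE_ORDER 9

    /* Stand-in for filemap_get_order(): the highest order whose folio
     * still fits within 'size'. */
    static unsigned int get_order_for_size(size_t size)
    {
            unsigned int order = 0;

            while (((size_t)1 << (order + 1 + PAGE_SHIFT)) <= size)
                    order++;
            return order;
    }

    int main(void)
    {
            unsigned long index = 0;            /* page index of the write */
            size_t write_end = 1UL << 20;       /* write ends at 1MiB */
            size_t size = write_end - (index << PAGE_SHIFT);
            unsigned int order = get_order_for_size(size);
            unsigned long mask;

            /* A misaligned index limits the order to its lowest set bit,
             * mirroring the __ffs(index) fallback in the patch. */
            if (index & ((1UL << order) - 1))
                    order = __builtin_ctzl(index);

            if (order > MAX_PAGECACHE_ORDER)
                    order = MAX_PAGECACHE_ORDER;

            /* BIT(order + 1) - 1: every order from 0 up to 'order'. */
            mask = order > 0 ? (1UL << (order + 1)) - 1 : 0;
            printf("allowable order mask: 0x%lx\n", mask);  /* 0x1ff */
            return 0;
    }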
Traditionally, tmpfs only supported PMD-sized huge folios. However, nowadays,
with other file systems supporting any-sized large folios and anonymous memory
extended to support mTHP, we should not restrict tmpfs to allocating only
PMD-sized huge folios, which makes it a special case. Instead, we should allow
tmpfs to allocate any-sized large folios.
Considering that tmpfs already has the 'huge=' option to control huge folio
allocation, we can extend the 'huge=' option to allow huge folios of any size.
The semantics of the 'huge=' mount option, illustrated below, are:
huge=never: no huge folios of any size
huge=always: huge folios of any size
huge=within_size: like 'always' but respect the i_size
huge=advise: like 'always' if requested with fadvise()/madvise()
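As a usage illustration only, a minimal sketch that selects one of these modes
at mount time via mount(2); the /mnt/tmpfs target and the size=64M option are
arbitrary assumptions for the example, and it must run with CAP_SYS_ADMIN:

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* "huge=" accepts never/always/within_size/advise, as above. */
            if (mount("tmpfs", "/mnt/tmpfs", "tmpfs", 0,
                      "huge=within_size,size=64M") != 0) {
                    perror("mount");
                    return 1;
            }
            puts("tmpfs mounted with huge=within_size");
            return 0;
    }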
Note: for tmpfs mmap() faults, due to the lack of a write size hint, tmpfs
still allocates PMD-sized huge folios if huge=always/within_size/advise is set.
Moreover, the 'deny' and 'force' testing options controlled by
'/sys/kernel/mm/transparent_hugepage/shmem_enabled' retain their original
semantics: 'deny' disables large folios of any size for tmpfs, while 'force'
forcibly enables PMD-sized large folios for tmpfs.
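For completeness, a minimal sketch of flipping that testing knob from
userspace, assuming a kernel built with CONFIG_TRANSPARENT_HUGEPAGE and root
privileges:

    #include <stdio.h>

    int main(void)
    {
            const char *knob =
                    "/sys/kernel/mm/transparent_hugepage/shmem_enabled";
            FILE *f = fopen(knob, "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            /* 'deny' disables large folios for tmpfs; 'force' enables
             * PMD-sized large folios regardless of the mount option. */
            fputs("deny", f);
            fclose(f);
            return 0;
    }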
Co-developed-by: Daniel Gomez <da.gomez@...sung.com>
Signed-off-by: Daniel Gomez <da.gomez@...sung.com>
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/shmem.c | 91 +++++++++++++++++++++++++++++++++++++++++++++---------
1 file changed, 77 insertions(+), 14 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 361da46c4bd5..98503a93a404 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -549,10 +549,50 @@ static bool shmem_confirm_swap(struct address_space *mapping,
static int shmem_huge __read_mostly = SHMEM_HUGE_NEVER;
+/**
+ * shmem_mapping_size_orders - Get allowable folio orders for the given file size.
+ * @mapping: Target address_space.
+ * @index: The page index.
+ * @write_end: The end of the write, which may extend the inode size.
+ *
+ * This returns the allowable folio orders (when supported) based on the file
+ * size which the mapping currently allows at the given index. The index is
+ * relevant due to alignment considerations the mapping might have. The highest
+ * returned order may correspond to a folio smaller than the size passed.
+ *
+ * Return: The orders.
+ */
+static inline unsigned int
+shmem_mapping_size_orders(struct address_space *mapping, pgoff_t index, loff_t write_end)
+{
+ unsigned int order;
+ size_t size;
+
+ if (!mapping_large_folio_support(mapping) || !write_end)
+ return 0;
+
+ /* Calculate the write size based on the write_end */
+ size = write_end - (index << PAGE_SHIFT);
+ order = filemap_get_order(size);
+ if (!order)
+ return 0;
+
+ /* If we're not aligned, allocate a smaller folio */
+ if (index & ((1UL << order) - 1))
+ order = __ffs(index);
+
+ order = min_t(size_t, order, MAX_PAGECACHE_ORDER);
+ return order > 0 ? BIT(order + 1) - 1 : 0;
+}
+
static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
unsigned long vm_flags)
{
+ unsigned long within_size_orders;
+ unsigned int order;
+ pgoff_t aligned_index;
loff_t i_size;
if (HPAGE_PMD_ORDER > MAX_PAGECACHE_ORDER)
@@ -564,15 +604,41 @@ static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index
if (shmem_huge_force || shmem_huge == SHMEM_HUGE_FORCE)
return BIT(HPAGE_PMD_ORDER);
+ /*
+ * The huge order allocation for anon shmem is controlled through
+ * the mTHP interface, so we still use PMD-sized huge order to
+ * check whether global control is enabled.
+ *
+ * For tmpfs mmap()'s huge order, we still use PMD-sized order to
+ * allocate huge pages due to lack of a write size hint.
+ *
+ * Otherwise, tmpfs will derive the highest order hint from the write
+ * and fallocate size, and will then try each allowable huge order in
+ * turn.
+ */
switch (SHMEM_SB(inode->i_sb)->huge) {
case SHMEM_HUGE_ALWAYS:
- return BIT(HPAGE_PMD_ORDER);
- case SHMEM_HUGE_WITHIN_SIZE:
- index = round_up(index + 1, HPAGE_PMD_NR);
- i_size = max(write_end, i_size_read(inode));
- i_size = round_up(i_size, PAGE_SIZE);
- if (i_size >> PAGE_SHIFT >= index)
+ if (vma)
return BIT(HPAGE_PMD_ORDER);
+
+ return shmem_mapping_size_orders(inode->i_mapping, index, write_end);
+ case SHMEM_HUGE_WITHIN_SIZE:
+ if (vma)
+ within_size_orders = BIT(HPAGE_PMD_ORDER);
+ else
+ within_size_orders = shmem_mapping_size_orders(inode->i_mapping,
+ index, write_end);
+
+ order = highest_order(within_size_orders);
+ while (within_size_orders) {
+ aligned_index = round_up(index + 1, 1 << order);
+ i_size = max(write_end, i_size_read(inode));
+ i_size = round_up(i_size, PAGE_SIZE);
+ if (i_size >> PAGE_SHIFT >= aligned_index)
+ return within_size_orders;
+
+ order = next_order(&within_size_orders, order);
+ }
fallthrough;
case SHMEM_HUGE_ADVISE:
if (vm_flags & VM_HUGEPAGE)
@@ -776,6 +842,7 @@ static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
static unsigned int shmem_huge_global_enabled(struct inode *inode, pgoff_t index,
loff_t write_end, bool shmem_huge_force,
+ struct vm_area_struct *vma,
unsigned long vm_flags)
{
return 0;
@@ -1174,7 +1241,7 @@ static int shmem_getattr(struct mnt_idmap *idmap,
inode_unlock_shared(inode);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- if (shmem_huge_global_enabled(inode, 0, 0, false, 0) ==
+ if (shmem_huge_global_enabled(inode, 0, 0, false, NULL, 0) ==
BIT(HPAGE_PMD_ORDER))
stat->blksize = HPAGE_PMD_SIZE;
#endif
@@ -1693,14 +1760,10 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
return 0;
global_orders = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vm_flags);
- if (!vma || !vma_is_anon_shmem(vma)) {
- /*
- * For tmpfs, we now only support PMD sized THP if huge page
- * is enabled, otherwise fallback to order 0.
- */
+ shmem_huge_force, vma, vm_flags);
+ /* For tmpfs (non-anon shmem), use the global huge orders directly */
+ if (!vma || !vma_is_anon_shmem(vma))
return global_orders;
- }
/*
* Following the 'deny' semantics of the top level, force the huge
--
2.39.3