[<prev] [next>] [<thread-prev] [thread-next>] [day] [month] [year] [list]
Message-Id: <bcfa80f6293affdebb7e7bf70200133b65e73a6b.1727338549.git.baolin.wang@linux.alibaba.com>
Date: Thu, 26 Sep 2024 16:27:27 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
hughd@...gle.com
Cc: willy@...radead.org,
david@...hat.com,
wangkefeng.wang@...wei.com,
21cnbao@...il.com,
ryan.roberts@....com,
ioworker0@...il.com,
da.gomez@...sung.com,
baolin.wang@...ux.alibaba.com,
linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [RFC PATCH v2 2/2] mm: shmem: use mTHP interface to control huge orders for tmpfs
For the huge orders allowed by writable mmap() faults on tmpfs,
the mTHP interface is used to control the allowable huge orders,
while 'huge_shmem_orders_inherit' maintains backward compatibility
with the top-level interface.
For the huge orders allowed by the write() and fallocate() paths on
tmpfs, get the highest order hint based on the size of the write or
fallocate, then try each of the allowable huge orders filtered by the
mTHP interfaces if set.
Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
mm/memory.c | 4 ++--
mm/shmem.c | 51 ++++++++++++++++++++++++++-------------------------
2 files changed, 28 insertions(+), 27 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 2366578015ad..99dd75b84605 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5098,10 +5098,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
/*
* Using per-page fault to maintain the uffd semantics, and same
- * approach also applies to non-anonymous-shmem faults to avoid
+ * approach also applies to non shmem/tmpfs faults to avoid
* inflating the RSS of the process.
*/
- if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
+ if (!vma_is_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
nr_pages = 1;
} else if (nr_pages > 1) {
pgoff_t idx = folio_page_idx(folio, page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 6dece90ff421..569d3ab37161 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1721,31 +1721,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
return 0;
- global_huge = shmem_huge_global_enabled(inode, index, write_end,
- shmem_huge_force, vma, vm_flags);
- if (!vma || !vma_is_anon_shmem(vma)) {
- size_t len;
-
- /*
- * For tmpfs, if top level huge page is enabled, we just allow
- * PMD sized THP to keep interface backward compatibility.
- */
- if (global_huge)
- return BIT(HPAGE_PMD_ORDER);
-
- if (!write_end)
- return 0;
-
- /*
- * Otherwise, get a highest order hint based on the size of
- * write and fallocate paths, then will try each allowable
- * huge orders.
- */
- len = write_end - (index << PAGE_SHIFT);
- order = shmem_mapping_size_order(inode->i_mapping, index, len);
- return order > 0 ? BIT(order + 1) - 1 : 0;
- }
-
/*
* Following the 'deny' semantics of the top level, force the huge
* option off from all mounts.
@@ -1776,9 +1751,35 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
if (vm_flags & VM_HUGEPAGE)
mask |= READ_ONCE(huge_shmem_orders_madvise);
+ global_huge = shmem_huge_global_enabled(inode, index, write_end,
+ shmem_huge_force, vma, vm_flags);
if (global_huge)
mask |= READ_ONCE(huge_shmem_orders_inherit);
+ /*
+ * For the huge orders allowed by writable mmap() faults on tmpfs,
+ * the mTHP interface is used to control the allowable huge orders,
+ * while 'huge_shmem_orders_inherit' maintains backward compatibility
+ * with top-level interface.
+ *
+ * For the huge orders allowed by write() and fallocate() paths on tmpfs,
+ * get a highest order hint based on the size of write and fallocate
+ * paths, then will try each allowable huge orders filtered by the mTHP
+ * interfaces if set.
+ */
+ if (!vma && !global_huge) {
+ size_t len;
+
+ if (!write_end)
+ return 0;
+
+ len = write_end - (index << PAGE_SHIFT);
+ order = shmem_mapping_size_order(inode->i_mapping, index, len);
+ if (!mask)
+ return order > 0 ? BIT(order + 1) - 1 : 0;
+
+ mask &= BIT(order + 1) - 1;
+ }
return THP_ORDERS_ALL_FILE_DEFAULT & mask;
}
--
2.39.3
Powered by blists - more mailing lists