Message-Id: <03c77c20a8a6ccdd90678dcc6bf7d4aeaa9d29ad.1721720891.git.baolin.wang@linux.alibaba.com>
Date: Wed, 24 Jul 2024 15:04:00 +0800
From: Baolin Wang <baolin.wang@...ux.alibaba.com>
To: akpm@...ux-foundation.org,
	hughd@...gle.com
Cc: willy@...radead.org,
	david@...hat.com,
	21cnbao@...il.com,
	ryan.roberts@....com,
	ziy@...dia.com,
	ioworker0@...il.com,
	da.gomez@...sung.com,
	p.raghav@...sung.com,
	baolin.wang@...ux.alibaba.com,
	linux-mm@...ck.org,
	linux-kernel@...r.kernel.org
Subject: [RFC PATCH 3/3] mm: shmem: use mTHP interface to control huge orders for tmpfs

For writable mmap() faults on tmpfs, the mTHP interface now controls
which huge orders are allowed, while 'huge_shmem_orders_inherit'
maintains backward compatibility with the top-level interface.
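
As a rough illustration, here is a standalone userspace model (not kernel
code) of how that order mask is assembled; the names mirror the kernel's
huge_shmem_orders_* bitmasks populated from the per-size shmem_enabled
sysfs knobs, and the example values below are made up:

#include <stdio.h>

/* Made-up per-order bitmasks standing in for huge_shmem_orders_always/
 * within_size/madvise/inherit in mm/shmem.c. */
static unsigned long orders_always      = 0;
static unsigned long orders_within_size = 1UL << 4;	/* e.g. 64K enabled */
static unsigned long orders_madvise     = 0;
static unsigned long orders_inherit     = 1UL << 9;	/* PMD order */

static unsigned long allowable_orders(int within_size, int madvised,
				      int global_huge)
{
	unsigned long mask = orders_always;

	if (within_size)
		mask |= orders_within_size;
	if (madvised)
		mask |= orders_madvise;
	if (global_huge)	/* backward compatibility with 'huge=' */
		mask |= orders_inherit;
	return mask;
}

int main(void)
{
	/* within-size case, no MADV_HUGEPAGE, top-level huge enabled */
	printf("order mask: 0x%lx\n", allowable_orders(1, 0, 1));
	return 0;
}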

For the write() and fallocate() paths on tmpfs, get the highest order
hint based on the length of the write or fallocate, then try each
allowable huge order filtered by the mTHP interfaces, if set.
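
For example, a minimal userspace sketch of that arithmetic (size_order()
here is a simplified stand-in, not the kernel's shmem_mapping_size_order(),
which also accounts for the index alignment within the mapping):

#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PMD_ORDER	9	/* assumes 4K pages and a 2M PMD */

/* Highest order whose folio still fits in the requested length,
 * capped at the PMD order; index alignment is elided here. */
static unsigned int size_order(unsigned long len)
{
	unsigned int order = 0;

	while (order < HPAGE_PMD_ORDER &&
	       (1UL << (order + 1 + PAGE_SHIFT)) <= len)
		order++;
	return order;
}

int main(void)
{
	unsigned long len = 256 * 1024;			/* 256K request */
	unsigned int order = size_order(len);		/* -> order 6 */
	unsigned long mask = (1UL << (order + 1)) - 1;	/* orders 0..6 */

	printf("hint order %u, candidate order mask 0x%lx\n", order, mask);
	return 0;
}

The resulting BIT(order + 1) - 1 mask is then intersected with the
mTHP-enabled order mask in shmem_allowable_huge_orders() below.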

Signed-off-by: Baolin Wang <baolin.wang@...ux.alibaba.com>
---
 mm/memory.c |  4 ++--
 mm/shmem.c  | 42 ++++++++++++++++++++++--------------------
 2 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/mm/memory.c b/mm/memory.c
index 802d0d8a40f9..3a7f43c66db7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4877,10 +4877,10 @@ vm_fault_t finish_fault(struct vm_fault *vmf)
 
 	/*
 	 * Using per-page fault to maintain the uffd semantics, and same
-	 * approach also applies to non-anonymous-shmem faults to avoid
+	 * approach also applies to non-shmem/tmpfs faults to avoid
 	 * inflating the RSS of the process.
 	 */
-	if (!vma_is_anon_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
+	if (!vma_is_shmem(vma) || unlikely(userfaultfd_armed(vma))) {
 		nr_pages = 1;
 	} else if (nr_pages > 1) {
 		pgoff_t idx = folio_page_idx(folio, page);
diff --git a/mm/shmem.c b/mm/shmem.c
index cc0c1b790267..8e60cc566196 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1692,26 +1692,6 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED))
 		return 0;
 
-	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
-						vma, vm_flags);
-	if (!vma || !vma_is_anon_shmem(vma)) {
-		/*
-		 * For tmpfs, if top level huge page is enabled, we just allow
-		 * PMD size THP to keep interface backward compatibility.
-		 */
-		if (global_huge)
-			return BIT(HPAGE_PMD_ORDER);
-
-		/*
-		 * Otherwise, get a highest order hint based on the size of
-		 * write and fallocate paths, then will try each allowable
-		 * huge orders.
-		 */
-		order = shmem_mapping_size_order(inode->i_mapping, index,
-						 len, SHMEM_SB(inode->i_sb));
-		return BIT(order + 1) - 1;
-	}
-
 	/*
 	 * Following the 'deny' semantics of the top level, force the huge
 	 * option off from all mounts.
@@ -1742,9 +1722,31 @@ unsigned long shmem_allowable_huge_orders(struct inode *inode,
 	if (vm_flags & VM_HUGEPAGE)
 		mask |= READ_ONCE(huge_shmem_orders_madvise);
 
+	global_huge = shmem_huge_global_enabled(inode, index, shmem_huge_force,
+						vma, vm_flags);
 	if (global_huge)
 		mask |= READ_ONCE(huge_shmem_orders_inherit);
 
+	/*
+	 * For the huge orders allowed by writable mmap() faults on tmpfs,
+	 * the mTHP interface is used to control the allowable huge orders,
+	 * while 'huge_shmem_orders_inherit' maintains backward compatibility
+	 * with the top-level interface.
+	 *
+	 * For the huge orders allowed by the write() and fallocate() paths
+	 * on tmpfs, get the highest order hint based on the length of the
+	 * write or fallocate, then try each allowable huge order filtered
+	 * by the mTHP interfaces, if set.
+	 */
+	if (!vma && !global_huge) {
+		int highest_order = shmem_mapping_size_order(inode->i_mapping, index, len,
+							     SHMEM_SB(inode->i_sb));
+
+		if (!mask)
+			return highest_order > 0 ? BIT(highest_order + 1) - 1 : 0;
+
+		mask &= BIT(highest_order + 1) - 1;
+	}
 	return orders & mask;
 }
 
-- 
2.39.3

