Message-Id: <20240702023401.41553-1-libang.li@antgroup.com>
Date: Tue, 02 Jul 2024 10:34:01 +0800
From: "Bang Li" <libang.li@...group.com>
To: hughd@...gle.com,
akpm@...ux-foundation.org
Cc: <david@...hat.com>,
<ryan.roberts@....com>,
<wangkefeng.wang@...wei.com>,
<baolin.wang@...ux.alibaba.com>,
<ioworker0@...il.com>,
<ziy@...dia.com>,
<linux-kernel@...r.kernel.org>,
<linux-mm@...ck.org>,
"Bang Li" <libang.li@...group.com>
Subject: [PATCH v2] mm: thp: support "THPeligible" semantics for mTHP with anonymous shmem

After commit 7fb1b252afb5 ("mm: shmem: add mTHP support for anonymous
shmem"), we can configure different policies for anonymous shmem
through the multi-size THP sysfs interface. However, for anonymous
shmem, "THPeligible" currently only indicates whether the mapping is
eligible for allocating PMD-mappable THP pages. Make it follow the
same mTHP semantics as "THPeligible" already does for anonymous
memory.

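For illustration only (not part of the patch), here is a minimal
sketch of how a caller could test a single order against this
per-order result; the helper name vma_thp_order_eligible() is
invented for the example, while thp_vma_allowable_orders() and BIT()
are the existing kernel interfaces used:

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Illustrative helper: true if @order is an allowable THP order for
 * @vma.  With this patch, an anonymous shmem VMA reports the orders
 * enabled through the mTHP sysfs interface instead of a plain
 * PMD-size yes/no.
 */
static bool vma_thp_order_eligible(struct vm_area_struct *vma,
				   unsigned long tva_flags, int order)
{
	unsigned long orders = thp_vma_allowable_orders(vma, vma->vm_flags,
							tva_flags, BIT(order));

	return orders & BIT(order);
}

The "THPeligible" line in /proc/<pid>/smaps is derived from the same
thp_vma_allowable_orders() result, so with this change anonymous shmem
VMAs get the same per-size treatment there as anonymous memory.
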
Signed-off-by: Bang Li <libang.li@...group.com>
---
Changes since v1 [1]:
- Put the anonymous shmem mTHP logic into
  thp_vma_allowable_orders() (per David)

[1] https://lore.kernel.org/linux-mm/20240628104926.34209-1-libang.li@antgroup.com/
---
 include/linux/huge_mm.h | 11 +++++++++++
 mm/huge_memory.c        | 13 +++++++++----
 mm/shmem.c              |  9 +--------
 3 files changed, 21 insertions(+), 12 deletions(-)

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 212cca384d7e..f87136f38aa1 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -267,6 +267,10 @@ unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
 }
 
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
+				struct vm_area_struct *vma, pgoff_t index,
+				bool global_huge);
+
 struct thpsize {
 	struct kobject kobj;
 	struct list_head node;
@@ -460,6 +464,13 @@ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
 	return 0;
 }
 
+static inline unsigned long shmem_allowable_huge_orders(struct inode *inode,
+				struct vm_area_struct *vma, pgoff_t index,
+				bool global_huge)
+{
+	return 0;
+}
+
 #define transparent_hugepage_flags 0UL
 
 #define thp_get_unmapped_area NULL
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c7ce28f6b7f3..ea377bb4af91 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -151,10 +151,15 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
 	 * Must be done before hugepage flags check since shmem has its
 	 * own flags.
 	 */
-	if (!in_pf && shmem_file(vma->vm_file))
-		return shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
-				     !enforce_sysfs, vma->vm_mm, vm_flags)
-			? orders : 0;
+	if (!in_pf && shmem_file(vma->vm_file)) {
+		bool global_huge = shmem_is_huge(file_inode(vma->vm_file), vma->vm_pgoff,
+						 !enforce_sysfs, vma->vm_mm, vm_flags);
+
+		if (!vma_is_anon_shmem(vma))
+			return global_huge ? orders : 0;
+		return shmem_allowable_huge_orders(file_inode(vma->vm_file),
+						   vma, vma->vm_pgoff, global_huge);
+	}
 
 	if (!vma_is_anonymous(vma)) {
 		/*
diff --git a/mm/shmem.c b/mm/shmem.c
index d495c0701a83..aa85df9c662a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1622,7 +1622,7 @@ static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static unsigned long shmem_allowable_huge_orders(struct inode *inode,
+unsigned long shmem_allowable_huge_orders(struct inode *inode,
 				struct vm_area_struct *vma, pgoff_t index,
 				bool global_huge)
 {
@@ -1707,13 +1707,6 @@ static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault
 	return orders;
 }
 #else
-static unsigned long shmem_allowable_huge_orders(struct inode *inode,
-				struct vm_area_struct *vma, pgoff_t index,
-				bool global_huge)
-{
-	return 0;
-}
-
 static unsigned long shmem_suitable_orders(struct inode *inode, struct vm_fault *vmf,
 					   struct address_space *mapping, pgoff_t index,
 					   unsigned long orders)
--
2.19.1.6.gb485710b