Message-Id: <20220602182159.248387-8-shy828301@gmail.com>
Date: Thu, 2 Jun 2022 11:21:59 -0700
From: Yang Shi <shy828301@...il.com>
To: vbabka@...e.cz, kirill.shutemov@...ux.intel.com,
akpm@...ux-foundation.org
Cc: shy828301@...il.com, linux-mm@...ck.org,
linux-kernel@...r.kernel.org
Subject: [v2 PATCH 7/7] mm: khugepaged: reorg some khugepaged helpers

The khugepaged_{enabled|always|req_madv} helpers are no longer
khugepaged-specific, so move them to huge_mm.h and rename them to
hugepage_flags_xxx. Remove khugepaged_req_madv since it has no users
left. The open-coded khugepaged_enabled() test in khugepaged_enter()
goes away as well; the hugepage_vma_check() call right below it
already consults hugepage_flags_enabled().

Also move khugepaged_defrag to khugepaged.c since its only caller is
in that file; it doesn't have to live in a header.

Signed-off-by: Yang Shi <shy828301@...il.com>
---
include/linux/huge_mm.h | 8 ++++++++
include/linux/khugepaged.h | 17 +----------------
mm/huge_memory.c | 4 ++--
mm/khugepaged.c | 18 +++++++++++-------
4 files changed, 22 insertions(+), 25 deletions(-)
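
Note for reviewers: the renamed predicates are plain bitmask tests
against transparent_hugepage_flags. Below is a minimal userspace
sketch of the same shape that compiles outside the kernel; the enum
bit positions are illustrative stand-ins, not the kernel's real
values.

#include <stdio.h>

/* Illustrative stand-ins; the kernel's real bit positions differ. */
enum {
	TRANSPARENT_HUGEPAGE_FLAG,		/* sysfs "always" */
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,	/* sysfs "madvise" */
};

static unsigned long transparent_hugepage_flags;

/* Same shape as the macros added to huge_mm.h in this patch */
#define hugepage_flags_enabled() \
	(transparent_hugepage_flags & \
	 ((1UL << TRANSPARENT_HUGEPAGE_FLAG) | \
	  (1UL << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always() \
	(transparent_hugepage_flags & \
	 (1UL << TRANSPARENT_HUGEPAGE_FLAG))

int main(void)
{
	/* "madvise" mode: THP enabled, but not unconditionally */
	transparent_hugepage_flags = 1UL << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG;
	printf("madvise: enabled=%d always=%d\n",
	       !!hugepage_flags_enabled(), !!hugepage_flags_always());

	/* "never" mode: both predicates false */
	transparent_hugepage_flags = 0;
	printf("never:   enabled=%d always=%d\n",
	       !!hugepage_flags_enabled(), !!hugepage_flags_always());
	return 0;
}

In "madvise" mode hugepage_flags_enabled() is true while
hugepage_flags_always() is false, which is exactly the distinction
hugepage_vma_check() relies on further down.
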
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index bbbaf3eba30a..452eaef24948 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -116,6 +116,14 @@ extern struct kobj_attribute shmem_enabled_attr;
extern unsigned long transparent_hugepage_flags;
+#define hugepage_flags_enabled() \
+ (transparent_hugepage_flags & \
+ ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
+ (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
+#define hugepage_flags_always() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_FLAG))
+
/*
* The vma size has to be large enough to hold an aligned HPAGE_PMD_SIZE area.
*/
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index e047be601268..9c3b56132eba 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -24,20 +24,6 @@ static inline void collapse_pte_mapped_thp(struct mm_struct *mm,
}
#endif
-#define khugepaged_enabled() \
- (transparent_hugepage_flags & \
- ((1<<TRANSPARENT_HUGEPAGE_FLAG) | \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
-#define khugepaged_always() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_FLAG))
-#define khugepaged_req_madv() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
-#define khugepaged_defrag() \
- (transparent_hugepage_flags & \
- (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
-
static inline void khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
if (test_bit(MMF_VM_HUGEPAGE, &oldmm->flags))
@@ -53,8 +39,7 @@ static inline void khugepaged_exit(struct mm_struct *mm)
static inline void khugepaged_enter(struct vm_area_struct *vma,
unsigned long vm_flags)
{
- if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- khugepaged_enabled()) {
+ if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) {
if (hugepage_vma_check(vma, vm_flags, false, false))
__khugepaged_enter(vma->vm_mm);
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b95786ada466..866b98a39496 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -102,11 +102,11 @@ bool hugepage_vma_check(struct vm_area_struct *vma,
if (!in_pf && shmem_file(vma->vm_file))
return shmem_huge_enabled(vma);
- if (!khugepaged_enabled())
+ if (!hugepage_flags_enabled())
return false;
/* THP settings require madvise. */
- if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
+ if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
return false;
/* Only regular file is valid */
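
Taken together, the two tests above boil down to the following
decision. This is a condensed userspace-style model of the policy in
hugepage_vma_check() after this patch, reusing the model macros from
the sketch above; the VM_HUGEPAGE value is a stand-in (the real
definition lives in include/linux/mm.h).

#define VM_HUGEPAGE 0x1UL	/* stand-in bit; real value is in mm.h */

/* Global THP mode + per-VMA MADV_HUGEPAGE flag -> THP eligibility */
static int thp_policy_allows(unsigned long vm_flags)
{
	if (!hugepage_flags_enabled())
		return 0;	/* "never": THP is off globally */
	if (!(vm_flags & VM_HUGEPAGE) && !hugepage_flags_always())
		return 0;	/* "madvise" mode, VMA not madvised */
	return 1;		/* "always", or madvised VMA */
}
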
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index ab6183c5489f..2523c085625a 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -472,7 +472,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
unsigned long vm_flags)
{
if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
- khugepaged_enabled()) {
+ hugepage_flags_enabled()) {
if (hugepage_vma_check(vma, vm_flags, false, false))
__khugepaged_enter(vma->vm_mm);
}
@@ -763,6 +763,10 @@ static bool khugepaged_scan_abort(int nid)
return false;
}
+#define khugepaged_defrag() \
+ (transparent_hugepage_flags & \
+ (1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG))
+
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
@@ -860,7 +864,7 @@ static struct page *khugepaged_alloc_hugepage(bool *wait)
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
- } while (unlikely(!hpage) && likely(khugepaged_enabled()));
+ } while (unlikely(!hpage) && likely(hugepage_flags_enabled()));
return hpage;
}
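
The khugepaged_defrag() macro moved in the hunk above feeds
alloc_hugepage_khugepaged_gfpmask(), whose signature appears there as
trailing context; its body is not in this patch, so treat the sketch
below as an assumption about the unchanged code rather than part of
the diff.

/* Assumed body, not part of this diff: defrag decides whether
 * khugepaged's allocations may enter direct reclaim/compaction. */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}
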
@@ -2173,7 +2177,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
- khugepaged_enabled();
+ hugepage_flags_enabled();
}
static int khugepaged_wait_event(void)
@@ -2238,7 +2242,7 @@ static void khugepaged_wait_work(void)
return;
}
- if (khugepaged_enabled())
+ if (hugepage_flags_enabled())
wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
@@ -2269,7 +2273,7 @@ static void set_recommended_min_free_kbytes(void)
int nr_zones = 0;
unsigned long recommended_min;
- if (!khugepaged_enabled()) {
+ if (!hugepage_flags_enabled()) {
calculate_min_free_kbytes();
goto update_wmarks;
}
@@ -2319,7 +2323,7 @@ int start_stop_khugepaged(void)
int err = 0;
mutex_lock(&khugepaged_mutex);
- if (khugepaged_enabled()) {
+ if (hugepage_flags_enabled()) {
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
@@ -2345,7 +2349,7 @@ int start_stop_khugepaged(void)
void khugepaged_min_free_kbytes_update(void)
{
mutex_lock(&khugepaged_mutex);
- if (khugepaged_enabled() && khugepaged_thread)
+ if (hugepage_flags_enabled() && khugepaged_thread)
set_recommended_min_free_kbytes();
mutex_unlock(&khugepaged_mutex);
}
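
One observable result of this reorg: khugepaged.h no longer touches
transparent_hugepage_flags at all. Every global-policy test now goes
through the hugepage_flags_* helpers in huge_mm.h, and the one truly
khugepaged-private predicate, khugepaged_defrag(), is file-local to
mm/khugepaged.c.
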
--
2.26.3