Message-Id: <20230727080502.77895-22-zhengqi.arch@bytedance.com>
Date: Thu, 27 Jul 2023 16:04:34 +0800
From: Qi Zheng <zhengqi.arch@...edance.com>
To: akpm@...ux-foundation.org,
david@...morbit.com,
tkhai@...ru,
vbabka@...e.cz,
roman.gushchin@...ux.dev,
djwong@...nel.org,
brauner@...nel.org,
paulmck@...nel.org,
tytso@....edu,
steven.price@....com,
cel@...nel.org,
senozhatsky@...omium.org,
yujie.liu@...el.com,
gregkh@...uxfoundation.org,
muchun.song@...ux.dev
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
x86@...nel.org,
kvm@...r.kernel.org,
xen-devel@...ts.xenproject.org,
linux-erofs@...ts.ozlabs.org,
linux-f2fs-devel@...ts.sourceforge.net,
cluster-devel@...hat.com,
linux-nfs@...r.kernel.org,
linux-mtd@...ts.infradead.org,
rcu@...r.kernel.org,
netdev@...r.kernel.org,
dri-devel@...ts.freedesktop.org,
linux-arm-msm@...r.kernel.org,
dm-devel@...hat.com,
linux-raid@...r.kernel.org,
linux-bcache@...r.kernel.org,
virtualization@...ts.linux-foundation.org,
linux-fsdevel@...r.kernel.org,
linux-ext4@...r.kernel.org,
linux-xfs@...r.kernel.org,
linux-btrfs@...r.kernel.org,
Qi Zheng <zhengqi.arch@...edance.com>
Subject: [PATCH v3 21/49] mm: thp: dynamically allocate the thp-related shrinkers

Use the new shrinker_alloc(), shrinker_register() and shrinker_free()
APIs to dynamically allocate the thp-zero and thp-deferred_split
shrinkers instead of defining them as static structures.
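
For context, the new interface splits shrinker setup into three steps:
allocate, fill in the callbacks, then register; teardown is a single
shrinker_free(). Below is a minimal sketch of that lifetime (assuming
the shrinker_alloc()/shrinker_register()/shrinker_free() APIs
introduced earlier in this series; my_shrinker, my_count() and
my_scan() are hypothetical names, not part of this patch):

  #include <linux/shrinker.h>

  static struct shrinker *my_shrinker;

  static unsigned long my_count(struct shrinker *shrink,
                                struct shrink_control *sc)
  {
          /* Report the number of freeable objects, or 0 to skip. */
          return 0;
  }

  static unsigned long my_scan(struct shrinker *shrink,
                               struct shrink_control *sc)
  {
          /*
           * Try to free up to sc->nr_to_scan objects; return the
           * number freed, or SHRINK_STOP if nothing can be done.
           */
          return SHRINK_STOP;
  }

  static int __init my_init(void)
  {
          /* Allocation can fail, unlike the old static definition. */
          my_shrinker = shrinker_alloc(0, "my-shrinker");
          if (!my_shrinker)
                  return -ENOMEM;

          my_shrinker->count_objects = my_count;
          my_shrinker->scan_objects = my_scan;
          my_shrinker->seeks = DEFAULT_SEEKS;

          /* Expose to reclaim only after the fields are set up. */
          shrinker_register(my_shrinker);
          return 0;
  }

  static void my_exit(void)
  {
          /* Unregisters (if registered) and frees the shrinker. */
          shrinker_free(my_shrinker);
  }

Note that thp_shrinker_init() below allocates both shrinkers before
registering either one, so the failure path only ever has to free a
shrinker that has not been registered yet.
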
Signed-off-by: Qi Zheng <zhengqi.arch@...edance.com>
---
 mm/huge_memory.c | 69 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 45 insertions(+), 24 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e371503f7746..a0dbb55b4913 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -65,7 +65,11 @@ unsigned long transparent_hugepage_flags __read_mostly =
 	(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG)|
 	(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG);
 
-static struct shrinker deferred_split_shrinker;
+static struct shrinker *deferred_split_shrinker;
+static unsigned long deferred_split_count(struct shrinker *shrink,
+					  struct shrink_control *sc);
+static unsigned long deferred_split_scan(struct shrinker *shrink,
+					 struct shrink_control *sc);
 
 static atomic_t huge_zero_refcount;
 struct page *huge_zero_page __read_mostly;
@@ -229,11 +233,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 	return 0;
 }
 
-static struct shrinker huge_zero_page_shrinker = {
-	.count_objects = shrink_huge_zero_page_count,
-	.scan_objects = shrink_huge_zero_page_scan,
-	.seeks = DEFAULT_SEEKS,
-};
+static struct shrinker *huge_zero_page_shrinker;
 
 #ifdef CONFIG_SYSFS
 static ssize_t enabled_show(struct kobject *kobj,
@@ -454,6 +454,40 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 }
 #endif /* CONFIG_SYSFS */
 
+static int __init thp_shrinker_init(void)
+{
+	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
+	if (!huge_zero_page_shrinker)
+		return -ENOMEM;
+
+	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
+						 SHRINKER_MEMCG_AWARE |
+						 SHRINKER_NONSLAB,
+						 "thp-deferred_split");
+	if (!deferred_split_shrinker) {
+		shrinker_free(huge_zero_page_shrinker);
+		return -ENOMEM;
+	}
+
+	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
+	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
+	huge_zero_page_shrinker->seeks = DEFAULT_SEEKS;
+	shrinker_register(huge_zero_page_shrinker);
+
+	deferred_split_shrinker->count_objects = deferred_split_count;
+	deferred_split_shrinker->scan_objects = deferred_split_scan;
+	deferred_split_shrinker->seeks = DEFAULT_SEEKS;
+	shrinker_register(deferred_split_shrinker);
+
+	return 0;
+}
+
+static void __init thp_shrinker_exit(void)
+{
+	shrinker_free(huge_zero_page_shrinker);
+	shrinker_free(deferred_split_shrinker);
+}
+
 static int __init hugepage_init(void)
 {
 	int err;
@@ -482,12 +516,9 @@ static int __init hugepage_init(void)
 	if (err)
 		goto err_slab;
 
-	err = register_shrinker(&huge_zero_page_shrinker, "thp-zero");
-	if (err)
-		goto err_hzp_shrinker;
-	err = register_shrinker(&deferred_split_shrinker, "thp-deferred_split");
+	err = thp_shrinker_init();
 	if (err)
-		goto err_split_shrinker;
+		goto err_shrinker;
 
 	/*
 	 * By default disable transparent hugepages on smaller systems,
@@ -505,10 +536,8 @@ static int __init hugepage_init(void)
 	return 0;
 
 err_khugepaged:
-	unregister_shrinker(&deferred_split_shrinker);
-err_split_shrinker:
-	unregister_shrinker(&huge_zero_page_shrinker);
-err_hzp_shrinker:
+	thp_shrinker_exit();
+err_shrinker:
 	khugepaged_destroy();
 err_slab:
 	hugepage_exit_sysfs(hugepage_kobj);
@@ -2833,7 +2862,7 @@ void deferred_split_folio(struct folio *folio)
 #ifdef CONFIG_MEMCG
 		if (memcg)
 			set_shrinker_bit(memcg, folio_nid(folio),
-					 deferred_split_shrinker.id);
+					 deferred_split_shrinker->id);
 #endif
 	}
 	spin_unlock_irqrestore(&ds_queue->split_queue_lock, flags);
@@ -2907,14 +2936,6 @@ static unsigned long deferred_split_scan(struct shrinker *shrink,
 	return split;
 }
 
-static struct shrinker deferred_split_shrinker = {
-	.count_objects = deferred_split_count,
-	.scan_objects = deferred_split_scan,
-	.seeks = DEFAULT_SEEKS,
-	.flags = SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE |
-		 SHRINKER_NONSLAB,
-};
-
 #ifdef CONFIG_DEBUG_FS
 static void split_huge_pages_all(void)
 {
--
2.30.2