Message-ID: <20250612105100.59144-3-p.raghav@samsung.com>
Date: Thu, 12 Jun 2025 12:50:57 +0200
From: Pankaj Raghav <p.raghav@...sung.com>
To: Suren Baghdasaryan <surenb@...gle.com>,
Ryan Roberts <ryan.roberts@....com>,
Mike Rapoport <rppt@...nel.org>,
Michal Hocko <mhocko@...e.com>,
Thomas Gleixner <tglx@...utronix.de>,
Nico Pache <npache@...hat.com>,
Dev Jain <dev.jain@....com>,
Baolin Wang <baolin.wang@...ux.alibaba.com>,
Borislav Petkov <bp@...en8.de>,
Ingo Molnar <mingo@...hat.com>,
"H . Peter Anvin" <hpa@...or.com>,
Vlastimil Babka <vbabka@...e.cz>,
Zi Yan <ziy@...dia.com>,
Dave Hansen <dave.hansen@...ux.intel.com>,
David Hildenbrand <david@...hat.com>,
Lorenzo Stoakes <lorenzo.stoakes@...cle.com>,
Andrew Morton <akpm@...ux-foundation.org>,
"Liam R . Howlett" <Liam.Howlett@...cle.com>,
Jens Axboe <axboe@...nel.dk>
Cc: linux-kernel@...r.kernel.org,
linux-mm@...ck.org,
willy@...radead.org,
x86@...nel.org,
linux-block@...r.kernel.org,
linux-fsdevel@...r.kernel.org,
"Darrick J . Wong" <djwong@...nel.org>,
mcgrof@...nel.org,
gost.dev@...sung.com,
kernel@...kajraghav.com,
hch@....de,
Pankaj Raghav <p.raghav@...sung.com>
Subject: [PATCH 2/5] huge_memory: add huge_zero_page_shrinker_(init|exit) functions

Add huge_zero_page_shrinker_init() and huge_zero_page_shrinker_exit()
helpers around the huge zero page shrinker. As the shrinker will not be
needed when the static PMD zero page is enabled, these two functions can
then become no-ops.

This is a preparation patch for the static PMD zero page. No functional
changes.
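
For illustration, a follow-up enabling the static PMD zero page could
stub both helpers out roughly as below. This is only a sketch; the
CONFIG_STATIC_PMD_ZERO_PAGE symbol name is a placeholder, not something
this patch introduces:

  #ifdef CONFIG_STATIC_PMD_ZERO_PAGE
  /* The static PMD zero page is never freed, so no shrinker is needed. */
  static int huge_zero_page_shrinker_init(void)
  {
          return 0;
  }

  static void huge_zero_page_shrinker_exit(void)
  {
  }
  #endif
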
Signed-off-by: Pankaj Raghav <p.raghav@...sung.com>
---
 mm/huge_memory.c | 38 +++++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 11 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d3e66136e41a..101b67ab2eb6 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -289,6 +289,24 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
 }
 
 static struct shrinker *huge_zero_page_shrinker;
+static int huge_zero_page_shrinker_init(void)
+{
+	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
+	if (!huge_zero_page_shrinker)
+		return -ENOMEM;
+
+	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
+	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
+	shrinker_register(huge_zero_page_shrinker);
+	return 0;
+}
+
+static void huge_zero_page_shrinker_exit(void)
+{
+	shrinker_free(huge_zero_page_shrinker);
+	return;
+}
+
 
 #ifdef CONFIG_SYSFS
 static ssize_t enabled_show(struct kobject *kobj,
@@ -850,33 +868,31 @@ static inline void hugepage_exit_sysfs(struct kobject *hugepage_kobj)
 
 static int __init thp_shrinker_init(void)
 {
-	huge_zero_page_shrinker = shrinker_alloc(0, "thp-zero");
-	if (!huge_zero_page_shrinker)
-		return -ENOMEM;
+	int ret = 0;
 
 	deferred_split_shrinker = shrinker_alloc(SHRINKER_NUMA_AWARE |
 						 SHRINKER_MEMCG_AWARE |
 						 SHRINKER_NONSLAB,
 						 "thp-deferred_split");
-	if (!deferred_split_shrinker) {
-		shrinker_free(huge_zero_page_shrinker);
+	if (!deferred_split_shrinker)
 		return -ENOMEM;
-	}
-
-	huge_zero_page_shrinker->count_objects = shrink_huge_zero_page_count;
-	huge_zero_page_shrinker->scan_objects = shrink_huge_zero_page_scan;
-	shrinker_register(huge_zero_page_shrinker);
 
 	deferred_split_shrinker->count_objects = deferred_split_count;
 	deferred_split_shrinker->scan_objects = deferred_split_scan;
 	shrinker_register(deferred_split_shrinker);
 
+	ret = huge_zero_page_shrinker_init();
+	if (ret) {
+		shrinker_free(deferred_split_shrinker);
+		return ret;
+	}
+
 	return 0;
 }
 
 static void __init thp_shrinker_exit(void)
 {
-	shrinker_free(huge_zero_page_shrinker);
+	huge_zero_page_shrinker_exit();
 	shrinker_free(deferred_split_shrinker);
 }
 
--
2.49.0