Message-Id: <20220628092235.91270-3-songmuchun@bytedance.com>
Date: Tue, 28 Jun 2022 17:22:29 +0800
From: Muchun Song <songmuchun@...edance.com>
To: mike.kravetz@...cle.com, david@...hat.com,
akpm@...ux-foundation.org, corbet@....net
Cc: linux-mm@...ck.org, linux-kernel@...r.kernel.org,
linux-doc@...r.kernel.org, duanxiongchun@...edance.com,
Muchun Song <songmuchun@...edance.com>,
Oscar Salvador <osalvador@...e.de>
Subject: [PATCH v2 2/8] mm: hugetlb_vmemmap: optimize vmemmap_optimize_mode handling
We hold another reference to hugetlb_optimize_vmemmap_key when setting
vmemmap_optimize_mode to on, because we use the static key to tell
memory_hotplug that memory_hotplug.memmap_on_memory should be
overridden. However, this rule is gone since PageVmemmapSelfHosted was
introduced. Therefore, we can simplify vmemmap_optimize_mode handling
by not holding another reference to hugetlb_optimize_vmemmap_key. This
also means that we do not incur the extra page_fixed_fake_head checks
if there are no vmemmap-optimized HugeTLB pages after this change.
Signed-off-by: Muchun Song <songmuchun@...edance.com>
Reviewed-by: Oscar Salvador <osalvador@...e.de>
Reviewed-by: Mike Kravetz <mike.kravetz@...cle.com>
---
include/linux/page-flags.h | 6 ++---
mm/hugetlb_vmemmap.c | 65 +++++-----------------------------------------
2 files changed, 9 insertions(+), 62 deletions(-)
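
Not part of the patch: a minimal reviewer's sketch of the static-key
pattern the commit message describes, assuming the key is incremented
once per HugeTLB page whose vmemmap is actually optimized and
decremented when it is restored (the real inc/dec sites live in
mm/hugetlb_vmemmap.c). The helper names mark_page_vmemmap_optimized(),
mark_page_vmemmap_restored() and fake_head_check() are hypothetical,
for illustration only.

/* Sketch only, not applied code. */
#include <linux/jump_label.h>
#include <linux/mm_types.h>

/* Starts off; no overhead until at least one page is optimized. */
DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);

/* Hypothetical helper: one HugeTLB page had its vmemmap pages freed. */
static void mark_page_vmemmap_optimized(void)
{
        static_branch_inc(&hugetlb_optimize_vmemmap_key);
}

/* Hypothetical helper: the vmemmap pages of such a page were restored. */
static void mark_page_vmemmap_restored(void)
{
        static_branch_dec(&hugetlb_optimize_vmemmap_key);
}

/*
 * Mirrors the page_fixed_fake_head() fast path: with no optimized
 * pages in the system the static branch is patched out, so this
 * compiles down to a plain "return page".
 */
static const struct page *fake_head_check(const struct page *page)
{
        if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
                return page;
        /* ... fake-head fixup, only needed for optimized vmemmap ... */
        return page;
}

Because the key now counts optimized pages rather than mirroring a
mode flag, toggling the boot parameter or sysctl no longer touches the
key at all; it only gates whether newly allocated HugeTLB pages get
optimized.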
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 2455405ab82b..b44cc24d7496 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -205,8 +205,7 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- hugetlb_optimize_vmemmap_key);
+DECLARE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
/*
* If the feature of optimizing vmemmap pages associated with each HugeTLB
@@ -226,8 +225,7 @@ DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
*/
static __always_inline const struct page *page_fixed_fake_head(const struct page *page)
{
- if (!static_branch_maybe(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- &hugetlb_optimize_vmemmap_key))
+ if (!static_branch_unlikely(&hugetlb_optimize_vmemmap_key))
return page;
/*
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 6d9801bb3fec..0c2f15a35d62 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -23,42 +23,15 @@
#define RESERVE_VMEMMAP_NR 1U
#define RESERVE_VMEMMAP_SIZE (RESERVE_VMEMMAP_NR << PAGE_SHIFT)
-enum vmemmap_optimize_mode {
- VMEMMAP_OPTIMIZE_OFF,
- VMEMMAP_OPTIMIZE_ON,
-};
-
-DEFINE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON,
- hugetlb_optimize_vmemmap_key);
+DEFINE_STATIC_KEY_FALSE(hugetlb_optimize_vmemmap_key);
EXPORT_SYMBOL(hugetlb_optimize_vmemmap_key);
-static enum vmemmap_optimize_mode vmemmap_optimize_mode =
+static bool vmemmap_optimize_enabled =
IS_ENABLED(CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP_DEFAULT_ON);
-static void vmemmap_optimize_mode_switch(enum vmemmap_optimize_mode to)
-{
- if (vmemmap_optimize_mode == to)
- return;
-
- if (to == VMEMMAP_OPTIMIZE_OFF)
- static_branch_dec(&hugetlb_optimize_vmemmap_key);
- else
- static_branch_inc(&hugetlb_optimize_vmemmap_key);
- WRITE_ONCE(vmemmap_optimize_mode, to);
-}
-
static int __init hugetlb_vmemmap_early_param(char *buf)
{
- bool enable;
- enum vmemmap_optimize_mode mode;
-
- if (kstrtobool(buf, &enable))
- return -EINVAL;
-
- mode = enable ? VMEMMAP_OPTIMIZE_ON : VMEMMAP_OPTIMIZE_OFF;
- vmemmap_optimize_mode_switch(mode);
-
- return 0;
+ return kstrtobool(buf, &vmemmap_optimize_enabled);
}
early_param("hugetlb_free_vmemmap", hugetlb_vmemmap_early_param);
@@ -100,7 +73,7 @@ int hugetlb_vmemmap_alloc(struct hstate *h, struct page *head)
static unsigned int vmemmap_optimizable_pages(struct hstate *h,
struct page *head)
{
- if (READ_ONCE(vmemmap_optimize_mode) == VMEMMAP_OPTIMIZE_OFF)
+ if (!READ_ONCE(vmemmap_optimize_enabled))
return 0;
if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG)) {
@@ -191,7 +164,6 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
if (!is_power_of_2(sizeof(struct page))) {
pr_warn_once("cannot optimize vmemmap pages because \"struct page\" crosses page boundaries\n");
- static_branch_disable(&hugetlb_optimize_vmemmap_key);
return;
}
@@ -212,36 +184,13 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
}
#ifdef CONFIG_PROC_SYSCTL
-static int hugetlb_optimize_vmemmap_handler(struct ctl_table *table, int write,
- void *buffer, size_t *length,
- loff_t *ppos)
-{
- int ret;
- enum vmemmap_optimize_mode mode;
- static DEFINE_MUTEX(sysctl_mutex);
-
- if (write && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- mutex_lock(&sysctl_mutex);
- mode = vmemmap_optimize_mode;
- table->data = &mode;
- ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
- if (write && !ret)
- vmemmap_optimize_mode_switch(mode);
- mutex_unlock(&sysctl_mutex);
-
- return ret;
-}
-
static struct ctl_table hugetlb_vmemmap_sysctls[] = {
{
.procname = "hugetlb_optimize_vmemmap",
- .maxlen = sizeof(enum vmemmap_optimize_mode),
+ .data = &vmemmap_optimize_enabled,
+ .maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = hugetlb_optimize_vmemmap_handler,
- .extra1 = SYSCTL_ZERO,
- .extra2 = SYSCTL_ONE,
+ .proc_handler = proc_dobool,
},
{ }
};
--
2.11.0