Message-Id: <20220824065058.81051-1-lizhe.67@bytedance.com>
Date:   Wed, 24 Aug 2022 14:50:58 +0800
From:   lizhe.67@...edance.com
To:     akpm@...ux-foundation.org, mhocko@...e.com, vbabka@...e.cz,
        mhiramat@...nel.org, keescook@...omium.org, Jason@...c4.com,
        mark-pk.tsai@...iatek.com, rostedt@...dmis.org, corbet@....net
Cc:     linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org, lizefan.x@...edance.com, yuanzhu@...edance.com,
        lizhe.67@...edance.com
Subject: [PATCH v2] page_ext: introduce boot parameter 'early_page_ext'

From: Li Zhe <lizhe.67@...edance.com>

In commit 2f1ee0913ce5 ("Revert "mm: use early_pfn_to_nid in page_ext_init""),
page_ext_init() was moved after page_alloc_init_late() to avoid a boot panic.
As a result, the current kernel cannot track early page allocations, even
when the page structures themselves have been initialized early.

This patch introduces a new boot parameter 'early_page_ext' to resolve this
problem. When it is passed on the kernel command line, page_ext_init() is
moved up and the deferred initialization of struct pages is disabled, so
that early page allocations can be tracked. This is especially useful when
the free memory value right after boot differs between different kernel
boots.
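
For example, on a kernel built with CONFIG_PAGE_EXTENSION=y and
CONFIG_PAGE_OWNER=y, booting with something like

    early_page_ext page_owner=on

should let page_owner also record the allocations made before
page_alloc_init_late(), which are otherwise missed.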

Changelog:

v1->v2:
- use a command-line parameter to move page_ext_init() up instead of
  relying on CONFIG_DEFERRED_STRUCT_PAGE_INIT
- fix the OOM problem[1]

v1 patch: https://lore.kernel.org/lkml/Yv3r6Y1vh+6AbY4+@dhcp22.suse.cz/T/

[1]: https://lore.kernel.org/linux-mm/YwHmXLu5txij+p35@xsang-OptiPlex-9020/

Suggested-by: Michal Hocko <mhocko@...e.com>
Signed-off-by: Li Zhe <lizhe.67@...edance.com>
---
 .../admin-guide/kernel-parameters.txt         |  6 ++++++
 include/linux/page_ext.h                      | 14 ++++++++++---
 init/main.c                                   |  4 +++-
 mm/page_alloc.c                               |  2 ++
 mm/page_ext.c                                 | 21 ++++++++++++++++++-
 5 files changed, 42 insertions(+), 5 deletions(-)

diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index d7f30902fda0..7b5726828ac0 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -1471,6 +1471,12 @@
 			Permit 'security.evm' to be updated regardless of
 			current integrity status.
 
+	early_page_ext [KNL] Boot-time early page_ext initialization option.
+			This boot parameter disables the deferred initialization
+			of struct page and moves page_ext_init() up in
+			order to catch early page allocations. Available with
+			CONFIG_PAGE_EXTENSION=y.
+
 	failslab=
 	fail_usercopy=
 	fail_page_alloc=
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index fabb2e1e087f..3e081cf8a1ec 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -38,19 +38,22 @@ struct page_ext {
 
 extern unsigned long page_ext_size;
 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+extern bool early_page_ext_enable(void);
+#endif
 
 #ifdef CONFIG_SPARSEMEM
 static inline void page_ext_init_flatmem(void)
 {
 }
-extern void page_ext_init(void);
+extern void page_ext_init(bool early);
 static inline void page_ext_init_flatmem_late(void)
 {
 }
 #else
 extern void page_ext_init_flatmem(void);
 extern void page_ext_init_flatmem_late(void);
-static inline void page_ext_init(void)
+static inline void page_ext_init(bool early)
 {
 }
 #endif
@@ -67,6 +70,11 @@ static inline struct page_ext *page_ext_next(struct page_ext *curr)
 #else /* !CONFIG_PAGE_EXTENSION */
 struct page_ext;
 
+static inline bool early_page_ext_enable(void)
+{
+	return false;
+}
+
 static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
 {
 }
@@ -76,7 +84,7 @@ static inline struct page_ext *lookup_page_ext(const struct page *page)
 	return NULL;
 }
 
-static inline void page_ext_init(void)
+static inline void page_ext_init(bool early)
 {
 }
 
diff --git a/init/main.c b/init/main.c
index 91642a4e69be..3760c0326525 100644
--- a/init/main.c
+++ b/init/main.c
@@ -849,6 +849,8 @@ static void __init mm_init(void)
 	pgtable_init();
 	debug_objects_mem_init();
 	vmalloc_init();
+	/* Should be run after vmap initialization */
+	page_ext_init(true);
 	/* Should be run before the first non-init thread is created */
 	init_espfix_bsp();
 	/* Should be run after espfix64 is set up. */
@@ -1606,7 +1608,7 @@ static noinline void __init kernel_init_freeable(void)
 	padata_init();
 	page_alloc_init_late();
 	/* Initialize page ext after all struct pages are initialized. */
-	page_ext_init();
+	page_ext_init(false);
 
 	do_basic_setup();
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e5486d47406e..e580b197aa1e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -482,6 +482,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
 {
 	static unsigned long prev_end_pfn, nr_initialised;
 
+	if (early_page_ext_enable())
+		return false;
 	/*
 	 * prev_end_pfn static that contains the end of previous zone
 	 * No need to protect because called very early in boot before smp_init.
diff --git a/mm/page_ext.c b/mm/page_ext.c
index 3dc715d7ac29..82ba561730ef 100644
--- a/mm/page_ext.c
+++ b/mm/page_ext.c
@@ -85,6 +85,22 @@ unsigned long page_ext_size = sizeof(struct page_ext);
 
 static unsigned long total_usage;
 
+#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
+static bool early_page_ext __meminitdata;
+bool __meminit early_page_ext_enable(void)
+{
+	return early_page_ext;
+}
+#else
+static bool early_page_ext __meminitdata = true;
+#endif
+static int __init setup_early_page_ext(char *str)
+{
+	early_page_ext = true;
+	return 0;
+}
+early_param("early_page_ext", setup_early_page_ext);
+
 static bool __init invoke_need_callbacks(void)
 {
 	int i;
@@ -378,11 +394,14 @@ static int __meminit page_ext_callback(struct notifier_block *self,
 	return notifier_from_errno(ret);
 }
 
-void __init page_ext_init(void)
+void __init page_ext_init(bool early)
 {
 	unsigned long pfn;
 	int nid;
 
+	if (early != early_page_ext)
+		return;
+
 	if (!invoke_need_callbacks())
 		return;
 
-- 
2.20.1
