Message-Id: <20220330153745.20465-2-songmuchun@bytedance.com>
Date:   Wed, 30 Mar 2022 23:37:42 +0800
From:   Muchun Song <songmuchun@...edance.com>
To:     corbet@....net, mike.kravetz@...cle.com, akpm@...ux-foundation.org,
        mcgrof@...nel.org, keescook@...omium.org, yzaikin@...gle.com,
        osalvador@...e.de, david@...hat.com, masahiroy@...nel.org
Cc:     linux-doc@...r.kernel.org, linux-kernel@...r.kernel.org,
        linux-mm@...ck.org, duanxiongchun@...edance.com, smuchun@...il.com,
        Muchun Song <songmuchun@...edance.com>
Subject: [PATCH v6 1/4] mm: hugetlb_vmemmap: introduce STRUCT_PAGE_SIZE_IS_POWER_OF_2

If the size of "struct page" is not a power of two and this
feature is enabled, then a "struct page" can cross a page
boundary and the vmemmap pages of HugeTLB will be corrupted
after remapping (a panic could happen in theory).  This can
only occur with !CONFIG_MEMCG && !CONFIG_SLUB on x86_64, which
is not a common configuration nowadays, so it is not a
real-world issue, just the result of a code review.  However,
we still have to prevent anyone from building such a combined
configuration.  In order to avoid scattering checks like
"is_power_of_2(sizeof(struct page))" throughout
mm/hugetlb_vmemmap.c, introduce STRUCT_PAGE_SIZE_IS_POWER_OF_2
to detect whether the size of struct page is a power of 2 and
make this feature depend on the new macro.  Then no unexpected
configuration can be built.
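
For illustration, with a 64-byte struct page the generated
include/generated/struct_page_size.h produced by the filechk
offsets rule is expected to look roughly like the sketch below
(the value and comment come from the DEFINE() invocation in
mm/struct_page_size.c, so the exact text may differ slightly):

  #ifndef __LINUX_STRUCT_PAGE_SIZE_H__
  #define __LINUX_STRUCT_PAGE_SIZE_H__
  /*
   * DO NOT MODIFY.
   *
   * This file was generated by Kbuild
   */

  #define STRUCT_PAGE_SIZE_IS_POWER_OF_2 1 /* is_power_of_2(sizeof(struct page)) */

  #endif

When the size of struct page is not a power of 2, DEFINE() is
never executed, so the macro is simply absent from the header
and the code guarded by #ifdef STRUCT_PAGE_SIZE_IS_POWER_OF_2
compiles away.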

Signed-off-by: Muchun Song <songmuchun@...edance.com>
Suggested-by: Luis Chamberlain <mcgrof@...nel.org>
---
 Kbuild                     | 15 ++++++++++++++-
 include/linux/mm_types.h   |  2 ++
 include/linux/page-flags.h |  3 ++-
 mm/hugetlb_vmemmap.c       |  8 ++------
 mm/hugetlb_vmemmap.h       |  4 ++--
 mm/struct_page_size.c      | 20 ++++++++++++++++++++
 6 files changed, 42 insertions(+), 10 deletions(-)
 create mode 100644 mm/struct_page_size.c

diff --git a/Kbuild b/Kbuild
index fa441b98c9f6..7f90ba21dd51 100644
--- a/Kbuild
+++ b/Kbuild
@@ -24,6 +24,19 @@ $(timeconst-file): kernel/time/timeconst.bc FORCE
 	$(call filechk,gentimeconst)
 
 #####
+# Generate struct_page_size.h.
+
+struct_page_size-file := include/generated/struct_page_size.h
+
+always-y += $(struct_page_size-file)
+targets += mm/struct_page_size.s
+
+mm/struct_page_size.s: $(timeconst-file) $(bounds-file)
+
+$(struct_page_size-file): mm/struct_page_size.s FORCE
+	$(call filechk,offsets,__LINUX_STRUCT_PAGE_SIZE_H__)
+
+#####
 # Generate asm-offsets.h
 
 offsets-file := include/generated/asm-offsets.h
@@ -31,7 +44,7 @@ offsets-file := include/generated/asm-offsets.h
 always-y += $(offsets-file)
 targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 
-arch/$(SRCARCH)/kernel/asm-offsets.s: $(timeconst-file) $(bounds-file)
+arch/$(SRCARCH)/kernel/asm-offsets.s: $(timeconst-file) $(bounds-file) $(struct_page_size-file)
 
 $(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
 	$(call filechk,offsets,__ASM_OFFSETS_H__)
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 8834e38c06a4..5fbff44a4310 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -223,6 +223,7 @@ struct page {
 #endif
 } _struct_page_alignment;
 
+#ifndef __GENERATING_STRUCT_PAGE_SIZE_IS_POWER_OF_2_H
 /**
  * struct folio - Represents a contiguous set of bytes.
  * @flags: Identical to the page flags.
@@ -844,5 +845,6 @@ enum fault_flag {
 	FAULT_FLAG_INSTRUCTION =	1 << 8,
 	FAULT_FLAG_INTERRUPTIBLE =	1 << 9,
 };
+#endif /* !__GENERATING_STRUCT_PAGE_SIZE_IS_POWER_OF_2_H */
 
 #endif /* _LINUX_MM_TYPES_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index fc4f294cc8d7..15fcdff0e7ee 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -12,6 +12,7 @@
 #ifndef __GENERATING_BOUNDS_H
 #include <linux/mm_types.h>
 #include <generated/bounds.h>
+#include <generated/struct_page_size.h>
 #endif /* !__GENERATING_BOUNDS_H */
 
 /*
@@ -190,7 +191,7 @@ enum pageflags {
 
 #ifndef __GENERATING_BOUNDS_H
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#if defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP) && defined(STRUCT_PAGE_SIZE_IS_POWER_OF_2)
 DECLARE_STATIC_KEY_MAYBE(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP_DEFAULT_ON,
 			 hugetlb_free_vmemmap_enabled_key);
 
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 791626983c2e..951cf83010c7 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -178,6 +178,7 @@
 
 #include "hugetlb_vmemmap.h"
 
+#ifdef STRUCT_PAGE_SIZE_IS_POWER_OF_2
 /*
  * There are a lot of struct page structures associated with each HugeTLB page.
  * For tail pages, the value of compound_head is the same. So we can reuse first
@@ -194,12 +195,6 @@ EXPORT_SYMBOL(hugetlb_free_vmemmap_enabled_key);
 
 static int __init early_hugetlb_free_vmemmap_param(char *buf)
 {
-	/* We cannot optimize if a "struct page" crosses page boundaries. */
-	if (!is_power_of_2(sizeof(struct page))) {
-		pr_warn("cannot free vmemmap pages because \"struct page\" crosses page boundaries\n");
-		return 0;
-	}
-
 	if (!buf)
 		return -EINVAL;
 
@@ -302,3 +297,4 @@ void __init hugetlb_vmemmap_init(struct hstate *h)
 	pr_info("can free %d vmemmap pages for %s\n", h->nr_free_vmemmap_pages,
 		h->name);
 }
+#endif /* STRUCT_PAGE_SIZE_IS_POWER_OF_2 */
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index cb2bef8f9e73..b137fd8b6ba4 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -10,7 +10,7 @@
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_FREE_VMEMMAP
+#if defined(CONFIG_HUGETLB_PAGE_FREE_VMEMMAP) && defined(STRUCT_PAGE_SIZE_IS_POWER_OF_2)
 int alloc_huge_page_vmemmap(struct hstate *h, struct page *head);
 void free_huge_page_vmemmap(struct hstate *h, struct page *head);
 void hugetlb_vmemmap_init(struct hstate *h);
@@ -41,5 +41,5 @@ static inline unsigned int free_vmemmap_pages_per_hpage(struct hstate *h)
 {
 	return 0;
 }
-#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP */
+#endif /* CONFIG_HUGETLB_PAGE_FREE_VMEMMAP && STRUCT_PAGE_SIZE_IS_POWER_OF_2 */
 #endif /* _LINUX_HUGETLB_VMEMMAP_H */
diff --git a/mm/struct_page_size.c b/mm/struct_page_size.c
new file mode 100644
index 000000000000..6fc29c1227a0
--- /dev/null
+++ b/mm/struct_page_size.c
@@ -0,0 +1,20 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generate definitions needed by the preprocessor.
+ * This code generates raw asm output which is post-processed
+ * to extract and format the required data.
+ */
+
+#define __GENERATING_STRUCT_PAGE_SIZE_IS_POWER_OF_2_H
+/* Include headers that define the enum constants of interest */
+#include <linux/mm_types.h>
+#include <linux/kbuild.h>
+#include <linux/log2.h>
+
+int main(void)
+{
+	if (is_power_of_2(sizeof(struct page)))
+		DEFINE(STRUCT_PAGE_SIZE_IS_POWER_OF_2, is_power_of_2(sizeof(struct page)));
+
+	return 0;
+}
-- 
2.11.0
