Date:	Thu, 17 Apr 2008 01:07:24 +0100 (IST)
From:	Mel Gorman <mel@....ul.ie>
To:	linux-mm@...ck.org
Cc:	Mel Gorman <mel@....ul.ie>, mingo@...e.hu,
	linux-kernel@...r.kernel.org
Subject: [PATCH 3/4] Make defensive checks around PFN values registered for memory usage


There are a number of different views of how much memory is currently
active: the arch-independent zone-sizing view, the bootmem allocator's
view and SPARSEMEM's view. Architectures register this information at
different times and the views are not necessarily kept in sync,
particularly with respect to SPARSEMEM's limitations.

This patch introduces mminit_validate_physlimits(), which validates and
corrects PFN ranges with respect to SPARSEMEM limitations. Ordinarily,
out-of-range values are fixed up silently, but if mminit_debug_level is
MMINIT_VERIFY or higher a message is printed to dmesg.
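
For illustration, the clamping applied by the new helper behaves like the
following standalone sketch (userspace C, not kernel code; the
MAX_PHYSMEM_BITS and PAGE_SHIFT values are example numbers only and
validate_physlimits() here is a stand-in for the helper added to
mm/sparse.c below):

	#include <stdio.h>

	#define MAX_PHYSMEM_BITS	40	/* example value; arch-specific in reality */
	#define PAGE_SHIFT		12	/* example value */

	static void validate_physlimits(unsigned long *start_pfn,
					unsigned long *end_pfn)
	{
		unsigned long max_sparsemem_pfn =
				1UL << (MAX_PHYSMEM_BITS - PAGE_SHIFT);

		/* A start beyond the limit empties the range entirely */
		if (*start_pfn > max_sparsemem_pfn) {
			*start_pfn = max_sparsemem_pfn;
			*end_pfn = max_sparsemem_pfn;
		}

		/* An end beyond the limit is clamped to the limit */
		if (*end_pfn > max_sparsemem_pfn)
			*end_pfn = max_sparsemem_pfn;
	}

	int main(void)
	{
		/* deliberately pass an end PFN above the example limit */
		unsigned long start = 0, end = 1UL << 30;

		validate_physlimits(&start, &end);
		printf("clamped range: %lu -> %lu\n", start, end);
		return 0;
	}

Callers such as add_active_range(), memory_present() and
init_bootmem_core() simply pass pointers to their start/end PFNs before
using them, so a bogus range is corrected once at registration time
rather than tripping up each consumer separately.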

Signed-off-by: Mel Gorman <mel@....ul.ie>
---

 mm/bootmem.c    |    1 +
 mm/internal.h   |   12 ++++++++++++
 mm/page_alloc.c |    2 ++
 mm/sparse.c     |   37 +++++++++++++++++++++++++++++--------
 4 files changed, 44 insertions(+), 8 deletions(-)

diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.25-rc9-0020_memmap_init_debug/mm/bootmem.c linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/bootmem.c
--- linux-2.6.25-rc9-0020_memmap_init_debug/mm/bootmem.c	2008-04-11 21:32:29.000000000 +0100
+++ linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/bootmem.c	2008-04-17 00:20:47.000000000 +0100
@@ -91,6 +91,7 @@ static unsigned long __init init_bootmem
 	bootmem_data_t *bdata = pgdat->bdata;
 	unsigned long mapsize;
 
+	mminit_validate_physlimits(&start, &end);
 	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
 	bdata->node_boot_start = PFN_PHYS(start);
 	bdata->node_low_pfn = end;
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.25-rc9-0020_memmap_init_debug/mm/internal.h linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/internal.h
--- linux-2.6.25-rc9-0020_memmap_init_debug/mm/internal.h	2008-04-17 00:20:33.000000000 +0100
+++ linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/internal.h	2008-04-17 00:20:47.000000000 +0100
@@ -98,4 +98,16 @@ static inline void mminit_verify_page_li
 {
 }
 #endif /* CONFIG_DEBUG_MEMORY_INIT */
+
+/* mminit_validate_physlimits is independent of CONFIG_DEBUG_MEMORY_INIT */
+#if defined(CONFIG_SPARSEMEM)
+extern void mminit_validate_physlimits(unsigned long *start_pfn,
+				unsigned long *end_pfn);
+#else
+static inline void mminit_validate_physlimits(unsigned long *start_pfn,
+				unsigned long *end_pfn)
+{
+}
+#endif /* CONFIG_SPARSEMEM */
+
 #endif
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.25-rc9-0020_memmap_init_debug/mm/page_alloc.c linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/page_alloc.c
--- linux-2.6.25-rc9-0020_memmap_init_debug/mm/page_alloc.c	2008-04-17 00:20:33.000000000 +0100
+++ linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/page_alloc.c	2008-04-17 00:20:47.000000000 +0100
@@ -3510,6 +3510,8 @@ void __init add_active_range(unsigned in
 			  nid, start_pfn, end_pfn,
 			  nr_nodemap_entries, MAX_ACTIVE_REGIONS);
 
+	mminit_validate_physlimits(&start_pfn, &end_pfn);
+
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
 		if (early_node_map[i].nid != nid)
diff -rup -X /usr/src/patchset-0.6/bin//dontdiff linux-2.6.25-rc9-0020_memmap_init_debug/mm/sparse.c linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/sparse.c
--- linux-2.6.25-rc9-0020_memmap_init_debug/mm/sparse.c	2008-04-16 15:47:34.000000000 +0100
+++ linux-2.6.25-rc9-0025_defensive_pfn_checks/mm/sparse.c	2008-04-17 00:20:47.000000000 +0100
@@ -11,6 +11,7 @@
 #include <asm/dma.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
+#include "internal.h"
 
 /*
  * Permanent SPARSEMEM data:
@@ -146,22 +147,41 @@ static inline int sparse_early_nid(struc
 	return (section->section_mem_map >> SECTION_NID_SHIFT);
 }
 
-/* Record a memory area against a node. */
-void __init memory_present(int nid, unsigned long start, unsigned long end)
+/* Validate the physical addressing limitations of the model */
+void __meminit mminit_validate_physlimits(unsigned long *start_pfn,
+						unsigned long *end_pfn)
 {
-	unsigned long max_arch_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
-	unsigned long pfn;
+	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);
 
 	/*
 	 * Sanity checks - do not allow an architecture to pass
 	 * in larger pfns than the maximum scope of sparsemem:
 	 */
-	if (start >= max_arch_pfn)
-		return;
-	if (end >= max_arch_pfn)
-		end = max_arch_pfn;
+	if (*start_pfn > max_sparsemem_pfn) {
+		mminit_debug_printk(MMINIT_VERIFY, "pfnvalidation",
+			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*start_pfn = max_sparsemem_pfn;
+		*end_pfn = max_sparsemem_pfn;
+	}
+
+	if (*end_pfn > max_sparsemem_pfn) {
+		mminit_debug_printk(MMINIT_VERIFY, "pfnvalidation",
+			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
+			*start_pfn, *end_pfn, max_sparsemem_pfn);
+		WARN_ON_ONCE(1);
+		*end_pfn = max_sparsemem_pfn;
+	}
+}
+
+/* Record a memory area against a node. */
+void __init memory_present(int nid, unsigned long start, unsigned long end)
+{
+	unsigned long pfn;
 
 	start &= PAGE_SECTION_MASK;
+	mminit_validate_physlimits(&start, &end);
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
 		struct mem_section *ms;
@@ -186,6 +206,7 @@ unsigned long __init node_memmap_size_by
 	unsigned long pfn;
 	unsigned long nr_pages = 0;
 
+	mminit_validate_physlimits(&start_pfn, &end_pfn);
 	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
 		if (nid != early_pfn_to_nid(pfn))
 			continue;
--
