Date:	Sun, 28 Mar 2010 19:43:00 -0700
From:	Yinghai Lu <yinghai@...nel.org>
To:	Ingo Molnar <mingo@...e.hu>, Thomas Gleixner <tglx@...utronix.de>,
	"H. Peter Anvin" <hpa@...or.com>,
	Andrew Morton <akpm@...ux-foundation.org>,
	David Miller <davem@...emloft.net>,
	Benjamin Herrenschmidt <benh@...nel.crashing.org>,
	Linus Torvalds <torvalds@...ux-foundation.org>
Cc:	Johannes Weiner <hannes@...xchg.org>, linux-kernel@...r.kernel.org,
	linux-arch@...r.kernel.org, Yinghai Lu <yinghai@...nel.org>
Subject: [PATCH 07/31] lmb: Add reserve_lmb/free_lmb

reserve_lmb()/free_lmb() check whether the region array is big enough
before recording the range.

__check_and_double_region_array() doubles the region array when it is
running low on spare slots.  find_lmb_area() is used to find a good
position for the new region array, and the old array is copied over to
the new one.

Arch code should provide its own get_max_mapped() so the new array is
placed at an accessible (already mapped) address.
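
For illustration only (not part of this patch), arch setup code would be
expected to use the new entry points roughly as below; the override of
get_max_mapped() via max_pfn_mapped, the use of __pa_symbol(), and the
address ranges are made-up placeholders:

	/* Hypothetical arch-side sketch, not part of this patch */

	/* Override so the doubled array lands in already-mapped memory */
	u64 __init get_max_mapped(void)
	{
		return (u64)max_pfn_mapped << PAGE_SHIFT;
	}

	void __init arch_setup_lmb_example(void)
	{
		/* Register a RAM range [start, end); grows lmb.memory first if needed */
		add_lmb_memory(0x100000, 0x40000000);

		/* Reserve the kernel image; lmb.reserved is grown first if needed */
		reserve_lmb(__pa_symbol(_text), __pa_symbol(_end), "TEXT DATA BSS");

		/* Punch a hole back out of the reserved map */
		free_lmb(0x800000, 0x900000);
	}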

Signed-off-by: Yinghai Lu <yinghai@...nel.org>
---
 include/linux/lmb.h |    4 ++
 mm/lmb.c            |   89 +++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 93 insertions(+), 0 deletions(-)

diff --git a/include/linux/lmb.h b/include/linux/lmb.h
index 05234bd..95ae3f4 100644
--- a/include/linux/lmb.h
+++ b/include/linux/lmb.h
@@ -83,9 +83,13 @@ lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
 	       lmb_size_pages(type, region_nr);
 }
 
+void reserve_lmb(u64 start, u64 end, char *name);
+void free_lmb(u64 start, u64 end);
+void add_lmb_memory(u64 start, u64 end);
 u64 __find_lmb_area(u64 ei_start, u64 ei_last, u64 start, u64 end,
 			 u64 size, u64 align);
 u64 find_lmb_area(u64 start, u64 end, u64 size, u64 align);
+u64 get_max_mapped(void);
 
 #include <asm/lmb.h>
 
diff --git a/mm/lmb.c b/mm/lmb.c
index d5d5dc4..9798458 100644
--- a/mm/lmb.c
+++ b/mm/lmb.c
@@ -551,6 +551,95 @@ int lmb_find(struct lmb_property *res)
 	return -1;
 }
 
+u64 __weak __init get_max_mapped(void)
+{
+	u64 end = max_low_pfn;
+
+	end <<= PAGE_SHIFT;
+
+	return end;
+}
+
+static void __init __check_and_double_region_array(struct lmb_region *type,
+			 struct lmb_property *static_region,
+			 u64 ex_start, u64 ex_end)
+{
+	u64 start, end, size, mem;
+	struct lmb_property *new, *old;
+	unsigned long rgnsz = type->nr_regions;
+
+	/* Do we have enough slots left ? */
+	if ((rgnsz - type->cnt) > max_t(unsigned long, rgnsz/8, 2))
+		return;
+
+	old = type->region;
+	/* Double the array size */
+	size = sizeof(struct lmb_property) * rgnsz * 2;
+	if (old == static_region)
+		start = 0;
+	else
+		start = __pa(old) + sizeof(struct lmb_property) * rgnsz;
+	end = ex_start;
+	mem = -1ULL;
+	if (start + size < end)
+		mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
+	if (mem == -1ULL) {
+		start = ex_end;
+		end = get_max_mapped();
+		if (start + size < end)
+			mem = find_lmb_area(start, end, size, sizeof(struct lmb_property));
+	}
+	if (mem == -1ULL)
+		panic("can not find more space for lmb.reserved.region array");
+
+	new = __va(mem);
+	/* Copy old to new */
+	memcpy(&new[0], &old[0], sizeof(struct lmb_property) * rgnsz);
+	memset(&new[rgnsz], 0, sizeof(struct lmb_property) * rgnsz);
+
+	memset(&old[0], 0, sizeof(struct lmb_property) * rgnsz);
+	type->region = new;
+	type->nr_regions = rgnsz * 2;
+	printk(KERN_DEBUG "lmb.reserved.region array is doubled to %ld at [%llx - %llx]\n",
+		type->nr_regions, mem, mem + size - 1);
+
+	/* Reserve new array and free old one */
+	lmb_reserve(mem, sizeof(struct lmb_property) * rgnsz * 2);
+	if (old != static_region)
+		lmb_free(__pa(old), sizeof(struct lmb_property) * rgnsz);
+}
+
+void __init add_lmb_memory(u64 start, u64 end)
+{
+	__check_and_double_region_array(&lmb.memory, &lmb_memory_region[0], start, end);
+	lmb_add(start, end - start);
+}
+
+void __init reserve_lmb(u64 start, u64 end, char *name)
+{
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, "reserve_lmb: wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
+	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+	lmb_reserve(start, end - start);
+}
+
+void __init free_lmb(u64 start, u64 end)
+{
+	if (start == end)
+		return;
+
+	if (WARN_ONCE(start > end, "free_lmb: wrong range [%#llx, %#llx]\n", start, end))
+		return;
+
+	/* keep punching hole, could run out of slots too */
+	__check_and_double_region_array(&lmb.reserved, &lmb_reserved_region[0], start, end);
+	lmb_free(start, end - start);
+}
+
 static int __init find_overlapped_early(u64 start, u64 end)
 {
 	int i;
-- 
1.6.4.2

