Message-ID: <20171101113203.27741-2-fanc.fnst@cn.fujitsu.com>
Date: Wed, 1 Nov 2017 19:32:00 +0800
From: Chao Fan <fanc.fnst@...fujitsu.com>
To: <linux-kernel@...r.kernel.org>, <x86@...nel.org>, <hpa@...or.com>,
<tglx@...utronix.de>, <mingo@...hat.com>, <bhe@...hat.com>,
<keescook@...omium.org>, <yasu.isimatu@...il.com>
CC: <indou.takao@...fujitsu.com>, <caoj.fnst@...fujitsu.com>,
<douly.fnst@...fujitsu.com>, Chao Fan <fanc.fnst@...fujitsu.com>
Subject: [PATCH v2 1/4] kaslr: parse the extended movable_node=nn[KMG]@ss[KMG]

Extend movable_node to movable_node=nn[KMG]@ss[KMG].

In the current code, KASLR may choose a memory region in a movable
node to extract the kernel, which prevents that node from being
hot-removed. To solve this, let users specify regions that lie in
immovable nodes. Add immovable_mem[] to store those regions, and have
KASLR choose its target only from them.

Multiple regions can be specified, comma delimited. To limit memory
usage, at most 4 regions are supported. 4 regions cover at least
2 nodes, which is enough for extracting the kernel.
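For example (the sizes and offsets here are illustrative only),
booting with:

    movable_node=4G@16G,1G@20G

asks KASLR to pick its target only from the 4G region starting at
16G and the 1G region starting at 20G, both of which are assumed to
lie in immovable nodes.
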
Signed-off-by: Chao Fan <fanc.fnst@...fujitsu.com>
---
arch/x86/boot/compressed/kaslr.c | 76 +++++++++++++++++++++++++++++++++++++++-
1 file changed, 75 insertions(+), 1 deletion(-)
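
[Reviewer note, not part of the patch: a minimal user-space sketch of
the movable_node=nn[KMG]@ss[KMG][,...] syntax accepted below. Here
strtoull() plus a K/M/G suffix shift stands in for the kernel's
memparse(); all names are illustrative only.]

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a number with an optional K/M/G suffix, like memparse(). */
static unsigned long long memparse_sketch(const char *s, char **retptr)
{
	unsigned long long v = strtoull(s, retptr, 0);

	switch (**retptr) {
	case 'G': case 'g':
		v <<= 10;	/* fall through */
	case 'M': case 'm':
		v <<= 10;	/* fall through */
	case 'K': case 'k':
		v <<= 10;
		(*retptr)++;
	}
	return v;
}

int main(void)
{
	char cmdline[] = "4G@16G,1G";	/* comma-delimited regions */
	char *str = cmdline;
	int i = 0;

	while (str && i < 4) {		/* at most 4 regions */
		unsigned long long start = 0, size;
		char *k = strchr(str, ',');

		if (k)
			*k++ = 0;	/* terminate this region */

		size = memparse_sketch(str, &str);
		if (*str == '@')	/* nn[KMG]@ss[KMG] */
			start = memparse_sketch(str + 1, &str);
		/* else nn[KMG] alone: the region starts at 0 */

		printf("region %d: start=%#llx size=%#llx\n",
		       i, start, size);
		str = k;
		i++;
	}
	return 0;
}
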
diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
index 17818ba6906f..0a591c0023f1 100644
--- a/arch/x86/boot/compressed/kaslr.c
+++ b/arch/x86/boot/compressed/kaslr.c
@@ -107,6 +107,15 @@ enum mem_avoid_index {
 
 static struct mem_vector mem_avoid[MEM_AVOID_MAX];
 
+/* Only support at most 4 immovable memory regions with KASLR */
+#define MAX_IMMOVABLE_MEM	4
+
+/* Store the memory regions in immovable nodes */
+static struct mem_vector immovable_mem[MAX_IMMOVABLE_MEM];
+
+/* Number of immovable regions specified by the user, at most 4 */
+static int num_immovable_region;
+
 static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
 {
 	/* Item one is entirely before item two. */
@@ -167,6 +176,38 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
 	return -EINVAL;
 }
 
+static int parse_immovable_mem(char *p,
+			       unsigned long long *start,
+			       unsigned long long *size)
+{
+	char *oldp;
+
+	if (!p)
+		return -EINVAL;
+
+	oldp = p;
+	*size = memparse(p, &p);
+	if (p == oldp)
+		return -EINVAL;
+
+	/* We support nn[KMG]@ss[KMG] and nn[KMG]. */
+	switch (*p) {
+	case '@':
+		*start = memparse(p + 1, &p);
+		return 0;
+	default:
+		/*
+		 * If no offset is given and only a size is specified,
+		 * movable_node=nn[KMG] behaves like movable_node=nn[KMG]@0,
+		 * i.e. the region starts at 0.
+		 */
+		*start = 0;
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
 static void mem_avoid_memmap(char *str)
 {
 	static int i;
@@ -206,6 +247,36 @@ static void mem_avoid_memmap(char *str)
 		memmap_too_large = true;
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void mem_mark_immovable(char *str)
+{
+	static int i;
+
+	while (str && (i < MAX_IMMOVABLE_MEM)) {
+		int rc;
+		unsigned long long start, size;
+		char *k = strchr(str, ',');
+
+		if (k)
+			*k++ = 0;
+
+		rc = parse_immovable_mem(str, &start, &size);
+		if (rc < 0)
+			break;
+		str = k;
+
+		immovable_mem[i].start = start;
+		immovable_mem[i].size = size;
+		i++;
+	}
+	num_immovable_region = i;
+}
+#else
+static inline void mem_mark_immovable(char *str)
+{
+}
+#endif
+
 static int handle_mem_memmap(void)
 {
 	char *args = (char *)get_cmd_line_ptr();
@@ -214,7 +285,8 @@ static int handle_mem_memmap(void)
 	char *param, *val;
 	u64 mem_size;
 
-	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
+	if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
+	    !strstr(args, "movable_node="))
 		return 0;
 
 	tmp_cmdline = malloc(len + 1);
@@ -239,6 +311,8 @@ static int handle_mem_memmap(void)
 
 		if (!strcmp(param, "memmap")) {
 			mem_avoid_memmap(val);
+		} else if (!strcmp(param, "movable_node")) {
+			mem_mark_immovable(val);
 		} else if (!strcmp(param, "mem")) {
 			char *p = val;
 
--
2.13.6