Message-ID: <20171212122526.GD15970@localhost.localdomain>
Date: Tue, 12 Dec 2017 20:25:26 +0800
From: Chao Fan <fanc.fnst@...fujitsu.com>
To: <linux-kernel@...r.kernel.org>, <x86@...nel.org>, <hpa@...or.com>,
	<tglx@...utronix.de>, <mingo@...hat.com>, <bhe@...hat.com>,
	<keescook@...omium.org>, <yasu.isimatu@...il.com>
CC: <indou.takao@...fujitsu.com>, <caoj.fnst@...fujitsu.com>,
	<douly.fnst@...fujitsu.com>
Subject: Re: [PATCH v4 1/4] kaslr: add immovable_mem=nn[KMG]@ss[KMG] to specify extracting memory

On Tue, Dec 12, 2017 at 08:07:02PM +0800, Chao Fan wrote:
>In the current code, KASLR may choose a memory region in movable nodes
>to extract the kernel, which makes those nodes impossible to
>hot-remove. To solve this, let the user specify the memory regions in
>immovable nodes. Create immovable_mem to store these regions, from
>which KASLR should choose.
>
>Also rename "handle_mem_memmap" to "handle_mem_filter", since it no
>longer handles only the memmap parameter.
>"immovable_mem=" only works together with "movable_node", so it does
>nothing on its own. If "movable_node" is specified without
>"immovable_mem=", disable KASLR.
>
>Multiple regions can be specified, comma delimited. To limit memory
>usage, at most 4 regions are supported. 4 regions cover at least 2
>nodes, which is enough to extract the kernel into.
>

I tried to reuse the code like this:

@@ -129,7 +142,7 @@ char *skip_spaces(const char *str)
 #include "../../../../lib/cmdline.c"
 
 static int
-parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
+parse_mem_filter(char *p, unsigned long long *start, unsigned long long *size)
 {
 	char *oldp;
 
@@ -149,6 +162,9 @@ parse_memmap(char *p, unsigned long long *start, unsigned long long *size)
 	case '#':
 	case '$':
 	case '!':
+#ifdef CONFIG_MEMORY_HOTPLUG
+	case '%':
+#endif
 		*start = memparse(p + 1, &p);
 		return 0;
 	case '@':
@@ -183,7 +199,7 @@ static void mem_avoid_memmap(char *str)
 		if (k)
 			*k++ = 0;
 
-		rc = parse_memmap(str, &start, &size);
+		rc = parse_mem_filter(str, &start, &size);
 		if (rc < 0)
 			break;
 		str = k;
@@ -206,17 +222,68 @@ static void mem_avoid_memmap(char *str)
 	memmap_too_large = true;
 }
 
-static int handle_mem_memmap(void)
+#ifdef CONFIG_MEMORY_HOTPLUG
+static void parse_immovable_mem_regions(char *str)
+{
+	static int i;
+
+	while (str && (i < MAX_IMMOVABLE_MEM)) {
+		int rc;
+		unsigned long long start, size;
+		char *k = strchr(str, ',');
+
+		if (k)
+			*k++ = 0;
+
+		rc = parse_mem_filter(str, &start, &size);
+		if (rc < 0)
+			break;
+		str = k;
+
+		immovable_mem[i].start = start;
+		immovable_mem[i].size = size;
+		i++;
+	}
+	num_immovable_region = i;
+}
+#else
+static inline void parse_immovable_mem_regions(char *str)
+{
+}
+#endif

That way we could reuse and rename parse_memmap() and then use '%' for
"immovable_mem=". But I found that in this version, '!', '@', '#' and
'$' would also work for "immovable_mem=" just as well as '%'. I didn't
find a good way to avoid that, so I gave up on the reuse.
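
To make the ambiguity concrete, here is a minimal, stand-alone
user-space sketch of the shared parser. This is illustration only, not
boot code: my_memparse() is a rough substitute for the kernel's
memparse(), and the sample strings are invented. It shows that once '%'
joins the shared switch, "immovable_mem=16M#1G" is accepted exactly
like "immovable_mem=16M%1G":

/* Sketch: why a shared suffix switch accepts '#' as readily as '%'. */
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Rough user-space stand-in for the kernel's memparse(). */
static unsigned long long my_memparse(const char *p, char **retp)
{
	unsigned long long ret = strtoull(p, retp, 0);

	switch (**retp) {
	case 'G': case 'g':
		ret <<= 10;
		/* fall through */
	case 'M': case 'm':
		ret <<= 10;
		/* fall through */
	case 'K': case 'k':
		ret <<= 10;
		(*retp)++;
		break;
	default:
		break;
	}
	return ret;
}

static int parse_mem_filter(char *p, unsigned long long *start,
			    unsigned long long *size)
{
	char *oldp = p;

	*size = my_memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	switch (*p) {
	case '#':
	case '$':
	case '!':
	case '%':	/* meant only for "immovable_mem=" */
	case '@':
		*start = my_memparse(p + 1, &p);
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	/* Nothing ties the suffix to the parameter being parsed. */
	char wanted[] = "16M%1G", unwanted[] = "16M#1G";
	unsigned long long start, size;

	if (!parse_mem_filter(wanted, &start, &size))
		printf("%%: start=%#llx size=%#llx\n", start, size);
	if (!parse_mem_filter(unwanted, &start, &size))
		printf("#: start=%#llx size=%#llx\n", start, size);
	return 0;
}

Both calls succeed and print the same start/size pair, which is the
problem described above: the parser alone cannot reject the memmap=-only
suffixes for "immovable_mem=" without extra plumbing.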
Thanks,
Chao Fan

>Signed-off-by: Chao Fan <fanc.fnst@...fujitsu.com>
>---
> arch/x86/boot/compressed/kaslr.c | 112 +++++++++++++++++++++++++++++++++++++--
> 1 file changed, 109 insertions(+), 3 deletions(-)
>
>diff --git a/arch/x86/boot/compressed/kaslr.c b/arch/x86/boot/compressed/kaslr.c
>index 8199a6187251..40f299d8cd34 100644
>--- a/arch/x86/boot/compressed/kaslr.c
>+++ b/arch/x86/boot/compressed/kaslr.c
>@@ -108,6 +108,19 @@ enum mem_avoid_index {
>
> static struct mem_vector mem_avoid[MEM_AVOID_MAX];
>
>+#ifdef CONFIG_MEMORY_HOTPLUG
>+/* Only supporting at most 4 immovable memory regions with kaslr */
>+#define MAX_IMMOVABLE_MEM	4
>+
>+static bool lack_immovable_mem;
>+
>+/* Store the memory regions in immovable node */
>+static struct mem_vector immovable_mem[MAX_IMMOVABLE_MEM];
>+
>+/* The immovable regions user specify, not more than 4 */
>+static int num_immovable_region;
>+#endif
>+
> static bool mem_overlaps(struct mem_vector *one, struct mem_vector *two)
> {
> 	/* Item one is entirely before item two. */
>@@ -206,17 +219,99 @@ static void mem_avoid_memmap(char *str)
> 	memmap_too_large = true;
> }
>
>-static int handle_mem_memmap(void)
>+#ifdef CONFIG_MEMORY_HOTPLUG
>+static int parse_immovable_mem(char *p,
>+			       unsigned long long *start,
>+			       unsigned long long *size)
>+{
>+	char *oldp;
>+
>+	if (!p)
>+		return -EINVAL;
>+
>+	oldp = p;
>+	*size = memparse(p, &p);
>+	if (p == oldp)
>+		return -EINVAL;
>+
>+	switch (*p) {
>+	case '@':
>+		*start = memparse(p + 1, &p);
>+		return 0;
>+	default:
>+		/*
>+		 * If w/o offset, only size specified, immovable_mem=nn[KMG]
>+		 * has the same behaviour as immovable_mem=nn[KMG]@0. It means
>+		 * the region starts from 0.
>+		 */
>+		*start = 0;
>+		return 0;
>+	}
>+
>+	return -EINVAL;
>+}
>+
>+static void parse_immovable_mem_regions(char *str)
>+{
>+	static int i;
>+
>+	while (str && (i < MAX_IMMOVABLE_MEM)) {
>+		int rc;
>+		unsigned long long start, size;
>+		char *k = strchr(str, ',');
>+
>+		if (k)
>+			*k++ = 0;
>+
>+		rc = parse_immovable_mem(str, &start, &size);
>+		if (rc < 0)
>+			break;
>+		str = k;
>+
>+		immovable_mem[i].start = start;
>+		immovable_mem[i].size = size;
>+		i++;
>+	}
>+	num_immovable_region = i;
>+}
>+#else
>+static inline void parse_immovable_mem_regions(char *str)
>+{
>+}
>+#endif
>+
>+static int handle_mem_filter(void)
> {
> 	char *args = (char *)get_cmd_line_ptr();
> 	size_t len = strlen((char *)args);
>+	bool exist_movable_node = false;
> 	char *tmp_cmdline;
> 	char *param, *val;
> 	u64 mem_size;
>
>-	if (!strstr(args, "memmap=") && !strstr(args, "mem="))
>+	if (!strstr(args, "memmap=") && !strstr(args, "mem=") &&
>+	    !strstr(args, "movable_node"))
> 		return 0;
>
>+#ifdef CONFIG_MEMORY_HOTPLUG
>+	if (strstr(args, "movable_node")) {
>+		/*
>+		 * Confirm "movable_node" specified, otherwise
>+		 * "immovable_mem=" doesn't work.
>+		 */
>+		exist_movable_node = true;
>+
>+		/*
>+		 * If only specify "movable_node" without "immovable_mem=",
>+		 * disable KASLR.
>+		 */
>+		if (!strstr(args, "immovable_mem=")) {
>+			lack_immovable_mem = true;
>+			return 0;
>+		}
>+	}
>+#endif
>+
> 	tmp_cmdline = malloc(len + 1);
> 	if (!tmp_cmdline)
> 		error("Failed to allocate space for tmp_cmdline");
>@@ -239,6 +334,9 @@ static int handle_mem_memmap(void)
>
> 		if (!strcmp(param, "memmap")) {
> 			mem_avoid_memmap(val);
>+		} else if (!strcmp(param, "immovable_mem=") &&
>+			   exist_movable_node) {
>+			parse_immovable_mem_regions(val);
> 		} else if (!strcmp(param, "mem")) {
> 			char *p = val;
>
>@@ -378,7 +476,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
> 	/* We don't need to set a mapping for setup_data. */
>
> 	/* Mark the memmap regions we need to avoid */
>-	handle_mem_memmap();
>+	handle_mem_filter();
>
> #ifdef CONFIG_X86_VERBOSE_BOOTUP
> 	/* Make sure video RAM can be used. */
>@@ -673,6 +771,14 @@ static unsigned long find_random_phys_addr(unsigned long minimum,
> 		return 0;
> 	}
>
>+#ifdef CONFIG_MEMORY_HOTPLUG
>+	/* Check if specify "movable_node" without "immovable_mem=". */
>+	if (lack_immovable_mem) {
>+		debug_putstr("Fail KASLR when movable_node specified without immovable_mem=.\n");
>+		return 0;
>+	}
>+#endif
>+
> 	/* Make sure minimum is aligned. */
> 	minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
>
>--
>2.14.3
>
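
As a usage illustration (the sizes and addresses below are invented,
not taken from this thread): with CONFIG_MEMORY_HOTPLUG enabled, a
kernel booted with

	movable_node immovable_mem=2G@4G,2G@16G

would have handle_mem_filter() notice "movable_node", and
parse_immovable_mem_regions() would record two entries in
immovable_mem[] (2G at 4G and 2G at 16G) for KASLR to choose from.
Booting with "movable_node" alone would instead set lack_immovable_mem,
and find_random_phys_addr() would return 0, disabling KASLR as the
patch description says.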